From 7b25a54653e5f7a91d50b3d4fb8ce044f34a2d41 Mon Sep 17 00:00:00 2001
From: Ettore Di Giacinto
Date: Tue, 19 Jan 2021 18:29:09 +0100
Subject: [PATCH] Update gomod and vendor

---
 go.mod | 27 +-
 go.sum | 171 +-
 .../containerd/cgroups/stats/v1/metrics.pb.go | 584 +-
 .../cgroups/stats/v1/metrics.pb.txt | 46 +
 .../containerd/cgroups/stats/v1/metrics.proto | 15 +
 .../github.com/containerd/console/.travis.yml | 27 +
 vendor/github.com/containerd/console/LICENSE | 191 +
 .../github.com/containerd/console/README.md | 27 +
 .../github.com/containerd/console/console.go | 81 +
 .../containerd/console/console_linux.go | 280 +
 .../containerd/console/console_unix.go | 158 +
 .../containerd/console/console_windows.go | 216 +
 vendor/github.com/containerd/console/go.mod | 8 +
 vendor/github.com/containerd/console/go.sum | 4 +
 .../containerd/console/tc_darwin.go | 53 +
 .../containerd/console/tc_freebsd.go | 45 +
 .../github.com/containerd/console/tc_linux.go | 49 +
 .../containerd/console/tc_openbsd_cgo.go | 51 +
 .../containerd/console/tc_openbsd_nocgo.go | 47 +
 .../containerd/console/tc_solaris_cgo.go | 51 +
 .../containerd/console/tc_solaris_nocgo.go | 47 +
 .../github.com/containerd/console/tc_unix.go | 91 +
 .../api/services/content/v1/content.pb.go | 5479 ++++++++++
 .../api/services/content/v1/content.proto | 318 +
 .../archive/compression/compression.go | 266 +
 .../containerd/containerd/archive/tar_unix.go | 5 +-
 .../containerd/containers/containers.go | 112 +
 .../containerd/containerd/content/content.go | 182 +
 .../containerd/containerd/content/helpers.go | 237 +
 .../containerd/content/local/locks.go | 51 +
 .../containerd/content/local/readerat.go | 40 +
 .../containerd/content/local/store.go | 678 ++
 .../local/store_unix.go} | 30 +-
 .../containerd/content/local/store_windows.go | 26 +
 .../containerd/content/local/writer.go | 207 +
 .../content/proxy/content_reader.go | 65 +
 .../containerd/content/proxy/content_store.go | 234 +
 .../content/proxy/content_writer.go | 139 +
 .../containerd/contrib/seccomp/seccomp.go | 54 +
 .../contrib/seccomp/seccomp_default.go | 585 +
 .../seccomp/seccomp_default_unsupported.go | 26 +
 .../containerd/containerd/diff/apply/apply.go | 131 +
 .../containerd/diff/apply/apply_linux.go | 134 +
 .../containerd/diff/apply/apply_other.go | 34 +
 .../containerd/containerd/diff/diff.go | 107 +
 .../containerd/containerd/diff/stream.go | 187 +
 .../containerd/containerd/diff/stream_unix.go | 146 +
 .../containerd/diff/stream_windows.go | 165 +
 .../containerd/diff/walking/differ.go | 184 +
 .../containerd/containerd/events/events.go | 81 +
 .../containerd/events/exchange/exchange.go | 251 +
 .../containerd/containerd/filters/adaptor.go | 33 +
 .../containerd/containerd/filters/filter.go | 179 +
 .../containerd/containerd/filters/parser.go | 292 +
 .../containerd/containerd/filters/quote.go | 253 +
 .../containerd/containerd/filters/scanner.go | 297 +
 .../github.com/containerd/containerd/gc/gc.go | 189 +
 .../containerd/identifiers/validate.go | 73 +
 .../containerd/images/annotations.go | 23 +
 .../containerd/images/archive/exporter.go | 468 +
 .../containerd/images/archive/importer.go | 371 +
 .../containerd/images/archive/reference.go | 112 +
 .../containerd/containerd/images/handlers.go | 263 +
 .../containerd/containerd/images/image.go | 386 +
 .../containerd/images/importexport.go | 37 +
 .../containerd/images/mediatypes.go | 126 +
 .../containerd/containerd/labels/validate.go | 37 +
 .../containerd/containerd/leases/context.go | 40 +
 .../containerd/containerd/leases/grpc.go | 58 +
 .../containerd/containerd/leases/id.go | 43 +
 .../containerd/containerd/leases/lease.go | 86 +
 .../containerd/containerd/log/context.go | 30 -
 .../containerd/metadata/adaptors.go | 178 +
 .../containerd/containerd/metadata/bolt.go | 61 +
 .../containerd/metadata/boltutil/helpers.go | 147 +
 .../containerd/containerd/metadata/buckets.go | 272 +
 .../containerd/metadata/containers.go | 457 +
 .../containerd/containerd/metadata/content.go | 891 ++
 .../containerd/containerd/metadata/db.go | 444 +
 .../containerd/containerd/metadata/gc.go | 513 +
 .../containerd/containerd/metadata/images.go | 386 +
 .../containerd/containerd/metadata/leases.go | 495 +
 .../containerd/metadata/migrations.go | 168 +
 .../containerd/metadata/namespaces.go | 195 +
 .../containerd/metadata/snapshot.go | 940 ++
 .../containerd/mount/lookup_unix.go | 53 +
 .../containerd/mount/lookup_unsupported.go | 29 +
 .../containerd/containerd/mount/mount.go | 40 +
 .../containerd/mount/mount_linux.go | 372 +
 .../containerd/containerd/mount/mount_unix.go | 41 +
 .../containerd/mount/mount_windows.go | 105 +
 .../containerd/containerd/mount/mountinfo.go | 56 +
 .../containerd/mount/mountinfo_bsd.go | 63 +
 .../containerd/mount/mountinfo_linux.go | 145 +
 .../containerd/mount/mountinfo_unsupported.go | 34 +
 .../containerd/containerd/mount/temp.go | 73 +
 .../containerd/containerd/mount/temp_unix.go | 64 +
 .../containerd/mount/temp_unsupported.go | 29 +
 .../containerd/namespaces/context.go | 78 +
 .../containerd/containerd/namespaces/grpc.go | 61 +
 .../containerd/containerd/namespaces/store.go | 46 +
 .../containerd/containerd/namespaces/ttrpc.go | 51 +
 .../containerd/containerd/oci/client.go | 38 +
 .../containerd/containerd/oci/spec.go | 253 +
 .../containerd/containerd/oci/spec_opts.go | 1258 +++
 .../containerd/oci/spec_opts_linux.go | 121 +
 .../containerd/oci/spec_opts_unix.go | 120 +
 .../containerd/oci/spec_opts_windows.go | 79 +
 .../containerd/platforms/compare.go | 229 +
 .../containerd/platforms/cpuinfo.go | 122 +
 .../containerd/platforms/database.go | 114 +
 .../containerd/platforms/defaults.go | 38 +
 .../containerd/platforms/defaults_unix.go | 24 +
 .../containerd/platforms/defaults_windows.go | 31 +
 .../containerd/platforms/platforms.go | 278 +
 .../containerd/containerd/plugin/context.go | 146 +
 .../containerd/containerd/plugin/plugin.go | 239 +
 .../containerd/plugin/plugin_go18.go | 62 +
 .../containerd/plugin/plugin_other.go | 24 +
 .../containerd/reference/docker/reference.go | 797 ++
 .../containerd/reference/reference.go | 162 +
 .../containerd/remotes/docker/auth.go | 198 +
 .../containerd/remotes/docker/authorizer.go | 482 +
 .../containerd/remotes/docker/converter.go | 88 +
 .../containerd/remotes/docker/errcode.go | 283 +
 .../containerd/remotes/docker/errdesc.go | 154 +
 .../containerd/remotes/docker/fetcher.go | 202 +
 .../containerd/remotes/docker/handler.go | 154 +
 .../remotes/docker/httpreadseeker.go | 144 +
 .../containerd/remotes/docker/pusher.go | 392 +
 .../containerd/remotes/docker/registry.go | 202 +
 .../containerd/remotes/docker/resolver.go | 611 ++
 .../remotes/docker/schema1/converter.go | 601 +
 .../containerd/remotes/docker/scope.go | 97 +
 .../containerd/remotes/docker/status.go | 67 +
 .../containerd/containerd/remotes/handlers.go | 308 +
 .../containerd/containerd/remotes/resolver.go | 80 +
 .../containerd/containerd/rootfs/apply.go | 187 +
 .../containerd/containerd/rootfs/diff.go | 70 +
 .../containerd/containerd/rootfs/init.go | 117 +
 .../containerd/rootfs/init_linux.go | 130 +
 .../containerd/rootfs/init_other.go | 23 +
 .../content/contentserver/contentserver.go | 463 +
 .../containerd/snapshots/native/native.go | 356 +
 .../containerd/snapshots/overlay/check.go | 88 +
 .../containerd/snapshots/overlay/overlay.go | 513 +
 .../containerd/snapshots/snapshotter.go | 381 +
 .../containerd/snapshots/storage/bolt.go | 648 ++
 .../containerd/snapshots/storage/metastore.go | 115 +
 .../containerd/containerd/sys/mount_linux.go | 52 +-
 .../containerd/containerd/sys/socket_unix.go | 4 +-
 .../containerd/containerd/version/version.go | 34 +
 .../github.com/containerd/go-cni/.travis.yml | 23 +
 vendor/github.com/containerd/go-cni/LICENSE | 201 +
 vendor/github.com/containerd/go-cni/README.md | 60 +
 vendor/github.com/containerd/go-cni/cni.go | 220 +
 vendor/github.com/containerd/go-cni/errors.go | 55 +
 vendor/github.com/containerd/go-cni/go.mod | 14 +
 vendor/github.com/containerd/go-cni/go.sum | 39 +
 vendor/github.com/containerd/go-cni/helper.go | 41 +
 .../github.com/containerd/go-cni/namespace.go | 77 +
 .../containerd/go-cni/namespace_opts.go | 75 +
 vendor/github.com/containerd/go-cni/opts.go | 263 +
 vendor/github.com/containerd/go-cni/result.go | 106 +
 .../github.com/containerd/go-cni/testutils.go | 78 +
 vendor/github.com/containerd/go-cni/types.go | 65 +
 .../github.com/containerd/go-runc/.travis.yml | 20 +
 vendor/github.com/containerd/go-runc/LICENSE | 201 +
 .../github.com/containerd/go-runc/README.md | 25 +
 .../containerd/go-runc/command_linux.go | 56 +
 .../containerd/go-runc/command_other.go | 35 +
 .../github.com/containerd/go-runc/console.go | 165 +
 .../containerd/go-runc/container.go | 30 +
 .../github.com/containerd/go-runc/events.go | 100 +
 vendor/github.com/containerd/go-runc/go.mod | 10 +
 vendor/github.com/containerd/go-runc/go.sum | 9 +
 vendor/github.com/containerd/go-runc/io.go | 218 +
 .../github.com/containerd/go-runc/io_unix.go | 76 +
 .../containerd/go-runc/io_windows.go | 62 +
 .../github.com/containerd/go-runc/monitor.go | 76 +
 vendor/github.com/containerd/go-runc/runc.go | 715 ++
 vendor/github.com/containerd/go-runc/utils.go | 111 +
 vendor/github.com/containerd/ttrpc/.gitignore | 14 +
 .../github.com/containerd/ttrpc/.travis.yml | 23 +
 vendor/github.com/containerd/ttrpc/LICENSE | 201 +
 vendor/github.com/containerd/ttrpc/README.md | 62 +
 vendor/github.com/containerd/ttrpc/channel.go | 153 +
 vendor/github.com/containerd/ttrpc/client.go | 353 +
 vendor/github.com/containerd/ttrpc/codec.go | 42 +
 vendor/github.com/containerd/ttrpc/config.go | 52 +
 vendor/github.com/containerd/ttrpc/go.mod | 14 +
 vendor/github.com/containerd/ttrpc/go.sum | 85 +
 .../github.com/containerd/ttrpc/handshake.go | 50 +
 .../containerd/ttrpc/interceptor.go | 50 +
 .../github.com/containerd/ttrpc/metadata.go | 107 +
 vendor/github.com/containerd/ttrpc/server.go | 485 +
 .../github.com/containerd/ttrpc/services.go | 165 +
 vendor/github.com/containerd/ttrpc/types.go | 63 +
 .../containerd/ttrpc/unixcreds_linux.go | 108 +
 .../github.com/containerd/typeurl/.gitignore | 2 +
 .../github.com/containerd/typeurl/.travis.yml | 19 +
 vendor/github.com/containerd/typeurl/LICENSE | 191 +
 .../github.com/containerd/typeurl/README.md | 19 +
 vendor/github.com/containerd/typeurl/doc.go | 83 +
 vendor/github.com/containerd/typeurl/go.mod | 8 +
 vendor/github.com/containerd/typeurl/go.sum | 7 +
 vendor/github.com/containerd/typeurl/types.go | 168 +
 .../containernetworking/cni/LICENSE | 202 +
 .../containernetworking/cni/libcni/api.go | 497 +
 .../containernetworking/cni/libcni/conf.go | 268 +
 .../cni/pkg/invoke/args.go | 128 +
 .../cni/pkg/invoke/delegate.go | 80 +
 .../cni/pkg/invoke/exec.go | 144 +
 .../cni/pkg/invoke/find.go | 43 +
 .../cni/pkg/invoke/os_unix.go | 20 +
 .../cni/pkg/invoke/os_windows.go | 18 +
 .../cni/pkg/invoke/raw_exec.go | 62 +
 .../cni/pkg/types/020/types.go | 140 +
 .../containernetworking/cni/pkg/types/args.go | 112 +
 .../cni/pkg/types/current/types.go | 293 +
 .../cni/pkg/types/types.go | 199 +
 .../cni/pkg/version/conf.go | 37 +
 .../cni/pkg/version/plugin.go | 144 +
 .../cni/pkg/version/reconcile.go | 49 +
 .../cni/pkg/version/version.go | 83 +
 .../builder/dockerignore/dockerignore.go | 64 +
 .../docker/pkg/chrootarchive/archive.go | 106 +
 .../docker/pkg/chrootarchive/archive_unix.go | 208 +
 .../pkg/chrootarchive/archive_windows.go | 29 +
 .../docker/pkg/chrootarchive/chroot_linux.go | 114 +
 .../docker/pkg/chrootarchive/chroot_unix.go | 16 +
 .../docker/docker/pkg/chrootarchive/diff.go | 23 +
 .../docker/pkg/chrootarchive/diff_unix.go | 130 +
 .../docker/pkg/chrootarchive/diff_windows.go | 45 +
 .../docker/pkg/chrootarchive/init_unix.go | 29 +
 .../docker/pkg/chrootarchive/init_windows.go | 4 +
 .../docker/docker/pkg/locker/README.md | 65 +
 .../docker/docker/pkg/locker/locker.go | 112 +
 .../docker/docker/pkg/reexec/README.md | 5 +
 .../docker/docker/pkg/reexec/command_linux.go | 28 +
 .../docker/docker/pkg/reexec/command_unix.go | 23 +
 .../docker/pkg/reexec/command_unsupported.go | 16 +
 .../docker/pkg/reexec/command_windows.go | 21 +
 .../docker/docker/pkg/reexec/reexec.go | 47 +
 .../docker/docker/pkg/signal/README.md | 1 +
 .../docker/docker/pkg/signal/signal.go | 54 +
 .../docker/docker/pkg/signal/signal_darwin.go | 41 +
 .../docker/pkg/signal/signal_freebsd.go | 43 +
 .../docker/docker/pkg/signal/signal_linux.go | 83 +
 .../docker/pkg/signal/signal_linux_mipsx.go | 84 +
 .../docker/docker/pkg/signal/signal_unix.go | 21 +
 .../docker/pkg/signal/signal_unsupported.go | 10 +
 .../docker/pkg/signal/signal_windows.go | 26 +
 .../docker/docker/pkg/signal/trap.go | 104 +
 vendor/github.com/docker/go-events/.gitignore | 24 +
 .../docker/go-events/CONTRIBUTING.md | 70 +
 vendor/github.com/docker/go-events/LICENSE | 201 +
 .../github.com/docker/go-events/MAINTAINERS | 46 +
 vendor/github.com/docker/go-events/README.md | 117 +
 .../github.com/docker/go-events/broadcast.go | 178 +
 vendor/github.com/docker/go-events/channel.go | 61 +
 vendor/github.com/docker/go-events/errors.go | 10 +
 vendor/github.com/docker/go-events/event.go | 15 +
 vendor/github.com/docker/go-events/filter.go | 52 +
 vendor/github.com/docker/go-events/queue.go | 111 +
 vendor/github.com/docker/go-events/retry.go | 260 +
 vendor/github.com/docker/libnetwork/LICENSE | 202 +
 .../docker/libnetwork/resolvconf/README.md | 1 +
 .../libnetwork/resolvconf/dns/resolvconf.go | 26 +
 .../libnetwork/resolvconf/resolvconf.go | 285 +
 .../docker/libnetwork/types/types.go | 653 ++
 vendor/github.com/genuinetools/img/AUTHORS | 16 +
 vendor/github.com/genuinetools/img/LICENSE | 21 +
 .../genuinetools/img/client/client.go | 57 +
 .../genuinetools/img/client/controller.go | 83 +
 .../genuinetools/img/client/diskusage.go | 26 +
 .../genuinetools/img/client/list.go | 67 +
 .../genuinetools/img/client/prune.go | 61 +
 .../genuinetools/img/client/pull.go | 117 +
 .../genuinetools/img/client/push.go | 38 +
 .../genuinetools/img/client/remove.go | 34 +
 .../genuinetools/img/client/save.go | 53 +
 .../genuinetools/img/client/session.go | 49 +
 .../genuinetools/img/client/solve.go | 68 +
 .../github.com/genuinetools/img/client/tag.go | 68 +
 .../genuinetools/img/client/unpack.go | 75 +
 .../genuinetools/img/client/workeropt.go | 156 +
 .../genuinetools/img/types/types.go | 10 +
 vendor/github.com/gofrs/flock/.gitignore | 24 +
 vendor/github.com/gofrs/flock/.travis.yml | 10 +
 vendor/github.com/gofrs/flock/LICENSE | 27 +
 vendor/github.com/gofrs/flock/README.md | 41 +
 vendor/github.com/gofrs/flock/appveyor.yml | 25 +
 vendor/github.com/gofrs/flock/flock.go | 127 +
 vendor/github.com/gofrs/flock/flock_unix.go | 195 +
 vendor/github.com/gofrs/flock/flock_winapi.go | 76 +
 .../github.com/gofrs/flock/flock_windows.go | 140 +
 vendor/github.com/gogo/googleapis/LICENSE | 203 +
 .../gogo/googleapis/google/rpc/code.pb.go | 257 +
 .../gogo/googleapis/google/rpc/code.proto | 185 +
 .../googleapis/google/rpc/error_details.pb.go | 4904 +++++++++
 .../googleapis/google/rpc/error_details.proto | 200 +
 .../gogo/googleapis/google/rpc/status.pb.go | 731 ++
 .../gogo/googleapis/google/rpc/status.proto | 94 +
 vendor/github.com/gogo/protobuf/types/any.go | 140 +
 .../github.com/gogo/protobuf/types/any.pb.go | 697 ++
 .../github.com/gogo/protobuf/types/api.pb.go | 2143 ++++
 vendor/github.com/gogo/protobuf/types/doc.go | 35 +
 .../gogo/protobuf/types/duration.go | 100 +
 .../gogo/protobuf/types/duration.pb.go | 520 +
 .../gogo/protobuf/types/duration_gogo.go | 100 +
 .../gogo/protobuf/types/empty.pb.go | 465 +
 .../gogo/protobuf/types/field_mask.pb.go | 741 ++
 .../gogo/protobuf/types/protosize.go | 34 +
 .../gogo/protobuf/types/source_context.pb.go | 527 +
 .../gogo/protobuf/types/struct.pb.go | 2280 ++++
 .../gogo/protobuf/types/timestamp.go | 130 +
 .../gogo/protobuf/types/timestamp.pb.go | 542 +
 .../gogo/protobuf/types/timestamp_gogo.go | 94 +
 .../github.com/gogo/protobuf/types/type.pb.go | 3370 ++++++
 .../gogo/protobuf/types/wrappers.pb.go | 2730 +++++
 .../gogo/protobuf/types/wrappers_gogo.go | 300 +
 vendor/github.com/google/shlex/COPYING | 202 +
 vendor/github.com/google/shlex/README | 2 +
 vendor/github.com/google/shlex/shlex.go | 417 +
 .../grpc-ecosystem/grpc-opentracing/LICENSE | 27 +
 .../grpc-ecosystem/grpc-opentracing/PATENTS | 23 +
 .../grpc-opentracing/go/otgrpc/README.md | 57 +
 .../grpc-opentracing/go/otgrpc/client.go | 239 +
 .../grpc-opentracing/go/otgrpc/errors.go | 69 +
 .../grpc-opentracing/go/otgrpc/options.go | 76 +
 .../grpc-opentracing/go/otgrpc/package.go | 5 +
 .../grpc-opentracing/go/otgrpc/server.go | 141 +
 .../grpc-opentracing/go/otgrpc/shared.go | 42 +
 .../hashicorp/go-immutable-radix/.gitignore | 24 +
 .../hashicorp/go-immutable-radix/.travis.yml | 3 +
 .../hashicorp/go-immutable-radix/LICENSE | 363 +
 .../hashicorp/go-immutable-radix/README.md | 41 +
 .../hashicorp/go-immutable-radix/edges.go | 21 +
 .../hashicorp/go-immutable-radix/iradix.go | 657 ++
 .../hashicorp/go-immutable-radix/iter.go | 91 +
 .../hashicorp/go-immutable-radix/node.go | 352 +
 .../hashicorp/go-immutable-radix/raw_iter.go | 78 +
 .../github.com/hashicorp/golang-lru/LICENSE | 362 +
 .../hashicorp/golang-lru/simplelru/lru.go | 177 +
 .../golang-lru/simplelru/lru_interface.go | 39 +
 .../github.com/ishidawataru/sctp/.gitignore | 16 +
 .../github.com/ishidawataru/sctp/.travis.yml | 18 +
 .../github.com/ishidawataru/sctp/GO_LICENSE | 27 +
 vendor/github.com/ishidawataru/sctp/LICENSE | 201 +
 vendor/github.com/ishidawataru/sctp/NOTICE | 3 +
 vendor/github.com/ishidawataru/sctp/README.md | 18 +
 vendor/github.com/ishidawataru/sctp/go.mod | 3 +
 .../ishidawataru/sctp/ipsock_linux.go | 222 +
 vendor/github.com/ishidawataru/sctp/sctp.go | 729 ++
 .../ishidawataru/sctp/sctp_linux.go | 305 +
 .../ishidawataru/sctp/sctp_unsupported.go | 98 +
 .../mitchellh/hashstructure/LICENSE | 21 +
 .../mitchellh/hashstructure/README.md | 65 +
 .../github.com/mitchellh/hashstructure/go.mod | 1 +
 .../mitchellh/hashstructure/hashstructure.go | 358 +
 .../mitchellh/hashstructure/include.go | 15 +
 vendor/github.com/moby/buildkit/AUTHORS | 66 +
 vendor/github.com/moby/buildkit/LICENSE | 201 +
 .../api/services/control/control.pb.go | 6477 +++++++++++
 .../api/services/control/control.proto | 150 +
 .../buildkit/api/services/control/generate.go | 3 +
 .../moby/buildkit/api/types/generate.go | 3 +
 .../moby/buildkit/api/types/worker.pb.go | 929 ++
 .../moby/buildkit/api/types/worker.proto | 24 +
 .../moby/buildkit/cache/blobs/blobs.go | 194 +
 .../moby/buildkit/cache/blobs/compression.go | 122 +
 .../buildkit/cache/contenthash/checksum.go | 909 ++
 .../buildkit/cache/contenthash/checksum.pb.go | 864 ++
 .../buildkit/cache/contenthash/checksum.proto | 30 +
 .../buildkit/cache/contenthash/filehash.go | 101 +
 .../cache/contenthash/filehash_unix.go | 47 +
 .../cache/contenthash/filehash_windows.go | 23 +
 .../buildkit/cache/contenthash/generate.go | 3 +
 .../moby/buildkit/cache/contenthash/path.go | 107 +
 .../moby/buildkit/cache/contenthash/tarsum.go | 63 +
 .../github.com/moby/buildkit/cache/manager.go | 1092 ++
 .../moby/buildkit/cache/metadata.go | 501 +
 .../moby/buildkit/cache/metadata/metadata.go | 394 +
 .../moby/buildkit/cache/migrate_v2.go | 257 +
 vendor/github.com/moby/buildkit/cache/refs.go | 693 ++
 .../moby/buildkit/cache/remotecache/export.go | 142 +
 .../moby/buildkit/cache/remotecache/import.go | 299 +
 .../cache/remotecache/inline/inline.go | 106 +
 .../buildkit/cache/remotecache/local/local.go | 83 +
 .../cache/remotecache/registry/registry.go | 96 +
 .../cache/remotecache/v1/cachestorage.go | 297 +
 .../buildkit/cache/remotecache/v1/chains.go | 158 +
 .../moby/buildkit/cache/remotecache/v1/doc.go | 50 +
 .../buildkit/cache/remotecache/v1/parse.go | 110 +
 .../buildkit/cache/remotecache/v1/spec.go | 35 +
 .../buildkit/cache/remotecache/v1/utils.go | 322 +
 .../moby/buildkit/cache/util/fsutil.go | 139 +
 .../github.com/moby/buildkit/client/build.go | 110 +
 .../moby/buildkit/client/buildid/metadata.go | 29 +
 .../github.com/moby/buildkit/client/client.go | 169 +
 .../moby/buildkit/client/client_unix.go | 19 +
 .../moby/buildkit/client/client_windows.go | 24 +
 .../buildkit/client/connhelper/connhelper.go | 37 +
 .../moby/buildkit/client/diskusage.go | 84 +
 .../moby/buildkit/client/exporters.go | 9 +
 .../github.com/moby/buildkit/client/filter.go | 19 +
 .../github.com/moby/buildkit/client/graph.go | 46 +
 .../moby/buildkit/client/llb/definition.go | 161 +
 .../moby/buildkit/client/llb/exec.go | 654 ++
 .../moby/buildkit/client/llb/fileop.go | 727 ++
 .../client/llb/imagemetaresolver/resolver.go | 109 +
 .../moby/buildkit/client/llb/marshal.go | 112 +
 .../moby/buildkit/client/llb/meta.go | 233 +
 .../moby/buildkit/client/llb/resolver.go | 26 +
 .../moby/buildkit/client/llb/source.go | 428 +
 .../moby/buildkit/client/llb/state.go | 515 +
 .../moby/buildkit/client/ociindex/ociindex.go | 113 +
 .../github.com/moby/buildkit/client/prune.go | 83 +
 .../github.com/moby/buildkit/client/solve.go | 489 +
 .../moby/buildkit/client/workers.go | 70 +
 .../buildkit/cmd/buildkitd/config/config.go | 105 +
 .../buildkit/cmd/buildkitd/config/gcpolicy.go | 31 +
 .../cmd/buildkitd/config/gcpolicy_unix.go | 17 +
 .../cmd/buildkitd/config/gcpolicy_windows.go | 7 +
 .../moby/buildkit/control/control.go | 450 +
 .../moby/buildkit/control/gateway/gateway.go | 154 +
 .../moby/buildkit/executor/executor.go | 39 +
 .../moby/buildkit/executor/oci/hosts.go | 78 +
 .../moby/buildkit/executor/oci/mounts.go | 117 +
 .../moby/buildkit/executor/oci/resolvconf.go | 123 +
 .../moby/buildkit/executor/oci/spec.go | 13 +
 .../moby/buildkit/executor/oci/spec_unix.go | 254 +
 .../moby/buildkit/executor/oci/user.go | 99 +
 .../executor/runcexecutor/executor.go | 337 +
 .../exporter/containerimage/export.go | 319 +
 .../exporter/containerimage/exptypes/types.go | 16 +
 .../exporter/containerimage/writer.go | 523 +
 .../moby/buildkit/exporter/exporter.go | 22 +
 .../moby/buildkit/exporter/local/export.go | 173 +
 .../moby/buildkit/exporter/oci/export.go | 228 +
 .../moby/buildkit/exporter/tar/export.go | 177 +
 .../frontend/dockerfile/builder/build.go | 641 ++
 .../frontend/dockerfile/command/command.go | 46 +
 .../dockerfile/dockerfile2llb/convert.go | 1323 +++
 .../dockerfile2llb/convert_norunmount.go | 16 +
 .../dockerfile2llb/convert_norunnetwork.go | 12 +
 .../dockerfile2llb/convert_norunsecurity.go | 12 +
 .../dockerfile2llb/convert_nosecrets.go | 13 +
 .../dockerfile2llb/convert_nossh.go | 13 +
 .../dockerfile2llb/convert_runmount.go | 156 +
 .../dockerfile2llb/convert_runnetwork.go | 26 +
 .../dockerfile2llb/convert_runsecurity.go | 24 +
 .../dockerfile2llb/convert_secrets.go | 54 +
 .../dockerfile/dockerfile2llb/convert_ssh.go | 42 +
 .../dockerfile2llb/defaultshell_unix.go | 7 +
 .../dockerfile2llb/defaultshell_windows.go | 7 +
 .../dockerfile/dockerfile2llb/directives.go | 38 +
 .../dockerfile/dockerfile2llb/image.go | 79 +
 .../dockerfile/dockerfile2llb/platform.go | 58 +
 .../frontend/dockerfile/instructions/bflag.go | 200 +
 .../dockerfile/instructions/commands.go | 451 +
 .../instructions/commands_nosecrets.go | 7 +
 .../dockerfile/instructions/commands_nossh.go | 7 +
 .../instructions/commands_runmount.go | 263 +
 .../instructions/commands_runnetwork.go | 63 +
 .../instructions/commands_runsecurity.go | 61 +
 .../instructions/commands_secrets.go | 7 +
 .../dockerfile/instructions/commands_ssh.go | 7 +
 .../dockerfile/instructions/errors_unix.go | 9 +
 .../dockerfile/instructions/errors_windows.go | 27 +
 .../frontend/dockerfile/instructions/parse.go | 650 ++
 .../dockerfile/instructions/support.go | 19 +
 .../dockerfile/parser/line_parsers.go | 368 +
 .../frontend/dockerfile/parser/parser.go | 332 +
 .../dockerfile/parser/split_command.go | 118 +
 .../frontend/dockerfile/shell/envVarTest | 238 +
 .../dockerfile/shell/equal_env_unix.go | 10 +
 .../dockerfile/shell/equal_env_windows.go | 10 +
 .../buildkit/frontend/dockerfile/shell/lex.go | 466 +
 .../frontend/dockerfile/shell/wordsTest | 30 +
 .../moby/buildkit/frontend/frontend.go | 32 +
 .../frontend/gateway/client/client.go | 74 +
 .../frontend/gateway/client/result.go | 54 +
 .../frontend/gateway/forwarder/forward.go | 217 +
 .../frontend/gateway/forwarder/frontend.go | 39 +
 .../moby/buildkit/frontend/gateway/gateway.go | 756 ++
 .../frontend/gateway/grpcclient/client.go | 591 +
 .../moby/buildkit/frontend/gateway/pb/caps.go | 139 +
 .../frontend/gateway/pb/gateway.pb.go | 7008 ++++++++++++
 .../frontend/gateway/pb/gateway.proto | 164 +
 .../buildkit/frontend/gateway/pb/generate.go | 3 +
 .../moby/buildkit/frontend/result.go | 25 +
 .../moby/buildkit/identity/randomid.go | 53 +
 .../moby/buildkit/session/auth/auth.go | 27 +
 .../moby/buildkit/session/auth/auth.pb.go | 740 ++
 .../moby/buildkit/session/auth/auth.proto | 19 +
 .../session/auth/authprovider/authprovider.go | 53 +
 .../moby/buildkit/session/auth/generate.go | 3 +
 .../buildkit/session/content/attachable.go | 132 +
 .../moby/buildkit/session/content/caller.go | 91 +
 .../moby/buildkit/session/context.go | 22 +
 .../buildkit/session/filesync/diffcopy.go | 117 +
 .../buildkit/session/filesync/filesync.go | 326 +
 .../buildkit/session/filesync/filesync.pb.go | 675 ++
 .../buildkit/session/filesync/filesync.proto | 20 +
 .../buildkit/session/filesync/generate.go | 3 +
 .../github.com/moby/buildkit/session/grpc.go | 81 +
 .../moby/buildkit/session/grpchijack/dial.go | 162 +
 .../buildkit/session/grpchijack/hijack.go | 15 +
 .../moby/buildkit/session/manager.go | 220 +
 .../moby/buildkit/session/secrets/generate.go | 3 +
 .../moby/buildkit/session/secrets/secrets.go | 30 +
 .../buildkit/session/secrets/secrets.pb.go | 886 ++
 .../buildkit/session/secrets/secrets.proto | 19 +
 .../moby/buildkit/session/session.go | 143 +
 .../moby/buildkit/session/sshforward/copy.go | 65 +
 .../buildkit/session/sshforward/generate.go | 3 +
 .../moby/buildkit/session/sshforward/ssh.go | 118 +
 .../buildkit/session/sshforward/ssh.pb.go | 918 ++
 .../buildkit/session/sshforward/ssh.proto | 22 +
 .../buildkit/session/testutil/testutil.go | 70 +
 .../moby/buildkit/session/upload/generate.go | 3 +
 .../moby/buildkit/session/upload/upload.go | 56 +
 .../moby/buildkit/session/upload/upload.pb.go | 501 +
 .../moby/buildkit/session/upload/upload.proto | 14 +
 .../buildkit/snapshot/containerd/content.go | 82 +
 .../snapshot/containerd/snapshotter.go | 63 +
 .../snapshot/imagerefchecker/checker.go | 135 +
 .../moby/buildkit/snapshot/localmounter.go | 74 +
 .../buildkit/snapshot/localmounter_unix.go | 29 +
 .../buildkit/snapshot/localmounter_windows.go | 26 +
 .../moby/buildkit/snapshot/snapshotter.go | 151 +
 .../solver/bboltcachestorage/storage.go | 459 +
 .../moby/buildkit/solver/cachekey.go | 66 +
 .../moby/buildkit/solver/cachemanager.go | 358 +
 .../moby/buildkit/solver/cachestorage.go | 51 +
 .../moby/buildkit/solver/combinedcache.go | 143 +
 .../github.com/moby/buildkit/solver/edge.go | 950 ++
 .../moby/buildkit/solver/exporter.go | 211 +
 .../github.com/moby/buildkit/solver/index.go | 245 +
 .../buildkit/solver/internal/pipe/pipe.go | 199 +
 .../github.com/moby/buildkit/solver/jobs.go | 806 ++
 .../moby/buildkit/solver/llbsolver/bridge.go | 324 +
 .../buildkit/solver/llbsolver/file/backend.go | 342 +
 .../solver/llbsolver/file/refmanager.go | 70 +
 .../buildkit/solver/llbsolver/file/unpack.go | 61 +
 .../solver/llbsolver/file/user_linux.go | 119 +
 .../solver/llbsolver/file/user_nolinux.go | 14 +
 .../buildkit/solver/llbsolver/ops/build.go | 141 +
 .../buildkit/solver/llbsolver/ops/exec.go | 905 ++
 .../buildkit/solver/llbsolver/ops/file.go | 583 +
 .../solver/llbsolver/ops/fileoptypes/types.go | 28 +
 .../buildkit/solver/llbsolver/ops/source.go | 92 +
 .../moby/buildkit/solver/llbsolver/result.go | 74 +
 .../moby/buildkit/solver/llbsolver/solver.go | 418 +
 .../moby/buildkit/solver/llbsolver/vertex.go | 353 +
 .../buildkit/solver/memorycachestorage.go | 307 +
 .../moby/buildkit/solver/pb/attr.go | 25 +
 .../moby/buildkit/solver/pb/caps.go | 299 +
 .../moby/buildkit/solver/pb/const.go | 25 +
 .../moby/buildkit/solver/pb/generate.go | 3 +
 .../moby/buildkit/solver/pb/ops.pb.go | 9629 +++++++++++++++++
 .../moby/buildkit/solver/pb/ops.proto | 305 +
 .../moby/buildkit/solver/pb/platform.go | 41 +
 .../moby/buildkit/solver/progress.go | 109 +
 .../github.com/moby/buildkit/solver/result.go | 105 +
 .../moby/buildkit/solver/scheduler.go | 410 +
 .../github.com/moby/buildkit/solver/types.go | 214 +
 .../buildkit/source/containerimage/pull.go | 295 +
 .../moby/buildkit/source/git/gitsource.go | 441 +
 .../buildkit/source/git/gitsource_unix.go | 35 +
 .../buildkit/source/git/gitsource_windows.go | 23 +
 .../moby/buildkit/source/gitidentifier.go | 70 +
 .../moby/buildkit/source/http/httpsource.go | 500 +
 .../moby/buildkit/source/http/transport.go | 60 +
 .../moby/buildkit/source/identifier.go | 275 +
 .../moby/buildkit/source/local/local.go | 279 +
 .../moby/buildkit/source/manager.go | 49 +
 .../moby/buildkit/util/apicaps/caps.go | 162 +
 .../moby/buildkit/util/apicaps/pb/caps.pb.go | 570 +
 .../moby/buildkit/util/apicaps/pb/caps.proto | 19 +
 .../moby/buildkit/util/apicaps/pb/generate.go | 3 +
 .../buildkit/util/appcontext/appcontext.go | 41 +
 .../util/appcontext/appcontext_unix.go | 11 +
 .../util/appcontext/appcontext_windows.go | 7 +
 .../util/appdefaults/appdefaults_unix.go | 69 +
 .../util/appdefaults/appdefaults_windows.go | 31 +
 .../buildkit/util/binfmt_misc/386_binary.go | 8 +
 .../buildkit/util/binfmt_misc/386_check.go | 7 +
 .../util/binfmt_misc/386_check_386.go | 7 +
 .../moby/buildkit/util/binfmt_misc/Dockerfile | 56 +
 .../moby/buildkit/util/binfmt_misc/Makefile | 4 +
 .../buildkit/util/binfmt_misc/amd64_binary.go | 8 +
 .../buildkit/util/binfmt_misc/amd64_check.go | 7 +
 .../util/binfmt_misc/amd64_check_amd64.go | 7 +
 .../buildkit/util/binfmt_misc/arm64_binary.go | 8 +
 .../buildkit/util/binfmt_misc/arm64_check.go | 7 +
 .../util/binfmt_misc/arm64_check_arm64.go | 7 +
 .../buildkit/util/binfmt_misc/arm_binary.go | 8 +
 .../buildkit/util/binfmt_misc/arm_check.go | 7 +
 .../util/binfmt_misc/arm_check_arm.go | 7 +
 .../moby/buildkit/util/binfmt_misc/check.go | 42 +
 .../buildkit/util/binfmt_misc/check_unix.go | 14 +
 .../util/binfmt_misc/check_windows.go | 10 +
 .../moby/buildkit/util/binfmt_misc/detect.go | 134 +
 .../util/binfmt_misc/ppc64le_binary.go | 8 +
 .../util/binfmt_misc/ppc64le_check.go | 7 +
 .../util/binfmt_misc/ppc64le_check_ppc64le.go | 7 +
 .../util/binfmt_misc/riscv64_binary.go | 8 +
 .../util/binfmt_misc/riscv64_check.go | 7 +
 .../util/binfmt_misc/riscv64_check_riscv64.go | 7 +
 .../buildkit/util/binfmt_misc/s390x_binary.go | 8 +
 .../buildkit/util/binfmt_misc/s390x_check.go | 7 +
 .../util/binfmt_misc/s390x_check_s390x.go | 7 +
 .../moby/buildkit/util/cond/cond.go | 40 +
 .../moby/buildkit/util/contentutil/buffer.go | 156 +
 .../moby/buildkit/util/contentutil/copy.go | 81 +
 .../moby/buildkit/util/contentutil/fetcher.go | 73 +
 .../util/contentutil/multiprovider.go | 48 +
 .../moby/buildkit/util/contentutil/pusher.go | 122 +
 .../moby/buildkit/util/contentutil/refs.go | 98 +
 .../util/entitlements/entitlements.go | 60 +
 .../entitlements/security/security_linux.go | 163 +
 .../util/flightcontrol/flightcontrol.go | 341 +
 .../moby/buildkit/util/imageutil/config.go | 196 +
 .../moby/buildkit/util/imageutil/schema1.go | 87 +
 .../moby/buildkit/util/leaseutil/manager.go | 75 +
 .../util/network/cniprovider/allowempty.s | 0
 .../buildkit/util/network/cniprovider/cni.go | 120 +
 .../util/network/cniprovider/cni_unsafe.go | 16 +
 .../network/cniprovider/createns_linux.go | 59 +
 .../network/cniprovider/createns_nolinux.go | 9 +
 .../moby/buildkit/util/network/host.go | 28 +
 .../util/network/netproviders/network.go | 50 +
 .../moby/buildkit/util/network/network.go | 19 +
 .../moby/buildkit/util/network/none.go | 26 +
 .../moby/buildkit/util/progress/logs/logs.go | 53 +
 .../buildkit/util/progress/multireader.go | 77 +
 .../buildkit/util/progress/multiwriter.go | 105 +
 .../moby/buildkit/util/progress/progress.go | 256 +
 .../moby/buildkit/util/pull/pull.go | 417 +
 .../moby/buildkit/util/pull/resolver.go | 178 +
 .../moby/buildkit/util/push/push.go | 285 +
 .../moby/buildkit/util/resolver/resolver.go | 219 +
 .../util/rootless/specconv/specconv_linux.go | 40 +
 .../moby/buildkit/util/system/path_unix.go | 14 +
 .../moby/buildkit/util/system/path_windows.go | 37 +
 .../buildkit/util/system/seccomp_linux.go | 29 +
 .../buildkit/util/system/seccomp_nolinux.go | 7 +
 .../buildkit/util/system/seccomp_noseccomp.go | 7 +
 .../moby/buildkit/util/throttle/throttle.go | 58 +
 .../moby/buildkit/util/tracing/multispan.go | 22 +
 .../moby/buildkit/util/tracing/tracing.go | 115 +
 .../moby/buildkit/util/winlayers/applier.go | 187 +
 .../moby/buildkit/util/winlayers/context.go | 19 +
 .../moby/buildkit/util/winlayers/differ.go | 274 +
 .../moby/buildkit/worker/base/worker.go | 550 +
 .../moby/buildkit/worker/cacheresult.go | 100 +
 .../github.com/moby/buildkit/worker/filter.go | 33 +
 .../github.com/moby/buildkit/worker/result.go | 40 +
 .../github.com/moby/buildkit/worker/worker.go | 48 +
 .../moby/buildkit/worker/workercontroller.go | 77 +
 vendor/github.com/moby/sys/mount/doc.go | 4 +
 .../mount/{flags_freebsd.go => flags_bsd.go} | 17 +-
 .../sys/mount/{flags.go => flags_unix.go} | 2 +
 .../moby/sys/mount/flags_unsupported.go | 30 -
 vendor/github.com/moby/sys/mount/go.mod | 4 +-
 vendor/github.com/moby/sys/mount/go.sum | 9 +-
 vendor/github.com/moby/sys/mount/mount.go | 56 -
 .../github.com/moby/sys/mount/mount_unix.go | 87 +
 .../{mounter_freebsd.go => mounter_bsd.go} | 2 +
 .../moby/sys/mount/mounter_unsupported.go | 4 +-
 .../github.com/moby/sys/mount/unmount_unix.go | 22 -
 .../moby/sys/mount/unmount_unsupported.go | 7 -
 vendor/github.com/moby/sys/mountinfo/doc.go | 44 +
 vendor/github.com/moby/sys/mountinfo/go.mod | 2 +
 vendor/github.com/moby/sys/mountinfo/go.sum | 2 +
 .../moby/sys/mountinfo/mounted_linux.go | 58 +
 .../moby/sys/mountinfo/mounted_unix.go | 66 +
 .../moby/sys/mountinfo/mountinfo.go | 34 +-
 ...{mountinfo_freebsd.go => mountinfo_bsd.go} | 19 +-
 .../moby/sys/mountinfo/mountinfo_filters.go | 28 +-
 .../moby/sys/mountinfo/mountinfo_linux.go | 134 +-
 .../sys/mountinfo/mountinfo_unsupported.go | 12 +-
 .../moby/sys/mountinfo/mountinfo_windows.go | 6 +-
 .../image-spec/identity/chainid.go | 67 +
 .../image-spec/identity/helpers.go | 40 +
 .../runc/libcontainer/system/linux.go | 87 +-
 .../runc/libcontainer/system/proc.go | 112 +-
 .../runc/libcontainer/system/setns_linux.go | 40 -
 ...scall_linux_arm.go => syscall_linux_32.go} | 9 +-
 .../libcontainer/system/syscall_linux_386.go | 25 -
 .../libcontainer/system/syscall_linux_64.go | 9 +-
 .../runc/libcontainer/system/sysconfig.go | 2 +-
 .../runc/libcontainer/system/unsupported.go | 18 +
 .../runc/libcontainer/system/xattrs_linux.go | 88 +-
 .../runc/libcontainer/user/lookup.go | 77 +-
 .../runc/libcontainer/user/lookup_unix.go | 114 +
 .../libcontainer/user/lookup_unsupported.go | 21 -
 .../runc/libcontainer/user/lookup_windows.go | 40 +
 .../runc/libcontainer/user/user.go | 201 +-
 .../opentracing-contrib/go-stdlib/LICENSE | 27 +
 .../go-stdlib/nethttp/client.go | 301 +
 .../go-stdlib/nethttp/doc.go | 3 +
 .../go-stdlib/nethttp/server.go | 119 +
 .../opentracing/opentracing-go/.gitignore | 1 +
 .../opentracing/opentracing-go/.travis.yml | 20 +
 .../opentracing/opentracing-go/CHANGELOG.md | 46 +
 .../opentracing/opentracing-go/LICENSE | 201 +
 .../opentracing/opentracing-go/Makefile | 20 +
 .../opentracing/opentracing-go/README.md | 171 +
 .../opentracing/opentracing-go/ext/tags.go | 210 +
 .../opentracing-go/globaltracer.go | 42 +
 .../opentracing/opentracing-go/gocontext.go | 60 +
 .../opentracing/opentracing-go/log/field.go | 269 +
 .../opentracing/opentracing-go/log/util.go | 54 +
 .../opentracing/opentracing-go/noop.go | 64 +
 .../opentracing/opentracing-go/propagation.go | 176 +
 .../opentracing/opentracing-go/span.go | 189 +
 .../opentracing/opentracing-go/tracer.go | 304 +
 .../pelletier/go-toml/.dockerignore | 2 -
 .../github.com/pelletier/go-toml/.gitignore | 3 -
 .../github.com/pelletier/go-toml/.travis.yml | 23 +
 .../pelletier/go-toml/CONTRIBUTING.md | 132 -
 .../github.com/pelletier/go-toml/Dockerfile | 11 -
 .../go-toml/PULL_REQUEST_TEMPLATE.md | 5 -
 vendor/github.com/pelletier/go-toml/README.md | 38 +-
 .../pelletier/go-toml/azure-pipelines.yml | 167 -
 .../github.com/pelletier/go-toml/benchmark.sh | 5 +-
 vendor/github.com/pelletier/go-toml/doc.go | 2 +-
 vendor/github.com/pelletier/go-toml/fuzzit.sh | 26 -
 vendor/github.com/pelletier/go-toml/go.mod | 9 -
 vendor/github.com/pelletier/go-toml/go.sum | 11 -
 .../pelletier/go-toml/keysparsing.go | 150 +-
 vendor/github.com/pelletier/go-toml/lexer.go | 46 +-
 .../github.com/pelletier/go-toml/localtime.go | 281 -
 .../github.com/pelletier/go-toml/marshal.go | 411 +-
 .../go-toml/marshal_OrderPreserve_test.toml | 39 -
 .../pelletier/go-toml/marshal_test.toml | 1 -
 vendor/github.com/pelletier/go-toml/parser.go | 73 +-
 vendor/github.com/pelletier/go-toml/test.sh | 88 +
 vendor/github.com/pelletier/go-toml/token.go | 13 +-
 vendor/github.com/pelletier/go-toml/toml.go | 120 +-
 .../pelletier/go-toml/tomltree_write.go | 302 +-
 vendor/github.com/spf13/viper/.editorconfig | 15 +
 vendor/github.com/spf13/viper/.gitignore | 17 +-
 vendor/github.com/spf13/viper/.golangci.yml | 5 +-
 vendor/github.com/spf13/viper/.travis.yml | 32 -
 vendor/github.com/spf13/viper/README.md | 31 +-
 vendor/github.com/spf13/viper/go.mod | 13 +-
 vendor/github.com/spf13/viper/go.sum | 222 +-
 vendor/github.com/spf13/viper/viper.go | 58 +-
 vendor/github.com/syndtr/gocapability/LICENSE | 24 +
 .../gocapability/capability/capability.go | 133 +
 .../capability/capability_linux.go | 642 ++
 .../capability/capability_noop.go | 19 +
 .../syndtr/gocapability/capability/enum.go | 268 +
 .../gocapability/capability/enum_gen.go | 129 +
 .../gocapability/capability/syscall_linux.go | 154 +
 .../github.com/tonistiigi/fsutil/.travis.yml | 23 +
 vendor/github.com/tonistiigi/fsutil/LICENSE | 22 +
 .../tonistiigi/fsutil/chtimes_linux.go | 20 +
 .../tonistiigi/fsutil/chtimes_nolinux.go | 20 +
 .../github.com/tonistiigi/fsutil/copy/copy.go | 412 +
 .../tonistiigi/fsutil/copy/copy_darwin.go | 84 +
 .../tonistiigi/fsutil/copy/copy_linux.go | 114 +
 .../tonistiigi/fsutil/copy/copy_nowindows.go | 28 +
 .../tonistiigi/fsutil/copy/copy_unix.go | 61 +
 .../tonistiigi/fsutil/copy/copy_windows.go | 48 +
 .../tonistiigi/fsutil/copy/hardlink.go | 27 +
 .../tonistiigi/fsutil/copy/hardlink_unix.go | 17 +
 .../fsutil/copy/hardlink_windows.go | 7 +
 .../tonistiigi/fsutil/copy/mkdir.go | 83 +
 .../tonistiigi/fsutil/copy/mkdir_unix.go | 32 +
 .../tonistiigi/fsutil/copy/mkdir_windows.go | 21 +
 .../tonistiigi/fsutil/copy/stat_bsd.go | 17 +
 .../tonistiigi/fsutil/copy/stat_sysv.go | 17 +
 vendor/github.com/tonistiigi/fsutil/diff.go | 51 +
 .../tonistiigi/fsutil/diff_containerd.go | 197 +
 .../fsutil/diff_containerd_linux.go | 37 +
 .../tonistiigi/fsutil/diskwriter.go | 352 +
 .../tonistiigi/fsutil/diskwriter_unix.go | 52 +
 .../tonistiigi/fsutil/diskwriter_windows.go | 18 +
 .../tonistiigi/fsutil/followlinks.go | 150 +
 vendor/github.com/tonistiigi/fsutil/fs.go | 118 +
 vendor/github.com/tonistiigi/fsutil/go.mod | 28 +
 vendor/github.com/tonistiigi/fsutil/go.sum | 70 +
 .../github.com/tonistiigi/fsutil/hardlinks.go | 47 +
 vendor/github.com/tonistiigi/fsutil/readme.md | 45 +
 .../github.com/tonistiigi/fsutil/receive.go | 276 +
 vendor/github.com/tonistiigi/fsutil/send.go | 206 +
 vendor/github.com/tonistiigi/fsutil/stat.go | 64 +
 .../github.com/tonistiigi/fsutil/stat_unix.go | 71 +
 .../tonistiigi/fsutil/stat_windows.go | 16 +
 .../github.com/tonistiigi/fsutil/tarwriter.go | 76 +
 .../tonistiigi/fsutil/types/generate.go | 3 +
 .../tonistiigi/fsutil/types/stat.go | 7 +
 .../tonistiigi/fsutil/types/stat.pb.go | 929 ++
 .../tonistiigi/fsutil/types/stat.proto | 19 +
 .../tonistiigi/fsutil/types/wire.pb.go | 575 +
 .../tonistiigi/fsutil/types/wire.proto | 21 +
 .../github.com/tonistiigi/fsutil/validator.go | 92 +
 vendor/github.com/tonistiigi/fsutil/walker.go | 230 +
 vendor/github.com/urfave/cli/.travis.yml | 4 +-
 vendor/github.com/urfave/cli/CHANGELOG.md | 504 -
 vendor/github.com/urfave/cli/CONTRIBUTING.md | 18 -
 vendor/github.com/urfave/cli/README.md | 1523 +--
 vendor/github.com/urfave/cli/app.go | 8 +-
 vendor/github.com/urfave/cli/appveyor.yml | 11 +-
 vendor/github.com/urfave/cli/command.go | 82 +-
 vendor/github.com/urfave/cli/flag.go | 2 +-
 vendor/github.com/urfave/cli/help.go | 55 +-
 vendor/github.com/urfave/cli/parse.go | 47 +-
 vendor/go.etcd.io/bbolt/README.md | 11 +-
 vendor/go.etcd.io/bbolt/freelist.go | 57 +-
 vendor/go.etcd.io/bbolt/node.go | 25 +-
 vendor/go.etcd.io/bbolt/page.go | 57 +-
 vendor/go.etcd.io/bbolt/tx.go | 27 +-
 vendor/go.etcd.io/bbolt/unsafe.go | 39 +
 .../x/net/internal/timeseries/timeseries.go | 525 +
 vendor/golang.org/x/net/trace/events.go | 532 +
 vendor/golang.org/x/net/trace/histogram.go | 365 +
 vendor/golang.org/x/net/trace/trace.go | 1130 ++
 .../golang.org/x/sync/semaphore/semaphore.go | 136 +
 vendor/google.golang.org/grpc/.travis.yml | 42 +
 .../google.golang.org/grpc/CODE-OF-CONDUCT.md | 3 +
 vendor/google.golang.org/grpc/CONTRIBUTING.md | 62 +
 vendor/google.golang.org/grpc/GOVERNANCE.md | 1 +
 vendor/google.golang.org/grpc/MAINTAINERS.md | 27 +
 vendor/google.golang.org/grpc/Makefile | 63 +
 vendor/google.golang.org/grpc/README.md | 141 +
 .../grpc/attributes/attributes.go | 70 +
 vendor/google.golang.org/grpc/backoff.go | 58 +
 .../google.golang.org/grpc/backoff/backoff.go | 52 +
 vendor/google.golang.org/grpc/balancer.go | 391 +
 .../grpc/balancer/balancer.go | 454 +
 .../grpc/balancer/base/balancer.go | 286 +
 .../grpc/balancer/base/base.go | 93 +
 .../grpc/balancer/roundrobin/roundrobin.go | 81 +
 .../grpc/balancer_conn_wrappers.go | 271 +
 .../grpc/balancer_v1_wrapper.go | 334 +
 .../grpc_binarylog_v1/binarylog.pb.go | 900 ++
 vendor/google.golang.org/grpc/call.go | 74 +
 vendor/google.golang.org/grpc/clientconn.go | 1534 +++
 vendor/google.golang.org/grpc/codec.go | 50 +
 vendor/google.golang.org/grpc/codegen.sh | 17 +
 .../grpc/connectivity/connectivity.go | 73 +
 .../grpc/credentials/credentials.go | 255 +
 .../grpc/credentials/go12.go | 30 +
 .../grpc/credentials/internal/syscallconn.go | 61 +
 .../internal/syscallconn_appengine.go | 30 +
 .../google.golang.org/grpc/credentials/tls.go | 235 +
 vendor/google.golang.org/grpc/dialoptions.go | 606 ++
 vendor/google.golang.org/grpc/doc.go | 24 +
 .../grpc/encoding/encoding.go | 122 +
 .../grpc/encoding/proto/proto.go | 110 +
 vendor/google.golang.org/grpc/go.mod | 16 +
 vendor/google.golang.org/grpc/go.sum | 64 +
 .../google.golang.org/grpc/grpclog/grpclog.go | 132 +
 .../google.golang.org/grpc/grpclog/logger.go | 87 +
 .../grpc/grpclog/loggerv2.go | 214 +
 .../google.golang.org/grpc/health/client.go | 117 +
 .../grpc/health/grpc_health_v1/health.pb.go | 343 +
 .../grpc/health/regenerate.sh | 33 +
 .../google.golang.org/grpc/health/server.go | 165 +
 vendor/google.golang.org/grpc/install_gae.sh | 6 +
 vendor/google.golang.org/grpc/interceptor.go | 77 +
 .../grpc/internal/backoff/backoff.go | 73 +
 .../grpc/internal/balancerload/load.go | 46 +
 .../grpc/internal/binarylog/binarylog.go | 167 +
 .../internal/binarylog/binarylog_testutil.go | 42 +
 .../grpc/internal/binarylog/env_config.go | 210 +
 .../grpc/internal/binarylog/method_logger.go | 423 +
 .../grpc/internal/binarylog/regenerate.sh | 33 +
 .../grpc/internal/binarylog/sink.go | 162 +
 .../grpc/internal/binarylog/util.go | 41 +
 .../grpc/internal/buffer/unbounded.go | 85 +
 .../grpc/internal/channelz/funcs.go | 739 ++
 .../grpc/internal/channelz/logging.go | 100 +
 .../grpc/internal/channelz/types.go | 702 ++
 .../grpc/internal/channelz/types_linux.go | 53 +
 .../grpc/internal/channelz/types_nonlinux.go | 44 +
 .../grpc/internal/channelz/util_linux.go | 39 +
 .../grpc/internal/channelz/util_nonlinux.go | 26 +
 .../grpc/internal/envconfig/envconfig.go | 38 +
 .../grpc/internal/grpclog/grpclog.go | 118 +
 .../grpc/internal/grpclog/prefixLogger.go | 63 +
 .../grpc/internal/grpcrand/grpcrand.go | 56 +
 .../grpc/internal/grpcsync/event.go | 61 +
 .../grpc/internal/grpcutil/target.go | 55 +
 .../grpc/internal/internal.go | 67 +
 .../internal/resolver/dns/dns_resolver.go | 441 +
 .../grpc/internal/resolver/dns/go113.go | 33 +
 .../resolver/passthrough/passthrough.go | 57 +
 .../grpc/internal/syscall/syscall_linux.go | 114 +
 .../grpc/internal/syscall/syscall_nonlinux.go | 73 +
 .../grpc/internal/transport/bdp_estimator.go | 141 +
 .../grpc/internal/transport/controlbuf.go | 926 ++
 .../grpc/internal/transport/defaults.go | 49 +
 .../grpc/internal/transport/flowcontrol.go | 217 +
 .../grpc/internal/transport/handler_server.go | 462 +
 .../grpc/internal/transport/http2_client.go | 1462 +++
 .../grpc/internal/transport/http2_server.go | 1251 +++
 .../grpc/internal/transport/http_util.go | 677 ++
 .../grpc/internal/transport/log.go | 44 +
 .../grpc/internal/transport/transport.go | 808 ++
 .../grpc/keepalive/keepalive.go | 85 +
 .../grpc/metadata/metadata.go | 209 +
 .../grpc/naming/dns_resolver.go | 293 +
 .../google.golang.org/grpc/naming/naming.go | 68 +
 vendor/google.golang.org/grpc/peer/peer.go | 51 +
 .../google.golang.org/grpc/picker_wrapper.go | 229 +
 vendor/google.golang.org/grpc/pickfirst.go | 159 +
 vendor/google.golang.org/grpc/preloader.go | 64 +
 vendor/google.golang.org/grpc/proxy.go | 152 +
 .../grpc/resolver/resolver.go | 253 +
 .../grpc/resolver_conn_wrapper.go | 222 +
 vendor/google.golang.org/grpc/rpc_util.go | 889 +
 vendor/google.golang.org/grpc/server.go | 1640 +++
 .../google.golang.org/grpc/service_config.go | 434 +
 .../grpc/serviceconfig/serviceconfig.go | 41 +
 .../google.golang.org/grpc/stats/handlers.go | 63 +
 vendor/google.golang.org/grpc/stats/stats.go | 314 +
 vendor/google.golang.org/grpc/stream.go | 1528 +++
 vendor/google.golang.org/grpc/tap/tap.go | 51 +
 vendor/google.golang.org/grpc/trace.go | 123 +
 vendor/google.golang.org/grpc/version.go | 22 +
 vendor/google.golang.org/grpc/vet.sh | 163 +
 vendor/modules.txt | 312 +-
 930 files changed, 183699 insertions(+), 4609 deletions(-)
 create mode 100644 vendor/github.com/containerd/console/.travis.yml
 create mode 100644 vendor/github.com/containerd/console/LICENSE
 create mode 100644 vendor/github.com/containerd/console/README.md
 create mode 100644 vendor/github.com/containerd/console/console.go
 create mode 100644 vendor/github.com/containerd/console/console_linux.go
 create mode 100644 vendor/github.com/containerd/console/console_unix.go
 create mode 100644 vendor/github.com/containerd/console/console_windows.go
 create mode 100644 vendor/github.com/containerd/console/go.mod
 create mode 100644 vendor/github.com/containerd/console/go.sum
 create mode 100644 vendor/github.com/containerd/console/tc_darwin.go
 create mode 100644 vendor/github.com/containerd/console/tc_freebsd.go
 create mode 100644 vendor/github.com/containerd/console/tc_linux.go
 create mode 100644 vendor/github.com/containerd/console/tc_openbsd_cgo.go
 create mode 100644 vendor/github.com/containerd/console/tc_openbsd_nocgo.go
 create mode 100644 vendor/github.com/containerd/console/tc_solaris_cgo.go
 create mode 100644 vendor/github.com/containerd/console/tc_solaris_nocgo.go
 create mode 100644 vendor/github.com/containerd/console/tc_unix.go
 create mode 100644 vendor/github.com/containerd/containerd/api/services/content/v1/content.pb.go
 create mode 100644 vendor/github.com/containerd/containerd/api/services/content/v1/content.proto
 create mode 100644 vendor/github.com/containerd/containerd/archive/compression/compression.go
 create mode 100644 vendor/github.com/containerd/containerd/containers/containers.go
 create mode 100644 vendor/github.com/containerd/containerd/content/content.go
 create mode 100644 vendor/github.com/containerd/containerd/content/helpers.go
 create mode 100644 vendor/github.com/containerd/containerd/content/local/locks.go
 create mode 100644 vendor/github.com/containerd/containerd/content/local/readerat.go
 create mode 100644 vendor/github.com/containerd/containerd/content/local/store.go
 rename vendor/github.com/containerd/containerd/{sys/filesys.go => content/local/store_unix.go} (61%)
 create mode 100644 vendor/github.com/containerd/containerd/content/local/store_windows.go
 create mode 100644 vendor/github.com/containerd/containerd/content/local/writer.go
 create mode 100644 vendor/github.com/containerd/containerd/content/proxy/content_reader.go
 create mode 100644 vendor/github.com/containerd/containerd/content/proxy/content_store.go
 create mode 100644 vendor/github.com/containerd/containerd/content/proxy/content_writer.go
 create mode 100644 vendor/github.com/containerd/containerd/contrib/seccomp/seccomp.go
 create mode 100644 vendor/github.com/containerd/containerd/contrib/seccomp/seccomp_default.go
 create mode 100644 vendor/github.com/containerd/containerd/contrib/seccomp/seccomp_default_unsupported.go
 create mode 100644 vendor/github.com/containerd/containerd/diff/apply/apply.go
 create mode 100644 vendor/github.com/containerd/containerd/diff/apply/apply_linux.go
 create mode 100644 vendor/github.com/containerd/containerd/diff/apply/apply_other.go
 create mode 100644 vendor/github.com/containerd/containerd/diff/diff.go
 create mode 100644 vendor/github.com/containerd/containerd/diff/stream.go
 create mode 100644 vendor/github.com/containerd/containerd/diff/stream_unix.go
 create mode 100644 vendor/github.com/containerd/containerd/diff/stream_windows.go
 create mode 100644 vendor/github.com/containerd/containerd/diff/walking/differ.go
 create mode 100644 vendor/github.com/containerd/containerd/events/events.go
 create mode 100644 vendor/github.com/containerd/containerd/events/exchange/exchange.go
 create mode 100644 vendor/github.com/containerd/containerd/filters/adaptor.go
 create mode 100644 vendor/github.com/containerd/containerd/filters/filter.go
 create mode 100644 vendor/github.com/containerd/containerd/filters/parser.go
 create mode 100644 vendor/github.com/containerd/containerd/filters/quote.go
 create mode 100644 vendor/github.com/containerd/containerd/filters/scanner.go
 create mode 100644 vendor/github.com/containerd/containerd/gc/gc.go
 create mode 100644 vendor/github.com/containerd/containerd/identifiers/validate.go
 create mode 100644 vendor/github.com/containerd/containerd/images/annotations.go
 create mode 100644 vendor/github.com/containerd/containerd/images/archive/exporter.go
 create mode 100644 vendor/github.com/containerd/containerd/images/archive/importer.go
 create mode 100644 vendor/github.com/containerd/containerd/images/archive/reference.go
 create mode 100644 vendor/github.com/containerd/containerd/images/handlers.go
 create mode 100644 vendor/github.com/containerd/containerd/images/image.go
 create mode 100644 vendor/github.com/containerd/containerd/images/importexport.go
 create mode 100644 vendor/github.com/containerd/containerd/images/mediatypes.go
 create mode 100644 vendor/github.com/containerd/containerd/labels/validate.go
 create mode 100644 vendor/github.com/containerd/containerd/leases/context.go
 create mode 100644 vendor/github.com/containerd/containerd/leases/grpc.go
 create mode 100644 vendor/github.com/containerd/containerd/leases/id.go
 create mode 100644 vendor/github.com/containerd/containerd/leases/lease.go
 create mode 100644 vendor/github.com/containerd/containerd/metadata/adaptors.go
 create mode 100644 vendor/github.com/containerd/containerd/metadata/bolt.go
 create mode 100644 vendor/github.com/containerd/containerd/metadata/boltutil/helpers.go
 create mode 100644 vendor/github.com/containerd/containerd/metadata/buckets.go
 create mode 100644 vendor/github.com/containerd/containerd/metadata/containers.go
 create mode 100644 vendor/github.com/containerd/containerd/metadata/content.go
 create mode 100644 vendor/github.com/containerd/containerd/metadata/db.go
 create mode 100644 vendor/github.com/containerd/containerd/metadata/gc.go
 create mode 100644 vendor/github.com/containerd/containerd/metadata/images.go
 create mode 100644 vendor/github.com/containerd/containerd/metadata/leases.go
 create mode 100644 vendor/github.com/containerd/containerd/metadata/migrations.go
 create mode 100644 vendor/github.com/containerd/containerd/metadata/namespaces.go
 create mode 100644 vendor/github.com/containerd/containerd/metadata/snapshot.go
 create mode 100644 vendor/github.com/containerd/containerd/mount/lookup_unix.go
 create mode 100644 vendor/github.com/containerd/containerd/mount/lookup_unsupported.go
 create mode 100644 vendor/github.com/containerd/containerd/mount/mount.go
 create mode 100644 vendor/github.com/containerd/containerd/mount/mount_linux.go
 create mode 100644 vendor/github.com/containerd/containerd/mount/mount_unix.go
 create mode 100644 vendor/github.com/containerd/containerd/mount/mount_windows.go
 create mode 100644 vendor/github.com/containerd/containerd/mount/mountinfo.go
 create mode 100644 vendor/github.com/containerd/containerd/mount/mountinfo_bsd.go
 create mode 100644 vendor/github.com/containerd/containerd/mount/mountinfo_linux.go
 create mode 100644 vendor/github.com/containerd/containerd/mount/mountinfo_unsupported.go
 create mode 100644 vendor/github.com/containerd/containerd/mount/temp.go
 create mode 100644 vendor/github.com/containerd/containerd/mount/temp_unix.go
 create mode 100644 vendor/github.com/containerd/containerd/mount/temp_unsupported.go
 create mode 100644 vendor/github.com/containerd/containerd/namespaces/context.go
 create mode 100644 vendor/github.com/containerd/containerd/namespaces/grpc.go
 create mode 100644 vendor/github.com/containerd/containerd/namespaces/store.go
 create mode 100644 vendor/github.com/containerd/containerd/namespaces/ttrpc.go
 create mode 100644 vendor/github.com/containerd/containerd/oci/client.go
 create mode 100644 vendor/github.com/containerd/containerd/oci/spec.go
 create mode 100644 vendor/github.com/containerd/containerd/oci/spec_opts.go
 create mode 100644 vendor/github.com/containerd/containerd/oci/spec_opts_linux.go
 create mode 100644 vendor/github.com/containerd/containerd/oci/spec_opts_unix.go
 create mode 100644 vendor/github.com/containerd/containerd/oci/spec_opts_windows.go
 create mode 100644 vendor/github.com/containerd/containerd/platforms/compare.go
 create mode 100644 vendor/github.com/containerd/containerd/platforms/cpuinfo.go
 create mode 100644 vendor/github.com/containerd/containerd/platforms/database.go
 create mode 100644 vendor/github.com/containerd/containerd/platforms/defaults.go
 create mode 100644 vendor/github.com/containerd/containerd/platforms/defaults_unix.go
 create mode 100644 vendor/github.com/containerd/containerd/platforms/defaults_windows.go
 create mode 100644 vendor/github.com/containerd/containerd/platforms/platforms.go
 create mode 100644 vendor/github.com/containerd/containerd/plugin/context.go
 create mode 100644 vendor/github.com/containerd/containerd/plugin/plugin.go
 create mode 100644 vendor/github.com/containerd/containerd/plugin/plugin_go18.go
 create mode 100644 vendor/github.com/containerd/containerd/plugin/plugin_other.go
 create mode 100644 vendor/github.com/containerd/containerd/reference/docker/reference.go
 create mode 100644 vendor/github.com/containerd/containerd/reference/reference.go
 create mode 100644 vendor/github.com/containerd/containerd/remotes/docker/auth.go
 create mode 100644 vendor/github.com/containerd/containerd/remotes/docker/authorizer.go
 create mode 100644 vendor/github.com/containerd/containerd/remotes/docker/converter.go
 create mode 100644 vendor/github.com/containerd/containerd/remotes/docker/errcode.go
 create mode 100644 vendor/github.com/containerd/containerd/remotes/docker/errdesc.go
 create mode 100644 vendor/github.com/containerd/containerd/remotes/docker/fetcher.go
 create mode 100644 vendor/github.com/containerd/containerd/remotes/docker/handler.go
 create mode 100644 vendor/github.com/containerd/containerd/remotes/docker/httpreadseeker.go
 create mode 100644 vendor/github.com/containerd/containerd/remotes/docker/pusher.go
 create mode 100644 vendor/github.com/containerd/containerd/remotes/docker/registry.go
 create mode 100644 vendor/github.com/containerd/containerd/remotes/docker/resolver.go
 create mode 100644 vendor/github.com/containerd/containerd/remotes/docker/schema1/converter.go
 create mode 100644 vendor/github.com/containerd/containerd/remotes/docker/scope.go
 create mode 100644 vendor/github.com/containerd/containerd/remotes/docker/status.go
 create mode 100644 vendor/github.com/containerd/containerd/remotes/handlers.go
 create mode 100644 vendor/github.com/containerd/containerd/remotes/resolver.go
 create mode 100644 vendor/github.com/containerd/containerd/rootfs/apply.go
 create mode 100644 vendor/github.com/containerd/containerd/rootfs/diff.go
 create mode 100644 vendor/github.com/containerd/containerd/rootfs/init.go
 create mode 100644 vendor/github.com/containerd/containerd/rootfs/init_linux.go
 create mode 100644 vendor/github.com/containerd/containerd/rootfs/init_other.go
 create mode 100644 vendor/github.com/containerd/containerd/services/content/contentserver/contentserver.go
 create mode 100644 vendor/github.com/containerd/containerd/snapshots/native/native.go
 create mode 100644 vendor/github.com/containerd/containerd/snapshots/overlay/check.go
 create mode 100644 vendor/github.com/containerd/containerd/snapshots/overlay/overlay.go
 create mode 100644 vendor/github.com/containerd/containerd/snapshots/snapshotter.go
 create mode 100644 vendor/github.com/containerd/containerd/snapshots/storage/bolt.go
 create mode 100644 vendor/github.com/containerd/containerd/snapshots/storage/metastore.go
 create mode 100644 vendor/github.com/containerd/containerd/version/version.go
 create mode 100644 vendor/github.com/containerd/go-cni/.travis.yml
 create mode 100644 vendor/github.com/containerd/go-cni/LICENSE
 create mode 100644 vendor/github.com/containerd/go-cni/README.md
 create mode 100644 vendor/github.com/containerd/go-cni/cni.go
 create mode 100644 vendor/github.com/containerd/go-cni/errors.go
 create mode 100644 vendor/github.com/containerd/go-cni/go.mod
 create mode 100644 vendor/github.com/containerd/go-cni/go.sum
 create mode 100644 vendor/github.com/containerd/go-cni/helper.go
 create mode 100644 vendor/github.com/containerd/go-cni/namespace.go
 create mode 100644 vendor/github.com/containerd/go-cni/namespace_opts.go
 create mode 100644 vendor/github.com/containerd/go-cni/opts.go
 create mode 100644 vendor/github.com/containerd/go-cni/result.go
 create mode 100644 vendor/github.com/containerd/go-cni/testutils.go
 create mode 100644 vendor/github.com/containerd/go-cni/types.go
 create mode 100644 vendor/github.com/containerd/go-runc/.travis.yml
 create mode 100644 vendor/github.com/containerd/go-runc/LICENSE
 create mode 100644 vendor/github.com/containerd/go-runc/README.md
 create mode 100644 vendor/github.com/containerd/go-runc/command_linux.go
 create mode 100644 vendor/github.com/containerd/go-runc/command_other.go
 create mode 100644 vendor/github.com/containerd/go-runc/console.go
 create mode 100644 vendor/github.com/containerd/go-runc/container.go
 create mode 100644 vendor/github.com/containerd/go-runc/events.go
 create mode 100644 vendor/github.com/containerd/go-runc/go.mod
 create mode 100644 vendor/github.com/containerd/go-runc/go.sum
 create mode 100644 vendor/github.com/containerd/go-runc/io.go
 create mode 100644 vendor/github.com/containerd/go-runc/io_unix.go
 create mode 100644 vendor/github.com/containerd/go-runc/io_windows.go
 create mode 100644 vendor/github.com/containerd/go-runc/monitor.go
 create mode 100644 vendor/github.com/containerd/go-runc/runc.go
 create mode 100644 vendor/github.com/containerd/go-runc/utils.go
 create mode 100644 vendor/github.com/containerd/ttrpc/.gitignore
 create mode 100644 vendor/github.com/containerd/ttrpc/.travis.yml
 create mode 100644 vendor/github.com/containerd/ttrpc/LICENSE
 create mode 100644 vendor/github.com/containerd/ttrpc/README.md
 create mode 100644 vendor/github.com/containerd/ttrpc/channel.go
 create mode 100644 vendor/github.com/containerd/ttrpc/client.go
 create mode 100644 vendor/github.com/containerd/ttrpc/codec.go
 create mode 100644 vendor/github.com/containerd/ttrpc/config.go
 create mode 100644 vendor/github.com/containerd/ttrpc/go.mod
 create mode 100644 vendor/github.com/containerd/ttrpc/go.sum
 create mode 100644 vendor/github.com/containerd/ttrpc/handshake.go
 create mode 100644 vendor/github.com/containerd/ttrpc/interceptor.go
 create mode 100644 vendor/github.com/containerd/ttrpc/metadata.go
 create mode 100644 vendor/github.com/containerd/ttrpc/server.go
 create mode 100644 vendor/github.com/containerd/ttrpc/services.go
 create mode 100644 vendor/github.com/containerd/ttrpc/types.go
 create mode 100644 vendor/github.com/containerd/ttrpc/unixcreds_linux.go
 create mode 100644 vendor/github.com/containerd/typeurl/.gitignore
 create mode 100644 vendor/github.com/containerd/typeurl/.travis.yml
 create mode 100644 vendor/github.com/containerd/typeurl/LICENSE
 create mode 100644 vendor/github.com/containerd/typeurl/README.md
 create mode 100644 vendor/github.com/containerd/typeurl/doc.go
 create mode 100644 vendor/github.com/containerd/typeurl/go.mod
 create mode 100644 vendor/github.com/containerd/typeurl/go.sum
 create mode 100644 vendor/github.com/containerd/typeurl/types.go
 create mode 100644 vendor/github.com/containernetworking/cni/LICENSE
 create mode 100644 vendor/github.com/containernetworking/cni/libcni/api.go
 create mode 100644 vendor/github.com/containernetworking/cni/libcni/conf.go
 create mode 100644 vendor/github.com/containernetworking/cni/pkg/invoke/args.go
 create mode 100644 vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go
 create mode 100644 vendor/github.com/containernetworking/cni/pkg/invoke/exec.go
 create mode 100644 vendor/github.com/containernetworking/cni/pkg/invoke/find.go
 create mode 100644 vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go
 create mode 100644 vendor/github.com/containernetworking/cni/pkg/invoke/os_windows.go
 create mode 100644 vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go
 create mode 100644 vendor/github.com/containernetworking/cni/pkg/types/020/types.go
 create mode 100644 vendor/github.com/containernetworking/cni/pkg/types/args.go
 create mode 100644 vendor/github.com/containernetworking/cni/pkg/types/current/types.go
 create mode 100644 vendor/github.com/containernetworking/cni/pkg/types/types.go
 create mode 100644 vendor/github.com/containernetworking/cni/pkg/version/conf.go
 create mode 100644 vendor/github.com/containernetworking/cni/pkg/version/plugin.go
 create mode 100644 vendor/github.com/containernetworking/cni/pkg/version/reconcile.go
 create mode 100644 vendor/github.com/containernetworking/cni/pkg/version/version.go
 create mode 100644 vendor/github.com/docker/docker/builder/dockerignore/dockerignore.go
 create mode 100644 vendor/github.com/docker/docker/pkg/chrootarchive/archive.go
 create mode 100644 vendor/github.com/docker/docker/pkg/chrootarchive/archive_unix.go
 create mode 100644 vendor/github.com/docker/docker/pkg/chrootarchive/archive_windows.go
 create mode 100644 vendor/github.com/docker/docker/pkg/chrootarchive/chroot_linux.go
 create mode 100644 vendor/github.com/docker/docker/pkg/chrootarchive/chroot_unix.go
 create mode 100644 vendor/github.com/docker/docker/pkg/chrootarchive/diff.go
 create mode 100644 vendor/github.com/docker/docker/pkg/chrootarchive/diff_unix.go
 create mode 100644 vendor/github.com/docker/docker/pkg/chrootarchive/diff_windows.go
 create mode 100644 vendor/github.com/docker/docker/pkg/chrootarchive/init_unix.go
 create mode 100644 vendor/github.com/docker/docker/pkg/chrootarchive/init_windows.go
 create mode 100644 vendor/github.com/docker/docker/pkg/locker/README.md
 create mode 100644 vendor/github.com/docker/docker/pkg/locker/locker.go
 create mode 100644 vendor/github.com/docker/docker/pkg/reexec/README.md
 create mode 100644 vendor/github.com/docker/docker/pkg/reexec/command_linux.go
 create mode 100644 vendor/github.com/docker/docker/pkg/reexec/command_unix.go
 create mode 100644 vendor/github.com/docker/docker/pkg/reexec/command_unsupported.go
 create mode 100644 vendor/github.com/docker/docker/pkg/reexec/command_windows.go
 create mode 100644 vendor/github.com/docker/docker/pkg/reexec/reexec.go
 create mode 100644 vendor/github.com/docker/docker/pkg/signal/README.md
 create mode 100644 vendor/github.com/docker/docker/pkg/signal/signal.go
 create mode 100644 vendor/github.com/docker/docker/pkg/signal/signal_darwin.go
 create mode 100644 vendor/github.com/docker/docker/pkg/signal/signal_freebsd.go
 create mode 100644 vendor/github.com/docker/docker/pkg/signal/signal_linux.go
 create mode 100644 vendor/github.com/docker/docker/pkg/signal/signal_linux_mipsx.go
 create mode 100644 vendor/github.com/docker/docker/pkg/signal/signal_unix.go
 create mode 100644 vendor/github.com/docker/docker/pkg/signal/signal_unsupported.go
 create mode 100644 vendor/github.com/docker/docker/pkg/signal/signal_windows.go
 create mode 100644 vendor/github.com/docker/docker/pkg/signal/trap.go
 create mode 100644 vendor/github.com/docker/go-events/.gitignore
 create mode 100644 vendor/github.com/docker/go-events/CONTRIBUTING.md
 create mode 100644 vendor/github.com/docker/go-events/LICENSE
 create mode 100644 vendor/github.com/docker/go-events/MAINTAINERS
 create mode 100644 vendor/github.com/docker/go-events/README.md
 create mode 100644 vendor/github.com/docker/go-events/broadcast.go
 create mode 100644 vendor/github.com/docker/go-events/channel.go
 create mode 100644 vendor/github.com/docker/go-events/errors.go
 create mode 100644 vendor/github.com/docker/go-events/event.go
 create mode 100644 vendor/github.com/docker/go-events/filter.go
 create mode 100644 vendor/github.com/docker/go-events/queue.go
 create mode 100644 vendor/github.com/docker/go-events/retry.go
 create mode 100644 vendor/github.com/docker/libnetwork/LICENSE
 create mode 100644 vendor/github.com/docker/libnetwork/resolvconf/README.md
 create mode 100644 vendor/github.com/docker/libnetwork/resolvconf/dns/resolvconf.go
 create mode 100644 vendor/github.com/docker/libnetwork/resolvconf/resolvconf.go
 create mode 100644 vendor/github.com/docker/libnetwork/types/types.go
 create mode 100644 vendor/github.com/genuinetools/img/AUTHORS
 create mode 100644 vendor/github.com/genuinetools/img/LICENSE
 create mode 100644 vendor/github.com/genuinetools/img/client/client.go
 create mode 100644 vendor/github.com/genuinetools/img/client/controller.go
 create mode 100644 vendor/github.com/genuinetools/img/client/diskusage.go
 create mode 100644 vendor/github.com/genuinetools/img/client/list.go
 create mode 100644 vendor/github.com/genuinetools/img/client/prune.go
 create mode 100644 vendor/github.com/genuinetools/img/client/pull.go
 create mode 100644 vendor/github.com/genuinetools/img/client/push.go
 create mode 100644 vendor/github.com/genuinetools/img/client/remove.go
 create mode 100644 vendor/github.com/genuinetools/img/client/save.go
 create mode 100644 vendor/github.com/genuinetools/img/client/session.go
 create mode 100644 vendor/github.com/genuinetools/img/client/solve.go
 create mode 100644 vendor/github.com/genuinetools/img/client/tag.go
 create mode 100644 vendor/github.com/genuinetools/img/client/unpack.go
 create mode 100644 vendor/github.com/genuinetools/img/client/workeropt.go
 create mode 100644 vendor/github.com/genuinetools/img/types/types.go
 create mode 100644 vendor/github.com/gofrs/flock/.gitignore
 create mode 100644 vendor/github.com/gofrs/flock/.travis.yml
 create mode 100644 vendor/github.com/gofrs/flock/LICENSE
 create mode 100644
vendor/github.com/gofrs/flock/README.md create mode 100644 vendor/github.com/gofrs/flock/appveyor.yml create mode 100644 vendor/github.com/gofrs/flock/flock.go create mode 100644 vendor/github.com/gofrs/flock/flock_unix.go create mode 100644 vendor/github.com/gofrs/flock/flock_winapi.go create mode 100644 vendor/github.com/gofrs/flock/flock_windows.go create mode 100644 vendor/github.com/gogo/googleapis/LICENSE create mode 100644 vendor/github.com/gogo/googleapis/google/rpc/code.pb.go create mode 100644 vendor/github.com/gogo/googleapis/google/rpc/code.proto create mode 100644 vendor/github.com/gogo/googleapis/google/rpc/error_details.pb.go create mode 100644 vendor/github.com/gogo/googleapis/google/rpc/error_details.proto create mode 100644 vendor/github.com/gogo/googleapis/google/rpc/status.pb.go create mode 100644 vendor/github.com/gogo/googleapis/google/rpc/status.proto create mode 100644 vendor/github.com/gogo/protobuf/types/any.go create mode 100644 vendor/github.com/gogo/protobuf/types/any.pb.go create mode 100644 vendor/github.com/gogo/protobuf/types/api.pb.go create mode 100644 vendor/github.com/gogo/protobuf/types/doc.go create mode 100644 vendor/github.com/gogo/protobuf/types/duration.go create mode 100644 vendor/github.com/gogo/protobuf/types/duration.pb.go create mode 100644 vendor/github.com/gogo/protobuf/types/duration_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/types/empty.pb.go create mode 100644 vendor/github.com/gogo/protobuf/types/field_mask.pb.go create mode 100644 vendor/github.com/gogo/protobuf/types/protosize.go create mode 100644 vendor/github.com/gogo/protobuf/types/source_context.pb.go create mode 100644 vendor/github.com/gogo/protobuf/types/struct.pb.go create mode 100644 vendor/github.com/gogo/protobuf/types/timestamp.go create mode 100644 vendor/github.com/gogo/protobuf/types/timestamp.pb.go create mode 100644 vendor/github.com/gogo/protobuf/types/timestamp_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/types/type.pb.go create mode 100644 vendor/github.com/gogo/protobuf/types/wrappers.pb.go create mode 100644 vendor/github.com/gogo/protobuf/types/wrappers_gogo.go create mode 100644 vendor/github.com/google/shlex/COPYING create mode 100644 vendor/github.com/google/shlex/README create mode 100644 vendor/github.com/google/shlex/shlex.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-opentracing/LICENSE create mode 100644 vendor/github.com/grpc-ecosystem/grpc-opentracing/PATENTS create mode 100644 vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/README.md create mode 100644 vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/client.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/errors.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/options.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/package.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/server.go create mode 100644 vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/shared.go create mode 100644 vendor/github.com/hashicorp/go-immutable-radix/.gitignore create mode 100644 vendor/github.com/hashicorp/go-immutable-radix/.travis.yml create mode 100644 vendor/github.com/hashicorp/go-immutable-radix/LICENSE create mode 100644 vendor/github.com/hashicorp/go-immutable-radix/README.md create mode 100644 vendor/github.com/hashicorp/go-immutable-radix/edges.go create mode 100644 
vendor/github.com/hashicorp/go-immutable-radix/iradix.go create mode 100644 vendor/github.com/hashicorp/go-immutable-radix/iter.go create mode 100644 vendor/github.com/hashicorp/go-immutable-radix/node.go create mode 100644 vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go create mode 100644 vendor/github.com/hashicorp/golang-lru/LICENSE create mode 100644 vendor/github.com/hashicorp/golang-lru/simplelru/lru.go create mode 100644 vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go create mode 100644 vendor/github.com/ishidawataru/sctp/.gitignore create mode 100644 vendor/github.com/ishidawataru/sctp/.travis.yml create mode 100644 vendor/github.com/ishidawataru/sctp/GO_LICENSE create mode 100644 vendor/github.com/ishidawataru/sctp/LICENSE create mode 100644 vendor/github.com/ishidawataru/sctp/NOTICE create mode 100644 vendor/github.com/ishidawataru/sctp/README.md create mode 100644 vendor/github.com/ishidawataru/sctp/go.mod create mode 100644 vendor/github.com/ishidawataru/sctp/ipsock_linux.go create mode 100644 vendor/github.com/ishidawataru/sctp/sctp.go create mode 100644 vendor/github.com/ishidawataru/sctp/sctp_linux.go create mode 100644 vendor/github.com/ishidawataru/sctp/sctp_unsupported.go create mode 100644 vendor/github.com/mitchellh/hashstructure/LICENSE create mode 100644 vendor/github.com/mitchellh/hashstructure/README.md create mode 100644 vendor/github.com/mitchellh/hashstructure/go.mod create mode 100644 vendor/github.com/mitchellh/hashstructure/hashstructure.go create mode 100644 vendor/github.com/mitchellh/hashstructure/include.go create mode 100644 vendor/github.com/moby/buildkit/AUTHORS create mode 100644 vendor/github.com/moby/buildkit/LICENSE create mode 100644 vendor/github.com/moby/buildkit/api/services/control/control.pb.go create mode 100644 vendor/github.com/moby/buildkit/api/services/control/control.proto create mode 100644 vendor/github.com/moby/buildkit/api/services/control/generate.go create mode 100644 vendor/github.com/moby/buildkit/api/types/generate.go create mode 100644 vendor/github.com/moby/buildkit/api/types/worker.pb.go create mode 100644 vendor/github.com/moby/buildkit/api/types/worker.proto create mode 100644 vendor/github.com/moby/buildkit/cache/blobs/blobs.go create mode 100644 vendor/github.com/moby/buildkit/cache/blobs/compression.go create mode 100644 vendor/github.com/moby/buildkit/cache/contenthash/checksum.go create mode 100644 vendor/github.com/moby/buildkit/cache/contenthash/checksum.pb.go create mode 100644 vendor/github.com/moby/buildkit/cache/contenthash/checksum.proto create mode 100644 vendor/github.com/moby/buildkit/cache/contenthash/filehash.go create mode 100644 vendor/github.com/moby/buildkit/cache/contenthash/filehash_unix.go create mode 100644 vendor/github.com/moby/buildkit/cache/contenthash/filehash_windows.go create mode 100644 vendor/github.com/moby/buildkit/cache/contenthash/generate.go create mode 100644 vendor/github.com/moby/buildkit/cache/contenthash/path.go create mode 100644 vendor/github.com/moby/buildkit/cache/contenthash/tarsum.go create mode 100644 vendor/github.com/moby/buildkit/cache/manager.go create mode 100644 vendor/github.com/moby/buildkit/cache/metadata.go create mode 100644 vendor/github.com/moby/buildkit/cache/metadata/metadata.go create mode 100644 vendor/github.com/moby/buildkit/cache/migrate_v2.go create mode 100644 vendor/github.com/moby/buildkit/cache/refs.go create mode 100644 vendor/github.com/moby/buildkit/cache/remotecache/export.go create mode 100644 
vendor/github.com/moby/buildkit/cache/remotecache/import.go create mode 100644 vendor/github.com/moby/buildkit/cache/remotecache/inline/inline.go create mode 100644 vendor/github.com/moby/buildkit/cache/remotecache/local/local.go create mode 100644 vendor/github.com/moby/buildkit/cache/remotecache/registry/registry.go create mode 100644 vendor/github.com/moby/buildkit/cache/remotecache/v1/cachestorage.go create mode 100644 vendor/github.com/moby/buildkit/cache/remotecache/v1/chains.go create mode 100644 vendor/github.com/moby/buildkit/cache/remotecache/v1/doc.go create mode 100644 vendor/github.com/moby/buildkit/cache/remotecache/v1/parse.go create mode 100644 vendor/github.com/moby/buildkit/cache/remotecache/v1/spec.go create mode 100644 vendor/github.com/moby/buildkit/cache/remotecache/v1/utils.go create mode 100644 vendor/github.com/moby/buildkit/cache/util/fsutil.go create mode 100644 vendor/github.com/moby/buildkit/client/build.go create mode 100644 vendor/github.com/moby/buildkit/client/buildid/metadata.go create mode 100644 vendor/github.com/moby/buildkit/client/client.go create mode 100644 vendor/github.com/moby/buildkit/client/client_unix.go create mode 100644 vendor/github.com/moby/buildkit/client/client_windows.go create mode 100644 vendor/github.com/moby/buildkit/client/connhelper/connhelper.go create mode 100644 vendor/github.com/moby/buildkit/client/diskusage.go create mode 100644 vendor/github.com/moby/buildkit/client/exporters.go create mode 100644 vendor/github.com/moby/buildkit/client/filter.go create mode 100644 vendor/github.com/moby/buildkit/client/graph.go create mode 100644 vendor/github.com/moby/buildkit/client/llb/definition.go create mode 100644 vendor/github.com/moby/buildkit/client/llb/exec.go create mode 100644 vendor/github.com/moby/buildkit/client/llb/fileop.go create mode 100644 vendor/github.com/moby/buildkit/client/llb/imagemetaresolver/resolver.go create mode 100644 vendor/github.com/moby/buildkit/client/llb/marshal.go create mode 100644 vendor/github.com/moby/buildkit/client/llb/meta.go create mode 100644 vendor/github.com/moby/buildkit/client/llb/resolver.go create mode 100644 vendor/github.com/moby/buildkit/client/llb/source.go create mode 100644 vendor/github.com/moby/buildkit/client/llb/state.go create mode 100644 vendor/github.com/moby/buildkit/client/ociindex/ociindex.go create mode 100644 vendor/github.com/moby/buildkit/client/prune.go create mode 100644 vendor/github.com/moby/buildkit/client/solve.go create mode 100644 vendor/github.com/moby/buildkit/client/workers.go create mode 100644 vendor/github.com/moby/buildkit/cmd/buildkitd/config/config.go create mode 100644 vendor/github.com/moby/buildkit/cmd/buildkitd/config/gcpolicy.go create mode 100644 vendor/github.com/moby/buildkit/cmd/buildkitd/config/gcpolicy_unix.go create mode 100644 vendor/github.com/moby/buildkit/cmd/buildkitd/config/gcpolicy_windows.go create mode 100644 vendor/github.com/moby/buildkit/control/control.go create mode 100644 vendor/github.com/moby/buildkit/control/gateway/gateway.go create mode 100644 vendor/github.com/moby/buildkit/executor/executor.go create mode 100644 vendor/github.com/moby/buildkit/executor/oci/hosts.go create mode 100644 vendor/github.com/moby/buildkit/executor/oci/mounts.go create mode 100644 vendor/github.com/moby/buildkit/executor/oci/resolvconf.go create mode 100644 vendor/github.com/moby/buildkit/executor/oci/spec.go create mode 100644 vendor/github.com/moby/buildkit/executor/oci/spec_unix.go create mode 100644 
vendor/github.com/moby/buildkit/executor/oci/user.go create mode 100644 vendor/github.com/moby/buildkit/executor/runcexecutor/executor.go create mode 100644 vendor/github.com/moby/buildkit/exporter/containerimage/export.go create mode 100644 vendor/github.com/moby/buildkit/exporter/containerimage/exptypes/types.go create mode 100644 vendor/github.com/moby/buildkit/exporter/containerimage/writer.go create mode 100644 vendor/github.com/moby/buildkit/exporter/exporter.go create mode 100644 vendor/github.com/moby/buildkit/exporter/local/export.go create mode 100644 vendor/github.com/moby/buildkit/exporter/oci/export.go create mode 100644 vendor/github.com/moby/buildkit/exporter/tar/export.go create mode 100644 vendor/github.com/moby/buildkit/frontend/dockerfile/builder/build.go create mode 100644 vendor/github.com/moby/buildkit/frontend/dockerfile/command/command.go create mode 100644 vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert.go create mode 100644 vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_norunmount.go create mode 100644 vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_norunnetwork.go create mode 100644 vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_norunsecurity.go create mode 100644 vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_nosecrets.go create mode 100644 vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_nossh.go create mode 100644 vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_runmount.go create mode 100644 vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_runnetwork.go create mode 100644 vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_runsecurity.go create mode 100644 vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_secrets.go create mode 100644 vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_ssh.go create mode 100644 vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/defaultshell_unix.go create mode 100644 vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/defaultshell_windows.go create mode 100644 vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/directives.go create mode 100644 vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/image.go create mode 100644 vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/platform.go create mode 100644 vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/bflag.go create mode 100644 vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands.go create mode 100644 vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_nosecrets.go create mode 100644 vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_nossh.go create mode 100644 vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runmount.go create mode 100644 vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runnetwork.go create mode 100644 vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runsecurity.go create mode 100644 vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_secrets.go create mode 100644 vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_ssh.go create mode 100644 
vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/errors_unix.go create mode 100644 vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/errors_windows.go create mode 100644 vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/parse.go create mode 100644 vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/support.go create mode 100644 vendor/github.com/moby/buildkit/frontend/dockerfile/parser/line_parsers.go create mode 100644 vendor/github.com/moby/buildkit/frontend/dockerfile/parser/parser.go create mode 100644 vendor/github.com/moby/buildkit/frontend/dockerfile/parser/split_command.go create mode 100644 vendor/github.com/moby/buildkit/frontend/dockerfile/shell/envVarTest create mode 100644 vendor/github.com/moby/buildkit/frontend/dockerfile/shell/equal_env_unix.go create mode 100644 vendor/github.com/moby/buildkit/frontend/dockerfile/shell/equal_env_windows.go create mode 100644 vendor/github.com/moby/buildkit/frontend/dockerfile/shell/lex.go create mode 100644 vendor/github.com/moby/buildkit/frontend/dockerfile/shell/wordsTest create mode 100644 vendor/github.com/moby/buildkit/frontend/frontend.go create mode 100644 vendor/github.com/moby/buildkit/frontend/gateway/client/client.go create mode 100644 vendor/github.com/moby/buildkit/frontend/gateway/client/result.go create mode 100644 vendor/github.com/moby/buildkit/frontend/gateway/forwarder/forward.go create mode 100644 vendor/github.com/moby/buildkit/frontend/gateway/forwarder/frontend.go create mode 100644 vendor/github.com/moby/buildkit/frontend/gateway/gateway.go create mode 100644 vendor/github.com/moby/buildkit/frontend/gateway/grpcclient/client.go create mode 100644 vendor/github.com/moby/buildkit/frontend/gateway/pb/caps.go create mode 100644 vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.pb.go create mode 100644 vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.proto create mode 100644 vendor/github.com/moby/buildkit/frontend/gateway/pb/generate.go create mode 100644 vendor/github.com/moby/buildkit/frontend/result.go create mode 100644 vendor/github.com/moby/buildkit/identity/randomid.go create mode 100644 vendor/github.com/moby/buildkit/session/auth/auth.go create mode 100644 vendor/github.com/moby/buildkit/session/auth/auth.pb.go create mode 100644 vendor/github.com/moby/buildkit/session/auth/auth.proto create mode 100644 vendor/github.com/moby/buildkit/session/auth/authprovider/authprovider.go create mode 100644 vendor/github.com/moby/buildkit/session/auth/generate.go create mode 100644 vendor/github.com/moby/buildkit/session/content/attachable.go create mode 100644 vendor/github.com/moby/buildkit/session/content/caller.go create mode 100644 vendor/github.com/moby/buildkit/session/context.go create mode 100644 vendor/github.com/moby/buildkit/session/filesync/diffcopy.go create mode 100644 vendor/github.com/moby/buildkit/session/filesync/filesync.go create mode 100644 vendor/github.com/moby/buildkit/session/filesync/filesync.pb.go create mode 100644 vendor/github.com/moby/buildkit/session/filesync/filesync.proto create mode 100644 vendor/github.com/moby/buildkit/session/filesync/generate.go create mode 100644 vendor/github.com/moby/buildkit/session/grpc.go create mode 100644 vendor/github.com/moby/buildkit/session/grpchijack/dial.go create mode 100644 vendor/github.com/moby/buildkit/session/grpchijack/hijack.go create mode 100644 vendor/github.com/moby/buildkit/session/manager.go create mode 100644 
vendor/github.com/moby/buildkit/session/secrets/generate.go create mode 100644 vendor/github.com/moby/buildkit/session/secrets/secrets.go create mode 100644 vendor/github.com/moby/buildkit/session/secrets/secrets.pb.go create mode 100644 vendor/github.com/moby/buildkit/session/secrets/secrets.proto create mode 100644 vendor/github.com/moby/buildkit/session/session.go create mode 100644 vendor/github.com/moby/buildkit/session/sshforward/copy.go create mode 100644 vendor/github.com/moby/buildkit/session/sshforward/generate.go create mode 100644 vendor/github.com/moby/buildkit/session/sshforward/ssh.go create mode 100644 vendor/github.com/moby/buildkit/session/sshforward/ssh.pb.go create mode 100644 vendor/github.com/moby/buildkit/session/sshforward/ssh.proto create mode 100644 vendor/github.com/moby/buildkit/session/testutil/testutil.go create mode 100644 vendor/github.com/moby/buildkit/session/upload/generate.go create mode 100644 vendor/github.com/moby/buildkit/session/upload/upload.go create mode 100644 vendor/github.com/moby/buildkit/session/upload/upload.pb.go create mode 100644 vendor/github.com/moby/buildkit/session/upload/upload.proto create mode 100644 vendor/github.com/moby/buildkit/snapshot/containerd/content.go create mode 100644 vendor/github.com/moby/buildkit/snapshot/containerd/snapshotter.go create mode 100644 vendor/github.com/moby/buildkit/snapshot/imagerefchecker/checker.go create mode 100644 vendor/github.com/moby/buildkit/snapshot/localmounter.go create mode 100644 vendor/github.com/moby/buildkit/snapshot/localmounter_unix.go create mode 100644 vendor/github.com/moby/buildkit/snapshot/localmounter_windows.go create mode 100644 vendor/github.com/moby/buildkit/snapshot/snapshotter.go create mode 100644 vendor/github.com/moby/buildkit/solver/bboltcachestorage/storage.go create mode 100644 vendor/github.com/moby/buildkit/solver/cachekey.go create mode 100644 vendor/github.com/moby/buildkit/solver/cachemanager.go create mode 100644 vendor/github.com/moby/buildkit/solver/cachestorage.go create mode 100644 vendor/github.com/moby/buildkit/solver/combinedcache.go create mode 100644 vendor/github.com/moby/buildkit/solver/edge.go create mode 100644 vendor/github.com/moby/buildkit/solver/exporter.go create mode 100644 vendor/github.com/moby/buildkit/solver/index.go create mode 100644 vendor/github.com/moby/buildkit/solver/internal/pipe/pipe.go create mode 100644 vendor/github.com/moby/buildkit/solver/jobs.go create mode 100644 vendor/github.com/moby/buildkit/solver/llbsolver/bridge.go create mode 100644 vendor/github.com/moby/buildkit/solver/llbsolver/file/backend.go create mode 100644 vendor/github.com/moby/buildkit/solver/llbsolver/file/refmanager.go create mode 100644 vendor/github.com/moby/buildkit/solver/llbsolver/file/unpack.go create mode 100644 vendor/github.com/moby/buildkit/solver/llbsolver/file/user_linux.go create mode 100644 vendor/github.com/moby/buildkit/solver/llbsolver/file/user_nolinux.go create mode 100644 vendor/github.com/moby/buildkit/solver/llbsolver/ops/build.go create mode 100644 vendor/github.com/moby/buildkit/solver/llbsolver/ops/exec.go create mode 100644 vendor/github.com/moby/buildkit/solver/llbsolver/ops/file.go create mode 100644 vendor/github.com/moby/buildkit/solver/llbsolver/ops/fileoptypes/types.go create mode 100644 vendor/github.com/moby/buildkit/solver/llbsolver/ops/source.go create mode 100644 vendor/github.com/moby/buildkit/solver/llbsolver/result.go create mode 100644 vendor/github.com/moby/buildkit/solver/llbsolver/solver.go create mode 
100644 vendor/github.com/moby/buildkit/solver/llbsolver/vertex.go create mode 100644 vendor/github.com/moby/buildkit/solver/memorycachestorage.go create mode 100644 vendor/github.com/moby/buildkit/solver/pb/attr.go create mode 100644 vendor/github.com/moby/buildkit/solver/pb/caps.go create mode 100644 vendor/github.com/moby/buildkit/solver/pb/const.go create mode 100644 vendor/github.com/moby/buildkit/solver/pb/generate.go create mode 100644 vendor/github.com/moby/buildkit/solver/pb/ops.pb.go create mode 100644 vendor/github.com/moby/buildkit/solver/pb/ops.proto create mode 100644 vendor/github.com/moby/buildkit/solver/pb/platform.go create mode 100644 vendor/github.com/moby/buildkit/solver/progress.go create mode 100644 vendor/github.com/moby/buildkit/solver/result.go create mode 100644 vendor/github.com/moby/buildkit/solver/scheduler.go create mode 100644 vendor/github.com/moby/buildkit/solver/types.go create mode 100644 vendor/github.com/moby/buildkit/source/containerimage/pull.go create mode 100644 vendor/github.com/moby/buildkit/source/git/gitsource.go create mode 100644 vendor/github.com/moby/buildkit/source/git/gitsource_unix.go create mode 100644 vendor/github.com/moby/buildkit/source/git/gitsource_windows.go create mode 100644 vendor/github.com/moby/buildkit/source/gitidentifier.go create mode 100644 vendor/github.com/moby/buildkit/source/http/httpsource.go create mode 100644 vendor/github.com/moby/buildkit/source/http/transport.go create mode 100644 vendor/github.com/moby/buildkit/source/identifier.go create mode 100644 vendor/github.com/moby/buildkit/source/local/local.go create mode 100644 vendor/github.com/moby/buildkit/source/manager.go create mode 100644 vendor/github.com/moby/buildkit/util/apicaps/caps.go create mode 100644 vendor/github.com/moby/buildkit/util/apicaps/pb/caps.pb.go create mode 100644 vendor/github.com/moby/buildkit/util/apicaps/pb/caps.proto create mode 100644 vendor/github.com/moby/buildkit/util/apicaps/pb/generate.go create mode 100644 vendor/github.com/moby/buildkit/util/appcontext/appcontext.go create mode 100644 vendor/github.com/moby/buildkit/util/appcontext/appcontext_unix.go create mode 100644 vendor/github.com/moby/buildkit/util/appcontext/appcontext_windows.go create mode 100644 vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_unix.go create mode 100644 vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_windows.go create mode 100644 vendor/github.com/moby/buildkit/util/binfmt_misc/386_binary.go create mode 100644 vendor/github.com/moby/buildkit/util/binfmt_misc/386_check.go create mode 100644 vendor/github.com/moby/buildkit/util/binfmt_misc/386_check_386.go create mode 100644 vendor/github.com/moby/buildkit/util/binfmt_misc/Dockerfile create mode 100644 vendor/github.com/moby/buildkit/util/binfmt_misc/Makefile create mode 100644 vendor/github.com/moby/buildkit/util/binfmt_misc/amd64_binary.go create mode 100644 vendor/github.com/moby/buildkit/util/binfmt_misc/amd64_check.go create mode 100644 vendor/github.com/moby/buildkit/util/binfmt_misc/amd64_check_amd64.go create mode 100644 vendor/github.com/moby/buildkit/util/binfmt_misc/arm64_binary.go create mode 100644 vendor/github.com/moby/buildkit/util/binfmt_misc/arm64_check.go create mode 100644 vendor/github.com/moby/buildkit/util/binfmt_misc/arm64_check_arm64.go create mode 100644 vendor/github.com/moby/buildkit/util/binfmt_misc/arm_binary.go create mode 100644 vendor/github.com/moby/buildkit/util/binfmt_misc/arm_check.go create mode 100644 
vendor/github.com/moby/buildkit/util/binfmt_misc/arm_check_arm.go create mode 100644 vendor/github.com/moby/buildkit/util/binfmt_misc/check.go create mode 100644 vendor/github.com/moby/buildkit/util/binfmt_misc/check_unix.go create mode 100644 vendor/github.com/moby/buildkit/util/binfmt_misc/check_windows.go create mode 100644 vendor/github.com/moby/buildkit/util/binfmt_misc/detect.go create mode 100644 vendor/github.com/moby/buildkit/util/binfmt_misc/ppc64le_binary.go create mode 100644 vendor/github.com/moby/buildkit/util/binfmt_misc/ppc64le_check.go create mode 100644 vendor/github.com/moby/buildkit/util/binfmt_misc/ppc64le_check_ppc64le.go create mode 100644 vendor/github.com/moby/buildkit/util/binfmt_misc/riscv64_binary.go create mode 100644 vendor/github.com/moby/buildkit/util/binfmt_misc/riscv64_check.go create mode 100644 vendor/github.com/moby/buildkit/util/binfmt_misc/riscv64_check_riscv64.go create mode 100644 vendor/github.com/moby/buildkit/util/binfmt_misc/s390x_binary.go create mode 100644 vendor/github.com/moby/buildkit/util/binfmt_misc/s390x_check.go create mode 100644 vendor/github.com/moby/buildkit/util/binfmt_misc/s390x_check_s390x.go create mode 100644 vendor/github.com/moby/buildkit/util/cond/cond.go create mode 100644 vendor/github.com/moby/buildkit/util/contentutil/buffer.go create mode 100644 vendor/github.com/moby/buildkit/util/contentutil/copy.go create mode 100644 vendor/github.com/moby/buildkit/util/contentutil/fetcher.go create mode 100644 vendor/github.com/moby/buildkit/util/contentutil/multiprovider.go create mode 100644 vendor/github.com/moby/buildkit/util/contentutil/pusher.go create mode 100644 vendor/github.com/moby/buildkit/util/contentutil/refs.go create mode 100644 vendor/github.com/moby/buildkit/util/entitlements/entitlements.go create mode 100644 vendor/github.com/moby/buildkit/util/entitlements/security/security_linux.go create mode 100644 vendor/github.com/moby/buildkit/util/flightcontrol/flightcontrol.go create mode 100644 vendor/github.com/moby/buildkit/util/imageutil/config.go create mode 100644 vendor/github.com/moby/buildkit/util/imageutil/schema1.go create mode 100644 vendor/github.com/moby/buildkit/util/leaseutil/manager.go create mode 100644 vendor/github.com/moby/buildkit/util/network/cniprovider/allowempty.s create mode 100644 vendor/github.com/moby/buildkit/util/network/cniprovider/cni.go create mode 100644 vendor/github.com/moby/buildkit/util/network/cniprovider/cni_unsafe.go create mode 100644 vendor/github.com/moby/buildkit/util/network/cniprovider/createns_linux.go create mode 100644 vendor/github.com/moby/buildkit/util/network/cniprovider/createns_nolinux.go create mode 100644 vendor/github.com/moby/buildkit/util/network/host.go create mode 100644 vendor/github.com/moby/buildkit/util/network/netproviders/network.go create mode 100644 vendor/github.com/moby/buildkit/util/network/network.go create mode 100644 vendor/github.com/moby/buildkit/util/network/none.go create mode 100644 vendor/github.com/moby/buildkit/util/progress/logs/logs.go create mode 100644 vendor/github.com/moby/buildkit/util/progress/multireader.go create mode 100644 vendor/github.com/moby/buildkit/util/progress/multiwriter.go create mode 100644 vendor/github.com/moby/buildkit/util/progress/progress.go create mode 100644 vendor/github.com/moby/buildkit/util/pull/pull.go create mode 100644 vendor/github.com/moby/buildkit/util/pull/resolver.go create mode 100644 vendor/github.com/moby/buildkit/util/push/push.go create mode 100644 
vendor/github.com/moby/buildkit/util/resolver/resolver.go create mode 100644 vendor/github.com/moby/buildkit/util/rootless/specconv/specconv_linux.go create mode 100644 vendor/github.com/moby/buildkit/util/system/path_unix.go create mode 100644 vendor/github.com/moby/buildkit/util/system/path_windows.go create mode 100644 vendor/github.com/moby/buildkit/util/system/seccomp_linux.go create mode 100644 vendor/github.com/moby/buildkit/util/system/seccomp_nolinux.go create mode 100644 vendor/github.com/moby/buildkit/util/system/seccomp_noseccomp.go create mode 100644 vendor/github.com/moby/buildkit/util/throttle/throttle.go create mode 100644 vendor/github.com/moby/buildkit/util/tracing/multispan.go create mode 100644 vendor/github.com/moby/buildkit/util/tracing/tracing.go create mode 100644 vendor/github.com/moby/buildkit/util/winlayers/applier.go create mode 100644 vendor/github.com/moby/buildkit/util/winlayers/context.go create mode 100644 vendor/github.com/moby/buildkit/util/winlayers/differ.go create mode 100644 vendor/github.com/moby/buildkit/worker/base/worker.go create mode 100644 vendor/github.com/moby/buildkit/worker/cacheresult.go create mode 100644 vendor/github.com/moby/buildkit/worker/filter.go create mode 100644 vendor/github.com/moby/buildkit/worker/result.go create mode 100644 vendor/github.com/moby/buildkit/worker/worker.go create mode 100644 vendor/github.com/moby/buildkit/worker/workercontroller.go create mode 100644 vendor/github.com/moby/sys/mount/doc.go rename vendor/github.com/moby/sys/mount/{flags_freebsd.go => flags_bsd.go} (79%) rename vendor/github.com/moby/sys/mount/{flags.go => flags_unix.go} (99%) delete mode 100644 vendor/github.com/moby/sys/mount/flags_unsupported.go delete mode 100644 vendor/github.com/moby/sys/mount/mount.go create mode 100644 vendor/github.com/moby/sys/mount/mount_unix.go rename vendor/github.com/moby/sys/mount/{mounter_freebsd.go => mounter_bsd.go} (97%) delete mode 100644 vendor/github.com/moby/sys/mount/unmount_unix.go delete mode 100644 vendor/github.com/moby/sys/mount/unmount_unsupported.go create mode 100644 vendor/github.com/moby/sys/mountinfo/doc.go create mode 100644 vendor/github.com/moby/sys/mountinfo/go.sum create mode 100644 vendor/github.com/moby/sys/mountinfo/mounted_linux.go create mode 100644 vendor/github.com/moby/sys/mountinfo/mounted_unix.go rename vendor/github.com/moby/sys/mountinfo/{mountinfo_freebsd.go => mountinfo_bsd.go} (73%) create mode 100644 vendor/github.com/opencontainers/image-spec/identity/chainid.go create mode 100644 vendor/github.com/opencontainers/image-spec/identity/helpers.go delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/system/setns_linux.go rename vendor/github.com/opencontainers/runc/libcontainer/system/{syscall_linux_arm.go => syscall_linux_32.go} (61%) delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_386.go delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unsupported.go create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/user/lookup_windows.go create mode 100644 vendor/github.com/opentracing-contrib/go-stdlib/LICENSE create mode 100644 vendor/github.com/opentracing-contrib/go-stdlib/nethttp/client.go create mode 100644 vendor/github.com/opentracing-contrib/go-stdlib/nethttp/doc.go create mode 100644 vendor/github.com/opentracing-contrib/go-stdlib/nethttp/server.go create mode 100644 vendor/github.com/opentracing/opentracing-go/.gitignore create mode 100644 
vendor/github.com/opentracing/opentracing-go/.travis.yml create mode 100644 vendor/github.com/opentracing/opentracing-go/CHANGELOG.md create mode 100644 vendor/github.com/opentracing/opentracing-go/LICENSE create mode 100644 vendor/github.com/opentracing/opentracing-go/Makefile create mode 100644 vendor/github.com/opentracing/opentracing-go/README.md create mode 100644 vendor/github.com/opentracing/opentracing-go/ext/tags.go create mode 100644 vendor/github.com/opentracing/opentracing-go/globaltracer.go create mode 100644 vendor/github.com/opentracing/opentracing-go/gocontext.go create mode 100644 vendor/github.com/opentracing/opentracing-go/log/field.go create mode 100644 vendor/github.com/opentracing/opentracing-go/log/util.go create mode 100644 vendor/github.com/opentracing/opentracing-go/noop.go create mode 100644 vendor/github.com/opentracing/opentracing-go/propagation.go create mode 100644 vendor/github.com/opentracing/opentracing-go/span.go create mode 100644 vendor/github.com/opentracing/opentracing-go/tracer.go delete mode 100644 vendor/github.com/pelletier/go-toml/.dockerignore create mode 100644 vendor/github.com/pelletier/go-toml/.travis.yml delete mode 100644 vendor/github.com/pelletier/go-toml/CONTRIBUTING.md delete mode 100644 vendor/github.com/pelletier/go-toml/Dockerfile delete mode 100644 vendor/github.com/pelletier/go-toml/PULL_REQUEST_TEMPLATE.md delete mode 100644 vendor/github.com/pelletier/go-toml/azure-pipelines.yml delete mode 100644 vendor/github.com/pelletier/go-toml/fuzzit.sh delete mode 100644 vendor/github.com/pelletier/go-toml/go.mod delete mode 100644 vendor/github.com/pelletier/go-toml/go.sum delete mode 100644 vendor/github.com/pelletier/go-toml/localtime.go delete mode 100644 vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_test.toml create mode 100644 vendor/github.com/pelletier/go-toml/test.sh create mode 100644 vendor/github.com/spf13/viper/.editorconfig delete mode 100644 vendor/github.com/spf13/viper/.travis.yml create mode 100644 vendor/github.com/syndtr/gocapability/LICENSE create mode 100644 vendor/github.com/syndtr/gocapability/capability/capability.go create mode 100644 vendor/github.com/syndtr/gocapability/capability/capability_linux.go create mode 100644 vendor/github.com/syndtr/gocapability/capability/capability_noop.go create mode 100644 vendor/github.com/syndtr/gocapability/capability/enum.go create mode 100644 vendor/github.com/syndtr/gocapability/capability/enum_gen.go create mode 100644 vendor/github.com/syndtr/gocapability/capability/syscall_linux.go create mode 100644 vendor/github.com/tonistiigi/fsutil/.travis.yml create mode 100644 vendor/github.com/tonistiigi/fsutil/LICENSE create mode 100644 vendor/github.com/tonistiigi/fsutil/chtimes_linux.go create mode 100644 vendor/github.com/tonistiigi/fsutil/chtimes_nolinux.go create mode 100644 vendor/github.com/tonistiigi/fsutil/copy/copy.go create mode 100644 vendor/github.com/tonistiigi/fsutil/copy/copy_darwin.go create mode 100644 vendor/github.com/tonistiigi/fsutil/copy/copy_linux.go create mode 100644 vendor/github.com/tonistiigi/fsutil/copy/copy_nowindows.go create mode 100644 vendor/github.com/tonistiigi/fsutil/copy/copy_unix.go create mode 100644 vendor/github.com/tonistiigi/fsutil/copy/copy_windows.go create mode 100644 vendor/github.com/tonistiigi/fsutil/copy/hardlink.go create mode 100644 vendor/github.com/tonistiigi/fsutil/copy/hardlink_unix.go create mode 100644 vendor/github.com/tonistiigi/fsutil/copy/hardlink_windows.go create mode 100644 
vendor/github.com/tonistiigi/fsutil/copy/mkdir.go create mode 100644 vendor/github.com/tonistiigi/fsutil/copy/mkdir_unix.go create mode 100644 vendor/github.com/tonistiigi/fsutil/copy/mkdir_windows.go create mode 100644 vendor/github.com/tonistiigi/fsutil/copy/stat_bsd.go create mode 100644 vendor/github.com/tonistiigi/fsutil/copy/stat_sysv.go create mode 100644 vendor/github.com/tonistiigi/fsutil/diff.go create mode 100644 vendor/github.com/tonistiigi/fsutil/diff_containerd.go create mode 100644 vendor/github.com/tonistiigi/fsutil/diff_containerd_linux.go create mode 100644 vendor/github.com/tonistiigi/fsutil/diskwriter.go create mode 100644 vendor/github.com/tonistiigi/fsutil/diskwriter_unix.go create mode 100644 vendor/github.com/tonistiigi/fsutil/diskwriter_windows.go create mode 100644 vendor/github.com/tonistiigi/fsutil/followlinks.go create mode 100644 vendor/github.com/tonistiigi/fsutil/fs.go create mode 100644 vendor/github.com/tonistiigi/fsutil/go.mod create mode 100644 vendor/github.com/tonistiigi/fsutil/go.sum create mode 100644 vendor/github.com/tonistiigi/fsutil/hardlinks.go create mode 100644 vendor/github.com/tonistiigi/fsutil/readme.md create mode 100644 vendor/github.com/tonistiigi/fsutil/receive.go create mode 100644 vendor/github.com/tonistiigi/fsutil/send.go create mode 100644 vendor/github.com/tonistiigi/fsutil/stat.go create mode 100644 vendor/github.com/tonistiigi/fsutil/stat_unix.go create mode 100644 vendor/github.com/tonistiigi/fsutil/stat_windows.go create mode 100644 vendor/github.com/tonistiigi/fsutil/tarwriter.go create mode 100644 vendor/github.com/tonistiigi/fsutil/types/generate.go create mode 100644 vendor/github.com/tonistiigi/fsutil/types/stat.go create mode 100644 vendor/github.com/tonistiigi/fsutil/types/stat.pb.go create mode 100644 vendor/github.com/tonistiigi/fsutil/types/stat.proto create mode 100644 vendor/github.com/tonistiigi/fsutil/types/wire.pb.go create mode 100644 vendor/github.com/tonistiigi/fsutil/types/wire.proto create mode 100644 vendor/github.com/tonistiigi/fsutil/validator.go create mode 100644 vendor/github.com/tonistiigi/fsutil/walker.go delete mode 100644 vendor/github.com/urfave/cli/CHANGELOG.md delete mode 100644 vendor/github.com/urfave/cli/CONTRIBUTING.md create mode 100644 vendor/go.etcd.io/bbolt/unsafe.go create mode 100644 vendor/golang.org/x/net/internal/timeseries/timeseries.go create mode 100644 vendor/golang.org/x/net/trace/events.go create mode 100644 vendor/golang.org/x/net/trace/histogram.go create mode 100644 vendor/golang.org/x/net/trace/trace.go create mode 100644 vendor/golang.org/x/sync/semaphore/semaphore.go create mode 100644 vendor/google.golang.org/grpc/.travis.yml create mode 100644 vendor/google.golang.org/grpc/CODE-OF-CONDUCT.md create mode 100644 vendor/google.golang.org/grpc/CONTRIBUTING.md create mode 100644 vendor/google.golang.org/grpc/GOVERNANCE.md create mode 100644 vendor/google.golang.org/grpc/MAINTAINERS.md create mode 100644 vendor/google.golang.org/grpc/Makefile create mode 100644 vendor/google.golang.org/grpc/README.md create mode 100644 vendor/google.golang.org/grpc/attributes/attributes.go create mode 100644 vendor/google.golang.org/grpc/backoff.go create mode 100644 vendor/google.golang.org/grpc/backoff/backoff.go create mode 100644 vendor/google.golang.org/grpc/balancer.go create mode 100644 vendor/google.golang.org/grpc/balancer/balancer.go create mode 100644 vendor/google.golang.org/grpc/balancer/base/balancer.go create mode 100644 vendor/google.golang.org/grpc/balancer/base/base.go 
create mode 100644 vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go create mode 100644 vendor/google.golang.org/grpc/balancer_conn_wrappers.go create mode 100644 vendor/google.golang.org/grpc/balancer_v1_wrapper.go create mode 100644 vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go create mode 100644 vendor/google.golang.org/grpc/call.go create mode 100644 vendor/google.golang.org/grpc/clientconn.go create mode 100644 vendor/google.golang.org/grpc/codec.go create mode 100644 vendor/google.golang.org/grpc/codegen.sh create mode 100644 vendor/google.golang.org/grpc/connectivity/connectivity.go create mode 100644 vendor/google.golang.org/grpc/credentials/credentials.go create mode 100644 vendor/google.golang.org/grpc/credentials/go12.go create mode 100644 vendor/google.golang.org/grpc/credentials/internal/syscallconn.go create mode 100644 vendor/google.golang.org/grpc/credentials/internal/syscallconn_appengine.go create mode 100644 vendor/google.golang.org/grpc/credentials/tls.go create mode 100644 vendor/google.golang.org/grpc/dialoptions.go create mode 100644 vendor/google.golang.org/grpc/doc.go create mode 100644 vendor/google.golang.org/grpc/encoding/encoding.go create mode 100644 vendor/google.golang.org/grpc/encoding/proto/proto.go create mode 100644 vendor/google.golang.org/grpc/go.mod create mode 100644 vendor/google.golang.org/grpc/go.sum create mode 100644 vendor/google.golang.org/grpc/grpclog/grpclog.go create mode 100644 vendor/google.golang.org/grpc/grpclog/logger.go create mode 100644 vendor/google.golang.org/grpc/grpclog/loggerv2.go create mode 100644 vendor/google.golang.org/grpc/health/client.go create mode 100644 vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go create mode 100644 vendor/google.golang.org/grpc/health/regenerate.sh create mode 100644 vendor/google.golang.org/grpc/health/server.go create mode 100644 vendor/google.golang.org/grpc/install_gae.sh create mode 100644 vendor/google.golang.org/grpc/interceptor.go create mode 100644 vendor/google.golang.org/grpc/internal/backoff/backoff.go create mode 100644 vendor/google.golang.org/grpc/internal/balancerload/load.go create mode 100644 vendor/google.golang.org/grpc/internal/binarylog/binarylog.go create mode 100644 vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go create mode 100644 vendor/google.golang.org/grpc/internal/binarylog/env_config.go create mode 100644 vendor/google.golang.org/grpc/internal/binarylog/method_logger.go create mode 100644 vendor/google.golang.org/grpc/internal/binarylog/regenerate.sh create mode 100644 vendor/google.golang.org/grpc/internal/binarylog/sink.go create mode 100644 vendor/google.golang.org/grpc/internal/binarylog/util.go create mode 100644 vendor/google.golang.org/grpc/internal/buffer/unbounded.go create mode 100644 vendor/google.golang.org/grpc/internal/channelz/funcs.go create mode 100644 vendor/google.golang.org/grpc/internal/channelz/logging.go create mode 100644 vendor/google.golang.org/grpc/internal/channelz/types.go create mode 100644 vendor/google.golang.org/grpc/internal/channelz/types_linux.go create mode 100644 vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go create mode 100644 vendor/google.golang.org/grpc/internal/channelz/util_linux.go create mode 100644 vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go create mode 100644 vendor/google.golang.org/grpc/internal/envconfig/envconfig.go create mode 100644 vendor/google.golang.org/grpc/internal/grpclog/grpclog.go 
create mode 100644 vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go create mode 100644 vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go create mode 100644 vendor/google.golang.org/grpc/internal/grpcsync/event.go create mode 100644 vendor/google.golang.org/grpc/internal/grpcutil/target.go create mode 100644 vendor/google.golang.org/grpc/internal/internal.go create mode 100644 vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go create mode 100644 vendor/google.golang.org/grpc/internal/resolver/dns/go113.go create mode 100644 vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go create mode 100644 vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go create mode 100644 vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/controlbuf.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/defaults.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/flowcontrol.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/handler_server.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/http2_client.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/http2_server.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/http_util.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/log.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/transport.go create mode 100644 vendor/google.golang.org/grpc/keepalive/keepalive.go create mode 100644 vendor/google.golang.org/grpc/metadata/metadata.go create mode 100644 vendor/google.golang.org/grpc/naming/dns_resolver.go create mode 100644 vendor/google.golang.org/grpc/naming/naming.go create mode 100644 vendor/google.golang.org/grpc/peer/peer.go create mode 100644 vendor/google.golang.org/grpc/picker_wrapper.go create mode 100644 vendor/google.golang.org/grpc/pickfirst.go create mode 100644 vendor/google.golang.org/grpc/preloader.go create mode 100644 vendor/google.golang.org/grpc/proxy.go create mode 100644 vendor/google.golang.org/grpc/resolver/resolver.go create mode 100644 vendor/google.golang.org/grpc/resolver_conn_wrapper.go create mode 100644 vendor/google.golang.org/grpc/rpc_util.go create mode 100644 vendor/google.golang.org/grpc/server.go create mode 100644 vendor/google.golang.org/grpc/service_config.go create mode 100644 vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go create mode 100644 vendor/google.golang.org/grpc/stats/handlers.go create mode 100644 vendor/google.golang.org/grpc/stats/stats.go create mode 100644 vendor/google.golang.org/grpc/stream.go create mode 100644 vendor/google.golang.org/grpc/tap/tap.go create mode 100644 vendor/google.golang.org/grpc/trace.go create mode 100644 vendor/google.golang.org/grpc/version.go create mode 100644 vendor/google.golang.org/grpc/vet.sh

diff --git a/go.mod b/go.mod
index f05f8be3..97fce5e4 100644
--- a/go.mod
+++ b/go.mod
@@ -1,6 +1,6 @@
 module github.com/mudler/luet
 
-go 1.12
+go 1.14
 
 require (
 	github.com/DataDog/zstd v1.4.4 // indirect
@@ -8,11 +8,13 @@ require (
 	github.com/asdine/storm v0.0.0-20190418133842-e0f77eada154
 	github.com/briandowns/spinner v1.7.0
 	github.com/cavaliercoder/grab v1.0.1-0.20201108051000-98a5bfe305ec
+	github.com/containerd/containerd v1.4.1-0.20201117152358-0edc412565dc
 	github.com/crillab/gophersat v1.3.2-0.20201023142334-3fc2ac466765
-	github.com/docker/docker v17.12.0-ce-rc1.0.20200417035958-130b0bc6032c+incompatible
-	github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect
+	github.com/docker/docker v20.10.0-beta1.0.20201110211921-af34b94a78a1+incompatible
+	github.com/docker/go-units v0.4.0
 	github.com/ecooper/qlearning v0.0.0-20160612200101-3075011a69fd
 	github.com/fsouza/go-dockerclient v1.6.4
+	github.com/genuinetools/img v0.5.11
 	github.com/ghodss/yaml v1.0.0
 	github.com/google/go-containerregistry v0.2.1
 	github.com/hashicorp/go-multierror v1.0.0
@@ -26,7 +28,8 @@ require (
 	github.com/kyokomi/emoji v2.1.0+incompatible
 	github.com/logrusorgru/aurora v0.0.0-20190417123914-21d75270181e
 	github.com/marcsauter/single v0.0.0-20181104081128-f8bf46f26ec0
-	github.com/moby/sys/mount v0.1.1-0.20200320164225-6154f11e6840 // indirect
+	github.com/moby/buildkit v0.7.2
+	github.com/moby/sys/mount v0.2.0 // indirect
 	github.com/mudler/cobra-extensions v0.0.0-20200612154940-31a47105fe3d
 	github.com/mudler/docker-companion v0.4.6-0.20200418093252-41846f112d87
 	github.com/mudler/go-pluggable v0.0.0-20201113184918-d36448fc8f82
@@ -34,19 +37,27 @@ require (
 	github.com/onsi/ginkgo v1.14.2
 	github.com/onsi/gomega v1.10.3
 	github.com/otiai10/copy v1.2.1-0.20200916181228-26f84a0b1578
-	github.com/pelletier/go-toml v1.6.0 // indirect
 	github.com/philopon/go-toposort v0.0.0-20170620085441-9be86dbd762f
 	github.com/pkg/errors v0.9.1
 	github.com/schollz/progressbar/v3 v3.7.1
+	github.com/sirupsen/logrus v1.6.0
 	github.com/spf13/cobra v1.0.0
-	github.com/spf13/viper v1.6.3
-	go.etcd.io/bbolt v1.3.4
+	github.com/spf13/viper v1.7.0
+	go.etcd.io/bbolt v1.3.5
 	go.uber.org/atomic v1.5.1 // indirect
 	go.uber.org/multierr v1.4.0 // indirect
 	go.uber.org/zap v1.13.0
+	golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 // indirect
 	gopkg.in/yaml.v2 v2.3.0
-	gotest.tools/v3 v3.0.2 // indirect
 	helm.sh/helm/v3 v3.3.4
 )
 
 replace github.com/docker/docker => github.com/Luet-lab/moby v17.12.0-ce-rc1.0.20200605210607-749178b8f80d+incompatible
+
+replace github.com/containerd/containerd => github.com/containerd/containerd v1.3.1-0.20200227195959-4d242818bf55
+
+replace github.com/hashicorp/go-immutable-radix => github.com/tonistiigi/go-immutable-radix v0.0.0-20170803185627-826af9ccf0fe
+
+replace github.com/jaguilar/vt100 => github.com/tonistiigi/vt100 v0.0.0-20190402012908-ad4c4a574305
+
+replace github.com/opencontainers/runc => github.com/opencontainers/runc v1.0.0-rc9.0.20200221051241-688cf6d43cc4
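The replace stanza above is the heart of this change: a Go replace directive rewrites how the main module resolves a dependency, so the pinned containerd, runc, go-immutable-radix, and vt100 commits take precedence over whatever versions buildkit and docker request transitively. A minimal sketch of the mechanism, using the pins from this patch (the module name example.com/demo is hypothetical):

    module example.com/demo

    go 1.14

    // buildkit would otherwise pull in a newer containerd; the replace
    // directive below forces this build's whole module graph onto one
    // exact commit instead.
    require github.com/moby/buildkit v0.7.2

    replace github.com/containerd/containerd => github.com/containerd/containerd v1.3.1-0.20200227195959-4d242818bf55

Once the module graph is fixed this way, running "go mod vendor" rewrites the vendor/ tree, which is what produces the mass of create/rename/delete entries listed above; the go.sum diff that follows records the content hashes for the newly selected module versions.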
diff --git a/go.sum b/go.sum
index 4bcf6243..053167d8 100644
--- a/go.sum
+++ b/go.sum
@@ -15,6 +15,7 @@ cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNF
 cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
 cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
 cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
+cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
 cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
 cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
 cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
@@ -22,6 +23,7 @@ cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiy
 cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
 cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+github.com/AkihiroSuda/containerd-fuse-overlayfs v0.0.0-20200220082720-bb896865146c/go.mod h1:K4kx7xAA5JimeQCnN+dbeLlfaBxzZLaLiDD8lusFI8w=
 github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
 github.com/Azure/azure-sdk-for-go v35.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
 github.com/Azure/azure-sdk-for-go v38.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
@@ -70,13 +72,16 @@ github.com/Masterminds/sprig/v3 v3.1.0 h1:j7GpgZ7PdFqNsmncycTHsLmVPf5/3wJtlgW9TN
 github.com/Masterminds/sprig/v3 v3.1.0/go.mod h1:ONGMf7UfYGAbMXCZmQLy8x3lCDIPrEZE/rU8pmrbihA=
 github.com/Masterminds/squirrel v1.4.0/go.mod h1:yaPeOnPG5ZRwL9oKdTsO/prlkPbXWZlRVMQ/gGlzIuA=
 github.com/Masterminds/vcs v1.13.1/go.mod h1:N09YCmOQr6RLxC6UNHzuVwAdodYbbnycGHSmwVJjcKA=
+github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
 github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
 github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
 github.com/Microsoft/go-winio v0.4.15-0.20200113171025-3fe6c5262873 h1:93nQ7k53GjoMQ07HVP8g6Zj1fQZDDj7Xy2VkNNtvX8o=
 github.com/Microsoft/go-winio v0.4.15-0.20200113171025-3fe6c5262873/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
+github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
 github.com/Microsoft/hcsshim v0.8.7 h1:ptnOoufxGSzauVTsdE+wMYnCWA301PdoN4xg5oRdZpg=
 github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ=
 github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
+github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk=
 github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
 github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
 github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
@@ -98,6 +103,7 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy
 github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
 github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
 github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
+github.com/apache/thrift v0.0.0-20161221203622-b2a4d4ae21c7/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
 github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
 github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
 github.com/apex/log v1.1.1 h1:BwhRZ0qbjYtTob0I+2M+smavV0kOC8XgcnGZcyL9liA=
@@ -131,6 +137,7 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce
 github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
 github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
 github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
+github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
 github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
 github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
 github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
@@ -153,33 +160,53 @@ github.com/chuckpreslar/emission v0.0.0-20170206194824-a7ddd980baf9/go.mod h1:2w
 github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
 github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
 github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg=
 github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
 github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
 github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
+github.com/codahale/hdrhistogram v0.0.0-20160425231609-f8ad88b59a58/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
 github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
 github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0 h1:sDMmm+q/3+BukdIpxwO365v/Rbspp2Nt5XntgQRXq8Q=
 github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM=
 github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f h1:tSNMc+rJDfmYntojat8lljbt1mgKNpTxUZJsSzJ9Y1s=
 github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
+github.com/containerd/cgroups v0.0.0-20200217135630-d732e370d46d h1:UKAt78F1OvM4ceTn1VvXuYuatXohsFU1eSI2IBtTw9g=
+github.com/containerd/cgroups v0.0.0-20200217135630-d732e370d46d/go.mod h1:CStdkl05lBnJej94BPFoJ7vB8cELKXwViS+dgfW0/M8=
 github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
-github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.3.0 h1:xjvXQWABwS2uiv3TWgQt5Uth60Gu86LTGZXMJkjc7rY=
-github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.3.4 h1:3o0smo5SKY7H6AJCmJhsnCjR2/V2T8VmiHt7seN2/kI=
-github.com/containerd/containerd v1.3.4/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
+github.com/containerd/console v0.0.0-20191219165238-8375c3424e4d h1:VuiIRfgJ2M3vYEU0F6E5lg3+V0l9YpbGQr3jpZor5fo=
+github.com/containerd/console v0.0.0-20191219165238-8375c3424e4d/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
+github.com/containerd/containerd v1.3.1-0.20200227195959-4d242818bf55
h1:FGO0nwSBESgoGCakj+w3OQXyrMLsz2omdo9b2UfG/BQ= +github.com/containerd/containerd v1.3.1-0.20200227195959-4d242818bf55/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/continuity v0.0.0-20180921161001-7f53d412b9eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20181001140422-bd77b46c8352/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20200107194136-26c1120b8d41/go.mod h1:Dq467ZllaHgAtVp4p1xUQWBrFXR9s/wyoTpG8zOJGkY= github.com/containerd/continuity v0.0.0-20200228182428-0f16d7a0959c h1:8ahmSVELW1wghbjerVAyuEYD5+Dio66RYvSS0iGfL1M= github.com/containerd/continuity v0.0.0-20200228182428-0f16d7a0959c/go.mod h1:Dq467ZllaHgAtVp4p1xUQWBrFXR9s/wyoTpG8zOJGkY= github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= +github.com/containerd/fifo v0.0.0-20191213151349-ff969a566b00 h1:lsjC5ENBl+Zgf38+B0ymougXFp0BaubeIVETltYZTQw= +github.com/containerd/fifo v0.0.0-20191213151349-ff969a566b00/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= +github.com/containerd/go-cni v0.0.0-20200107172653-c154a49e2c75 h1:5Q5C6jDObSVpjeX8CuZ5yac8d/KIYuPzUHbUzdL+NFw= +github.com/containerd/go-cni v0.0.0-20200107172653-c154a49e2c75/go.mod h1:0mg8r6FCdbxvLDqCXwAx2rO+KA37QICjKL8+wHOG5OE= github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= +github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328 h1:PRTagVMbJcCezLcHXe8UJvR1oBzp2lG3CEumeFOLOds= +github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g= github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de h1:dlfGmNcE3jDAecLqwKPMNX6nk2qh1c1Vg1/YTzpOOF4= github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= +github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8= +github.com/containerd/ttrpc v0.0.0-20200121165050-0be804eadb15 h1:+jgiLE5QylzgADj0Yldb4id1NQNRrDOROj7KDvY9PEc= +github.com/containerd/ttrpc v0.0.0-20200121165050-0be804eadb15/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd h1:JNn81o/xG+8NEo3bC/vx9pbi/g2WI8mtP2/nXzu297Y= github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= +github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk= +github.com/containerd/typeurl v0.0.0-20200205145503-b45ef1f1f737 h1:HovfQDS/K3Mr7eyS0QJLxE1CbVUhjZCl6g3OhFJgP1o= +github.com/containerd/typeurl v0.0.0-20200205145503-b45ef1f1f737/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg= +github.com/containernetworking/cni v0.7.1 h1:fE3r16wpSEyaqY4Z4oFrLMmIGfBYIKpPrHK31EJ9FzE= +github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/clair v0.0.0-20180919182544-44ae4bc9590a/go.mod h1:uXhHPWAoRqw0jJc2f8RrPCwRhIo9otQ8OEWUFtpCiwA= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/etcd v3.3.13+incompatible/go.mod 
h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= @@ -187,7 +214,10 @@ github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHo github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.0.0 h1:XJIw/+VlJ+87J+doOxznsAWIdmWuViOVhkQamW5YV28= +github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= @@ -215,22 +245,36 @@ github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11 github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= +github.com/docker/cli v0.0.0-20180920165730-54c19e67f69c/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/cli v0.0.0-20200130152716-5d0cf8839492 h1:FwssHbCDJD025h+BchanCwE1Q8fyMgqDr2mOQAWOLGw= github.com/docker/cli v0.0.0-20200130152716-5d0cf8839492/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v0.0.0-20200227165822-2298e6a3fe24 h1:bjsfAvm8BVtvQFxV7TYznmKa35J8+fmgrRJWvcS3yJo= +github.com/docker/cli v0.0.0-20200227165822-2298e6a3fe24/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/distribution v0.0.0-20180920194744-16128bbac47f/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v0.0.0-20191216044856-a8371794149d/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= +github.com/docker/distribution v0.0.0-20200223014041-6b972e50feee/go.mod h1:xgJxuOjyp98AvnpRTR1+lGOqQ493ylRnRPmewD5GWtc= +github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker-ce v0.0.0-20180924210327-f53bd8bb8e43/go.mod h1:l1FUGRYBvbjnZ8MS6A2xOji4aZFlY/Qmgz7p4oXH7ac= +github.com/docker/docker-credential-helpers v0.6.0/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= +github.com/docker/docker-credential-helpers v0.6.1/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= github.com/docker/docker-credential-helpers v0.6.3 h1:zI2p9+1NQYdnG6sMU26EX4aVGlqbInSQxQXLvzJ4RPQ= 
github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= +github.com/docker/go-connections v0.0.0-20180821093606-97c2040d34df/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-connections v0.3.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8= github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI= +github.com/docker/go-units v0.3.1/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/libnetwork v0.8.0-dev.2.0.20200226230617-d8334ccdb9be h1:GJzljYRqZapOwyfeRyExF2/5qfv8f1feNDMiz0hmRPY= +github.com/docker/libnetwork v0.8.0-dev.2.0.20200226230617-d8334ccdb9be/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8= github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 h1:UhxFibDNY/bfvqU5CAUmr9zpesgbU6SWc8/B4mflAE4= github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= @@ -256,6 +300,7 @@ github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZM github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fernet/fernet-go v0.0.0-20180830025343-9eac43b88a5e/go.mod h1:2H9hjfbpSMHwY503FclkV/lZTBh2YlOmLLSda12uL8c= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= @@ -266,6 +311,10 @@ github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4 github.com/fsouza/go-dockerclient v1.6.4 h1:B+L+1lz1LUrNgEUUh8PSG76s70EYC49ssv2xvTefTMM= github.com/fsouza/go-dockerclient v1.6.4/go.mod h1:GOdftxWLWIbIWKbIMDroKFJzPdg6Iw7r+jX1DDZdVsA= github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= +github.com/genuinetools/img v0.5.11 h1:qIP3nVgRYbXmtSNMjmcdx+TBzWjZS03AdqTNBf6QtGY= +github.com/genuinetools/img v0.5.11/go.mod h1:m58lsi0AC97XSTXBTKpqZE1zCZpMCg9nLjikvgLCo0A= +github.com/genuinetools/pkg v0.0.0-20180910213200-1c141f661797/go.mod h1:XTcrCYlXPxnxL2UpnwuRn7tcaTn9HAhxFoFJucootk8= +github.com/genuinetools/reg v0.16.0/go.mod h1:12Fe9EIvK3dG/qWhNk5e9O96I8SGmCKLsJ8GsXUbk+Y= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0 
h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -339,15 +388,23 @@ github.com/gobuffalo/packd v0.3.0/go.mod h1:zC7QkmNkYVGKPw4tHpBQ+ml7W/3tIebgeo1b github.com/gobuffalo/packr/v2 v2.7.1/go.mod h1:qYEvAazPaVxy7Y7KR0W8qYEE+RymX74kETFqjFoFlOc= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e h1:BWhy2j3IXJhjCbC68FptL43tDKIq8FladmaTs3Xs7Z8= github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= +github.com/godbus/dbus/v5 v5.0.3 h1:ZqHaoEF7TBzh4jzPmqVhE/5A1z9of6orkAe5uHoAeME= +github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godror/godror v0.13.3/go.mod h1:2ouUT4kdhUBk7TAkHWD4SN0CdI0pgEQbo8FVHhbSKWg= +github.com/gofrs/flock v0.7.0/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= +github.com/gofrs/flock v0.7.1 h1:DP+LD/t0njgoPBvT5MJLeliUIVQR03hiKR6vezdwHlc= github.com/gofrs/flock v0.7.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= +github.com/gogo/googleapis v1.3.2 h1:kX1es4djPJrsDhY7aZKJy7aZasdcB5oSOEphMjSB53c= +github.com/gogo/googleapis v1.3.2/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= @@ -360,6 +417,7 @@ github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4er github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= @@ -408,6 +466,8 @@ github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= 
+github.com/google/shlex v0.0.0-20150127133951-6f45313302b9 h1:JM174NTeGNJ2m/oLH3UOWOvWQQKd+BoL3hcSCUWFLt0= +github.com/google/shlex v0.0.0-20150127133951-6f45313302b9/go.mod h1:RpwtwJQFrIEPstU94h88MWPXP2ektJZ8cZ0YntAmXiE= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -430,20 +490,26 @@ github.com/gorilla/mux v1.7.4 h1:VuZ8uybHlWmqV03+zRzdwKL4tUnIp1MAQtp1mIFE1bc= github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo= +github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 h1:MJG/KsmcqMwFAkh8mTnAwhyKoB+sTAnY4CACC110tbU= +github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= +github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= +github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= @@ -451,7 +517,6 @@ github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHh github.com/hashicorp/go-rootcerts v1.0.0/go.mod 
h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= -github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.2.0 h1:3vNe/fWF5CBgRIguda1meWhsZHy3m8gCJ5wx+dIzX/E= github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= @@ -459,6 +524,7 @@ github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.3 h1:YPkqC67at8FYaadspW/6uE0COsBxS2656RLEr8Bppgk= github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= @@ -466,6 +532,8 @@ github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/hashicorp/uuid v0.0.0-20160311170451-ebb0a03e909c h1:nQcv325vxv2fFHJsOt53eSRf1eINt6vOdYUFfXs4rgk= +github.com/hashicorp/uuid v0.0.0-20160311170451-ebb0a03e909c/go.mod h1:fHzc09UnyJyqyW+bFuq864eh+wC7dj65aXmXLRe5to0= github.com/heroku/docker-registry-client v0.0.0-20181004091502-47ecf50fd8d4 h1:44WMsEqwiYnpHA3E4Rg1K379MH5iZllp2sO5nzXARI0= github.com/heroku/docker-registry-client v0.0.0-20181004091502-47ecf50fd8d4/go.mod h1:ceV82AfTGFCOL/b0cdpP54uKVSL1Gef0TBSTGFDuqyY= github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= @@ -475,11 +543,14 @@ github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.8 h1:CGgOkSJeqMRmt0D9XLWExdT4m4F1vd3FV3VPt+0VxkQ= github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= +github.com/ishidawataru/sctp v0.0.0-20191218070446-00ab2ac2db07 h1:rw3IAne6CDuVFlZbPOkA7bhxlqawFh7RJJ+CejfMaxE= +github.com/ishidawataru/sctp v0.0.0-20191218070446-00ab2ac2db07/go.mod h1:co9pwDoBCm1kGxawmb4sPq0cSIOOWNPT4KnHotMP1Zg= github.com/jedib0t/go-pretty v4.3.0+incompatible 
h1:CGs8AVhEKg/n9YbUenWmNStRW2PHJzaeDodcfvRAbIo= github.com/jedib0t/go-pretty v4.3.0+incompatible/go.mod h1:XemHduiw8R651AF9Pt4FwCTKeG3oo7hrHJAoznj9nag= github.com/jedib0t/go-pretty/v6 v6.0.5 h1:oOo0/jSb3NEYKT6l1hhFXoX2UZnkanMuCE2DVT1mqnE= @@ -594,6 +665,9 @@ github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrk github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/hashstructure v0.0.0-20170609045927-2bca23e0e452/go.mod h1:QjSHrPWS+BGUVBYkbTZWEnOh3G1DutKwClXU/ABz6AQ= +github.com/mitchellh/hashstructure v1.0.0 h1:ZkRJX1CyOoTkar7p/mLS5TZU4nJ1Rn/F8u9dGS02Q3Y= +github.com/mitchellh/hashstructure v1.0.0/go.mod h1:QjSHrPWS+BGUVBYkbTZWEnOh3G1DutKwClXU/ABz6AQ= github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= @@ -601,10 +675,12 @@ github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/moby/sys/mount v0.1.1-0.20200320164225-6154f11e6840 h1:8oWZ4vNoz5UeQc0l0lJMTfIjsEP5fyUI+TuK0gbrm0o= -github.com/moby/sys/mount v0.1.1-0.20200320164225-6154f11e6840/go.mod h1:FVQFLDRWwyBjDTBNQXDlWnSFREqOo3OKX9aqhmeoo74= -github.com/moby/sys/mountinfo v0.1.0 h1:r8vMRbMAFEAfiNptYVokP+nfxPJzvRuia5e2vzXtENo= -github.com/moby/sys/mountinfo v0.1.0/go.mod h1:w2t2Avltqx8vE7gX5l+QiBKxODu2TX0+Syr3h52Tw4o= +github.com/moby/buildkit v0.7.2 h1:wp4R0QMXSqwjTJKhhWlJNOCSQ/OVPnsCf3N8rs09+vQ= +github.com/moby/buildkit v0.7.2/go.mod h1:D3DN/Nl4DyMH1LkwpRUJuoghqdigdXd1A6HXt5aZS40= +github.com/moby/sys/mount v0.2.0 h1:WhCW5B355jtxndN5ovugJlMFJawbUODuW8fSnEH6SSM= +github.com/moby/sys/mount v0.2.0/go.mod h1:aAivFE2LB3W4bACsUXChRHQ0qKWsetY4Y9V7sxOougM= +github.com/moby/sys/mountinfo v0.4.0 h1:1KInV3Huv18akCu58V7lzNlt+jFmqlu1EaErnEHE/VM= +github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -614,6 +690,7 @@ github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9 github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw= github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= +github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= 
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/mudler/cobra-extensions v0.0.0-20200612154940-31a47105fe3d h1:fKh+rvwZQCA+TPzK0EMwwbqhjvRHaQ6H8AsVU1Wt+NQ= @@ -652,6 +729,7 @@ github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo= github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= github.com/onsi/ginkgo v1.12.1 h1:mFwc4LvZ0xpSvDZ3E+k8Yte0hLOMxXUlP+yXtJqkYfQ= @@ -659,6 +737,7 @@ github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108 github.com/onsi/ginkgo v1.14.2 h1:8mVmC9kjFFmA8H4pKMUhcblgifdkOIXPvbhN1T36q1M= github.com/onsi/ginkgo v1.14.2/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME= @@ -682,16 +761,22 @@ github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3I github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/opencontainers/runc v0.1.1 h1:GlxAyO6x8rfZYN9Tt0Kti5a/cP41iuiO2yYT0IJGY8Y= -github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc9.0.20200221051241-688cf6d43cc4 h1:JhRvjyrjq24YPSDS0MQo9KJHQh95naK5fYl9IT+dzPM= +github.com/opencontainers/runc v1.0.0-rc9.0.20200221051241-688cf6d43cc4/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.1 h1:wY4pOY8fBdSIvs9+IDHC55thBuEulhzfSgKeC1yFvzQ= github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= +github.com/opencontainers/selinux v1.3.2 h1:DR4lL9SYVjgcTZKEZIncvDU06fKSc/eygjmNGOA3E1s= +github.com/opencontainers/selinux v1.3.2/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= +github.com/opentracing-contrib/go-stdlib v0.0.0-20171029140428-b1a47cfbdd75/go.mod h1:PLldrQSroqzH70Xl+1DQcGnefIbqsKR7UDaiux3zV+w= +github.com/opentracing-contrib/go-stdlib v0.0.0-20180702182724-07a764486eb1 
h1:gmB1XmLjI0RXG8rJCP0PK6g8rwhX8COSGFTiOgJ4Wx4= +github.com/opentracing-contrib/go-stdlib v0.0.0-20180702182724-07a764486eb1/go.mod h1:PLldrQSroqzH70Xl+1DQcGnefIbqsKR7UDaiux3zV+w= github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= +github.com/opentracing/opentracing-go v0.0.0-20171003133519-1361b9cd60be/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= @@ -710,11 +795,11 @@ github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pelletier/go-toml v1.6.0 h1:aetoXYr0Tv7xRU/V4B4IZJ2QcbtMUFoNb3ORp7TzIK4= -github.com/pelletier/go-toml v1.6.0/go.mod h1:5N711Q9dKgbdkxHL+MEfF31hpT7l0S0s/t2kKREewys= github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/peterhellberg/link v1.0.0/go.mod h1:gtSlOT4jmkY8P47hbTc8PTgiDDWpdPbFYl75keYyBB8= github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= github.com/philopon/go-toposort v0.0.0-20170620085441-9be86dbd762f h1:WyCn68lTiytVSkk7W1K9nBiSGTSRlUOdyTnSjwrIlok= github.com/philopon/go-toposort v0.0.0-20170620085441-9be86dbd762f/go.mod h1:/iRjX3DdSK956SzsUdV55J+wIsQ+2IBWmBrB4RvZfk4= @@ -733,6 +818,7 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.0.0-20180924113449-f69c853d21c1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= @@ -746,15 +832,18 @@ github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1: github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common 
v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20180920065004-418d78d0b9a7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.5 h1:3+auTFlqw+ZaQYJARz6ArODtkaIwtvBTx3N2NehQlL8= github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= @@ -786,9 +875,13 @@ github.com/schollz/progressbar/v3 v3.7.1/go.mod h1:CG/f0JmacksUc6TkZToO7tVq4t03z github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002/go.mod h1:/yeG0My1xr/u+HZrFQ1tOQQQQrOawfyMUH13ai5brBc= +github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.0.3/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= @@ -833,13 +926,18 @@ github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DM github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/spf13/viper v1.6.3 h1:pDDu1OyEDTKzpJwdq4TiuLyMsUgRa/BT5cn5O62NoHs= github.com/spf13/viper v1.6.3/go.mod h1:jUMtyi0/lB5yZH/FjyGAoH7IMNrIhlBf6pXZmbMDvzw= +github.com/spf13/viper v1.7.0 h1:xVKxvI7ouOI5I+U9s2eeiUfMaWBVoXA3AWskkrqK0VM= +github.com/spf13/viper v1.7.0/go.mod 
h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= +github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= @@ -850,6 +948,8 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 h1:b6uOv7YOFK0TYG7HtkIgExQo+2RdLuwRft63jn2HWj8= +github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tj/assert v0.0.0-20171129193455-018094318fb0/go.mod h1:mZ9/Rh9oLWpLLDRpvE+3b7gP/C2YyLFYxNmcLnPTMe0= @@ -858,16 +958,30 @@ github.com/tj/go-kinesis v0.0.0-20171128231115-08b17f58cb1b/go.mod h1:/yhzCV0xPf github.com/tj/go-spin v1.1.0/go.mod h1:Mg1mzmePZm4dva8Qz60H2lHwmJ2loum4VIrLgVnKwh4= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tonistiigi/fsutil v0.0.0-20200326231323-c2c7d7b0e144 h1:6RY1EKxCnPQShPM46xFDHta2JSOd+YKCgHyyBHtKuo8= +github.com/tonistiigi/fsutil v0.0.0-20200326231323-c2c7d7b0e144/go.mod h1:0G1sLZ/0ttFf09xvh7GR4AEECnjifHRNJN/sYbLianU= +github.com/tonistiigi/go-immutable-radix v0.0.0-20170803185627-826af9ccf0fe h1:pd7hrFSqUPxYS9IB+UMG1AB/8EXGXo17ssx0bSQ5L6Y= +github.com/tonistiigi/go-immutable-radix v0.0.0-20170803185627-826af9ccf0fe/go.mod h1:/+MCh11CJf2oz0BXmlmqyopK/ad1rKkcOXPoYuPCJYU= +github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea/go.mod h1:WPnis/6cRcDZSUvVmezrxJPkiO87ThFYsoUiMwWNDJk= +github.com/tonistiigi/vt100 v0.0.0-20190402012908-ad4c4a574305/go.mod h1:gXOLibKqQTRAVuVZ9gX7G9Ykky8ll8yb4slxsEMoY0c= +github.com/uber/jaeger-client-go v0.0.0-20180103221425-e02c85f9069e/go.mod 
h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= +github.com/uber/jaeger-lib v1.2.1/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1 h1:+mkCCcOFKPnCmVYVcURKps1Xe+3zP90gSYGNfRkjoIY= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli v1.22.2 h1:gsqYFH8bb9ekPA12kRo0hfjngWQjkJPlN9R0N78BoUo= +github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/vbatts/go-mtree v0.4.4 h1:+CncqETnSpxBCCUhRnNQBvxhsjWXNuc+ExZsLSNaj5o= github.com/vbatts/go-mtree v0.4.4/go.mod h1:3sazBqLG4bZYmgRTgdh9X3iKTzwBpp5CrREJDzrNSXY= github.com/vdemeester/k8s-pkg-credentialprovider v1.18.1-0.20201019120933-f1d16962a4db/go.mod h1:grWy0bkr1XO6hqbaaCKaPXqkBVlMGHYG6PGykktwbJc= github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= +github.com/vishvananda/netlink v1.0.0 h1:bqNY2lgheFIu1meHUFSH3d7vG93AFyqg3oGbJCOJgSM= +github.com/vishvananda/netlink v1.0.0/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= +github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc h1:R83G5ikgLMxrBvLh22JhdfI8K6YXEPHx5P03Uu3DRs4= +github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= github.com/vmihailenco/msgpack v4.0.1+incompatible h1:RMF1enSPeKTlXrXdOcqjFUElywVZjjC6pqse21bKbEU= github.com/vmihailenco/msgpack v4.0.1+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= github.com/vmware/govmomi v0.20.3/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= @@ -890,8 +1004,8 @@ github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wK go.etcd.io/bbolt v1.3.0/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.4 h1:hi1bXHMVrlQh6WwxAy+qZCV/SYIlqo+Ushwdpa4tAKg= -go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0= +go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= @@ -922,6 +1036,7 @@ go.uber.org/zap v1.13.0 h1:nR6NoDBgAf67s68NhaXbsojM+2gxp3S1hWkHDl27pVU= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20180910181607-0e37d006457b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto 
v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -941,6 +1056,7 @@ golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200128174031-69ecbb4d6d5d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975 h1:/Tl7pH94bvbAAHBdZJT947M/+gp0+CqQXDtMRC0fseo= golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -961,6 +1077,7 @@ golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EH golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -986,6 +1103,7 @@ golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180925072008-f04abc6bdfa7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1044,6 +1162,7 @@ golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180925112736-b09afc3d579e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys 
v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1060,6 +1179,7 @@ golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190515120540-06a5c4944438/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190522044717-8097e1b27ff5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1074,12 +1194,15 @@ golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1089,6 +1212,8 @@ golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f h1:+Nyd8tzPX9R7BWHguqsrbFdRx3WQ/1ib8I44HXV5yTA= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201113135734-0a15ea8d9b02 
h1:5Ftd3YbC/kANXWCBjvppvUmv1BMakgFcBKA7MpYYp4M= @@ -1139,6 +1264,7 @@ golang.org/x/tools v0.0.0-20191004055002-72853e10c5a3/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1195,10 +1321,12 @@ google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCID google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180924164928-221a8d4f7494/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190522204451-c2c4e71fbf69/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE= @@ -1209,15 +1337,18 @@ google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvx google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200227132054-3f1135a288c9/go.mod 
h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
 google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
 google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
 google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece h1:1YM0uhfumvoDu9sx8+RyWwTI63zoCQvI23IYFRlvte0=
 google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
 google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
+google.golang.org/grpc v1.15.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
 google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
 google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
@@ -1228,6 +1359,7 @@ google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ij
 google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
 google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
 google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA=
 google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
 google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
 google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
@@ -1286,6 +1418,7 @@ gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
 gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
 gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gotest.tools v2.1.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
 gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
 gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
 gotest.tools/v3 v3.0.2 h1:kG1BFyqVHuQoVQiR1bWGnfz/fmHvvuiSPIV7rvl360E=
diff --git a/vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.go b/vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.go
index c7884e8e..713376d5 100644
--- a/vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.go
+++ b/vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.go
@@ -32,6 +32,7 @@ type Metrics struct {
 	Blkio *BlkIOStat `protobuf:"bytes,5,opt,name=blkio,proto3" json:"blkio,omitempty"`
 	Rdma *RdmaStat `protobuf:"bytes,6,opt,name=rdma,proto3" json:"rdma,omitempty"`
 	Network []*NetworkStat `protobuf:"bytes,7,rep,name=network,proto3" json:"network,omitempty"`
+	CgroupStats *CgroupStats `protobuf:"bytes,8,opt,name=cgroup_stats,json=cgroupStats,proto3" json:"cgroup_stats,omitempty"`
 	XXX_NoUnkeyedLiteral struct{} `json:"-"`
 	XXX_unrecognized []byte `json:"-"`
 	XXX_sizecache int32 `json:"-"`
@@ -608,6 +609,55 @@ func (m *NetworkStat) XXX_DiscardUnknown() {
 
 var xxx_messageInfo_NetworkStat proto.InternalMessageInfo
 
+// CgroupStats exports per-cgroup statistics.
+type CgroupStats struct { + // number of tasks sleeping + NrSleeping uint64 `protobuf:"varint,1,opt,name=nr_sleeping,json=nrSleeping,proto3" json:"nr_sleeping,omitempty"` + // number of tasks running + NrRunning uint64 `protobuf:"varint,2,opt,name=nr_running,json=nrRunning,proto3" json:"nr_running,omitempty"` + // number of tasks in stopped state + NrStopped uint64 `protobuf:"varint,3,opt,name=nr_stopped,json=nrStopped,proto3" json:"nr_stopped,omitempty"` + // number of tasks in uninterruptible state + NrUninterruptible uint64 `protobuf:"varint,4,opt,name=nr_uninterruptible,json=nrUninterruptible,proto3" json:"nr_uninterruptible,omitempty"` + // number of tasks waiting on IO + NrIoWait uint64 `protobuf:"varint,5,opt,name=nr_io_wait,json=nrIoWait,proto3" json:"nr_io_wait,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CgroupStats) Reset() { *m = CgroupStats{} } +func (*CgroupStats) ProtoMessage() {} +func (*CgroupStats) Descriptor() ([]byte, []int) { + return fileDescriptor_a17b2d87c332bfaa, []int{13} +} +func (m *CgroupStats) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CgroupStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CgroupStats.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CgroupStats) XXX_Merge(src proto.Message) { + xxx_messageInfo_CgroupStats.Merge(m, src) +} +func (m *CgroupStats) XXX_Size() int { + return m.Size() +} +func (m *CgroupStats) XXX_DiscardUnknown() { + xxx_messageInfo_CgroupStats.DiscardUnknown(m) +} + +var xxx_messageInfo_CgroupStats proto.InternalMessageInfo + func init() { proto.RegisterType((*Metrics)(nil), "io.containerd.cgroups.v1.Metrics") proto.RegisterType((*HugetlbStat)(nil), "io.containerd.cgroups.v1.HugetlbStat") @@ -622,6 +672,7 @@ func init() { proto.RegisterType((*RdmaStat)(nil), "io.containerd.cgroups.v1.RdmaStat") proto.RegisterType((*RdmaEntry)(nil), "io.containerd.cgroups.v1.RdmaEntry") proto.RegisterType((*NetworkStat)(nil), "io.containerd.cgroups.v1.NetworkStat") + proto.RegisterType((*CgroupStats)(nil), "io.containerd.cgroups.v1.CgroupStats") } func init() { @@ -629,105 +680,112 @@ func init() { } var fileDescriptor_a17b2d87c332bfaa = []byte{ - // 1558 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x57, 0xcf, 0x73, 0x13, 0x39, - 0x16, 0xc6, 0xb1, 0x13, 0xbb, 0x9f, 0x93, 0x90, 0x28, 0x10, 0x3a, 0x01, 0xe2, 0xe0, 0x24, 0xbb, - 0xd9, 0xa5, 0xca, 0x29, 0xd8, 0x2d, 0x6a, 0x61, 0xa1, 0xb6, 0x70, 0x80, 0x82, 0xda, 0xcd, 0x62, - 0xda, 0x49, 0xb1, 0x7b, 0xea, 0x92, 0xdb, 0xa2, 0xad, 0xc4, 0x6e, 0x35, 0x6a, 0xb5, 0xe3, 0xcc, - 0x69, 0x0e, 0x53, 0x35, 0xa7, 0xf9, 0x67, 0xe6, 0xaf, 0xe0, 0x38, 0x97, 0xa9, 0x9a, 0xb9, 0xa4, - 0x06, 0xff, 0x25, 0x53, 0x92, 0xfa, 0x87, 0x0c, 0x84, 0x8c, 0x6f, 0x2d, 0xe9, 0xfb, 0xbe, 0xf7, - 0xf4, 0xfa, 0x53, 0xeb, 0x35, 0xfc, 0xdd, 0xa7, 0xa2, 0x17, 0x77, 0x1a, 0x1e, 0x1b, 0xec, 0x79, - 0x2c, 0x10, 0x98, 0x06, 0x84, 0x77, 0xf7, 0x3c, 0x9f, 0xb3, 0x38, 0x8c, 0xf6, 0x22, 0x81, 0x45, - 0xb4, 0x37, 0xbc, 0xb7, 0x37, 0x20, 0x82, 0x53, 0x2f, 0x6a, 0x84, 0x9c, 0x09, 0x86, 0x6c, 0xca, - 0x1a, 0x39, 0xba, 0x91, 0xa0, 0x1b, 0xc3, 0x7b, 0xeb, 0xd7, 0x7c, 0xe6, 0x33, 0x05, 0xda, 0x93, - 0x4f, 0x1a, 0x5f, 0xff, 0xb1, 0x08, 0xe5, 0x03, 0xad, 0x80, 0xfe, 0x05, 0xe5, 0x5e, 0xec, 0x13, - 
0xd1, 0xef, 0xd8, 0x85, 0xcd, 0xe2, 0x6e, 0xf5, 0xfe, 0x4e, 0xe3, 0x22, 0xb5, 0xc6, 0x4b, 0x0d, - 0x6c, 0x0b, 0x2c, 0x9c, 0x94, 0x85, 0x1e, 0x40, 0x29, 0xa4, 0xdd, 0xc8, 0x9e, 0xd9, 0x2c, 0xec, - 0x56, 0xef, 0xd7, 0x2f, 0x66, 0xb7, 0x68, 0x37, 0x52, 0x54, 0x85, 0x47, 0x8f, 0xa1, 0xe8, 0x85, - 0xb1, 0x5d, 0x54, 0xb4, 0x3b, 0x17, 0xd3, 0xf6, 0x5b, 0x47, 0x92, 0xd5, 0x2c, 0x8f, 0xcf, 0x6b, - 0xc5, 0xfd, 0xd6, 0x91, 0x23, 0x69, 0xe8, 0x31, 0xcc, 0x0d, 0xc8, 0x80, 0xf1, 0x33, 0xbb, 0xa4, - 0x04, 0xb6, 0x2f, 0x16, 0x38, 0x50, 0x38, 0x15, 0x39, 0xe1, 0xa0, 0x87, 0x30, 0xdb, 0xe9, 0x9f, - 0x50, 0x66, 0xcf, 0x2a, 0xf2, 0xd6, 0xc5, 0xe4, 0x66, 0xff, 0xe4, 0xd5, 0x6b, 0xc5, 0xd5, 0x0c, - 0xb9, 0x5d, 0xde, 0x1d, 0x60, 0x7b, 0xee, 0xb2, 0xed, 0x3a, 0xdd, 0x01, 0xd6, 0xdb, 0x95, 0x78, - 0x59, 0xe7, 0x80, 0x88, 0x53, 0xc6, 0x4f, 0xec, 0xf2, 0x65, 0x75, 0xfe, 0xaf, 0x06, 0xea, 0x3a, - 0x27, 0xac, 0xfa, 0x09, 0x54, 0x8d, 0xfa, 0xa3, 0x6b, 0x30, 0x1b, 0x47, 0xd8, 0x27, 0x76, 0x61, - 0xb3, 0xb0, 0x5b, 0x72, 0xf4, 0x00, 0x2d, 0x41, 0x71, 0x80, 0x47, 0xea, 0x5d, 0x94, 0x1c, 0xf9, - 0x88, 0x6c, 0x28, 0xbf, 0xc3, 0xb4, 0xef, 0x05, 0x42, 0x95, 0xba, 0xe4, 0xa4, 0x43, 0xb4, 0x0e, - 0x95, 0x10, 0xfb, 0x24, 0xa2, 0xdf, 0x10, 0x55, 0x44, 0xcb, 0xc9, 0xc6, 0xf5, 0x47, 0x50, 0x49, - 0x5f, 0x97, 0x54, 0xf0, 0x62, 0xce, 0x49, 0x20, 0x92, 0x58, 0xe9, 0x50, 0xe6, 0xd0, 0xa7, 0x03, - 0x2a, 0x92, 0x78, 0x7a, 0x50, 0xff, 0xbe, 0x00, 0xe5, 0xe4, 0xa5, 0xa1, 0x7f, 0x98, 0x59, 0x7e, - 0xb5, 0x5c, 0xfb, 0xad, 0xa3, 0x23, 0x89, 0x4c, 0x77, 0xd2, 0x04, 0x10, 0x3d, 0xce, 0x84, 0xe8, - 0xd3, 0xc0, 0xbf, 0xdc, 0x5c, 0x87, 0x1a, 0x4b, 0x1c, 0x83, 0x55, 0x7f, 0x0f, 0x95, 0x54, 0x56, - 0xe6, 0x2a, 0x98, 0xc0, 0xfd, 0xb4, 0x5e, 0x6a, 0x80, 0x56, 0x61, 0xee, 0x84, 0xf0, 0x80, 0xf4, - 0x93, 0x2d, 0x24, 0x23, 0x84, 0xa0, 0x14, 0x47, 0x84, 0x27, 0x25, 0x53, 0xcf, 0x68, 0x0b, 0xca, - 0x21, 0xe1, 0xae, 0x34, 0x6d, 0x69, 0xb3, 0xb8, 0x5b, 0x6a, 0xc2, 0xf8, 0xbc, 0x36, 0xd7, 0x22, - 0x5c, 0x9a, 0x72, 0x2e, 0x24, 0x7c, 0x3f, 0x8c, 0xeb, 0x23, 0xa8, 0xa4, 0xa9, 0xc8, 0xc2, 0x85, - 0x84, 0x53, 0xd6, 0x8d, 0xd2, 0xc2, 0x25, 0x43, 0x74, 0x17, 0x96, 0x93, 0x34, 0x49, 0xd7, 0x4d, - 0x31, 0x3a, 0x83, 0xa5, 0x6c, 0xa1, 0x95, 0x80, 0x77, 0x60, 0x31, 0x07, 0x0b, 0x3a, 0x20, 0x49, - 0x56, 0x0b, 0xd9, 0xec, 0x21, 0x1d, 0x90, 0xfa, 0xaf, 0x55, 0x80, 0xdc, 0xea, 0x72, 0xbf, 0x1e, - 0xf6, 0x7a, 0x99, 0x3f, 0xd4, 0x00, 0xad, 0x41, 0x91, 0x47, 0x49, 0x28, 0x7d, 0xa2, 0x9c, 0x76, - 0xdb, 0x91, 0x73, 0xe8, 0x4f, 0x50, 0xe1, 0x51, 0xe4, 0xca, 0x63, 0xad, 0x03, 0x34, 0xab, 0xe3, - 0xf3, 0x5a, 0xd9, 0x69, 0xb7, 0xa5, 0xed, 0x9c, 0x32, 0x8f, 0x22, 0xf9, 0x80, 0x6a, 0x50, 0x1d, - 0xe0, 0x30, 0x24, 0x5d, 0xf7, 0x1d, 0xed, 0x6b, 0xe7, 0x94, 0x1c, 0xd0, 0x53, 0x2f, 0x68, 0x5f, - 0x55, 0xba, 0x4b, 0xb9, 0x38, 0x53, 0x87, 0xab, 0xe4, 0xe8, 0x01, 0xba, 0x05, 0xd6, 0x29, 0xa7, - 0x82, 0x74, 0xb0, 0x77, 0xa2, 0x0e, 0x4f, 0xc9, 0xc9, 0x27, 0x90, 0x0d, 0x95, 0xd0, 0x77, 0x43, - 0xdf, 0xa5, 0x81, 0x5d, 0xd6, 0x6f, 0x22, 0xf4, 0x5b, 0xfe, 0xab, 0x00, 0xad, 0x83, 0xa5, 0x57, - 0x58, 0x2c, 0xec, 0x4a, 0x52, 0x46, 0xbf, 0xe5, 0xbf, 0x8e, 0x05, 0x5a, 0x53, 0xac, 0x77, 0x38, - 0xee, 0x0b, 0xdb, 0x4a, 0x97, 0x5e, 0xc8, 0x21, 0xda, 0x84, 0xf9, 0xd0, 0x77, 0x07, 0xf8, 0x38, - 0x59, 0x06, 0x9d, 0x66, 0xe8, 0x1f, 0xe0, 0x63, 0x8d, 0xd8, 0x82, 0x05, 0x1a, 0x60, 0x4f, 0xd0, - 0x21, 0x71, 0x71, 0xc0, 0x02, 0xbb, 0xaa, 0x20, 0xf3, 0xe9, 0xe4, 0xd3, 0x80, 0x05, 0x72, 0xb3, - 0x26, 0x64, 0x5e, 0xab, 0x18, 0x00, 0x53, 0x45, 0xd5, 0x63, 0x61, 0x52, 0x45, 0x55, 0x24, 0x57, - 0x51, 0x90, 0x45, 0x53, 
0x45, 0x01, 0x36, 0xa1, 0x1a, 0x07, 0x64, 0x48, 0x3d, 0x81, 0x3b, 0x7d, - 0x62, 0x5f, 0x55, 0x00, 0x73, 0x0a, 0x3d, 0x82, 0xb5, 0x1e, 0x25, 0x1c, 0x73, 0xaf, 0x47, 0x3d, - 0xdc, 0x77, 0xf5, 0x87, 0xcc, 0xd5, 0xc7, 0x6f, 0x49, 0xe1, 0x6f, 0x98, 0x00, 0xed, 0x84, 0xff, - 0xc8, 0x65, 0xf4, 0x00, 0x26, 0x96, 0xdc, 0xe8, 0x14, 0x87, 0x09, 0x73, 0x59, 0x31, 0xaf, 0x9b, - 0xcb, 0xed, 0x53, 0x1c, 0x6a, 0x5e, 0x0d, 0xaa, 0xea, 0x94, 0xb8, 0xda, 0x48, 0x48, 0xa7, 0xad, - 0xa6, 0xf6, 0x95, 0x9b, 0xfe, 0x02, 0x96, 0x06, 0x48, 0x4f, 0xad, 0x28, 0xcf, 0xcc, 0x8f, 0xcf, - 0x6b, 0x95, 0x43, 0x39, 0x29, 0x8d, 0x55, 0x51, 0xcb, 0x4e, 0x14, 0xa1, 0x07, 0xb0, 0x98, 0x41, - 0xb5, 0xc7, 0xae, 0x29, 0xfc, 0xd2, 0xf8, 0xbc, 0x36, 0x9f, 0xe2, 0x95, 0xd1, 0xe6, 0x53, 0x8e, - 0x72, 0xdb, 0x5f, 0x61, 0x59, 0xf3, 0x4c, 0xcf, 0x5d, 0x57, 0x99, 0x5c, 0x55, 0x0b, 0x07, 0xb9, - 0xf1, 0xb2, 0x7c, 0xb5, 0xfd, 0x56, 0x8d, 0x7c, 0x9f, 0x29, 0x0f, 0xfe, 0x19, 0x34, 0xc7, 0xcd, - 0x9d, 0x78, 0x43, 0x81, 0x74, 0x6e, 0x6f, 0x33, 0x3b, 0x6e, 0xa5, 0xd9, 0x66, 0xa6, 0xb4, 0xf5, - 0x2b, 0x51, 0xb3, 0x2d, 0xed, 0xcc, 0x9d, 0x54, 0x2d, 0xf7, 0xe7, 0x9a, 0x7e, 0xf9, 0x19, 0x4a, - 0x9a, 0x74, 0xdb, 0xd0, 0xd2, 0x5e, 0x5c, 0x9f, 0x40, 0x69, 0x37, 0xde, 0x05, 0x94, 0xa1, 0x72, - 0xd7, 0xde, 0x34, 0x36, 0xda, 0xca, 0xad, 0xdb, 0x80, 0x15, 0x0d, 0x9e, 0x34, 0xf0, 0x2d, 0x85, - 0xd6, 0xf5, 0x7a, 0x65, 0xba, 0x38, 0x2b, 0xa2, 0x89, 0xbe, 0x6d, 0x68, 0x3f, 0xcd, 0xb1, 0x9f, - 0x6b, 0xab, 0x92, 0x6f, 0x7c, 0x41, 0x5b, 0x15, 0xfd, 0x53, 0x6d, 0x85, 0xae, 0x7d, 0xa6, 0xad, - 0xb0, 0x77, 0x53, 0xac, 0x69, 0xf6, 0xcd, 0xe4, 0xb3, 0x27, 0x17, 0x8e, 0x0c, 0xc7, 0xff, 0x33, - 0xbd, 0x3a, 0xee, 0xa8, 0x6f, 0xff, 0xce, 0x65, 0x17, 0xfc, 0xf3, 0x40, 0xf0, 0xb3, 0xf4, 0xf6, - 0x78, 0x08, 0x25, 0xe9, 0x72, 0xbb, 0x3e, 0x0d, 0x57, 0x51, 0xd0, 0x93, 0xec, 0x4a, 0xd8, 0x9a, - 0x86, 0x9c, 0xde, 0x1c, 0x6d, 0x00, 0xfd, 0xe4, 0x0a, 0x2f, 0xb4, 0xb7, 0xa7, 0x90, 0x68, 0x2e, - 0x8c, 0xcf, 0x6b, 0xd6, 0xbf, 0x15, 0xf9, 0x70, 0xbf, 0xe5, 0x58, 0x5a, 0xe7, 0xd0, 0x0b, 0xeb, - 0x04, 0xaa, 0x06, 0x30, 0xbf, 0x77, 0x0b, 0xc6, 0xbd, 0x9b, 0x77, 0x04, 0x33, 0x5f, 0xe8, 0x08, - 0x8a, 0x5f, 0xec, 0x08, 0x4a, 0x13, 0x1d, 0x41, 0xfd, 0xe7, 0x59, 0xb0, 0xb2, 0x86, 0x07, 0x61, - 0x58, 0xa7, 0xcc, 0x8d, 0x08, 0x1f, 0x52, 0x8f, 0xb8, 0x9d, 0x33, 0x41, 0x22, 0x97, 0x13, 0x2f, - 0xe6, 0x11, 0x1d, 0x92, 0xa4, 0x59, 0xdc, 0xbe, 0xa4, 0x73, 0xd2, 0xb5, 0xb9, 0x41, 0x59, 0x5b, - 0xcb, 0x34, 0xa5, 0x8a, 0x93, 0x8a, 0xa0, 0xff, 0xc1, 0xf5, 0x3c, 0x44, 0xd7, 0x50, 0x9f, 0x99, - 0x42, 0x7d, 0x25, 0x53, 0xef, 0xe6, 0xca, 0x87, 0xb0, 0x42, 0x99, 0xfb, 0x3e, 0x26, 0xf1, 0x84, - 0x6e, 0x71, 0x0a, 0xdd, 0x65, 0xca, 0xde, 0x28, 0x7e, 0xae, 0xea, 0xc2, 0x9a, 0x51, 0x12, 0x79, - 0x17, 0x1b, 0xda, 0xa5, 0x29, 0xb4, 0x57, 0xb3, 0x9c, 0xe5, 0xdd, 0x9d, 0x07, 0xf8, 0x3f, 0xac, - 0x52, 0xe6, 0x9e, 0x62, 0x2a, 0x3e, 0x55, 0x9f, 0x9d, 0xae, 0x22, 0x6f, 0x31, 0x15, 0x93, 0xd2, - 0xba, 0x22, 0x03, 0xc2, 0xfd, 0x89, 0x8a, 0xcc, 0x4d, 0x57, 0x91, 0x03, 0xc5, 0xcf, 0x55, 0x5b, - 0xb0, 0x4c, 0xd9, 0xa7, 0xb9, 0x96, 0xa7, 0xd0, 0xbc, 0x4a, 0xd9, 0x64, 0x9e, 0x6f, 0x60, 0x39, - 0x22, 0x9e, 0x60, 0xdc, 0x74, 0x5b, 0x65, 0x0a, 0xc5, 0xa5, 0x84, 0x9e, 0x49, 0xd6, 0x87, 0x00, - 0xf9, 0x3a, 0x5a, 0x84, 0x19, 0x16, 0xaa, 0xa3, 0x63, 0x39, 0x33, 0x2c, 0x94, 0x3d, 0x60, 0x57, - 0x7e, 0x76, 0xf4, 0xc1, 0xb1, 0x9c, 0x64, 0x24, 0xcf, 0xd3, 0x00, 0x1f, 0xb3, 0xb4, 0x09, 0xd4, - 0x03, 0x35, 0x4b, 0x03, 0xc6, 0x93, 0xb3, 0xa3, 0x07, 0x72, 0x76, 0x88, 0xfb, 0x31, 0x49, 0x7b, - 0x1e, 0x35, 0xa8, 0x7f, 0x57, 0x80, 0x4a, 0xfa, 
0x1b, 0x80, 0x9e, 0x98, 0x6d, 0x74, 0xf1, 0xeb, - 0x7f, 0x1d, 0x92, 0xa4, 0x37, 0x93, 0xf5, 0xda, 0x0f, 0xf3, 0x5e, 0xfb, 0x0f, 0x93, 0x93, 0x86, - 0x9c, 0x80, 0x95, 0xcd, 0x19, 0xbb, 0x2d, 0x4c, 0xec, 0xb6, 0x06, 0xd5, 0x9e, 0x87, 0xdd, 0x1e, - 0x0e, 0xba, 0x7d, 0xa2, 0x3b, 0xc4, 0x05, 0x07, 0x7a, 0x1e, 0x7e, 0xa9, 0x67, 0x52, 0x00, 0xeb, - 0x1c, 0x13, 0x4f, 0x44, 0xaa, 0x28, 0x1a, 0xf0, 0x5a, 0xcf, 0xd4, 0x7f, 0x98, 0x81, 0xaa, 0xf1, - 0xe7, 0x22, 0x7b, 0xe8, 0x00, 0x0f, 0xd2, 0x38, 0xea, 0x59, 0x76, 0x6c, 0x7c, 0xa4, 0xbf, 0x25, - 0xc9, 0x67, 0xaa, 0xcc, 0x47, 0xea, 0xa3, 0x80, 0x6e, 0x03, 0xf0, 0x91, 0x1b, 0x62, 0xef, 0x84, - 0x24, 0xf2, 0x25, 0xc7, 0xe2, 0xa3, 0x96, 0x9e, 0x40, 0x37, 0xc1, 0xe2, 0x23, 0x97, 0x70, 0xce, - 0x78, 0x94, 0xd4, 0xbe, 0xc2, 0x47, 0xcf, 0xd5, 0x38, 0xe1, 0x76, 0x39, 0x93, 0xbd, 0x40, 0xf2, - 0x0e, 0x2c, 0x3e, 0x7a, 0xa6, 0x27, 0x64, 0x54, 0x91, 0x46, 0xd5, 0xad, 0x67, 0x59, 0xe4, 0x51, - 0x45, 0x1e, 0x55, 0xb7, 0x9e, 0x96, 0x30, 0xa3, 0x8a, 0x2c, 0xaa, 0xee, 0x3e, 0x2b, 0xc2, 0x88, - 0x2a, 0xf2, 0xa8, 0x56, 0xca, 0x4d, 0xa2, 0x36, 0xed, 0x0f, 0x1f, 0x37, 0xae, 0xfc, 0xf2, 0x71, - 0xe3, 0xca, 0xb7, 0xe3, 0x8d, 0xc2, 0x87, 0xf1, 0x46, 0xe1, 0xa7, 0xf1, 0x46, 0xe1, 0xb7, 0xf1, - 0x46, 0xa1, 0x33, 0xa7, 0x7e, 0xc3, 0xff, 0xf6, 0x7b, 0x00, 0x00, 0x00, 0xff, 0xff, 0x2f, 0xc0, - 0x49, 0x92, 0xee, 0x0f, 0x00, 0x00, + // 1669 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x58, 0x4f, 0x73, 0x1b, 0xb7, + 0x15, 0x0f, 0xc5, 0x95, 0xc8, 0x7d, 0x94, 0x6c, 0x09, 0xfe, 0xb7, 0x52, 0x1c, 0x51, 0xa1, 0xec, + 0xd6, 0xad, 0xa7, 0xd2, 0x24, 0xed, 0x78, 0xea, 0x34, 0x99, 0x4e, 0xa4, 0x24, 0x63, 0x4f, 0xab, + 0x9a, 0x59, 0x4a, 0x93, 0xf6, 0xb4, 0x03, 0x2e, 0xe1, 0x25, 0xac, 0xe5, 0x62, 0x83, 0xc5, 0x52, + 0x74, 0x4f, 0x3d, 0x74, 0xa6, 0xa7, 0x7e, 0xa0, 0x7e, 0x83, 0x1c, 0x7b, 0xe9, 0x4c, 0x7b, 0xd1, + 0x34, 0xfc, 0x1c, 0x3d, 0x74, 0x80, 0x87, 0xfd, 0x43, 0xc7, 0xb2, 0xc2, 0xdb, 0xe2, 0xe1, 0xf7, + 0x7e, 0xef, 0xe1, 0xe1, 0x07, 0xe0, 0x91, 0xf0, 0xab, 0x88, 0xab, 0x71, 0x3e, 0x3c, 0x08, 0xc5, + 0xe4, 0x30, 0x14, 0x89, 0xa2, 0x3c, 0x61, 0x72, 0x74, 0x18, 0x46, 0x52, 0xe4, 0x69, 0x76, 0x98, + 0x29, 0xaa, 0xb2, 0xc3, 0xe9, 0x47, 0x87, 0x13, 0xa6, 0x24, 0x0f, 0xb3, 0x83, 0x54, 0x0a, 0x25, + 0x88, 0xc7, 0xc5, 0x41, 0x85, 0x3e, 0xb0, 0xe8, 0x83, 0xe9, 0x47, 0x3b, 0xb7, 0x23, 0x11, 0x09, + 0x03, 0x3a, 0xd4, 0x5f, 0x88, 0xef, 0xfd, 0xaf, 0x09, 0xad, 0x13, 0x64, 0x20, 0xbf, 0x85, 0xd6, + 0x38, 0x8f, 0x98, 0x8a, 0x87, 0x5e, 0x63, 0xaf, 0xf9, 0xa8, 0xf3, 0xf1, 0xc3, 0x83, 0xab, 0xd8, + 0x0e, 0x9e, 0x21, 0x70, 0xa0, 0xa8, 0xf2, 0x0b, 0x2f, 0xf2, 0x04, 0x9c, 0x94, 0x8f, 0x32, 0x6f, + 0x65, 0xaf, 0xf1, 0xa8, 0xf3, 0x71, 0xef, 0x6a, 0xef, 0x3e, 0x1f, 0x65, 0xc6, 0xd5, 0xe0, 0xc9, + 0xa7, 0xd0, 0x0c, 0xd3, 0xdc, 0x6b, 0x1a, 0xb7, 0x0f, 0xaf, 0x76, 0x3b, 0xee, 0x9f, 0x69, 0xaf, + 0xa3, 0xd6, 0xfc, 0xb2, 0xdb, 0x3c, 0xee, 0x9f, 0xf9, 0xda, 0x8d, 0x7c, 0x0a, 0x6b, 0x13, 0x36, + 0x11, 0xf2, 0xb5, 0xe7, 0x18, 0x82, 0x07, 0x57, 0x13, 0x9c, 0x18, 0x9c, 0x89, 0x6c, 0x7d, 0xc8, + 0x53, 0x58, 0x1d, 0xc6, 0xe7, 0x5c, 0x78, 0xab, 0xc6, 0x79, 0xff, 0x6a, 0xe7, 0xa3, 0xf8, 0xfc, + 0xf9, 0x0b, 0xe3, 0x8b, 0x1e, 0x7a, 0xb9, 0x72, 0x34, 0xa1, 0xde, 0xda, 0x75, 0xcb, 0xf5, 0x47, + 0x13, 0x8a, 0xcb, 0xd5, 0x78, 0x5d, 0xe7, 0x84, 0xa9, 0x0b, 0x21, 0xcf, 0xbd, 0xd6, 0x75, 0x75, + 0xfe, 0x03, 0x02, 0xb1, 0xce, 0xd6, 0x8b, 0x3c, 0x83, 0x75, 0x84, 0x04, 0x46, 0x05, 0x5e, 0xdb, + 0x24, 0xf0, 0x0e, 0x96, 0x63, 0xf3, 0xa9, 0x49, 0x32, 0xbf, 0x13, 0x56, 0x83, 0xde, 
0x39, 0x74, + 0x6a, 0x3b, 0x49, 0x6e, 0xc3, 0x6a, 0x9e, 0xd1, 0x88, 0x79, 0x8d, 0xbd, 0xc6, 0x23, 0xc7, 0xc7, + 0x01, 0xd9, 0x84, 0xe6, 0x84, 0xce, 0xcc, 0xae, 0x3a, 0xbe, 0xfe, 0x24, 0x1e, 0xb4, 0x5e, 0x52, + 0x1e, 0x87, 0x89, 0x32, 0x9b, 0xe6, 0xf8, 0xc5, 0x90, 0xec, 0x40, 0x3b, 0xa5, 0x11, 0xcb, 0xf8, + 0x9f, 0x99, 0xd9, 0x0e, 0xd7, 0x2f, 0xc7, 0xbd, 0x4f, 0xa0, 0x5d, 0x6c, 0xbc, 0x66, 0x08, 0x73, + 0x29, 0x59, 0xa2, 0x6c, 0xac, 0x62, 0xa8, 0x73, 0x88, 0xf9, 0x84, 0x2b, 0x1b, 0x0f, 0x07, 0xbd, + 0xbf, 0x35, 0xa0, 0x65, 0xb7, 0x9f, 0xfc, 0xba, 0x9e, 0xe5, 0x3b, 0x0b, 0x7f, 0xdc, 0x3f, 0x3b, + 0xd3, 0xc8, 0x62, 0x25, 0x47, 0x00, 0x6a, 0x2c, 0x85, 0x52, 0x31, 0x4f, 0xa2, 0xeb, 0x65, 0x7a, + 0x8a, 0x58, 0xe6, 0xd7, 0xbc, 0x7a, 0xdf, 0x42, 0xbb, 0xa0, 0xd5, 0xb9, 0x2a, 0xa1, 0x68, 0x5c, + 0xd4, 0xcb, 0x0c, 0xc8, 0x5d, 0x58, 0x3b, 0x67, 0x32, 0x61, 0xb1, 0x5d, 0x82, 0x1d, 0x11, 0x02, + 0x4e, 0x9e, 0x31, 0x69, 0x4b, 0x66, 0xbe, 0xc9, 0x3e, 0xb4, 0x52, 0x26, 0x03, 0x2d, 0x7f, 0x67, + 0xaf, 0xf9, 0xc8, 0x39, 0x82, 0xf9, 0x65, 0x77, 0xad, 0xcf, 0xa4, 0x96, 0xf7, 0x5a, 0xca, 0xe4, + 0x71, 0x9a, 0xf7, 0x66, 0xd0, 0x2e, 0x52, 0xd1, 0x85, 0x4b, 0x99, 0xe4, 0x62, 0x94, 0x15, 0x85, + 0xb3, 0x43, 0xf2, 0x18, 0xb6, 0x6c, 0x9a, 0x6c, 0x14, 0x14, 0x18, 0xcc, 0x60, 0xb3, 0x9c, 0xe8, + 0x5b, 0xf0, 0x43, 0xb8, 0x51, 0x81, 0x15, 0x9f, 0x30, 0x9b, 0xd5, 0x46, 0x69, 0x3d, 0xe5, 0x13, + 0xd6, 0xfb, 0x4f, 0x07, 0xa0, 0x3a, 0x34, 0x7a, 0xbd, 0x21, 0x0d, 0xc7, 0xa5, 0x3e, 0xcc, 0x80, + 0x6c, 0x43, 0x53, 0x66, 0x36, 0x14, 0x9e, 0x4d, 0x7f, 0x30, 0xf0, 0xb5, 0x8d, 0xfc, 0x04, 0xda, + 0x32, 0xcb, 0x02, 0x7d, 0x41, 0x60, 0x80, 0xa3, 0xce, 0xfc, 0xb2, 0xdb, 0xf2, 0x07, 0x03, 0x2d, + 0x3b, 0xbf, 0x25, 0xb3, 0x4c, 0x7f, 0x90, 0x2e, 0x74, 0x26, 0x34, 0x4d, 0xd9, 0x28, 0x78, 0xc9, + 0x63, 0x54, 0x8e, 0xe3, 0x03, 0x9a, 0xbe, 0xe2, 0xb1, 0xa9, 0xf4, 0x88, 0x4b, 0xf5, 0xda, 0x1c, + 0x53, 0xc7, 0xc7, 0x01, 0xb9, 0x0f, 0xee, 0x85, 0xe4, 0x8a, 0x0d, 0x69, 0x78, 0x6e, 0x8e, 0xa1, + 0xe3, 0x57, 0x06, 0xe2, 0x41, 0x3b, 0x8d, 0x82, 0x34, 0x0a, 0x78, 0xe2, 0xb5, 0x70, 0x27, 0xd2, + 0xa8, 0x1f, 0x3d, 0x4f, 0xc8, 0x0e, 0xb8, 0x38, 0x23, 0x72, 0x65, 0x4e, 0x8f, 0x2e, 0x63, 0xd4, + 0x8f, 0x5e, 0xe4, 0x8a, 0x6c, 0x1b, 0xaf, 0x97, 0x34, 0x8f, 0x95, 0xe7, 0x16, 0x53, 0x5f, 0xe9, + 0x21, 0xd9, 0x83, 0xf5, 0x34, 0x0a, 0x26, 0xf4, 0x95, 0x9d, 0x06, 0x4c, 0x33, 0x8d, 0x4e, 0xe8, + 0x2b, 0x44, 0xec, 0xc3, 0x06, 0x4f, 0x68, 0xa8, 0xf8, 0x94, 0x05, 0x34, 0x11, 0x89, 0xd7, 0x31, + 0x90, 0xf5, 0xc2, 0xf8, 0x79, 0x22, 0x12, 0xbd, 0xd8, 0x3a, 0x64, 0x1d, 0x59, 0x6a, 0x80, 0x3a, + 0x8b, 0xa9, 0xc7, 0xc6, 0x22, 0x8b, 0xa9, 0x48, 0xc5, 0x62, 0x20, 0x37, 0xea, 0x2c, 0x06, 0xb0, + 0x07, 0x9d, 0x3c, 0x61, 0x53, 0x1e, 0x2a, 0x3a, 0x8c, 0x99, 0x77, 0xd3, 0x00, 0xea, 0x26, 0xf2, + 0x09, 0x6c, 0x8f, 0x39, 0x93, 0x54, 0x86, 0x63, 0x1e, 0xd2, 0x38, 0xc0, 0x2b, 0x31, 0xc0, 0xe3, + 0xb7, 0x69, 0xf0, 0xf7, 0xea, 0x00, 0x54, 0xc2, 0xef, 0xf5, 0x34, 0x79, 0x02, 0x0b, 0x53, 0x41, + 0x76, 0x41, 0x53, 0xeb, 0xb9, 0x65, 0x3c, 0xef, 0xd4, 0xa7, 0x07, 0x17, 0x34, 0x45, 0xbf, 0x2e, + 0x74, 0xcc, 0x29, 0x09, 0x50, 0x48, 0x04, 0xd3, 0x36, 0xa6, 0x63, 0xa3, 0xa6, 0x9f, 0x81, 0x8b, + 0x00, 0xad, 0xa9, 0x5b, 0x46, 0x33, 0xeb, 0xf3, 0xcb, 0x6e, 0xfb, 0x54, 0x1b, 0xb5, 0xb0, 0xda, + 0x66, 0xda, 0xcf, 0x32, 0xf2, 0x04, 0x6e, 0x94, 0x50, 0xd4, 0xd8, 0x6d, 0x83, 0xdf, 0x9c, 0x5f, + 0x76, 0xd7, 0x0b, 0xbc, 0x11, 0xda, 0x7a, 0xe1, 0x63, 0xd4, 0xf6, 0x73, 0xd8, 0x42, 0xbf, 0xba, + 0xe6, 0xee, 0x98, 0x4c, 0x6e, 0x9a, 0x89, 0x93, 0x4a, 0x78, 0x65, 0xbe, 0x28, 0xbf, 0xbb, 0xb5, + 0x7c, 0xbf, 
0x30, 0x1a, 0xfc, 0x29, 0xa0, 0x4f, 0x50, 0x29, 0xf1, 0x9e, 0x01, 0x61, 0x6e, 0xdf, + 0x94, 0x72, 0xdc, 0x2f, 0xb2, 0x2d, 0x45, 0xe9, 0xe1, 0x96, 0x18, 0x6b, 0x1f, 0x95, 0xf9, 0xb0, + 0x60, 0xab, 0xf4, 0xb9, 0x8d, 0x9b, 0x5f, 0xa2, 0xb4, 0x48, 0x1f, 0xd4, 0xb8, 0x50, 0x8b, 0x3b, + 0x0b, 0x28, 0x54, 0xe3, 0x63, 0x20, 0x25, 0xaa, 0x52, 0xed, 0xfb, 0xb5, 0x85, 0xf6, 0x2b, 0xe9, + 0x1e, 0xc0, 0x2d, 0x04, 0x2f, 0x0a, 0xf8, 0xbe, 0x41, 0x63, 0xbd, 0x9e, 0xd7, 0x55, 0x5c, 0x16, + 0xb1, 0x8e, 0xfe, 0xa0, 0xc6, 0xfd, 0x79, 0x85, 0xfd, 0x21, 0xb7, 0x29, 0xf9, 0xee, 0x5b, 0xb8, + 0x4d, 0xd1, 0xdf, 0xe4, 0x36, 0xe8, 0xee, 0x0f, 0xb8, 0x0d, 0xf6, 0x71, 0x81, 0xad, 0x8b, 0x7d, + 0xcf, 0x5e, 0x7b, 0x7a, 0xe2, 0xac, 0xa6, 0xf8, 0xdf, 0x14, 0x4f, 0xc7, 0x87, 0xd7, 0x3d, 0x99, + 0xa8, 0xf5, 0x2f, 0x13, 0x25, 0x5f, 0x17, 0xaf, 0xc7, 0x53, 0x70, 0xb4, 0xca, 0xbd, 0xde, 0x32, + 0xbe, 0xc6, 0x85, 0x7c, 0x56, 0x3e, 0x09, 0xfb, 0xcb, 0x38, 0x17, 0x2f, 0xc7, 0x00, 0x00, 0xbf, + 0x02, 0x15, 0xa6, 0xde, 0x83, 0x25, 0x28, 0x8e, 0x36, 0xe6, 0x97, 0x5d, 0xf7, 0x77, 0xc6, 0xf9, + 0xf4, 0xb8, 0xef, 0xbb, 0xc8, 0x73, 0x1a, 0xa6, 0x3d, 0x06, 0x9d, 0x1a, 0xb0, 0x7a, 0x77, 0x1b, + 0xb5, 0x77, 0xb7, 0xea, 0x08, 0x56, 0xde, 0xd2, 0x11, 0x34, 0xdf, 0xda, 0x11, 0x38, 0x0b, 0x1d, + 0x41, 0xef, 0x5f, 0xab, 0xe0, 0x96, 0xad, 0x13, 0xa1, 0xb0, 0xc3, 0x45, 0x90, 0x31, 0x39, 0xe5, + 0x21, 0x0b, 0x86, 0xaf, 0x15, 0xcb, 0x02, 0xc9, 0xc2, 0x5c, 0x66, 0x7c, 0xca, 0x6c, 0xdb, 0xf9, + 0xe0, 0x9a, 0x1e, 0x0c, 0x6b, 0x73, 0x8f, 0x8b, 0x01, 0xd2, 0x1c, 0x69, 0x16, 0xbf, 0x20, 0x21, + 0x7f, 0x84, 0x3b, 0x55, 0x88, 0x51, 0x8d, 0x7d, 0x65, 0x09, 0xf6, 0x5b, 0x25, 0xfb, 0xa8, 0x62, + 0x3e, 0x85, 0x5b, 0x5c, 0x04, 0xdf, 0xe6, 0x2c, 0x5f, 0xe0, 0x6d, 0x2e, 0xc1, 0xbb, 0xc5, 0xc5, + 0xd7, 0xc6, 0xbf, 0x62, 0x0d, 0x60, 0xbb, 0x56, 0x12, 0xfd, 0x16, 0xd7, 0xb8, 0x9d, 0x25, 0xb8, + 0xef, 0x96, 0x39, 0xeb, 0xb7, 0xbb, 0x0a, 0xf0, 0x27, 0xb8, 0xcb, 0x45, 0x70, 0x41, 0xb9, 0x7a, + 0x93, 0x7d, 0x75, 0xb9, 0x8a, 0x7c, 0x43, 0xb9, 0x5a, 0xa4, 0xc6, 0x8a, 0x4c, 0x98, 0x8c, 0x16, + 0x2a, 0xb2, 0xb6, 0x5c, 0x45, 0x4e, 0x8c, 0x7f, 0xc5, 0xda, 0x87, 0x2d, 0x2e, 0xde, 0xcc, 0xb5, + 0xb5, 0x04, 0xe7, 0x4d, 0x2e, 0x16, 0xf3, 0xfc, 0x1a, 0xb6, 0x32, 0x16, 0x2a, 0x21, 0xeb, 0x6a, + 0x6b, 0x2f, 0xc1, 0xb8, 0x69, 0xdd, 0x4b, 0xca, 0xde, 0x14, 0xa0, 0x9a, 0x27, 0x37, 0x60, 0x45, + 0xa4, 0xe6, 0xe8, 0xb8, 0xfe, 0x8a, 0x48, 0x75, 0x0f, 0x38, 0xd2, 0xd7, 0x0e, 0x1e, 0x1c, 0xd7, + 0xb7, 0x23, 0x7d, 0x9e, 0x26, 0xf4, 0x95, 0x28, 0x9a, 0x40, 0x1c, 0x18, 0x2b, 0x4f, 0x84, 0xb4, + 0x67, 0x07, 0x07, 0xda, 0x3a, 0xa5, 0x71, 0xce, 0x8a, 0x9e, 0xc7, 0x0c, 0x7a, 0x7f, 0x6d, 0x40, + 0xbb, 0xf8, 0x41, 0x41, 0x3e, 0xab, 0xb7, 0xd1, 0xcd, 0x77, 0xff, 0x7e, 0xd1, 0x4e, 0xb8, 0x98, + 0xb2, 0xd7, 0x7e, 0x5a, 0xf5, 0xda, 0x3f, 0xda, 0xd9, 0x36, 0xe4, 0x0c, 0xdc, 0xd2, 0x56, 0x5b, + 0x6d, 0x63, 0x61, 0xb5, 0x5d, 0xe8, 0x8c, 0x43, 0x1a, 0x8c, 0x69, 0x32, 0x8a, 0x19, 0x76, 0x88, + 0x1b, 0x3e, 0x8c, 0x43, 0xfa, 0x0c, 0x2d, 0x05, 0x40, 0x0c, 0x5f, 0xb1, 0x50, 0x65, 0xa6, 0x28, + 0x08, 0x78, 0x81, 0x96, 0xde, 0xdf, 0x57, 0xa0, 0x53, 0xfb, 0x0d, 0xa4, 0x7b, 0xe8, 0x84, 0x4e, + 0x8a, 0x38, 0xe6, 0x5b, 0x77, 0x6c, 0x72, 0x86, 0x77, 0x89, 0xbd, 0xa6, 0x5a, 0x72, 0x66, 0x2e, + 0x05, 0xf2, 0x01, 0x80, 0x9c, 0x05, 0x29, 0x0d, 0xcf, 0x99, 0xa5, 0x77, 0x7c, 0x57, 0xce, 0xfa, + 0x68, 0x20, 0xef, 0x83, 0x2b, 0x67, 0x01, 0x93, 0x52, 0xc8, 0xcc, 0xd6, 0xbe, 0x2d, 0x67, 0x5f, + 0x9a, 0xb1, 0xf5, 0x1d, 0x49, 0xa1, 0x7b, 0x01, 0xbb, 0x07, 0xae, 0x9c, 0x7d, 0x81, 0x06, 0x1d, + 0x55, 0x15, 0x51, 0xb1, 0xf5, 0x6c, 
0xa9, 0x2a, 0xaa, 0xaa, 0xa2, 0x62, 0xeb, 0xe9, 0xaa, 0x7a, + 0x54, 0x55, 0x46, 0xc5, 0xee, 0xb3, 0xad, 0x6a, 0x51, 0x55, 0x15, 0xd5, 0x2d, 0x7c, 0x6d, 0xd4, + 0xde, 0x3f, 0x1a, 0xd0, 0xa9, 0xfd, 0x9a, 0xd3, 0x05, 0x4c, 0x64, 0x90, 0xc5, 0x8c, 0xa5, 0xfa, + 0x27, 0x0d, 0xde, 0xdd, 0x90, 0xc8, 0x81, 0xb5, 0x68, 0xbe, 0x44, 0x06, 0x32, 0x4f, 0x92, 0xe2, + 0x27, 0x8f, 0xe3, 0xbb, 0x89, 0xf4, 0xd1, 0x60, 0xa7, 0x33, 0x85, 0xe1, 0x9a, 0xc5, 0xf4, 0x00, + 0x0d, 0xe4, 0x17, 0x40, 0x12, 0x19, 0xe4, 0x09, 0x4f, 0x14, 0x93, 0x32, 0x4f, 0x15, 0x1f, 0x96, + 0xed, 0xf9, 0x56, 0x22, 0xcf, 0x16, 0x27, 0xc8, 0x7d, 0xc3, 0x66, 0x2f, 0x1b, 0x5b, 0xb2, 0x76, + 0x22, 0x9f, 0x9b, 0x9b, 0xe3, 0xc8, 0xfb, 0xee, 0xfb, 0xdd, 0xf7, 0xfe, 0xfd, 0xfd, 0xee, 0x7b, + 0x7f, 0x99, 0xef, 0x36, 0xbe, 0x9b, 0xef, 0x36, 0xfe, 0x39, 0xdf, 0x6d, 0xfc, 0x77, 0xbe, 0xdb, + 0x18, 0xae, 0x99, 0x3f, 0x23, 0x7e, 0xf9, 0xff, 0x00, 0x00, 0x00, 0xff, 0xff, 0xb9, 0x78, 0x66, + 0x06, 0xf4, 0x10, 0x00, 0x00, } func (m *Metrics) Marshal() (dAtA []byte, err error) { @@ -819,6 +877,16 @@ func (m *Metrics) MarshalTo(dAtA []byte) (int, error) { i += n } } + if m.CgroupStats != nil { + dAtA[i] = 0x42 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.CgroupStats.Size())) + n6, err := m.CgroupStats.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } @@ -917,21 +985,21 @@ func (m *CPUStat) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintMetrics(dAtA, i, uint64(m.Usage.Size())) - n6, err := m.Usage.MarshalTo(dAtA[i:]) + n7, err := m.Usage.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n6 + i += n7 } if m.Throttling != nil { dAtA[i] = 0x12 i++ i = encodeVarintMetrics(dAtA, i, uint64(m.Throttling.Size())) - n7, err := m.Throttling.MarshalTo(dAtA[i:]) + n8, err := m.Throttling.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n7 + i += n8 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -970,21 +1038,21 @@ func (m *CPUUsage) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintMetrics(dAtA, i, uint64(m.User)) } if len(m.PerCPU) > 0 { - dAtA9 := make([]byte, len(m.PerCPU)*10) - var j8 int + dAtA10 := make([]byte, len(m.PerCPU)*10) + var j9 int for _, num := range m.PerCPU { for num >= 1<<7 { - dAtA9[j8] = uint8(uint64(num)&0x7f | 0x80) + dAtA10[j9] = uint8(uint64(num)&0x7f | 0x80) num >>= 7 - j8++ + j9++ } - dAtA9[j8] = uint8(num) - j8++ + dAtA10[j9] = uint8(num) + j9++ } dAtA[i] = 0x22 i++ - i = encodeVarintMetrics(dAtA, i, uint64(j8)) - i += copy(dAtA[i:], dAtA9[:j8]) + i = encodeVarintMetrics(dAtA, i, uint64(j9)) + i += copy(dAtA[i:], dAtA10[:j9]) } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -1243,11 +1311,11 @@ func (m *MemoryStat) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x2 i++ i = encodeVarintMetrics(dAtA, i, uint64(m.Usage.Size())) - n10, err := m.Usage.MarshalTo(dAtA[i:]) + n11, err := m.Usage.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n10 + i += n11 } if m.Swap != nil { dAtA[i] = 0x92 @@ -1255,11 +1323,11 @@ func (m *MemoryStat) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x2 i++ i = encodeVarintMetrics(dAtA, i, uint64(m.Swap.Size())) - n11, err := m.Swap.MarshalTo(dAtA[i:]) + n12, err := m.Swap.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n11 + i += n12 } if m.Kernel != nil { dAtA[i] = 0x9a @@ -1267,11 +1335,11 @@ func (m *MemoryStat) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x2 i++ i = 
encodeVarintMetrics(dAtA, i, uint64(m.Kernel.Size())) - n12, err := m.Kernel.MarshalTo(dAtA[i:]) + n13, err := m.Kernel.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n12 + i += n13 } if m.KernelTCP != nil { dAtA[i] = 0xa2 @@ -1279,11 +1347,11 @@ func (m *MemoryStat) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x2 i++ i = encodeVarintMetrics(dAtA, i, uint64(m.KernelTCP.Size())) - n13, err := m.KernelTCP.MarshalTo(dAtA[i:]) + n14, err := m.KernelTCP.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n13 + i += n14 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -1646,6 +1714,52 @@ func (m *NetworkStat) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func (m *CgroupStats) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CgroupStats) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.NrSleeping != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.NrSleeping)) + } + if m.NrRunning != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.NrRunning)) + } + if m.NrStopped != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.NrStopped)) + } + if m.NrUninterruptible != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.NrUninterruptible)) + } + if m.NrIoWait != 0 { + dAtA[i] = 0x28 + i++ + i = encodeVarintMetrics(dAtA, i, uint64(m.NrIoWait)) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + func encodeVarintMetrics(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -1693,6 +1807,10 @@ func (m *Metrics) Size() (n int) { n += 1 + l + sovMetrics(uint64(l)) } } + if m.CgroupStats != nil { + l = m.CgroupStats.Size() + n += 1 + l + sovMetrics(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -2134,6 +2252,33 @@ func (m *NetworkStat) Size() (n int) { return n } +func (m *CgroupStats) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NrSleeping != 0 { + n += 1 + sovMetrics(uint64(m.NrSleeping)) + } + if m.NrRunning != 0 { + n += 1 + sovMetrics(uint64(m.NrRunning)) + } + if m.NrStopped != 0 { + n += 1 + sovMetrics(uint64(m.NrStopped)) + } + if m.NrUninterruptible != 0 { + n += 1 + sovMetrics(uint64(m.NrUninterruptible)) + } + if m.NrIoWait != 0 { + n += 1 + sovMetrics(uint64(m.NrIoWait)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func sovMetrics(x uint64) (n int) { for { n++ @@ -2159,6 +2304,7 @@ func (this *Metrics) String() string { `Blkio:` + strings.Replace(fmt.Sprintf("%v", this.Blkio), "BlkIOStat", "BlkIOStat", 1) + `,`, `Rdma:` + strings.Replace(fmt.Sprintf("%v", this.Rdma), "RdmaStat", "RdmaStat", 1) + `,`, `Network:` + strings.Replace(fmt.Sprintf("%v", this.Network), "NetworkStat", "NetworkStat", 1) + `,`, + `CgroupStats:` + strings.Replace(fmt.Sprintf("%v", this.CgroupStats), "CgroupStats", "CgroupStats", 1) + `,`, `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, `}`, }, "") @@ -2366,6 +2512,21 @@ func (this *NetworkStat) String() string { }, "") return s } +func (this *CgroupStats) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CgroupStats{`, + `NrSleeping:` + fmt.Sprintf("%v", this.NrSleeping) + `,`, + `NrRunning:` + 
fmt.Sprintf("%v", this.NrRunning) + `,`, + `NrStopped:` + fmt.Sprintf("%v", this.NrStopped) + `,`, + `NrUninterruptible:` + fmt.Sprintf("%v", this.NrUninterruptible) + `,`, + `NrIoWait:` + fmt.Sprintf("%v", this.NrIoWait) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} func valueToStringMetrics(v interface{}) string { rv := reflect.ValueOf(v) if rv.IsNil() { @@ -2651,6 +2812,42 @@ func (m *Metrics) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CgroupStats", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CgroupStats == nil { + m.CgroupStats = &CgroupStats{} + } + if err := m.CgroupStats.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipMetrics(dAtA[iNdEx:]) @@ -5256,6 +5453,155 @@ func (m *NetworkStat) Unmarshal(dAtA []byte) error { } return nil } +func (m *CgroupStats) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CgroupStats: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CgroupStats: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NrSleeping", wireType) + } + m.NrSleeping = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NrSleeping |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NrRunning", wireType) + } + m.NrRunning = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NrRunning |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NrStopped", wireType) + } + m.NrStopped = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NrStopped |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NrUninterruptible", wireType) + } + m.NrUninterruptible = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] 
+ iNdEx++ + m.NrUninterruptible |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NrIoWait", wireType) + } + m.NrIoWait = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NrIoWait |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func skipMetrics(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.txt b/vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.txt index 7a960c67..b7e87f00 100644 --- a/vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.txt +++ b/vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.txt @@ -63,6 +63,14 @@ file { type_name: ".io.containerd.cgroups.v1.NetworkStat" json_name: "network" } + field { + name: "cgroup_stats" + number: 8 + label: LABEL_OPTIONAL + type: TYPE_MESSAGE + type_name: ".io.containerd.cgroups.v1.CgroupStats" + json_name: "cgroupStats" + } } message_type { name: "HugetlbStat" @@ -708,5 +716,43 @@ file { json_name: "txDropped" } } + message_type { + name: "CgroupStats" + field { + name: "nr_sleeping" + number: 1 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "nrSleeping" + } + field { + name: "nr_running" + number: 2 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "nrRunning" + } + field { + name: "nr_stopped" + number: 3 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "nrStopped" + } + field { + name: "nr_uninterruptible" + number: 4 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "nrUninterruptible" + } + field { + name: "nr_io_wait" + number: 5 + label: LABEL_OPTIONAL + type: TYPE_UINT64 + json_name: "nrIoWait" + } + } syntax: "proto3" } diff --git a/vendor/github.com/containerd/cgroups/stats/v1/metrics.proto b/vendor/github.com/containerd/cgroups/stats/v1/metrics.proto index 62b51980..ba6be851 100644 --- a/vendor/github.com/containerd/cgroups/stats/v1/metrics.proto +++ b/vendor/github.com/containerd/cgroups/stats/v1/metrics.proto @@ -12,6 +12,7 @@ message Metrics { BlkIOStat blkio = 5; RdmaStat rdma = 6; repeated NetworkStat network = 7; + CgroupStats cgroup_stats = 8; } message HugetlbStat { @@ -134,3 +135,17 @@ message NetworkStat { uint64 tx_errors = 8; uint64 tx_dropped = 9; } + +// CgroupStats exports per-cgroup statistics. 
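The proto definition of the new message follows below. For reference on the constants used by the generated marshaller earlier in this patch (0x42 written before cgroup_stats, 0x8 through 0x28 before the counters): a proto3 field key is (field_number << 3) | wire_type, where wire type 2 is length-delimited and wire type 0 is varint. A tiny standalone check, not part of the patch:

```go
package main

import "fmt"

// key computes a protobuf field key byte: (field_number << 3) | wire_type.
func key(fieldNum, wireType uint) byte {
	return byte(fieldNum<<3 | wireType)
}

func main() {
	fmt.Printf("cgroup_stats (field 8, wire type 2): %#x\n", key(8, 2)) // 0x42
	fmt.Printf("nr_sleeping (field 1, wire type 0): %#x\n", key(1, 0))  // 0x8
	fmt.Printf("nr_io_wait (field 5, wire type 0): %#x\n", key(5, 0))   // 0x28
}
```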
+message CgroupStats { + // number of tasks sleeping + uint64 nr_sleeping = 1; + // number of tasks running + uint64 nr_running = 2; + // number of tasks in stopped state + uint64 nr_stopped = 3; + // number of tasks in uninterruptible state + uint64 nr_uninterruptible = 4; + // number of tasks waiting on IO + uint64 nr_io_wait = 5; +} diff --git a/vendor/github.com/containerd/console/.travis.yml b/vendor/github.com/containerd/console/.travis.yml new file mode 100644 index 00000000..16827ec3 --- /dev/null +++ b/vendor/github.com/containerd/console/.travis.yml @@ -0,0 +1,27 @@ +language: go +go: + - "1.12.x" + - "1.13.x" + +go_import_path: github.com/containerd/console + +env: + - GO111MODULE=on + +install: + - pushd ..; go get -u github.com/vbatts/git-validation; popd + - pushd ..; go get -u github.com/kunalkushwaha/ltag; popd + +before_script: + - pushd ..; git clone https://github.com/containerd/project; popd + +script: + - DCO_VERBOSITY=-q ../project/script/validate/dco + - ../project/script/validate/fileheader ../project/ + - travis_wait ../project/script/validate/vendor + - go test -race + - GOOS=openbsd go build + - GOOS=openbsd go test -c + - GOOS=solaris go build + - GOOS=solaris go test -c + - GOOS=windows go test diff --git a/vendor/github.com/containerd/console/LICENSE b/vendor/github.com/containerd/console/LICENSE new file mode 100644 index 00000000..584149b6 --- /dev/null +++ b/vendor/github.com/containerd/console/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright The containerd Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/containerd/console/README.md b/vendor/github.com/containerd/console/README.md new file mode 100644 index 00000000..5392fdaf --- /dev/null +++ b/vendor/github.com/containerd/console/README.md @@ -0,0 +1,27 @@ +# console + +[![Build Status](https://travis-ci.org/containerd/console.svg?branch=master)](https://travis-ci.org/containerd/console) + +Golang package for dealing with consoles. Light on deps and a simple API. + +## Modifying the current process + +```go +current := console.Current() +defer current.Reset() + +if err := current.SetRaw(); err != nil { +} +ws, err := current.Size() +current.Resize(ws) +``` + +## Project details + +console is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE). +As a containerd sub-project, you will find the: + * [Project governance](https://github.com/containerd/project/blob/master/GOVERNANCE.md), + * [Maintainers](https://github.com/containerd/project/blob/master/MAINTAINERS), + * and [Contributing guidelines](https://github.com/containerd/project/blob/master/CONTRIBUTING.md) + +information in our [`containerd/project`](https://github.com/containerd/project) repository. diff --git a/vendor/github.com/containerd/console/console.go b/vendor/github.com/containerd/console/console.go new file mode 100644 index 00000000..6a36d147 --- /dev/null +++ b/vendor/github.com/containerd/console/console.go @@ -0,0 +1,81 @@ +/* + Copyright The containerd Authors. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package console + +import ( + "errors" + "io" + "os" +) + +var ErrNotAConsole = errors.New("provided file is not a console") + +type File interface { + io.ReadWriteCloser + + // Fd returns its file descriptor + Fd() uintptr + // Name returns its file name + Name() string +} + +type Console interface { + File + + // Resize resizes the console to the provided window size + Resize(WinSize) error + // ResizeFrom resizes the calling console to the size of the + // provided console + ResizeFrom(Console) error + // SetRaw sets the console in raw mode + SetRaw() error + // DisableEcho disables echo on the console + DisableEcho() error + // Reset restores the console to its original state + Reset() error + // Size returns the window size of the console + Size() (WinSize, error) +} + +// WinSize specifies the window size of the console +type WinSize struct { + // Height of the console + Height uint16 + // Width of the console + Width uint16 + x uint16 + y uint16 +} + +// Current returns the current process's console +func Current() Console { + c, err := ConsoleFromFile(os.Stdin) + if err != nil { + // stdin should always be a console for the design + // of this function + panic(err) + } + return c +} + +// ConsoleFromFile returns a console using the provided file +func ConsoleFromFile(f File) (Console, error) { + if err := checkConsole(f); err != nil { + return nil, err + } + return newMaster(f) +} diff --git a/vendor/github.com/containerd/console/console_linux.go b/vendor/github.com/containerd/console/console_linux.go new file mode 100644 index 00000000..c1c839ee --- /dev/null +++ b/vendor/github.com/containerd/console/console_linux.go @@ -0,0 +1,280 @@ +// +build linux + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package console + +import ( + "io" + "os" + "sync" + + "golang.org/x/sys/unix" +) + +const ( + maxEvents = 128 +) + +// Epoller manages multiple epoll consoles using edge-triggered epoll api so we +// don't have to deal with repeated wake-ups of EPOLLERR or EPOLLHUP.
+// For more details, see: +// - https://github.com/systemd/systemd/pull/4262 +// - https://github.com/moby/moby/issues/27202 +// +// Example usage of Epoller and EpollConsole is as follows: +// +// epoller, _ := NewEpoller() +// epollConsole, _ := epoller.Add(console) +// go epoller.Wait() +// var ( +// b bytes.Buffer +// wg sync.WaitGroup +// ) +// wg.Add(1) +// go func() { +// io.Copy(&b, epollConsole) +// wg.Done() +// }() +// // perform I/O on the console +// epollConsole.Shutdown(epoller.CloseConsole) +// wg.Wait() +// epollConsole.Close() +type Epoller struct { + efd int + mu sync.Mutex + fdMapping map[int]*EpollConsole + closeOnce sync.Once +} + +// NewEpoller returns an instance of epoller with a valid epoll fd. +func NewEpoller() (*Epoller, error) { + efd, err := unix.EpollCreate1(unix.EPOLL_CLOEXEC) + if err != nil { + return nil, err + } + return &Epoller{ + efd: efd, + fdMapping: make(map[int]*EpollConsole), + }, nil +} + +// Add creates an epoll console based on the provided console. The console will +// be registered with EPOLLET (i.e. using edge-triggered notification) and its +// file descriptor will be set to non-blocking mode. After this, the user should use +// the returned console to perform I/O. +func (e *Epoller) Add(console Console) (*EpollConsole, error) { + sysfd := int(console.Fd()) + // Set sysfd to non-blocking mode + if err := unix.SetNonblock(sysfd, true); err != nil { + return nil, err + } + + ev := unix.EpollEvent{ + Events: unix.EPOLLIN | unix.EPOLLOUT | unix.EPOLLRDHUP | unix.EPOLLET, + Fd: int32(sysfd), + } + if err := unix.EpollCtl(e.efd, unix.EPOLL_CTL_ADD, sysfd, &ev); err != nil { + return nil, err + } + ef := &EpollConsole{ + Console: console, + sysfd: sysfd, + readc: sync.NewCond(&sync.Mutex{}), + writec: sync.NewCond(&sync.Mutex{}), + } + e.mu.Lock() + e.fdMapping[sysfd] = ef + e.mu.Unlock() + return ef, nil +} + +// Wait starts the loop to wait for its consoles' notifications and signals the +// appropriate console that it can perform I/O. +func (e *Epoller) Wait() error { + events := make([]unix.EpollEvent, maxEvents) + for { + n, err := unix.EpollWait(e.efd, events, -1) + if err != nil { + // EINTR: The call was interrupted by a signal handler before either + // any of the requested events occurred or the timeout expired + if err == unix.EINTR { + continue + } + return err + } + for i := 0; i < n; i++ { + ev := &events[i] + // the console is ready to be read from + if ev.Events&(unix.EPOLLIN|unix.EPOLLHUP|unix.EPOLLERR) != 0 { + if epfile := e.getConsole(int(ev.Fd)); epfile != nil { + epfile.signalRead() + } + } + // the console is ready to be written to + if ev.Events&(unix.EPOLLOUT|unix.EPOLLHUP|unix.EPOLLERR) != 0 { + if epfile := e.getConsole(int(ev.Fd)); epfile != nil { + epfile.signalWrite() + } + } + } + } +} + +// CloseConsole unregisters the console's file descriptor from the epoll interface +func (e *Epoller) CloseConsole(fd int) error { + e.mu.Lock() + defer e.mu.Unlock() + delete(e.fdMapping, fd) + return unix.EpollCtl(e.efd, unix.EPOLL_CTL_DEL, fd, &unix.EpollEvent{}) +} + +func (e *Epoller) getConsole(sysfd int) *EpollConsole { + e.mu.Lock() + f := e.fdMapping[sysfd] + e.mu.Unlock() + return f +} + +// Close closes the epoll fd +func (e *Epoller) Close() error { + closeErr := os.ErrClosed // default to "file already closed" + e.closeOnce.Do(func() { + closeErr = unix.Close(e.efd) + }) + return closeErr +} + +// EpollConsole acts like a console but registers its file descriptor with an +// epoll fd and uses the epoll API to perform I/O.
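Before the EpollConsole type itself, here is the usage sketch from the Epoller comment above, filled out into a compilable, Linux-only form. This is a hedged elaboration rather than part of the vendored code: it assumes the console comes from NewPty (defined in console_unix.go below) and keeps error handling minimal:

```go
package main

import (
	"bytes"
	"io"
	"log"
	"sync"

	"github.com/containerd/console"
)

func main() {
	master, _, err := console.NewPty()
	if err != nil {
		log.Fatal(err)
	}
	epoller, err := console.NewEpoller()
	if err != nil {
		log.Fatal(err)
	}
	epollConsole, err := epoller.Add(master)
	if err != nil {
		log.Fatal(err)
	}
	go epoller.Wait() // dispatches read/write readiness to registered consoles

	var (
		b  bytes.Buffer
		wg sync.WaitGroup
	)
	wg.Add(1)
	go func() {
		defer wg.Done()
		// Drains until Shutdown marks the console closed and Read returns io.EOF.
		io.Copy(&b, epollConsole)
	}()

	// ... perform I/O on the console ...

	// Shutdown unregisters the fd from epoll and wakes blocked readers/writers;
	// only after the copier exits is it safe to Close.
	if err := epollConsole.Shutdown(epoller.CloseConsole); err != nil {
		log.Fatal(err)
	}
	wg.Wait()
	epollConsole.Close()
	epoller.Close()
}
```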
+type EpollConsole struct { + Console + readc *sync.Cond + writec *sync.Cond + sysfd int + closed bool +} + +// Read reads up to len(p) bytes into p. It returns the number of bytes read +// (0 <= n <= len(p)) and any error encountered. +// +// If the console's read returns EAGAIN or EIO, we assume that it's a +// temporary error because the other side went away and wait for the signal +// generated by an epoll event to continue. +func (ec *EpollConsole) Read(p []byte) (n int, err error) { + var read int + ec.readc.L.Lock() + defer ec.readc.L.Unlock() + for { + read, err = ec.Console.Read(p[n:]) + n += read + if err != nil { + var hangup bool + if perr, ok := err.(*os.PathError); ok { + hangup = (perr.Err == unix.EAGAIN || perr.Err == unix.EIO) + } else { + hangup = (err == unix.EAGAIN || err == unix.EIO) + } + // if the other end disappears, assume this is temporary and wait for the + // signal to continue again, unless we didn't read anything and the + // console is already marked as closed, in which case we should exit + if hangup && !(n == 0 && len(p) > 0 && ec.closed) { + ec.readc.Wait() + continue + } + } + break + } + // if we didn't read anything then return io.EOF to end gracefully + if n == 0 && len(p) > 0 && err == nil { + err = io.EOF + } + // signal for others that we finished the read + ec.readc.Signal() + return n, err +} + +// Write writes len(p) bytes from p to the console. It returns the number of bytes +// written from p (0 <= n <= len(p)) and any error encountered that caused +// the write to stop early. +// +// If a write to the console returns EAGAIN or EIO, we assume that it's a +// temporary error because the other side went away and wait for the signal +// generated by an epoll event to continue. +func (ec *EpollConsole) Write(p []byte) (n int, err error) { + var written int + ec.writec.L.Lock() + defer ec.writec.L.Unlock() + for { + written, err = ec.Console.Write(p[n:]) + n += written + if err != nil { + var hangup bool + if perr, ok := err.(*os.PathError); ok { + hangup = (perr.Err == unix.EAGAIN || perr.Err == unix.EIO) + } else { + hangup = (err == unix.EAGAIN || err == unix.EIO) + } + // if the other end disappears, assume this is temporary and wait for the + // signal to continue again. + if hangup { + ec.writec.Wait() + continue + } + } + // unrecoverable error, break the loop and return the error + break + } + if n < len(p) && err == nil { + err = io.ErrShortWrite + } + // signal for others that we finished the write + ec.writec.Signal() + return n, err +} + +// Shutdown closes the file descriptor and signals all waiters for this fd. +// It accepts a callback which will be called with the console's fd. The +// callback typically will be used to do further cleanup such as unregistering the +// console's fd from the epoll interface. +// Users should call Shutdown and wait for all I/O operations to be finished +// before closing the console. +func (ec *EpollConsole) Shutdown(close func(int) error) error { + ec.readc.L.Lock() + defer ec.readc.L.Unlock() + ec.writec.L.Lock() + defer ec.writec.L.Unlock() + + ec.readc.Broadcast() + ec.writec.Broadcast() + ec.closed = true + return close(ec.sysfd) +} + +// signalRead signals that the console is readable. +func (ec *EpollConsole) signalRead() { + ec.readc.L.Lock() + ec.readc.Signal() + ec.readc.L.Unlock() +} + +// signalWrite signals that the console is writable.
+func (ec *EpollConsole) signalWrite() { + ec.writec.L.Lock() + ec.writec.Signal() + ec.writec.L.Unlock() +} diff --git a/vendor/github.com/containerd/console/console_unix.go b/vendor/github.com/containerd/console/console_unix.go new file mode 100644 index 00000000..315f1d0c --- /dev/null +++ b/vendor/github.com/containerd/console/console_unix.go @@ -0,0 +1,158 @@ +// +build darwin freebsd linux openbsd solaris + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package console + +import ( + "os" + + "golang.org/x/sys/unix" +) + +// NewPty creates a new pty pair +// The master is returned as the first console and a string +// with the path to the pty slave is returned as the second +func NewPty() (Console, string, error) { + f, err := os.OpenFile("/dev/ptmx", unix.O_RDWR|unix.O_NOCTTY|unix.O_CLOEXEC, 0) + if err != nil { + return nil, "", err + } + slave, err := ptsname(f) + if err != nil { + return nil, "", err + } + if err := unlockpt(f); err != nil { + return nil, "", err + } + m, err := newMaster(f) + if err != nil { + return nil, "", err + } + return m, slave, nil +} + +type master struct { + f File + original *unix.Termios +} + +func (m *master) Read(b []byte) (int, error) { + return m.f.Read(b) +} + +func (m *master) Write(b []byte) (int, error) { + return m.f.Write(b) +} + +func (m *master) Close() error { + return m.f.Close() +} + +func (m *master) Resize(ws WinSize) error { + return tcswinsz(m.f.Fd(), ws) +} + +func (m *master) ResizeFrom(c Console) error { + ws, err := c.Size() + if err != nil { + return err + } + return m.Resize(ws) +} + +func (m *master) Reset() error { + if m.original == nil { + return nil + } + return tcset(m.f.Fd(), m.original) +} + +func (m *master) getCurrent() (unix.Termios, error) { + var termios unix.Termios + if err := tcget(m.f.Fd(), &termios); err != nil { + return unix.Termios{}, err + } + return termios, nil +} + +func (m *master) SetRaw() error { + rawState, err := m.getCurrent() + if err != nil { + return err + } + rawState = cfmakeraw(rawState) + rawState.Oflag = rawState.Oflag | unix.OPOST + return tcset(m.f.Fd(), &rawState) +} + +func (m *master) DisableEcho() error { + rawState, err := m.getCurrent() + if err != nil { + return err + } + rawState.Lflag = rawState.Lflag &^ unix.ECHO + return tcset(m.f.Fd(), &rawState) +} + +func (m *master) Size() (WinSize, error) { + return tcgwinsz(m.f.Fd()) +} + +func (m *master) Fd() uintptr { + return m.f.Fd() +} + +func (m *master) Name() string { + return m.f.Name() +} + +// checkConsole checks if the provided file is a console +func checkConsole(f File) error { + var termios unix.Termios + if tcget(f.Fd(), &termios) != nil { + return ErrNotAConsole + } + return nil +} + +func newMaster(f File) (Console, error) { + m := &master{ + f: f, + } + t, err := m.getCurrent() + if err != nil { + return nil, err + } + m.original = &t + return m, nil +} + +// ClearONLCR sets the necessary tty_ioctl(4)s to ensure that a pty pair +// created by us acts normally. 
In particular, a not-very-well-known default of +// Linux unix98 ptys is that they have +onlcr by default. While this isn't a +// problem for terminal emulators, because we relay data from the terminal we +// also relay that funky line discipline. +func ClearONLCR(fd uintptr) error { + return setONLCR(fd, false) +} + +// SetONLCR sets the necessary tty_ioctl(4)s to ensure that a pty pair +// created by us acts as intended for a terminal emulator. +func SetONLCR(fd uintptr) error { + return setONLCR(fd, true) +} diff --git a/vendor/github.com/containerd/console/console_windows.go b/vendor/github.com/containerd/console/console_windows.go new file mode 100644 index 00000000..129a9288 --- /dev/null +++ b/vendor/github.com/containerd/console/console_windows.go @@ -0,0 +1,216 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package console + +import ( + "fmt" + "os" + + "github.com/pkg/errors" + "golang.org/x/sys/windows" +) + +var ( + vtInputSupported bool + ErrNotImplemented = errors.New("not implemented") +) + +func (m *master) initStdios() { + m.in = windows.Handle(os.Stdin.Fd()) + if err := windows.GetConsoleMode(m.in, &m.inMode); err == nil { + // Validate that windows.ENABLE_VIRTUAL_TERMINAL_INPUT is supported, but do not set it. + if err = windows.SetConsoleMode(m.in, m.inMode|windows.ENABLE_VIRTUAL_TERMINAL_INPUT); err == nil { + vtInputSupported = true + } + // Unconditionally set the console mode back even on failure because SetConsoleMode + // remembers invalid bits on input handles. + windows.SetConsoleMode(m.in, m.inMode) + } else { + fmt.Printf("failed to get console mode for stdin: %v\n", err) + } + + m.out = windows.Handle(os.Stdout.Fd()) + if err := windows.GetConsoleMode(m.out, &m.outMode); err == nil { + if err := windows.SetConsoleMode(m.out, m.outMode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING); err == nil { + m.outMode |= windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING + } else { + windows.SetConsoleMode(m.out, m.outMode) + } + } else { + fmt.Printf("failed to get console mode for stdout: %v\n", err) + } + + m.err = windows.Handle(os.Stderr.Fd()) + if err := windows.GetConsoleMode(m.err, &m.errMode); err == nil { + if err := windows.SetConsoleMode(m.err, m.errMode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING); err == nil { + m.errMode |= windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING + } else { + windows.SetConsoleMode(m.err, m.errMode) + } + } else { + fmt.Printf("failed to get console mode for stderr: %v\n", err) + } +} + +type master struct { + in windows.Handle + inMode uint32 + + out windows.Handle + outMode uint32 + + err windows.Handle + errMode uint32 +} + +func (m *master) SetRaw() error { + if err := makeInputRaw(m.in, m.inMode); err != nil { + return err + } + + // Set StdOut and StdErr to raw mode, we ignore failures since + // windows.DISABLE_NEWLINE_AUTO_RETURN might not be supported on this version of + // Windows. 
+
+	windows.SetConsoleMode(m.out, m.outMode|windows.DISABLE_NEWLINE_AUTO_RETURN)
+
+	windows.SetConsoleMode(m.err, m.errMode|windows.DISABLE_NEWLINE_AUTO_RETURN)
+
+	return nil
+}
+
+func (m *master) Reset() error {
+	for _, s := range []struct {
+		fd   windows.Handle
+		mode uint32
+	}{
+		{m.in, m.inMode},
+		{m.out, m.outMode},
+		{m.err, m.errMode},
+	} {
+		if err := windows.SetConsoleMode(s.fd, s.mode); err != nil {
+			return errors.Wrap(err, "unable to restore console mode")
+		}
+	}
+
+	return nil
+}
+
+func (m *master) Size() (WinSize, error) {
+	var info windows.ConsoleScreenBufferInfo
+	err := windows.GetConsoleScreenBufferInfo(m.out, &info)
+	if err != nil {
+		return WinSize{}, errors.Wrap(err, "unable to get console info")
+	}
+
+	winsize := WinSize{
+		Width:  uint16(info.Window.Right - info.Window.Left + 1),
+		Height: uint16(info.Window.Bottom - info.Window.Top + 1),
+	}
+
+	return winsize, nil
+}
+
+func (m *master) Resize(ws WinSize) error {
+	return ErrNotImplemented
+}
+
+func (m *master) ResizeFrom(c Console) error {
+	return ErrNotImplemented
+}
+
+func (m *master) DisableEcho() error {
+	mode := m.inMode &^ windows.ENABLE_ECHO_INPUT
+	mode |= windows.ENABLE_PROCESSED_INPUT
+	mode |= windows.ENABLE_LINE_INPUT
+
+	if err := windows.SetConsoleMode(m.in, mode); err != nil {
+		return errors.Wrap(err, "unable to set console to disable echo")
+	}
+
+	return nil
+}
+
+func (m *master) Close() error {
+	return nil
+}
+
+func (m *master) Read(b []byte) (int, error) {
+	return os.Stdin.Read(b)
+}
+
+func (m *master) Write(b []byte) (int, error) {
+	return os.Stdout.Write(b)
+}
+
+func (m *master) Fd() uintptr {
+	return uintptr(m.in)
+}
+
+// on windows, console can only be made from os.Std{in,out,err}, hence there
+// isn't a single name here we can use. Returning a dummy "console" value in
+// this case should be sufficient.
+func (m *master) Name() string { + return "console" +} + +// makeInputRaw puts the terminal (Windows Console) connected to the given +// file descriptor into raw mode +func makeInputRaw(fd windows.Handle, mode uint32) error { + // See + // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx + // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx + + // Disable these modes + mode &^= windows.ENABLE_ECHO_INPUT + mode &^= windows.ENABLE_LINE_INPUT + mode &^= windows.ENABLE_MOUSE_INPUT + mode &^= windows.ENABLE_WINDOW_INPUT + mode &^= windows.ENABLE_PROCESSED_INPUT + + // Enable these modes + mode |= windows.ENABLE_EXTENDED_FLAGS + mode |= windows.ENABLE_INSERT_MODE + mode |= windows.ENABLE_QUICK_EDIT_MODE + + if vtInputSupported { + mode |= windows.ENABLE_VIRTUAL_TERMINAL_INPUT + } + + if err := windows.SetConsoleMode(fd, mode); err != nil { + return errors.Wrap(err, "unable to set console to raw mode") + } + + return nil +} + +func checkConsole(f File) error { + var mode uint32 + if err := windows.GetConsoleMode(windows.Handle(f.Fd()), &mode); err != nil { + return err + } + return nil +} + +func newMaster(f File) (Console, error) { + if f != os.Stdin && f != os.Stdout && f != os.Stderr { + return nil, errors.New("creating a console from a file is not supported on windows") + } + m := &master{} + m.initStdios() + return m, nil +} diff --git a/vendor/github.com/containerd/console/go.mod b/vendor/github.com/containerd/console/go.mod new file mode 100644 index 00000000..97b587d6 --- /dev/null +++ b/vendor/github.com/containerd/console/go.mod @@ -0,0 +1,8 @@ +module github.com/containerd/console + +go 1.13 + +require ( + github.com/pkg/errors v0.8.1 + golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e +) diff --git a/vendor/github.com/containerd/console/go.sum b/vendor/github.com/containerd/console/go.sum new file mode 100644 index 00000000..25205cc9 --- /dev/null +++ b/vendor/github.com/containerd/console/go.sum @@ -0,0 +1,4 @@ +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e h1:N7DeIrjYszNmSW409R3frPPwglRwMkXSBzwVbkOjLLA= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/containerd/console/tc_darwin.go b/vendor/github.com/containerd/console/tc_darwin.go new file mode 100644 index 00000000..b0128abb --- /dev/null +++ b/vendor/github.com/containerd/console/tc_darwin.go @@ -0,0 +1,53 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package console + +import ( + "fmt" + "os" + "unsafe" + + "golang.org/x/sys/unix" +) + +const ( + cmdTcGet = unix.TIOCGETA + cmdTcSet = unix.TIOCSETA +) + +func ioctl(fd, flag, data uintptr) error { + if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, flag, data); err != 0 { + return err + } + return nil +} + +// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f. +// unlockpt should be called before opening the slave side of a pty. +func unlockpt(f *os.File) error { + var u int32 + return ioctl(f.Fd(), unix.TIOCPTYUNLK, uintptr(unsafe.Pointer(&u))) +} + +// ptsname retrieves the name of the first available pts for the given master. +func ptsname(f *os.File) (string, error) { + n, err := unix.IoctlGetInt(int(f.Fd()), unix.TIOCPTYGNAME) + if err != nil { + return "", err + } + return fmt.Sprintf("/dev/pts/%d", n), nil +} diff --git a/vendor/github.com/containerd/console/tc_freebsd.go b/vendor/github.com/containerd/console/tc_freebsd.go new file mode 100644 index 00000000..04583a61 --- /dev/null +++ b/vendor/github.com/containerd/console/tc_freebsd.go @@ -0,0 +1,45 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package console + +import ( + "fmt" + "os" + + "golang.org/x/sys/unix" +) + +const ( + cmdTcGet = unix.TIOCGETA + cmdTcSet = unix.TIOCSETA +) + +// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f. +// unlockpt should be called before opening the slave side of a pty. +// This does not exist on FreeBSD, it does not allocate controlling terminals on open +func unlockpt(f *os.File) error { + return nil +} + +// ptsname retrieves the name of the first available pts for the given master. +func ptsname(f *os.File) (string, error) { + n, err := unix.IoctlGetInt(int(f.Fd()), unix.TIOCGPTN) + if err != nil { + return "", err + } + return fmt.Sprintf("/dev/pts/%d", n), nil +} diff --git a/vendor/github.com/containerd/console/tc_linux.go b/vendor/github.com/containerd/console/tc_linux.go new file mode 100644 index 00000000..1bdd68e6 --- /dev/null +++ b/vendor/github.com/containerd/console/tc_linux.go @@ -0,0 +1,49 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/
+
+package console
+
+import (
+	"fmt"
+	"os"
+	"unsafe"
+
+	"golang.org/x/sys/unix"
+)
+
+const (
+	cmdTcGet = unix.TCGETS
+	cmdTcSet = unix.TCSETS
+)
+
+// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f.
+// unlockpt should be called before opening the slave side of a pty.
+func unlockpt(f *os.File) error {
+	var u int32
+	if _, _, err := unix.Syscall(unix.SYS_IOCTL, f.Fd(), unix.TIOCSPTLCK, uintptr(unsafe.Pointer(&u))); err != 0 {
+		return err
+	}
+	return nil
+}
+
+// ptsname retrieves the name of the first available pts for the given master.
+func ptsname(f *os.File) (string, error) {
+	var u uint32
+	if _, _, err := unix.Syscall(unix.SYS_IOCTL, f.Fd(), unix.TIOCGPTN, uintptr(unsafe.Pointer(&u))); err != 0 {
+		return "", err
+	}
+	return fmt.Sprintf("/dev/pts/%d", u), nil
+}
diff --git a/vendor/github.com/containerd/console/tc_openbsd_cgo.go b/vendor/github.com/containerd/console/tc_openbsd_cgo.go
new file mode 100644
index 00000000..f0cec06a
--- /dev/null
+++ b/vendor/github.com/containerd/console/tc_openbsd_cgo.go
@@ -0,0 +1,51 @@
+// +build openbsd,cgo
+
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package console
+
+import (
+	"os"
+
+	"golang.org/x/sys/unix"
+)
+
+//#include <stdlib.h>
+import "C"
+
+const (
+	cmdTcGet = unix.TIOCGETA
+	cmdTcSet = unix.TIOCSETA
+)
+
+// ptsname retrieves the name of the first available pts for the given master.
+func ptsname(f *os.File) (string, error) {
+	ptspath, err := C.ptsname(C.int(f.Fd()))
+	if err != nil {
+		return "", err
+	}
+	return C.GoString(ptspath), nil
+}
+
+// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f.
+// unlockpt should be called before opening the slave side of a pty.
+func unlockpt(f *os.File) error {
+	if _, err := C.grantpt(C.int(f.Fd())); err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/vendor/github.com/containerd/console/tc_openbsd_nocgo.go b/vendor/github.com/containerd/console/tc_openbsd_nocgo.go
new file mode 100644
index 00000000..daccce20
--- /dev/null
+++ b/vendor/github.com/containerd/console/tc_openbsd_nocgo.go
@@ -0,0 +1,47 @@
+// +build openbsd,!cgo
+
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+//
+// Implementing the functions below requires cgo support. Non-cgo stub
+// versions are defined below to enable cross-compilation of source code
+// that depends on these functions, but the resultant cross-compiled
+// binaries cannot actually be used. If the stub function(s) below are
+// actually invoked they will display an error message and cause the
+// calling process to exit.
+//
+
+package console
+
+import (
+	"os"
+
+	"golang.org/x/sys/unix"
+)
+
+const (
+	cmdTcGet = unix.TIOCGETA
+	cmdTcSet = unix.TIOCSETA
+)
+
+func ptsname(f *os.File) (string, error) {
+	panic("ptsname() support requires cgo.")
+}
+
+func unlockpt(f *os.File) error {
+	panic("unlockpt() support requires cgo.")
+}
diff --git a/vendor/github.com/containerd/console/tc_solaris_cgo.go b/vendor/github.com/containerd/console/tc_solaris_cgo.go
new file mode 100644
index 00000000..e36a68ed
--- /dev/null
+++ b/vendor/github.com/containerd/console/tc_solaris_cgo.go
@@ -0,0 +1,51 @@
+// +build solaris,cgo
+
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package console
+
+import (
+	"os"
+
+	"golang.org/x/sys/unix"
+)
+
+//#include <stdlib.h>
+import "C"
+
+const (
+	cmdTcGet = unix.TCGETS
+	cmdTcSet = unix.TCSETS
+)
+
+// ptsname retrieves the name of the first available pts for the given master.
+func ptsname(f *os.File) (string, error) {
+	ptspath, err := C.ptsname(C.int(f.Fd()))
+	if err != nil {
+		return "", err
+	}
+	return C.GoString(ptspath), nil
+}
+
+// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f.
+// unlockpt should be called before opening the slave side of a pty.
+func unlockpt(f *os.File) error {
+	if _, err := C.grantpt(C.int(f.Fd())); err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/vendor/github.com/containerd/console/tc_solaris_nocgo.go b/vendor/github.com/containerd/console/tc_solaris_nocgo.go
new file mode 100644
index 00000000..eb0bd2c3
--- /dev/null
+++ b/vendor/github.com/containerd/console/tc_solaris_nocgo.go
@@ -0,0 +1,47 @@
+// +build solaris,!cgo
+
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+//
+// Implementing the functions below requires cgo support. Non-cgo stub
+// versions are defined below to enable cross-compilation of source code
+// that depends on these functions, but the resultant cross-compiled
+// binaries cannot actually be used. If the stub function(s) below are
+// actually invoked they will display an error message and cause the
+// calling process to exit.
+//
+
+package console
+
+import (
+	"os"
+
+	"golang.org/x/sys/unix"
+)
+
+const (
+	cmdTcGet = unix.TCGETS
+	cmdTcSet = unix.TCSETS
+)
+
+func ptsname(f *os.File) (string, error) {
+	panic("ptsname() support requires cgo.")
+}
+
+func unlockpt(f *os.File) error {
+	panic("unlockpt() support requires cgo.")
+}
diff --git a/vendor/github.com/containerd/console/tc_unix.go b/vendor/github.com/containerd/console/tc_unix.go
new file mode 100644
index 00000000..7ae773c5
--- /dev/null
+++ b/vendor/github.com/containerd/console/tc_unix.go
@@ -0,0 +1,91 @@
+// +build darwin freebsd linux openbsd solaris
+
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package console
+
+import (
+	"golang.org/x/sys/unix"
+)
+
+func tcget(fd uintptr, p *unix.Termios) error {
+	termios, err := unix.IoctlGetTermios(int(fd), cmdTcGet)
+	if err != nil {
+		return err
+	}
+	*p = *termios
+	return nil
+}
+
+func tcset(fd uintptr, p *unix.Termios) error {
+	return unix.IoctlSetTermios(int(fd), cmdTcSet, p)
+}
+
+func tcgwinsz(fd uintptr) (WinSize, error) {
+	var ws WinSize
+
+	uws, err := unix.IoctlGetWinsize(int(fd), unix.TIOCGWINSZ)
+	if err != nil {
+		return ws, err
+	}
+
+	// Translate from unix.Winsize to console.WinSize
+	ws.Height = uws.Row
+	ws.Width = uws.Col
+	ws.x = uws.Xpixel
+	ws.y = uws.Ypixel
+	return ws, nil
+}
+
+func tcswinsz(fd uintptr, ws WinSize) error {
+	// Translate from console.WinSize to unix.Winsize
+
+	var uws unix.Winsize
+	uws.Row = ws.Height
+	uws.Col = ws.Width
+	uws.Xpixel = ws.x
+	uws.Ypixel = ws.y
+
+	return unix.IoctlSetWinsize(int(fd), unix.TIOCSWINSZ, &uws)
+}
+
+func setONLCR(fd uintptr, enable bool) error {
+	var termios unix.Termios
+	if err := tcget(fd, &termios); err != nil {
+		return err
+	}
+	if enable {
+		// Set +onlcr so we can act like a real terminal
+		termios.Oflag |= unix.ONLCR
+	} else {
+		// Set -onlcr so we don't have to deal with \r.
+		termios.Oflag &^= unix.ONLCR
+	}
+	return tcset(fd, &termios)
+}
+
+func cfmakeraw(t unix.Termios) unix.Termios {
+	t.Iflag &^= (unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON)
+	t.Oflag &^= unix.OPOST
+	t.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN)
+	t.Cflag &^= (unix.CSIZE | unix.PARENB)
+	t.Cflag |= unix.CS8
+	t.Cc[unix.VMIN] = 1
+	t.Cc[unix.VTIME] = 0
+
+	return t
+}
diff --git a/vendor/github.com/containerd/containerd/api/services/content/v1/content.pb.go b/vendor/github.com/containerd/containerd/api/services/content/v1/content.pb.go
new file mode 100644
index 00000000..1cf0aaa9
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/services/content/v1/content.pb.go
@@ -0,0 +1,5479 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: github.com/containerd/containerd/api/services/content/v1/content.proto + +package content + +import ( + context "context" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" + types "github.com/gogo/protobuf/types" + github_com_opencontainers_go_digest "github.com/opencontainers/go-digest" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" + time "time" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// WriteAction defines the behavior of a WriteRequest. +type WriteAction int32 + +const ( + // WriteActionStat instructs the writer to return the current status while + // holding the lock on the write. + WriteActionStat WriteAction = 0 + // WriteActionWrite sets the action for the write request to write data. + // + // Any data included will be written at the provided offset. The + // transaction will be left open for further writes. + // + // This is the default. + WriteActionWrite WriteAction = 1 + // WriteActionCommit will write any outstanding data in the message and + // commit the write, storing it under the digest. + // + // This can be used in a single message to send the data, verify it and + // commit it. + // + // This action will always terminate the write. + WriteActionCommit WriteAction = 2 +) + +var WriteAction_name = map[int32]string{ + 0: "STAT", + 1: "WRITE", + 2: "COMMIT", +} + +var WriteAction_value = map[string]int32{ + "STAT": 0, + "WRITE": 1, + "COMMIT": 2, +} + +func (x WriteAction) String() string { + return proto.EnumName(WriteAction_name, int32(x)) +} + +func (WriteAction) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_468430ba3e400391, []int{0} +} + +type Info struct { + // Digest is the hash identity of the blob. + Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,1,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"` + // Size is the total number of bytes in the blob. + Size_ int64 `protobuf:"varint,2,opt,name=size,proto3" json:"size,omitempty"` + // CreatedAt provides the time at which the blob was committed. + CreatedAt time.Time `protobuf:"bytes,3,opt,name=created_at,json=createdAt,proto3,stdtime" json:"created_at"` + // UpdatedAt provides the time the info was last updated. + UpdatedAt time.Time `protobuf:"bytes,4,opt,name=updated_at,json=updatedAt,proto3,stdtime" json:"updated_at"` + // Labels are arbitrary data on snapshots. + // + // The combined size of a key/value pair cannot exceed 4096 bytes. 
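+	//
+	// An illustrative (not exhaustive) example of such a pair is the
+	// "containerd.io/gc.root" label with an RFC 3339 timestamp value, which
+	// marks a blob as a garbage-collection root.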
+ Labels map[string]string `protobuf:"bytes,5,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Info) Reset() { *m = Info{} } +func (*Info) ProtoMessage() {} +func (*Info) Descriptor() ([]byte, []int) { + return fileDescriptor_468430ba3e400391, []int{0} +} +func (m *Info) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Info) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Info.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Info) XXX_Merge(src proto.Message) { + xxx_messageInfo_Info.Merge(m, src) +} +func (m *Info) XXX_Size() int { + return m.Size() +} +func (m *Info) XXX_DiscardUnknown() { + xxx_messageInfo_Info.DiscardUnknown(m) +} + +var xxx_messageInfo_Info proto.InternalMessageInfo + +type InfoRequest struct { + Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,1,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InfoRequest) Reset() { *m = InfoRequest{} } +func (*InfoRequest) ProtoMessage() {} +func (*InfoRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_468430ba3e400391, []int{1} +} +func (m *InfoRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *InfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_InfoRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *InfoRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_InfoRequest.Merge(m, src) +} +func (m *InfoRequest) XXX_Size() int { + return m.Size() +} +func (m *InfoRequest) XXX_DiscardUnknown() { + xxx_messageInfo_InfoRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_InfoRequest proto.InternalMessageInfo + +type InfoResponse struct { + Info Info `protobuf:"bytes,1,opt,name=info,proto3" json:"info"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InfoResponse) Reset() { *m = InfoResponse{} } +func (*InfoResponse) ProtoMessage() {} +func (*InfoResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_468430ba3e400391, []int{2} +} +func (m *InfoResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *InfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_InfoResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *InfoResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_InfoResponse.Merge(m, src) +} +func (m *InfoResponse) XXX_Size() int { + return m.Size() +} +func (m *InfoResponse) XXX_DiscardUnknown() { + xxx_messageInfo_InfoResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_InfoResponse proto.InternalMessageInfo + +type UpdateRequest struct { + Info Info `protobuf:"bytes,1,opt,name=info,proto3" 
json:"info"`
+	// UpdateMask specifies which fields to perform the update on. If empty,
+	// the operation applies to all fields.
+	//
+	// In info, Digest, Size, and CreatedAt are immutable,
+	// other fields may be updated using this mask.
+	// If no mask is provided, all mutable fields are updated.
+	UpdateMask           *types.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}         `json:"-"`
+	XXX_unrecognized     []byte           `json:"-"`
+	XXX_sizecache        int32            `json:"-"`
+}
+
+func (m *UpdateRequest) Reset()      { *m = UpdateRequest{} }
+func (*UpdateRequest) ProtoMessage() {}
+func (*UpdateRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_468430ba3e400391, []int{3}
+}
+func (m *UpdateRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *UpdateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_UpdateRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *UpdateRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_UpdateRequest.Merge(m, src)
+}
+func (m *UpdateRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *UpdateRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_UpdateRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UpdateRequest proto.InternalMessageInfo
+
+type UpdateResponse struct {
+	Info                 Info     `protobuf:"bytes,1,opt,name=info,proto3" json:"info"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *UpdateResponse) Reset()      { *m = UpdateResponse{} }
+func (*UpdateResponse) ProtoMessage() {}
+func (*UpdateResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_468430ba3e400391, []int{4}
+}
+func (m *UpdateResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *UpdateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_UpdateResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *UpdateResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_UpdateResponse.Merge(m, src)
+}
+func (m *UpdateResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *UpdateResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_UpdateResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UpdateResponse proto.InternalMessageInfo
+
+type ListContentRequest struct {
+	// Filters contains one or more filters using the syntax defined in the
+	// containerd filter package.
+	//
+	// The returned result will be those that match any of the provided
+	// filters. Expanded, items that match the following will be
+	// returned:
+	//
+	//	filters[0] or filters[1] or ... or filters[n-1] or filters[n]
+	//
+	// If filters is zero-length or nil, all items will be returned.
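+	//
+	// A hypothetical example, assuming content exposes "digest" and "labels"
+	// fields to the filter syntax (the exact field set is defined by the
+	// containerd filter adaptors):
+	//
+	//	filters[0]: digest~=sha256:.*
+	//	filters[1]: labels."containerd.io/gc.root"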
+ Filters []string `protobuf:"bytes,1,rep,name=filters,proto3" json:"filters,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListContentRequest) Reset() { *m = ListContentRequest{} } +func (*ListContentRequest) ProtoMessage() {} +func (*ListContentRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_468430ba3e400391, []int{5} +} +func (m *ListContentRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ListContentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ListContentRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ListContentRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListContentRequest.Merge(m, src) +} +func (m *ListContentRequest) XXX_Size() int { + return m.Size() +} +func (m *ListContentRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListContentRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListContentRequest proto.InternalMessageInfo + +type ListContentResponse struct { + Info []Info `protobuf:"bytes,1,rep,name=info,proto3" json:"info"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListContentResponse) Reset() { *m = ListContentResponse{} } +func (*ListContentResponse) ProtoMessage() {} +func (*ListContentResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_468430ba3e400391, []int{6} +} +func (m *ListContentResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ListContentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ListContentResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ListContentResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListContentResponse.Merge(m, src) +} +func (m *ListContentResponse) XXX_Size() int { + return m.Size() +} +func (m *ListContentResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListContentResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListContentResponse proto.InternalMessageInfo + +type DeleteContentRequest struct { + // Digest specifies which content to delete. 
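+	//
+	// For example (illustrative value; the sha256 of the empty string):
+	//
+	//	digest: "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"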
+ Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,1,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteContentRequest) Reset() { *m = DeleteContentRequest{} } +func (*DeleteContentRequest) ProtoMessage() {} +func (*DeleteContentRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_468430ba3e400391, []int{7} +} +func (m *DeleteContentRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeleteContentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DeleteContentRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DeleteContentRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteContentRequest.Merge(m, src) +} +func (m *DeleteContentRequest) XXX_Size() int { + return m.Size() +} +func (m *DeleteContentRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteContentRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteContentRequest proto.InternalMessageInfo + +// ReadContentRequest defines the fields that make up a request to read a portion of +// data from a stored object. +type ReadContentRequest struct { + // Digest is the hash identity to read. + Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,1,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"` + // Offset specifies the number of bytes from the start at which to begin + // the read. If zero or less, the read will be from the start. This uses + // standard zero-indexed semantics. + Offset int64 `protobuf:"varint,2,opt,name=offset,proto3" json:"offset,omitempty"` + // size is the total size of the read. If zero, the entire blob will be + // returned by the service. + Size_ int64 `protobuf:"varint,3,opt,name=size,proto3" json:"size,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReadContentRequest) Reset() { *m = ReadContentRequest{} } +func (*ReadContentRequest) ProtoMessage() {} +func (*ReadContentRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_468430ba3e400391, []int{8} +} +func (m *ReadContentRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReadContentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ReadContentRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ReadContentRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReadContentRequest.Merge(m, src) +} +func (m *ReadContentRequest) XXX_Size() int { + return m.Size() +} +func (m *ReadContentRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ReadContentRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ReadContentRequest proto.InternalMessageInfo + +// ReadContentResponse carries byte data for a read request. 
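+//
+// For example (illustrative): a ReadContentRequest with offset 0 and size 0
+// yields the entire blob as a stream of responses whose Offset advances by
+// len(Data) with each message.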
+type ReadContentResponse struct { + Offset int64 `protobuf:"varint,1,opt,name=offset,proto3" json:"offset,omitempty"` + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReadContentResponse) Reset() { *m = ReadContentResponse{} } +func (*ReadContentResponse) ProtoMessage() {} +func (*ReadContentResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_468430ba3e400391, []int{9} +} +func (m *ReadContentResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReadContentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ReadContentResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ReadContentResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReadContentResponse.Merge(m, src) +} +func (m *ReadContentResponse) XXX_Size() int { + return m.Size() +} +func (m *ReadContentResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ReadContentResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ReadContentResponse proto.InternalMessageInfo + +type Status struct { + StartedAt time.Time `protobuf:"bytes,1,opt,name=started_at,json=startedAt,proto3,stdtime" json:"started_at"` + UpdatedAt time.Time `protobuf:"bytes,2,opt,name=updated_at,json=updatedAt,proto3,stdtime" json:"updated_at"` + Ref string `protobuf:"bytes,3,opt,name=ref,proto3" json:"ref,omitempty"` + Offset int64 `protobuf:"varint,4,opt,name=offset,proto3" json:"offset,omitempty"` + Total int64 `protobuf:"varint,5,opt,name=total,proto3" json:"total,omitempty"` + Expected github_com_opencontainers_go_digest.Digest `protobuf:"bytes,6,opt,name=expected,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"expected"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Status) Reset() { *m = Status{} } +func (*Status) ProtoMessage() {} +func (*Status) Descriptor() ([]byte, []int) { + return fileDescriptor_468430ba3e400391, []int{10} +} +func (m *Status) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Status.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Status) XXX_Merge(src proto.Message) { + xxx_messageInfo_Status.Merge(m, src) +} +func (m *Status) XXX_Size() int { + return m.Size() +} +func (m *Status) XXX_DiscardUnknown() { + xxx_messageInfo_Status.DiscardUnknown(m) +} + +var xxx_messageInfo_Status proto.InternalMessageInfo + +type StatusRequest struct { + Ref string `protobuf:"bytes,1,opt,name=ref,proto3" json:"ref,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StatusRequest) Reset() { *m = StatusRequest{} } +func (*StatusRequest) ProtoMessage() {} +func (*StatusRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_468430ba3e400391, []int{11} +} +func (m *StatusRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if 
deterministic { + return xxx_messageInfo_StatusRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *StatusRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatusRequest.Merge(m, src) +} +func (m *StatusRequest) XXX_Size() int { + return m.Size() +} +func (m *StatusRequest) XXX_DiscardUnknown() { + xxx_messageInfo_StatusRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_StatusRequest proto.InternalMessageInfo + +type StatusResponse struct { + Status *Status `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StatusResponse) Reset() { *m = StatusResponse{} } +func (*StatusResponse) ProtoMessage() {} +func (*StatusResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_468430ba3e400391, []int{12} +} +func (m *StatusResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatusResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StatusResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *StatusResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatusResponse.Merge(m, src) +} +func (m *StatusResponse) XXX_Size() int { + return m.Size() +} +func (m *StatusResponse) XXX_DiscardUnknown() { + xxx_messageInfo_StatusResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_StatusResponse proto.InternalMessageInfo + +type ListStatusesRequest struct { + Filters []string `protobuf:"bytes,1,rep,name=filters,proto3" json:"filters,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListStatusesRequest) Reset() { *m = ListStatusesRequest{} } +func (*ListStatusesRequest) ProtoMessage() {} +func (*ListStatusesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_468430ba3e400391, []int{13} +} +func (m *ListStatusesRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ListStatusesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ListStatusesRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ListStatusesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListStatusesRequest.Merge(m, src) +} +func (m *ListStatusesRequest) XXX_Size() int { + return m.Size() +} +func (m *ListStatusesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListStatusesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListStatusesRequest proto.InternalMessageInfo + +type ListStatusesResponse struct { + Statuses []Status `protobuf:"bytes,1,rep,name=statuses,proto3" json:"statuses"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListStatusesResponse) Reset() { *m = ListStatusesResponse{} } +func (*ListStatusesResponse) ProtoMessage() {} +func (*ListStatusesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_468430ba3e400391, []int{14} +} +func (m *ListStatusesResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m 
*ListStatusesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_ListStatusesResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *ListStatusesResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ListStatusesResponse.Merge(m, src)
+}
+func (m *ListStatusesResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *ListStatusesResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_ListStatusesResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListStatusesResponse proto.InternalMessageInfo
+
+// WriteContentRequest writes data to the request ref at offset.
+type WriteContentRequest struct {
+	// Action sets the behavior of the write.
+	//
+	// When this is a write and the ref is not yet allocated, the ref will be
+	// allocated and the data will be written at offset.
+	//
+	// If the action is write and the ref is allocated, it will accept data to
+	// an offset that has not yet been written.
+	//
+	// If the action is write and there is no data, the current write status
+	// will be returned. This works differently from status because the stream
+	// holds a lock.
+	Action WriteAction `protobuf:"varint,1,opt,name=action,proto3,enum=containerd.services.content.v1.WriteAction" json:"action,omitempty"`
+	// Ref identifies the pre-commit object to write to.
+	Ref string `protobuf:"bytes,2,opt,name=ref,proto3" json:"ref,omitempty"`
+	// Total can be set to have the service validate the total size of the
+	// committed content.
+	//
+	// The latest value before or with the commit action message will be used
+	// to validate the content. If the offset overflows total, the service may
+	// report an error. It is only required on one message for the write.
+	//
+	// If the value is zero or less, no validation of the final content will be
+	// performed.
+	Total int64 `protobuf:"varint,3,opt,name=total,proto3" json:"total,omitempty"`
+	// Expected can be set to have the service validate the final content against
+	// the provided digest.
+	//
+	// If the digest is already present in the object store, an AlreadyExists
+	// error will be returned.
+	//
+	// Only the latest version will be used to check the content against the
+	// digest. It is only required to include it on a single message, before or
+	// with the commit action message.
+	Expected github_com_opencontainers_go_digest.Digest `protobuf:"bytes,4,opt,name=expected,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"expected"`
+	// Offset specifies the number of bytes from the start at which to begin
+	// the write. For most implementations, this means from the start of the
+	// file. This uses standard, zero-indexed semantics.
+	//
+	// If the action is write, the remote may remove all previously written
+	// data after the offset. Implementations may support arbitrary offsets but
+	// MUST support resetting this value to zero with a write. If an
+	// implementation does not support a write at a particular offset, an
+	// OutOfRange error must be returned.
+	Offset int64 `protobuf:"varint,5,opt,name=offset,proto3" json:"offset,omitempty"`
+	// Data is the actual bytes to be written.
+	//
+	// If this is empty and the message is not a commit, a response will be
+	// returned with the current write state.
+	Data []byte `protobuf:"bytes,6,opt,name=data,proto3" json:"data,omitempty"`
+	// Labels are arbitrary data on snapshots.
+ // + // The combined size of a key/value pair cannot exceed 4096 bytes. + Labels map[string]string `protobuf:"bytes,7,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WriteContentRequest) Reset() { *m = WriteContentRequest{} } +func (*WriteContentRequest) ProtoMessage() {} +func (*WriteContentRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_468430ba3e400391, []int{15} +} +func (m *WriteContentRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WriteContentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_WriteContentRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *WriteContentRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_WriteContentRequest.Merge(m, src) +} +func (m *WriteContentRequest) XXX_Size() int { + return m.Size() +} +func (m *WriteContentRequest) XXX_DiscardUnknown() { + xxx_messageInfo_WriteContentRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_WriteContentRequest proto.InternalMessageInfo + +// WriteContentResponse is returned on the culmination of a write call. +type WriteContentResponse struct { + // Action contains the action for the final message of the stream. A writer + // should confirm that they match the intended result. + Action WriteAction `protobuf:"varint,1,opt,name=action,proto3,enum=containerd.services.content.v1.WriteAction" json:"action,omitempty"` + // StartedAt provides the time at which the write began. + // + // This must be set for stat and commit write actions. All other write + // actions may omit this. + StartedAt time.Time `protobuf:"bytes,2,opt,name=started_at,json=startedAt,proto3,stdtime" json:"started_at"` + // UpdatedAt provides the last time of a successful write. + // + // This must be set for stat and commit write actions. All other write + // actions may omit this. + UpdatedAt time.Time `protobuf:"bytes,3,opt,name=updated_at,json=updatedAt,proto3,stdtime" json:"updated_at"` + // Offset is the current committed size for the write. + Offset int64 `protobuf:"varint,4,opt,name=offset,proto3" json:"offset,omitempty"` + // Total provides the current, expected total size of the write. + // + // We include this to provide consistency with the Status structure on the + // client writer. + // + // This is only valid on the Stat and Commit response. + Total int64 `protobuf:"varint,5,opt,name=total,proto3" json:"total,omitempty"` + // Digest, if present, includes the digest up to the currently committed + // bytes. If action is commit, this field will be set. It is implementation + // defined if this is set for other actions. 
+ Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,6,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WriteContentResponse) Reset() { *m = WriteContentResponse{} } +func (*WriteContentResponse) ProtoMessage() {} +func (*WriteContentResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_468430ba3e400391, []int{16} +} +func (m *WriteContentResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WriteContentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_WriteContentResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *WriteContentResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_WriteContentResponse.Merge(m, src) +} +func (m *WriteContentResponse) XXX_Size() int { + return m.Size() +} +func (m *WriteContentResponse) XXX_DiscardUnknown() { + xxx_messageInfo_WriteContentResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_WriteContentResponse proto.InternalMessageInfo + +type AbortRequest struct { + Ref string `protobuf:"bytes,1,opt,name=ref,proto3" json:"ref,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AbortRequest) Reset() { *m = AbortRequest{} } +func (*AbortRequest) ProtoMessage() {} +func (*AbortRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_468430ba3e400391, []int{17} +} +func (m *AbortRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AbortRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AbortRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *AbortRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_AbortRequest.Merge(m, src) +} +func (m *AbortRequest) XXX_Size() int { + return m.Size() +} +func (m *AbortRequest) XXX_DiscardUnknown() { + xxx_messageInfo_AbortRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_AbortRequest proto.InternalMessageInfo + +func init() { + proto.RegisterEnum("containerd.services.content.v1.WriteAction", WriteAction_name, WriteAction_value) + proto.RegisterType((*Info)(nil), "containerd.services.content.v1.Info") + proto.RegisterMapType((map[string]string)(nil), "containerd.services.content.v1.Info.LabelsEntry") + proto.RegisterType((*InfoRequest)(nil), "containerd.services.content.v1.InfoRequest") + proto.RegisterType((*InfoResponse)(nil), "containerd.services.content.v1.InfoResponse") + proto.RegisterType((*UpdateRequest)(nil), "containerd.services.content.v1.UpdateRequest") + proto.RegisterType((*UpdateResponse)(nil), "containerd.services.content.v1.UpdateResponse") + proto.RegisterType((*ListContentRequest)(nil), "containerd.services.content.v1.ListContentRequest") + proto.RegisterType((*ListContentResponse)(nil), "containerd.services.content.v1.ListContentResponse") + proto.RegisterType((*DeleteContentRequest)(nil), "containerd.services.content.v1.DeleteContentRequest") + proto.RegisterType((*ReadContentRequest)(nil), "containerd.services.content.v1.ReadContentRequest") + 
proto.RegisterType((*ReadContentResponse)(nil), "containerd.services.content.v1.ReadContentResponse") + proto.RegisterType((*Status)(nil), "containerd.services.content.v1.Status") + proto.RegisterType((*StatusRequest)(nil), "containerd.services.content.v1.StatusRequest") + proto.RegisterType((*StatusResponse)(nil), "containerd.services.content.v1.StatusResponse") + proto.RegisterType((*ListStatusesRequest)(nil), "containerd.services.content.v1.ListStatusesRequest") + proto.RegisterType((*ListStatusesResponse)(nil), "containerd.services.content.v1.ListStatusesResponse") + proto.RegisterType((*WriteContentRequest)(nil), "containerd.services.content.v1.WriteContentRequest") + proto.RegisterMapType((map[string]string)(nil), "containerd.services.content.v1.WriteContentRequest.LabelsEntry") + proto.RegisterType((*WriteContentResponse)(nil), "containerd.services.content.v1.WriteContentResponse") + proto.RegisterType((*AbortRequest)(nil), "containerd.services.content.v1.AbortRequest") +} + +func init() { + proto.RegisterFile("github.com/containerd/containerd/api/services/content/v1/content.proto", fileDescriptor_468430ba3e400391) +} + +var fileDescriptor_468430ba3e400391 = []byte{ + // 1081 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x57, 0xcd, 0x6f, 0x1b, 0x45, + 0x14, 0xf7, 0x78, 0xed, 0x4d, 0xf2, 0x9c, 0x16, 0x33, 0x31, 0x95, 0xb5, 0x08, 0x67, 0xbb, 0x42, + 0xc8, 0x6a, 0xc9, 0x3a, 0x75, 0x7a, 0x00, 0x2a, 0x01, 0x8e, 0x9b, 0xaa, 0x41, 0x4d, 0x41, 0x5b, + 0x97, 0x40, 0x2f, 0x65, 0x6d, 0x8f, 0xcd, 0x2a, 0xb6, 0xd7, 0xdd, 0x19, 0x5b, 0x84, 0x13, 0x17, + 0x24, 0x14, 0xf5, 0x80, 0xb8, 0xe7, 0x02, 0xfc, 0x15, 0x1c, 0x38, 0xe7, 0xc8, 0x11, 0x71, 0x68, + 0x69, 0xfe, 0x07, 0xee, 0x68, 0x66, 0x67, 0xed, 0xf5, 0x47, 0x58, 0xdb, 0x31, 0x27, 0xbf, 0x99, + 0x7d, 0xbf, 0xf7, 0xfd, 0x31, 0x86, 0x7b, 0x4d, 0x87, 0x7d, 0xdd, 0xab, 0x9a, 0x35, 0xb7, 0x5d, + 0xa8, 0xb9, 0x1d, 0x66, 0x3b, 0x1d, 0xe2, 0xd5, 0xc3, 0xa4, 0xdd, 0x75, 0x0a, 0x94, 0x78, 0x7d, + 0xa7, 0x46, 0xa8, 0xb8, 0x27, 0x1d, 0x56, 0xe8, 0xdf, 0x0a, 0x48, 0xb3, 0xeb, 0xb9, 0xcc, 0xc5, + 0xb9, 0x21, 0xc2, 0x0c, 0xb8, 0xcd, 0x80, 0xa5, 0x7f, 0x4b, 0xcb, 0x34, 0xdd, 0xa6, 0x2b, 0x58, + 0x0b, 0x9c, 0xf2, 0x51, 0x9a, 0xde, 0x74, 0xdd, 0x66, 0x8b, 0x14, 0xc4, 0xa9, 0xda, 0x6b, 0x14, + 0x1a, 0x0e, 0x69, 0xd5, 0x9f, 0xb6, 0x6d, 0x7a, 0x24, 0x39, 0x36, 0xc7, 0x39, 0x98, 0xd3, 0x26, + 0x94, 0xd9, 0xed, 0xae, 0x64, 0x78, 0x73, 0x9c, 0x81, 0xb4, 0xbb, 0xec, 0xd8, 0xff, 0x68, 0xfc, + 0x13, 0x87, 0xc4, 0x7e, 0xa7, 0xe1, 0xe2, 0x4f, 0x40, 0xad, 0x3b, 0x4d, 0x42, 0x59, 0x16, 0xe9, + 0x28, 0xbf, 0xb6, 0x5b, 0x3c, 0x7b, 0xb1, 0x19, 0xfb, 0xeb, 0xc5, 0xe6, 0x8d, 0x90, 0xfb, 0x6e, + 0x97, 0x74, 0x06, 0x5e, 0xd0, 0x42, 0xd3, 0xdd, 0xf2, 0x21, 0xe6, 0x5d, 0xf1, 0x63, 0x49, 0x09, + 0x18, 0x43, 0x82, 0x3a, 0xdf, 0x92, 0x6c, 0x5c, 0x47, 0x79, 0xc5, 0x12, 0x34, 0x2e, 0x03, 0xd4, + 0x3c, 0x62, 0x33, 0x52, 0x7f, 0x6a, 0xb3, 0xac, 0xa2, 0xa3, 0x7c, 0xaa, 0xa8, 0x99, 0xbe, 0x69, + 0x66, 0x60, 0x9a, 0x59, 0x09, 0x6c, 0xdf, 0x5d, 0xe5, 0xfa, 0x7f, 0x7c, 0xb9, 0x89, 0xac, 0x35, + 0x89, 0x2b, 0x31, 0x2e, 0xa4, 0xd7, 0xad, 0x07, 0x42, 0x12, 0xf3, 0x08, 0x91, 0xb8, 0x12, 0xc3, + 0xf7, 0x41, 0x6d, 0xd9, 0x55, 0xd2, 0xa2, 0xd9, 0xa4, 0xae, 0xe4, 0x53, 0xc5, 0x6d, 0xf3, 0xbf, + 0x33, 0x63, 0xf2, 0xf8, 0x98, 0x0f, 0x04, 0x64, 0xaf, 0xc3, 0xbc, 0x63, 0x4b, 0xe2, 0xb5, 0xf7, + 0x21, 0x15, 0xba, 0xc6, 0x69, 0x50, 0x8e, 0xc8, 0xb1, 0x1f, 0x3f, 0x8b, 0x93, 0x38, 0x03, 0xc9, + 0xbe, 0xdd, 0xea, 0xf9, 0x91, 0x58, 0xb3, 0xfc, 0xc3, 
0x07, 0xf1, 0xf7, 0x90, 0xf1, 0x25, 0xa4, + 0xb8, 0x58, 0x8b, 0x3c, 0xeb, 0xf1, 0x88, 0x2d, 0x31, 0xfa, 0xc6, 0x43, 0x58, 0xf7, 0x45, 0xd3, + 0xae, 0xdb, 0xa1, 0x04, 0x7f, 0x08, 0x09, 0xa7, 0xd3, 0x70, 0x85, 0xe4, 0x54, 0xf1, 0xed, 0x59, + 0xbc, 0xdd, 0x4d, 0x70, 0xfd, 0x96, 0xc0, 0x19, 0xcf, 0x11, 0x5c, 0x79, 0x2c, 0xa2, 0x17, 0x58, + 0x7b, 0x49, 0x89, 0xf8, 0x0e, 0xa4, 0xfc, 0x74, 0x88, 0x3a, 0x16, 0xc1, 0x99, 0x96, 0xc7, 0x7b, + 0xbc, 0xd4, 0x0f, 0x6c, 0x7a, 0x64, 0xc9, 0xac, 0x73, 0xda, 0xf8, 0x0c, 0xae, 0x06, 0xd6, 0x2c, + 0xc9, 0x41, 0x13, 0xf0, 0x03, 0x87, 0xb2, 0xb2, 0xcf, 0x12, 0x38, 0x99, 0x85, 0x95, 0x86, 0xd3, + 0x62, 0xc4, 0xa3, 0x59, 0xa4, 0x2b, 0xf9, 0x35, 0x2b, 0x38, 0x1a, 0x8f, 0x61, 0x63, 0x84, 0x7f, + 0xc2, 0x0c, 0x65, 0x21, 0x33, 0xaa, 0x90, 0xb9, 0x4b, 0x5a, 0x84, 0x91, 0x31, 0x43, 0x96, 0x59, + 0x1b, 0xcf, 0x11, 0x60, 0x8b, 0xd8, 0xf5, 0xff, 0x4f, 0x05, 0xbe, 0x06, 0xaa, 0xdb, 0x68, 0x50, + 0xc2, 0x64, 0xfb, 0xcb, 0xd3, 0x60, 0x28, 0x28, 0xc3, 0xa1, 0x60, 0x94, 0x60, 0x63, 0xc4, 0x1a, + 0x19, 0xc9, 0xa1, 0x08, 0x34, 0x2e, 0xa2, 0x6e, 0x33, 0x5b, 0x08, 0x5e, 0xb7, 0x04, 0x6d, 0xfc, + 0x1c, 0x07, 0xf5, 0x11, 0xb3, 0x59, 0x8f, 0xf2, 0xe9, 0x40, 0x99, 0xed, 0xc9, 0xe9, 0x80, 0xe6, + 0x99, 0x0e, 0x12, 0x37, 0x31, 0x62, 0xe2, 0x8b, 0x8d, 0x98, 0x34, 0x28, 0x1e, 0x69, 0x08, 0x57, + 0xd7, 0x2c, 0x4e, 0x86, 0x5c, 0x4a, 0x8c, 0xb8, 0x94, 0x81, 0x24, 0x73, 0x99, 0xdd, 0xca, 0x26, + 0xc5, 0xb5, 0x7f, 0xc0, 0x0f, 0x61, 0x95, 0x7c, 0xd3, 0x25, 0x35, 0x46, 0xea, 0x59, 0x75, 0xe1, + 0x8c, 0x0c, 0x64, 0x18, 0xd7, 0xe1, 0x8a, 0x1f, 0xa3, 0x20, 0xe1, 0xd2, 0x40, 0x34, 0x30, 0x90, + 0xb7, 0x55, 0xc0, 0x32, 0xa8, 0x67, 0x95, 0x8a, 0x1b, 0x19, 0xca, 0x77, 0xa2, 0x2a, 0x5a, 0xe2, + 0x25, 0xca, 0x28, 0xf8, 0x6d, 0xe2, 0xdf, 0x12, 0x1a, 0xdd, 0x57, 0x5f, 0x41, 0x66, 0x14, 0x20, + 0x0d, 0xb9, 0x0f, 0xab, 0x54, 0xde, 0xc9, 0xe6, 0x9a, 0xd1, 0x14, 0xd9, 0x5e, 0x03, 0xb4, 0xf1, + 0x93, 0x02, 0x1b, 0x87, 0x9e, 0x33, 0xd1, 0x62, 0x65, 0x50, 0xed, 0x1a, 0x73, 0xdc, 0x8e, 0x70, + 0xf5, 0x6a, 0xf1, 0x66, 0x94, 0x7c, 0x21, 0xa4, 0x24, 0x20, 0x96, 0x84, 0x06, 0x31, 0x8d, 0x0f, + 0x93, 0x3e, 0x48, 0xae, 0x72, 0x51, 0x72, 0x13, 0x97, 0x4f, 0x6e, 0xa8, 0xb4, 0x92, 0x53, 0xbb, + 0x45, 0x1d, 0x76, 0x0b, 0x3e, 0x1c, 0xec, 0xbe, 0x15, 0x11, 0xc8, 0x8f, 0x66, 0x72, 0x74, 0x34, + 0x5a, 0xcb, 0x5e, 0x85, 0x2f, 0xe3, 0x90, 0x19, 0x55, 0x23, 0xf3, 0xbe, 0x94, 0xac, 0x8c, 0x0e, + 0x85, 0xf8, 0x32, 0x86, 0x82, 0xb2, 0xd8, 0x50, 0x98, 0x6f, 0x04, 0x0c, 0x47, 0xb2, 0x7a, 0xe9, + 0xa9, 0xaf, 0xc3, 0x7a, 0xa9, 0xea, 0x7a, 0xec, 0xc2, 0xee, 0xbf, 0xf1, 0x3d, 0x82, 0x54, 0x28, + 0x7a, 0xf8, 0x2d, 0x48, 0x3c, 0xaa, 0x94, 0x2a, 0xe9, 0x98, 0xb6, 0x71, 0x72, 0xaa, 0xbf, 0x16, + 0xfa, 0xc4, 0x3b, 0x0b, 0x6f, 0x42, 0xf2, 0xd0, 0xda, 0xaf, 0xec, 0xa5, 0x91, 0x96, 0x39, 0x39, + 0xd5, 0xd3, 0xa1, 0xef, 0x82, 0xc4, 0xd7, 0x41, 0x2d, 0x7f, 0x7a, 0x70, 0xb0, 0x5f, 0x49, 0xc7, + 0xb5, 0x37, 0x4e, 0x4e, 0xf5, 0xd7, 0x43, 0x1c, 0x65, 0xb7, 0xdd, 0x76, 0x98, 0xb6, 0xf1, 0xc3, + 0x2f, 0xb9, 0xd8, 0x6f, 0xbf, 0xe6, 0xc2, 0x7a, 0x8b, 0xbf, 0xaf, 0xc0, 0x8a, 0x2c, 0x03, 0x6c, + 0xcb, 0x97, 0xe9, 0xcd, 0x59, 0x36, 0xa9, 0x74, 0x4d, 0x7b, 0x77, 0x36, 0x66, 0x59, 0x61, 0x4d, + 0x50, 0xfd, 0xb7, 0x04, 0xde, 0x8a, 0xc2, 0x8d, 0xbc, 0x80, 0x34, 0x73, 0x56, 0x76, 0xa9, 0xe8, + 0x19, 0x24, 0xf8, 0x68, 0xc3, 0xc5, 0x28, 0xdc, 0xe4, 0x43, 0x44, 0xdb, 0x99, 0x0b, 0xe3, 0x2b, + 0xdc, 0x46, 0xf8, 0x73, 0x50, 0xfd, 0xe7, 0x04, 0xbe, 0x1d, 0x25, 0x60, 0xda, 0xb3, 0x43, 0xbb, + 0x36, 0x51, 0xdf, 0x7b, 0xfc, 0x7f, 0x03, 0x77, 0x85, 0xef, 0xec, 0x68, 0x57, 
0x26, 0xdf, 0x19, + 0xd1, 0xae, 0x4c, 0x79, 0x0d, 0x6c, 0x23, 0x9e, 0x26, 0xb9, 0xe2, 0xb7, 0x66, 0xdc, 0x41, 0xb3, + 0xa6, 0x69, 0x6c, 0xe5, 0x1d, 0xc3, 0x7a, 0x78, 0x03, 0xe1, 0x99, 0x42, 0x3f, 0xb6, 0xe0, 0xb4, + 0xdb, 0xf3, 0x81, 0xa4, 0xea, 0x3e, 0x24, 0xfd, 0xd6, 0xd9, 0x59, 0x60, 0x24, 0x47, 0xeb, 0x9c, + 0x36, 0x60, 0xf3, 0x68, 0x1b, 0xe1, 0x03, 0x48, 0x8a, 0xd9, 0x80, 0x23, 0x3b, 0x27, 0x3c, 0x42, + 0x2e, 0xaa, 0x8e, 0xdd, 0x27, 0x67, 0xaf, 0x72, 0xb1, 0x3f, 0x5f, 0xe5, 0x62, 0xdf, 0x9d, 0xe7, + 0xd0, 0xd9, 0x79, 0x0e, 0xfd, 0x71, 0x9e, 0x43, 0x7f, 0x9f, 0xe7, 0xd0, 0x93, 0x8f, 0x17, 0xfd, + 0x1f, 0x7d, 0x47, 0x92, 0x5f, 0xc4, 0xaa, 0xaa, 0xd0, 0xb6, 0xf3, 0x6f, 0x00, 0x00, 0x00, 0xff, + 0xff, 0xc0, 0xc2, 0x35, 0xb1, 0x94, 0x0f, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// ContentClient is the client API for Content service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type ContentClient interface { + // Info returns information about a committed object. + // + // This call can be used for getting the size of content and checking for + // existence. + Info(ctx context.Context, in *InfoRequest, opts ...grpc.CallOption) (*InfoResponse, error) + // Update updates content metadata. + // + // This call can be used to manage the mutable content labels. The + // immutable metadata such as digest, size, and committed at cannot + // be updated. + Update(ctx context.Context, in *UpdateRequest, opts ...grpc.CallOption) (*UpdateResponse, error) + // List streams the entire set of content as Info objects and closes the + // stream. + // + // Typically, this will yield a large response, chunked into messages. + // Clients should make provisions to ensure they can handle the entire data + // set. + List(ctx context.Context, in *ListContentRequest, opts ...grpc.CallOption) (Content_ListClient, error) + // Delete will delete the referenced object. + Delete(ctx context.Context, in *DeleteContentRequest, opts ...grpc.CallOption) (*types.Empty, error) + // Read allows one to read an object based on the offset into the content. + // + // The requested data may be returned in one or more messages. + Read(ctx context.Context, in *ReadContentRequest, opts ...grpc.CallOption) (Content_ReadClient, error) + // Status returns the status for a single reference. + Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) + // ListStatuses returns the status of ongoing object ingestions, started via + // Write. + // + // Only those matching the regular expression will be provided in the + // response. If the provided regular expression is empty, all ingestions + // will be provided. + ListStatuses(ctx context.Context, in *ListStatusesRequest, opts ...grpc.CallOption) (*ListStatusesResponse, error) + // Write begins or resumes writes to a resource identified by a unique ref. + // Only one active stream may exist at a time for each ref. + // + // Once a write stream has started, it may only write to a single ref, thus + // once a stream is started, the ref may be omitted on subsequent writes. 
+ // + // For any write transaction represented by a ref, only a single write may + // be made to a given offset. If overlapping writes occur, it is an error. + // Writes should be sequential and implementations may throw an error if + // this is required. + // + // If expected_digest is set and already part of the content store, the + // write will fail. + // + // When completed, the commit flag should be set to true. If expected size + // or digest is set, the content will be validated against those values. + Write(ctx context.Context, opts ...grpc.CallOption) (Content_WriteClient, error) + // Abort cancels the ongoing write named in the request. Any resources + // associated with the write will be collected. + Abort(ctx context.Context, in *AbortRequest, opts ...grpc.CallOption) (*types.Empty, error) +} + +type contentClient struct { + cc *grpc.ClientConn +} + +func NewContentClient(cc *grpc.ClientConn) ContentClient { + return &contentClient{cc} +} + +func (c *contentClient) Info(ctx context.Context, in *InfoRequest, opts ...grpc.CallOption) (*InfoResponse, error) { + out := new(InfoResponse) + err := c.cc.Invoke(ctx, "/containerd.services.content.v1.Content/Info", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *contentClient) Update(ctx context.Context, in *UpdateRequest, opts ...grpc.CallOption) (*UpdateResponse, error) { + out := new(UpdateResponse) + err := c.cc.Invoke(ctx, "/containerd.services.content.v1.Content/Update", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *contentClient) List(ctx context.Context, in *ListContentRequest, opts ...grpc.CallOption) (Content_ListClient, error) { + stream, err := c.cc.NewStream(ctx, &_Content_serviceDesc.Streams[0], "/containerd.services.content.v1.Content/List", opts...) + if err != nil { + return nil, err + } + x := &contentListClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Content_ListClient interface { + Recv() (*ListContentResponse, error) + grpc.ClientStream +} + +type contentListClient struct { + grpc.ClientStream +} + +func (x *contentListClient) Recv() (*ListContentResponse, error) { + m := new(ListContentResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *contentClient) Delete(ctx context.Context, in *DeleteContentRequest, opts ...grpc.CallOption) (*types.Empty, error) { + out := new(types.Empty) + err := c.cc.Invoke(ctx, "/containerd.services.content.v1.Content/Delete", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *contentClient) Read(ctx context.Context, in *ReadContentRequest, opts ...grpc.CallOption) (Content_ReadClient, error) { + stream, err := c.cc.NewStream(ctx, &_Content_serviceDesc.Streams[1], "/containerd.services.content.v1.Content/Read", opts...) 
+ if err != nil { + return nil, err + } + x := &contentReadClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Content_ReadClient interface { + Recv() (*ReadContentResponse, error) + grpc.ClientStream +} + +type contentReadClient struct { + grpc.ClientStream +} + +func (x *contentReadClient) Recv() (*ReadContentResponse, error) { + m := new(ReadContentResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *contentClient) Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) { + out := new(StatusResponse) + err := c.cc.Invoke(ctx, "/containerd.services.content.v1.Content/Status", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *contentClient) ListStatuses(ctx context.Context, in *ListStatusesRequest, opts ...grpc.CallOption) (*ListStatusesResponse, error) { + out := new(ListStatusesResponse) + err := c.cc.Invoke(ctx, "/containerd.services.content.v1.Content/ListStatuses", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *contentClient) Write(ctx context.Context, opts ...grpc.CallOption) (Content_WriteClient, error) { + stream, err := c.cc.NewStream(ctx, &_Content_serviceDesc.Streams[2], "/containerd.services.content.v1.Content/Write", opts...) + if err != nil { + return nil, err + } + x := &contentWriteClient{stream} + return x, nil +} + +type Content_WriteClient interface { + Send(*WriteContentRequest) error + Recv() (*WriteContentResponse, error) + grpc.ClientStream +} + +type contentWriteClient struct { + grpc.ClientStream +} + +func (x *contentWriteClient) Send(m *WriteContentRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *contentWriteClient) Recv() (*WriteContentResponse, error) { + m := new(WriteContentResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *contentClient) Abort(ctx context.Context, in *AbortRequest, opts ...grpc.CallOption) (*types.Empty, error) { + out := new(types.Empty) + err := c.cc.Invoke(ctx, "/containerd.services.content.v1.Content/Abort", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ContentServer is the server API for Content service. +type ContentServer interface { + // Info returns information about a committed object. + // + // This call can be used for getting the size of content and checking for + // existence. + Info(context.Context, *InfoRequest) (*InfoResponse, error) + // Update updates content metadata. + // + // This call can be used to manage the mutable content labels. The + // immutable metadata such as digest, size, and committed at cannot + // be updated. + Update(context.Context, *UpdateRequest) (*UpdateResponse, error) + // List streams the entire set of content as Info objects and closes the + // stream. + // + // Typically, this will yield a large response, chunked into messages. + // Clients should make provisions to ensure they can handle the entire data + // set. + List(*ListContentRequest, Content_ListServer) error + // Delete will delete the referenced object. + Delete(context.Context, *DeleteContentRequest) (*types.Empty, error) + // Read allows one to read an object based on the offset into the content. + // + // The requested data may be returned in one or more messages. 
+ Read(*ReadContentRequest, Content_ReadServer) error + // Status returns the status for a single reference. + Status(context.Context, *StatusRequest) (*StatusResponse, error) + // ListStatuses returns the status of ongoing object ingestions, started via + // Write. + // + // Only those matching the regular expression will be provided in the + // response. If the provided regular expression is empty, all ingestions + // will be provided. + ListStatuses(context.Context, *ListStatusesRequest) (*ListStatusesResponse, error) + // Write begins or resumes writes to a resource identified by a unique ref. + // Only one active stream may exist at a time for each ref. + // + // Once a write stream has started, it may only write to a single ref, thus + // once a stream is started, the ref may be omitted on subsequent writes. + // + // For any write transaction represented by a ref, only a single write may + // be made to a given offset. If overlapping writes occur, it is an error. + // Writes should be sequential and implementations may throw an error if + // this is required. + // + // If expected_digest is set and already part of the content store, the + // write will fail. + // + // When completed, the commit flag should be set to true. If expected size + // or digest is set, the content will be validated against those values. + Write(Content_WriteServer) error + // Abort cancels the ongoing write named in the request. Any resources + // associated with the write will be collected. + Abort(context.Context, *AbortRequest) (*types.Empty, error) +} + +// UnimplementedContentServer can be embedded to have forward compatible implementations. +type UnimplementedContentServer struct { +} + +func (*UnimplementedContentServer) Info(ctx context.Context, req *InfoRequest) (*InfoResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Info not implemented") +} +func (*UnimplementedContentServer) Update(ctx context.Context, req *UpdateRequest) (*UpdateResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Update not implemented") +} +func (*UnimplementedContentServer) List(req *ListContentRequest, srv Content_ListServer) error { + return status.Errorf(codes.Unimplemented, "method List not implemented") +} +func (*UnimplementedContentServer) Delete(ctx context.Context, req *DeleteContentRequest) (*types.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Delete not implemented") +} +func (*UnimplementedContentServer) Read(req *ReadContentRequest, srv Content_ReadServer) error { + return status.Errorf(codes.Unimplemented, "method Read not implemented") +} +func (*UnimplementedContentServer) Status(ctx context.Context, req *StatusRequest) (*StatusResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Status not implemented") +} +func (*UnimplementedContentServer) ListStatuses(ctx context.Context, req *ListStatusesRequest) (*ListStatusesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListStatuses not implemented") +} +func (*UnimplementedContentServer) Write(srv Content_WriteServer) error { + return status.Errorf(codes.Unimplemented, "method Write not implemented") +} +func (*UnimplementedContentServer) Abort(ctx context.Context, req *AbortRequest) (*types.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Abort not implemented") +} + +func RegisterContentServer(s *grpc.Server, srv ContentServer) { + s.RegisterService(&_Content_serviceDesc, srv) +} + +func _Content_Info_Handler(srv 
interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(InfoRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ContentServer).Info(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.content.v1.Content/Info", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ContentServer).Info(ctx, req.(*InfoRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Content_Update_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ContentServer).Update(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.content.v1.Content/Update", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ContentServer).Update(ctx, req.(*UpdateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Content_List_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(ListContentRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(ContentServer).List(m, &contentListServer{stream}) +} + +type Content_ListServer interface { + Send(*ListContentResponse) error + grpc.ServerStream +} + +type contentListServer struct { + grpc.ServerStream +} + +func (x *contentListServer) Send(m *ListContentResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _Content_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteContentRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ContentServer).Delete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.content.v1.Content/Delete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ContentServer).Delete(ctx, req.(*DeleteContentRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Content_Read_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(ReadContentRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(ContentServer).Read(m, &contentReadServer{stream}) +} + +type Content_ReadServer interface { + Send(*ReadContentResponse) error + grpc.ServerStream +} + +type contentReadServer struct { + grpc.ServerStream +} + +func (x *contentReadServer) Send(m *ReadContentResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _Content_Status_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StatusRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ContentServer).Status(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.content.v1.Content/Status", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ContentServer).Status(ctx, req.(*StatusRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Content_ListStatuses_Handler(srv interface{}, ctx 
context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListStatusesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ContentServer).ListStatuses(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.content.v1.Content/ListStatuses", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ContentServer).ListStatuses(ctx, req.(*ListStatusesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Content_Write_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(ContentServer).Write(&contentWriteServer{stream}) +} + +type Content_WriteServer interface { + Send(*WriteContentResponse) error + Recv() (*WriteContentRequest, error) + grpc.ServerStream +} + +type contentWriteServer struct { + grpc.ServerStream +} + +func (x *contentWriteServer) Send(m *WriteContentResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *contentWriteServer) Recv() (*WriteContentRequest, error) { + m := new(WriteContentRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _Content_Abort_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AbortRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ContentServer).Abort(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.content.v1.Content/Abort", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ContentServer).Abort(ctx, req.(*AbortRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Content_serviceDesc = grpc.ServiceDesc{ + ServiceName: "containerd.services.content.v1.Content", + HandlerType: (*ContentServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Info", + Handler: _Content_Info_Handler, + }, + { + MethodName: "Update", + Handler: _Content_Update_Handler, + }, + { + MethodName: "Delete", + Handler: _Content_Delete_Handler, + }, + { + MethodName: "Status", + Handler: _Content_Status_Handler, + }, + { + MethodName: "ListStatuses", + Handler: _Content_ListStatuses_Handler, + }, + { + MethodName: "Abort", + Handler: _Content_Abort_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "List", + Handler: _Content_List_Handler, + ServerStreams: true, + }, + { + StreamName: "Read", + Handler: _Content_Read_Handler, + ServerStreams: true, + }, + { + StreamName: "Write", + Handler: _Content_Write_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "github.com/containerd/containerd/api/services/content/v1/content.proto", +} + +func (m *Info) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Info) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Info) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Labels) > 0 { + for k := range m.Labels { + v := m.Labels[k] + baseI := i + i -= len(v) + 
copy(dAtA[i:], v) + i = encodeVarintContent(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintContent(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintContent(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a + } + } + n1, err1 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.UpdatedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt):]) + if err1 != nil { + return 0, err1 + } + i -= n1 + i = encodeVarintContent(dAtA, i, uint64(n1)) + i-- + dAtA[i] = 0x22 + n2, err2 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.CreatedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt):]) + if err2 != nil { + return 0, err2 + } + i -= n2 + i = encodeVarintContent(dAtA, i, uint64(n2)) + i-- + dAtA[i] = 0x1a + if m.Size_ != 0 { + i = encodeVarintContent(dAtA, i, uint64(m.Size_)) + i-- + dAtA[i] = 0x10 + } + if len(m.Digest) > 0 { + i -= len(m.Digest) + copy(dAtA[i:], m.Digest) + i = encodeVarintContent(dAtA, i, uint64(len(m.Digest))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *InfoRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *InfoRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *InfoRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Digest) > 0 { + i -= len(m.Digest) + copy(dAtA[i:], m.Digest) + i = encodeVarintContent(dAtA, i, uint64(len(m.Digest))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *InfoResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *InfoResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *InfoResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + { + size, err := m.Info.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintContent(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *UpdateRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UpdateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.UpdateMask != nil { + { + size, err := m.UpdateMask.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintContent(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + { + size, err := m.Info.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i 
-= size + i = encodeVarintContent(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *UpdateResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UpdateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + { + size, err := m.Info.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintContent(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ListContentRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListContentRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ListContentRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Filters) > 0 { + for iNdEx := len(m.Filters) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Filters[iNdEx]) + copy(dAtA[i:], m.Filters[iNdEx]) + i = encodeVarintContent(dAtA, i, uint64(len(m.Filters[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ListContentResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListContentResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ListContentResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Info) > 0 { + for iNdEx := len(m.Info) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Info[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintContent(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *DeleteContentRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeleteContentRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeleteContentRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Digest) > 0 { + i -= len(m.Digest) + copy(dAtA[i:], m.Digest) + i = encodeVarintContent(dAtA, i, uint64(len(m.Digest))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ReadContentRequest) Marshal() (dAtA []byte, err error) { + 
size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReadContentRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReadContentRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Size_ != 0 { + i = encodeVarintContent(dAtA, i, uint64(m.Size_)) + i-- + dAtA[i] = 0x18 + } + if m.Offset != 0 { + i = encodeVarintContent(dAtA, i, uint64(m.Offset)) + i-- + dAtA[i] = 0x10 + } + if len(m.Digest) > 0 { + i -= len(m.Digest) + copy(dAtA[i:], m.Digest) + i = encodeVarintContent(dAtA, i, uint64(len(m.Digest))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ReadContentResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReadContentResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReadContentResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Data) > 0 { + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintContent(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0x12 + } + if m.Offset != 0 { + i = encodeVarintContent(dAtA, i, uint64(m.Offset)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Status) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Status) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Status) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Expected) > 0 { + i -= len(m.Expected) + copy(dAtA[i:], m.Expected) + i = encodeVarintContent(dAtA, i, uint64(len(m.Expected))) + i-- + dAtA[i] = 0x32 + } + if m.Total != 0 { + i = encodeVarintContent(dAtA, i, uint64(m.Total)) + i-- + dAtA[i] = 0x28 + } + if m.Offset != 0 { + i = encodeVarintContent(dAtA, i, uint64(m.Offset)) + i-- + dAtA[i] = 0x20 + } + if len(m.Ref) > 0 { + i -= len(m.Ref) + copy(dAtA[i:], m.Ref) + i = encodeVarintContent(dAtA, i, uint64(len(m.Ref))) + i-- + dAtA[i] = 0x1a + } + n7, err7 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.UpdatedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt):]) + if err7 != nil { + return 0, err7 + } + i -= n7 + i = encodeVarintContent(dAtA, i, uint64(n7)) + i-- + dAtA[i] = 0x12 + n8, err8 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.StartedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.StartedAt):]) + if err8 != nil { + return 0, err8 + } + i -= n8 + i = encodeVarintContent(dAtA, i, uint64(n8)) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *StatusRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatusRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatusRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Ref) > 0 { + i -= len(m.Ref) + copy(dAtA[i:], m.Ref) + i = encodeVarintContent(dAtA, i, uint64(len(m.Ref))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *StatusResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatusResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatusResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Status != nil { + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintContent(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ListStatusesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListStatusesRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ListStatusesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Filters) > 0 { + for iNdEx := len(m.Filters) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Filters[iNdEx]) + copy(dAtA[i:], m.Filters[iNdEx]) + i = encodeVarintContent(dAtA, i, uint64(len(m.Filters[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ListStatusesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListStatusesResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ListStatusesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Statuses) > 0 { + for iNdEx := len(m.Statuses) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Statuses[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintContent(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *WriteContentRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WriteContentRequest) MarshalTo(dAtA []byte) (int, 
error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WriteContentRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Labels) > 0 { + for k := range m.Labels { + v := m.Labels[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintContent(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintContent(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintContent(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x3a + } + } + if len(m.Data) > 0 { + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintContent(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0x32 + } + if m.Offset != 0 { + i = encodeVarintContent(dAtA, i, uint64(m.Offset)) + i-- + dAtA[i] = 0x28 + } + if len(m.Expected) > 0 { + i -= len(m.Expected) + copy(dAtA[i:], m.Expected) + i = encodeVarintContent(dAtA, i, uint64(len(m.Expected))) + i-- + dAtA[i] = 0x22 + } + if m.Total != 0 { + i = encodeVarintContent(dAtA, i, uint64(m.Total)) + i-- + dAtA[i] = 0x18 + } + if len(m.Ref) > 0 { + i -= len(m.Ref) + copy(dAtA[i:], m.Ref) + i = encodeVarintContent(dAtA, i, uint64(len(m.Ref))) + i-- + dAtA[i] = 0x12 + } + if m.Action != 0 { + i = encodeVarintContent(dAtA, i, uint64(m.Action)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *WriteContentResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WriteContentResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WriteContentResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Digest) > 0 { + i -= len(m.Digest) + copy(dAtA[i:], m.Digest) + i = encodeVarintContent(dAtA, i, uint64(len(m.Digest))) + i-- + dAtA[i] = 0x32 + } + if m.Total != 0 { + i = encodeVarintContent(dAtA, i, uint64(m.Total)) + i-- + dAtA[i] = 0x28 + } + if m.Offset != 0 { + i = encodeVarintContent(dAtA, i, uint64(m.Offset)) + i-- + dAtA[i] = 0x20 + } + n10, err10 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.UpdatedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt):]) + if err10 != nil { + return 0, err10 + } + i -= n10 + i = encodeVarintContent(dAtA, i, uint64(n10)) + i-- + dAtA[i] = 0x1a + n11, err11 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.StartedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.StartedAt):]) + if err11 != nil { + return 0, err11 + } + i -= n11 + i = encodeVarintContent(dAtA, i, uint64(n11)) + i-- + dAtA[i] = 0x12 + if m.Action != 0 { + i = encodeVarintContent(dAtA, i, uint64(m.Action)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *AbortRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AbortRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AbortRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := 
len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Ref) > 0 { + i -= len(m.Ref) + copy(dAtA[i:], m.Ref) + i = encodeVarintContent(dAtA, i, uint64(len(m.Ref))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintContent(dAtA []byte, offset int, v uint64) int { + offset -= sovContent(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Info) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Digest) + if l > 0 { + n += 1 + l + sovContent(uint64(l)) + } + if m.Size_ != 0 { + n += 1 + sovContent(uint64(m.Size_)) + } + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt) + n += 1 + l + sovContent(uint64(l)) + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt) + n += 1 + l + sovContent(uint64(l)) + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovContent(uint64(len(k))) + 1 + len(v) + sovContent(uint64(len(v))) + n += mapEntrySize + 1 + sovContent(uint64(mapEntrySize)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *InfoRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Digest) + if l > 0 { + n += 1 + l + sovContent(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *InfoResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Info.Size() + n += 1 + l + sovContent(uint64(l)) + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *UpdateRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Info.Size() + n += 1 + l + sovContent(uint64(l)) + if m.UpdateMask != nil { + l = m.UpdateMask.Size() + n += 1 + l + sovContent(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *UpdateResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Info.Size() + n += 1 + l + sovContent(uint64(l)) + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ListContentRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Filters) > 0 { + for _, s := range m.Filters { + l = len(s) + n += 1 + l + sovContent(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ListContentResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Info) > 0 { + for _, e := range m.Info { + l = e.Size() + n += 1 + l + sovContent(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *DeleteContentRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Digest) + if l > 0 { + n += 1 + l + sovContent(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ReadContentRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Digest) + if l > 0 { + n += 1 + l + sovContent(uint64(l)) + } + if m.Offset != 0 { + n += 1 + sovContent(uint64(m.Offset)) + } + if m.Size_ != 0 { + n += 1 + sovContent(uint64(m.Size_)) + } + if m.XXX_unrecognized != nil { + n += 
len(m.XXX_unrecognized) + } + return n +} + +func (m *ReadContentResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Offset != 0 { + n += 1 + sovContent(uint64(m.Offset)) + } + l = len(m.Data) + if l > 0 { + n += 1 + l + sovContent(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Status) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.StartedAt) + n += 1 + l + sovContent(uint64(l)) + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt) + n += 1 + l + sovContent(uint64(l)) + l = len(m.Ref) + if l > 0 { + n += 1 + l + sovContent(uint64(l)) + } + if m.Offset != 0 { + n += 1 + sovContent(uint64(m.Offset)) + } + if m.Total != 0 { + n += 1 + sovContent(uint64(m.Total)) + } + l = len(m.Expected) + if l > 0 { + n += 1 + l + sovContent(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *StatusRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Ref) + if l > 0 { + n += 1 + l + sovContent(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *StatusResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Status != nil { + l = m.Status.Size() + n += 1 + l + sovContent(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ListStatusesRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Filters) > 0 { + for _, s := range m.Filters { + l = len(s) + n += 1 + l + sovContent(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ListStatusesResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Statuses) > 0 { + for _, e := range m.Statuses { + l = e.Size() + n += 1 + l + sovContent(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *WriteContentRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Action != 0 { + n += 1 + sovContent(uint64(m.Action)) + } + l = len(m.Ref) + if l > 0 { + n += 1 + l + sovContent(uint64(l)) + } + if m.Total != 0 { + n += 1 + sovContent(uint64(m.Total)) + } + l = len(m.Expected) + if l > 0 { + n += 1 + l + sovContent(uint64(l)) + } + if m.Offset != 0 { + n += 1 + sovContent(uint64(m.Offset)) + } + l = len(m.Data) + if l > 0 { + n += 1 + l + sovContent(uint64(l)) + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovContent(uint64(len(k))) + 1 + len(v) + sovContent(uint64(len(v))) + n += mapEntrySize + 1 + sovContent(uint64(mapEntrySize)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *WriteContentResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Action != 0 { + n += 1 + sovContent(uint64(m.Action)) + } + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.StartedAt) + n += 1 + l + sovContent(uint64(l)) + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt) + n += 1 + l + sovContent(uint64(l)) + if m.Offset != 0 { + n += 1 + sovContent(uint64(m.Offset)) + } + if m.Total != 0 { + n += 1 + sovContent(uint64(m.Total)) + } + l = len(m.Digest) + if l > 0 { + n += 1 + l + sovContent(uint64(l)) + } + if m.XXX_unrecognized 
!= nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *AbortRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Ref) + if l > 0 { + n += 1 + l + sovContent(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovContent(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozContent(x uint64) (n int) { + return sovContent(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Info) String() string { + if this == nil { + return "nil" + } + keysForLabels := make([]string, 0, len(this.Labels)) + for k, _ := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + s := strings.Join([]string{`&Info{`, + `Digest:` + fmt.Sprintf("%v", this.Digest) + `,`, + `Size_:` + fmt.Sprintf("%v", this.Size_) + `,`, + `CreatedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CreatedAt), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, + `UpdatedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.UpdatedAt), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, + `Labels:` + mapStringForLabels + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *InfoRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&InfoRequest{`, + `Digest:` + fmt.Sprintf("%v", this.Digest) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *InfoResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&InfoResponse{`, + `Info:` + strings.Replace(strings.Replace(this.Info.String(), "Info", "Info", 1), `&`, ``, 1) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateRequest{`, + `Info:` + strings.Replace(strings.Replace(this.Info.String(), "Info", "Info", 1), `&`, ``, 1) + `,`, + `UpdateMask:` + strings.Replace(fmt.Sprintf("%v", this.UpdateMask), "FieldMask", "types.FieldMask", 1) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateResponse{`, + `Info:` + strings.Replace(strings.Replace(this.Info.String(), "Info", "Info", 1), `&`, ``, 1) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *ListContentRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListContentRequest{`, + `Filters:` + fmt.Sprintf("%v", this.Filters) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *ListContentResponse) String() string { + if this == nil { + return "nil" + } + repeatedStringForInfo := "[]Info{" + for _, f := range this.Info { + repeatedStringForInfo += strings.Replace(strings.Replace(f.String(), "Info", "Info", 1), `&`, ``, 1) + "," + } + repeatedStringForInfo += "}" + s := 
strings.Join([]string{`&ListContentResponse{`, + `Info:` + repeatedStringForInfo + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *DeleteContentRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeleteContentRequest{`, + `Digest:` + fmt.Sprintf("%v", this.Digest) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *ReadContentRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ReadContentRequest{`, + `Digest:` + fmt.Sprintf("%v", this.Digest) + `,`, + `Offset:` + fmt.Sprintf("%v", this.Offset) + `,`, + `Size_:` + fmt.Sprintf("%v", this.Size_) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *ReadContentResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ReadContentResponse{`, + `Offset:` + fmt.Sprintf("%v", this.Offset) + `,`, + `Data:` + fmt.Sprintf("%v", this.Data) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *Status) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Status{`, + `StartedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.StartedAt), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, + `UpdatedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.UpdatedAt), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, + `Ref:` + fmt.Sprintf("%v", this.Ref) + `,`, + `Offset:` + fmt.Sprintf("%v", this.Offset) + `,`, + `Total:` + fmt.Sprintf("%v", this.Total) + `,`, + `Expected:` + fmt.Sprintf("%v", this.Expected) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *StatusRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatusRequest{`, + `Ref:` + fmt.Sprintf("%v", this.Ref) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *StatusResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatusResponse{`, + `Status:` + strings.Replace(this.Status.String(), "Status", "Status", 1) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *ListStatusesRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListStatusesRequest{`, + `Filters:` + fmt.Sprintf("%v", this.Filters) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *ListStatusesResponse) String() string { + if this == nil { + return "nil" + } + repeatedStringForStatuses := "[]Status{" + for _, f := range this.Statuses { + repeatedStringForStatuses += strings.Replace(strings.Replace(f.String(), "Status", "Status", 1), `&`, ``, 1) + "," + } + repeatedStringForStatuses += "}" + s := strings.Join([]string{`&ListStatusesResponse{`, + `Statuses:` + repeatedStringForStatuses + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *WriteContentRequest) String() string { + if this == nil { + return "nil" + } + keysForLabels := make([]string, 0, len(this.Labels)) + for k, _ := range this.Labels { + 
keysForLabels = append(keysForLabels, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + s := strings.Join([]string{`&WriteContentRequest{`, + `Action:` + fmt.Sprintf("%v", this.Action) + `,`, + `Ref:` + fmt.Sprintf("%v", this.Ref) + `,`, + `Total:` + fmt.Sprintf("%v", this.Total) + `,`, + `Expected:` + fmt.Sprintf("%v", this.Expected) + `,`, + `Offset:` + fmt.Sprintf("%v", this.Offset) + `,`, + `Data:` + fmt.Sprintf("%v", this.Data) + `,`, + `Labels:` + mapStringForLabels + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *WriteContentResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WriteContentResponse{`, + `Action:` + fmt.Sprintf("%v", this.Action) + `,`, + `StartedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.StartedAt), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, + `UpdatedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.UpdatedAt), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, + `Offset:` + fmt.Sprintf("%v", this.Offset) + `,`, + `Total:` + fmt.Sprintf("%v", this.Total) + `,`, + `Digest:` + fmt.Sprintf("%v", this.Digest) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *AbortRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AbortRequest{`, + `Ref:` + fmt.Sprintf("%v", this.Ref) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func valueToStringContent(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Info) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Info: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Info: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthContent + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthContent + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Size_", wireType) + } + m.Size_ = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Size_ |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthContent + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthContent + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.CreatedAt, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthContent + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthContent + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.UpdatedAt, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthContent + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthContent + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthContent + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthContent + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthContent + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthContent + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipContent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthContent + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipContent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthContent + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthContent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *InfoRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: InfoRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: InfoRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthContent + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthContent + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipContent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthContent + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthContent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *InfoResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: InfoResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: InfoResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthContent + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthContent + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Info.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipContent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthContent + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthContent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthContent + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthContent + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Info.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdateMask", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthContent + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthContent + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.UpdateMask == nil { + m.UpdateMask = &types.FieldMask{} + } + if err := m.UpdateMask.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipContent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthContent + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthContent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthContent + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthContent + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Info.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipContent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthContent + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthContent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListContentRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListContentRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListContentRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthContent + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthContent + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Filters = append(m.Filters, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipContent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthContent + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthContent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListContentResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListContentResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListContentResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthContent + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthContent + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Info = append(m.Info, Info{}) + if err := m.Info[len(m.Info)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipContent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthContent + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthContent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeleteContentRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeleteContentRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeleteContentRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthContent + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthContent + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipContent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthContent + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthContent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReadContentRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReadContentRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReadContentRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthContent + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthContent + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType) + } + m.Offset = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Offset |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Size_", wireType) + } + m.Size_ = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Size_ |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipContent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthContent + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthContent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReadContentResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReadContentResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReadContentResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType) + } + m.Offset = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Offset |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthContent + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthContent + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipContent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthContent + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthContent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Status) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Status: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Status: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthContent + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthContent + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.StartedAt, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthContent + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthContent + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.UpdatedAt, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthContent + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthContent + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ref = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType) + } + m.Offset = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Offset |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Total", wireType) + } + m.Total = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Total |= 
int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Expected", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthContent + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthContent + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Expected = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipContent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthContent + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthContent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatusRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatusRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthContent + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthContent + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ref = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipContent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthContent + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthContent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatusResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatusResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthContent + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthContent + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Status == nil { + m.Status = &Status{} + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipContent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthContent + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthContent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListStatusesRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListStatusesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListStatusesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthContent + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthContent + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Filters = append(m.Filters, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipContent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthContent + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthContent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListStatusesResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListStatusesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListStatusesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Statuses", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthContent + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthContent + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Statuses = append(m.Statuses, Status{}) + if err := m.Statuses[len(m.Statuses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipContent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthContent + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthContent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WriteContentRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WriteContentRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WriteContentRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + m.Action = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Action |= WriteAction(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthContent + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthContent + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ref = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Total", wireType) + } + m.Total = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Total |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Expected", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthContent + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthContent + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Expected = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType) + } + m.Offset = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Offset |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthContent + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthContent + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthContent + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthContent + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthContent + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthContent + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthContent + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthContent + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipContent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthContent + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipContent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthContent + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthContent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WriteContentResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WriteContentResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WriteContentResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + m.Action = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Action |= WriteAction(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthContent + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthContent + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.StartedAt, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthContent + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthContent + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.UpdatedAt, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType) + } + m.Offset = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Offset |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Total", wireType) + } + m.Total = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Total |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthContent + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthContent + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipContent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthContent + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthContent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AbortRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AbortRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AbortRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowContent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthContent + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthContent + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ref = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipContent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthContent + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthContent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipContent(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowContent + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowContent + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowContent + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthContent + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupContent + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthContent + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthContent = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowContent = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupContent = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/containerd/containerd/api/services/content/v1/content.proto b/vendor/github.com/containerd/containerd/api/services/content/v1/content.proto new file mode 100644 index 00000000..086b3e39 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/services/content/v1/content.proto @@ -0,0 +1,318 @@ +syntax = "proto3"; + +package containerd.services.content.v1; + +import weak "gogoproto/gogo.proto"; +import "google/protobuf/field_mask.proto"; +import "google/protobuf/timestamp.proto"; +import "google/protobuf/empty.proto"; + +option go_package = "github.com/containerd/containerd/api/services/content/v1;content"; + +// Content provides access to a content addressable storage system. +service Content { + // Info returns information about a committed object. + // + // This call can be used for getting the size of content and checking for + // existence. + rpc Info(InfoRequest) returns (InfoResponse); + + // Update updates content metadata. + // + // This call can be used to manage the mutable content labels. The + // immutable metadata such as digest, size, and committed at cannot + // be updated. + rpc Update(UpdateRequest) returns (UpdateResponse); + + // List streams the entire set of content as Info objects and closes the + // stream. + // + // Typically, this will yield a large response, chunked into messages. + // Clients should make provisions to ensure they can handle the entire data + // set. + rpc List(ListContentRequest) returns (stream ListContentResponse); + + // Delete will delete the referenced object. + rpc Delete(DeleteContentRequest) returns (google.protobuf.Empty); + + // Read allows one to read an object based on the offset into the content. + // + // The requested data may be returned in one or more messages. 
+  rpc Read(ReadContentRequest) returns (stream ReadContentResponse);
+
+  // Status returns the status for a single reference.
+  rpc Status(StatusRequest) returns (StatusResponse);
+
+  // ListStatuses returns the status of ongoing object ingestions, started via
+  // Write.
+  //
+  // Only those matching the regular expression will be provided in the
+  // response. If the provided regular expression is empty, all ingestions
+  // will be provided.
+  rpc ListStatuses(ListStatusesRequest) returns (ListStatusesResponse);
+
+  // Write begins or resumes writes to a resource identified by a unique ref.
+  // Only one active stream may exist at a time for each ref.
+  //
+  // Once a write stream has started, it may only write to a single ref, thus
+  // once a stream is started, the ref may be omitted on subsequent writes.
+  //
+  // For any write transaction represented by a ref, only a single write may
+  // be made to a given offset. If overlapping writes occur, it is an error.
+  // Writes should be sequential, and implementations may return an error if
+  // they require it.
+  //
+  // If expected_digest is set and already part of the content store, the
+  // write will fail.
+  //
+  // When completed, the commit flag should be set to true. If expected size
+  // or digest is set, the content will be validated against those values.
+  rpc Write(stream WriteContentRequest) returns (stream WriteContentResponse);
+
+  // Abort cancels the ongoing write named in the request. Any resources
+  // associated with the write will be collected.
+  rpc Abort(AbortRequest) returns (google.protobuf.Empty);
+}
+
+message Info {
+  // Digest is the hash identity of the blob.
+  string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
+
+  // Size is the total number of bytes in the blob.
+  int64 size = 2;
+
+  // CreatedAt provides the time at which the blob was committed.
+  google.protobuf.Timestamp created_at = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+
+  // UpdatedAt provides the time the info was last updated.
+  google.protobuf.Timestamp updated_at = 4 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+
+  // Labels are arbitrary data on content.
+  //
+  // The combined size of a key/value pair cannot exceed 4096 bytes.
+  map<string, string> labels = 5;
+}
+
+message InfoRequest {
+  string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
+}
+
+message InfoResponse {
+  Info info = 1 [(gogoproto.nullable) = false];
+}
+
+message UpdateRequest {
+  Info info = 1 [(gogoproto.nullable) = false];
+
+  // UpdateMask specifies which fields to perform the update on. If empty,
+  // the operation applies to all fields.
+  //
+  // In info, Digest, Size, and CreatedAt are immutable;
+  // other fields may be updated using this mask.
+  // If no mask is provided, all mutable fields are updated.
+  google.protobuf.FieldMask update_mask = 2;
+}
+
+message UpdateResponse {
+  Info info = 1 [(gogoproto.nullable) = false];
+}
+
+message ListContentRequest {
+  // Filters contains one or more filters using the syntax defined in the
+  // containerd filter package.
+  //
+  // The returned result will be those that match any of the provided
+  // filters. Expanded, content items that match the following will be
+  // returned:
+  //
+  //   filters[0] or filters[1] or ... or filters[n-1] or filters[n]
+  //
+  // If filters is zero-length or nil, all items will be returned.
+ repeated string filters = 1; +} + +message ListContentResponse { + repeated Info info = 1 [(gogoproto.nullable) = false]; +} + +message DeleteContentRequest { + // Digest specifies which content to delete. + string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false]; +} + +// ReadContentRequest defines the fields that make up a request to read a portion of +// data from a stored object. +message ReadContentRequest { + // Digest is the hash identity to read. + string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false]; + + // Offset specifies the number of bytes from the start at which to begin + // the read. If zero or less, the read will be from the start. This uses + // standard zero-indexed semantics. + int64 offset = 2; + + // size is the total size of the read. If zero, the entire blob will be + // returned by the service. + int64 size = 3; +} + +// ReadContentResponse carries byte data for a read request. +message ReadContentResponse { + int64 offset = 1; // offset of the returned data + bytes data = 2; // actual data +} + +message Status { + google.protobuf.Timestamp started_at = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + google.protobuf.Timestamp updated_at = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + string ref = 3; + int64 offset = 4; + int64 total = 5; + string expected = 6 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false]; +} + + +message StatusRequest { + string ref = 1; +} + +message StatusResponse { + Status status = 1; +} + +message ListStatusesRequest { + repeated string filters = 1; +} + +message ListStatusesResponse { + repeated Status statuses = 1 [(gogoproto.nullable) = false]; +} + +// WriteAction defines the behavior of a WriteRequest. +enum WriteAction { + option (gogoproto.goproto_enum_prefix) = false; + option (gogoproto.enum_customname) = "WriteAction"; + + // WriteActionStat instructs the writer to return the current status while + // holding the lock on the write. + STAT = 0 [(gogoproto.enumvalue_customname) = "WriteActionStat"]; + + // WriteActionWrite sets the action for the write request to write data. + // + // Any data included will be written at the provided offset. The + // transaction will be left open for further writes. + // + // This is the default. + WRITE = 1 [(gogoproto.enumvalue_customname) = "WriteActionWrite"]; + + // WriteActionCommit will write any outstanding data in the message and + // commit the write, storing it under the digest. + // + // This can be used in a single message to send the data, verify it and + // commit it. + // + // This action will always terminate the write. + COMMIT = 2 [(gogoproto.enumvalue_customname) = "WriteActionCommit"]; +} + +// WriteContentRequest writes data to the request ref at offset. +message WriteContentRequest { + // Action sets the behavior of the write. + // + // When this is a write and the ref is not yet allocated, the ref will be + // allocated and the data will be written at offset. + // + // If the action is write and the ref is allocated, it will accept data to + // an offset that has not yet been written. + // + // If the action is write and there is no data, the current write status + // will be returned. This works differently from status because the stream + // holds a lock. + WriteAction action = 1; + + // Ref identifies the pre-commit object to write to. 
+	string ref = 2;
+
+	// Total can be set to have the service validate the total size of the
+	// committed content.
+	//
+	// The latest value before or with the commit action message will be used to
+	// validate the content. If the offset overflows total, the service may
+	// report an error. It is only required on one message for the write.
+	//
+	// If the value is zero or less, no validation of the final content will be
+	// performed.
+	int64 total = 3;
+
+	// Expected can be set to have the service validate the final content against
+	// the provided digest.
+	//
+	// If the digest is already present in the object store, an AlreadyExists
+	// error will be returned.
+	//
+	// Only the latest version will be used to check the content against the
+	// digest. It is only required to include it on a single message, before or
+	// with the commit action message.
+	string expected = 4 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
+
+	// Offset specifies the number of bytes from the start at which to begin
+	// the write. For most implementations, this means from the start of the
+	// file. This uses standard, zero-indexed semantics.
+	//
+	// If the action is write, the remote may remove all previously written
+	// data after the offset. Implementations may support arbitrary offsets but
+	// MUST support resetting this value to zero with a write. If an
+	// implementation does not support a write at a particular offset, an
+	// OutOfRange error must be returned.
+	int64 offset = 5;
+
+	// Data is the actual bytes to be written.
+	//
+	// If this is empty and the message is not a commit, a response will be
+	// returned with the current write state.
+	bytes data = 6;
+
+	// Labels are arbitrary data attached to the content.
+	//
+	// The combined size of a key/value pair cannot exceed 4096 bytes.
+	map<string, string> labels = 7;
+}
+
+// WriteContentResponse is returned on the culmination of a write call.
+message WriteContentResponse {
+	// Action contains the action for the final message of the stream. A writer
+	// should confirm that it matches the intended result.
+	WriteAction action = 1;
+
+	// StartedAt provides the time at which the write began.
+	//
+	// This must be set for stat and commit write actions. All other write
+	// actions may omit this.
+	google.protobuf.Timestamp started_at = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+
+	// UpdatedAt provides the last time of a successful write.
+	//
+	// This must be set for stat and commit write actions. All other write
+	// actions may omit this.
+	google.protobuf.Timestamp updated_at = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+
+	// Offset is the current committed size for the write.
+	int64 offset = 4;
+
+	// Total provides the current, expected total size of the write.
+	//
+	// We include this to provide consistency with the Status structure on the
+	// client writer.
+	//
+	// This is only valid on the Stat and Commit response.
+	int64 total = 5;
+
+	// Digest, if present, includes the digest up to the currently committed
+	// bytes. If action is commit, this field will be set. It is implementation
+	// defined if this is set for other actions.
+	string digest = 6 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
+}
+
+message AbortRequest {
+	string ref = 1;
+}
diff --git a/vendor/github.com/containerd/containerd/archive/compression/compression.go b/vendor/github.com/containerd/containerd/archive/compression/compression.go
new file mode 100644
index 00000000..2338de6b
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/archive/compression/compression.go
@@ -0,0 +1,266 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package compression
+
+import (
+	"bufio"
+	"bytes"
+	"compress/gzip"
+	"context"
+	"fmt"
+	"io"
+	"os"
+	"os/exec"
+	"strconv"
+	"sync"
+
+	"github.com/containerd/containerd/log"
+)
+
+type (
+	// Compression is the state that represents whether content is compressed or not.
+	Compression int
+)
+
+const (
+	// Uncompressed represents uncompressed data.
+	Uncompressed Compression = iota
+	// Gzip is the gzip compression algorithm.
+	Gzip
+)
+
+const disablePigzEnv = "CONTAINERD_DISABLE_PIGZ"
+
+var (
+	initPigz   sync.Once
+	unpigzPath string
+)
+
+var (
+	bufioReader32KPool = &sync.Pool{
+		New: func() interface{} { return bufio.NewReaderSize(nil, 32*1024) },
+	}
+)
+
+// DecompressReadCloser includes the decompressed stream and the compression
+// method that was detected.
+type DecompressReadCloser interface {
+	io.ReadCloser
+	// GetCompression returns the compression method that was detected before
+	// decompressing.
+	GetCompression() Compression
+}
+
+type readCloserWrapper struct {
+	io.Reader
+	compression Compression
+	closer      func() error
+}
+
+func (r *readCloserWrapper) Close() error {
+	if r.closer != nil {
+		return r.closer()
+	}
+	return nil
+}
+
+func (r *readCloserWrapper) GetCompression() Compression {
+	return r.compression
+}
+
+type writeCloserWrapper struct {
+	io.Writer
+	closer func() error
+}
+
+func (w *writeCloserWrapper) Close() error {
+	if w.closer != nil {
+		w.closer()
+	}
+	return nil
+}
+
+type bufferedReader struct {
+	buf *bufio.Reader
+}
+
+func newBufferedReader(r io.Reader) *bufferedReader {
+	buf := bufioReader32KPool.Get().(*bufio.Reader)
+	buf.Reset(r)
+	return &bufferedReader{buf}
+}
+
+func (r *bufferedReader) Read(p []byte) (n int, err error) {
+	if r.buf == nil {
+		return 0, io.EOF
+	}
+	n, err = r.buf.Read(p)
+	if err == io.EOF {
+		r.buf.Reset(nil)
+		bufioReader32KPool.Put(r.buf)
+		r.buf = nil
+	}
+	return
+}
+
+func (r *bufferedReader) Peek(n int) ([]byte, error) {
+	if r.buf == nil {
+		return nil, io.EOF
+	}
+	return r.buf.Peek(n)
+}
+
+// DetectCompression detects the compression algorithm of the source.
+func DetectCompression(source []byte) Compression {
+	for compression, m := range map[Compression][]byte{
+		Gzip: {0x1F, 0x8B, 0x08},
+	} {
+		if len(source) < len(m) {
+			// Len too short
+			continue
+		}
+		if bytes.Equal(m, source[:len(m)]) {
+			return compression
+		}
+	}
+	return Uncompressed
+}
+
+// DecompressStream decompresses the archive and returns a DecompressReadCloser
+// with the decompressed archive.
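+//
+// A minimal usage sketch (editor's illustration; the file name is
+// hypothetical):
+//
+//	f, err := os.Open("layer.tar.gz")
+//	if err != nil { ... }
+//	defer f.Close()
+//	rc, err := compression.DecompressStream(f)
+//	if err != nil { ... }
+//	defer rc.Close()
+//	// rc.GetCompression() reports the detected method, e.g. Gzip.
+//	_, err = io.Copy(ioutil.Discard, rc)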
+func DecompressStream(archive io.Reader) (DecompressReadCloser, error) { + buf := newBufferedReader(archive) + bs, err := buf.Peek(10) + if err != nil && err != io.EOF { + // Note: we'll ignore any io.EOF error because there are some odd + // cases where the layer.tar file will be empty (zero bytes) and + // that results in an io.EOF from the Peek() call. So, in those + // cases we'll just treat it as a non-compressed stream and + // that means just create an empty layer. + // See Issue docker/docker#18170 + return nil, err + } + + switch compression := DetectCompression(bs); compression { + case Uncompressed: + return &readCloserWrapper{ + Reader: buf, + compression: compression, + }, nil + case Gzip: + ctx, cancel := context.WithCancel(context.Background()) + gzReader, err := gzipDecompress(ctx, buf) + if err != nil { + cancel() + return nil, err + } + + return &readCloserWrapper{ + Reader: gzReader, + compression: compression, + closer: func() error { + cancel() + return gzReader.Close() + }, + }, nil + + default: + return nil, fmt.Errorf("unsupported compression format %s", (&compression).Extension()) + } +} + +// CompressStream compresses the dest with specified compression algorithm. +func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) { + switch compression { + case Uncompressed: + return &writeCloserWrapper{dest, nil}, nil + case Gzip: + return gzip.NewWriter(dest), nil + default: + return nil, fmt.Errorf("unsupported compression format %s", (&compression).Extension()) + } +} + +// Extension returns the extension of a file that uses the specified compression algorithm. +func (compression *Compression) Extension() string { + switch *compression { + case Gzip: + return "gz" + } + return "" +} + +func gzipDecompress(ctx context.Context, buf io.Reader) (io.ReadCloser, error) { + initPigz.Do(func() { + if unpigzPath = detectPigz(); unpigzPath != "" { + log.L.Debug("using pigz for decompression") + } + }) + + if unpigzPath == "" { + return gzip.NewReader(buf) + } + + return cmdStream(exec.CommandContext(ctx, unpigzPath, "-d", "-c"), buf) +} + +func cmdStream(cmd *exec.Cmd, in io.Reader) (io.ReadCloser, error) { + reader, writer := io.Pipe() + + cmd.Stdin = in + cmd.Stdout = writer + + var errBuf bytes.Buffer + cmd.Stderr = &errBuf + + if err := cmd.Start(); err != nil { + return nil, err + } + + go func() { + if err := cmd.Wait(); err != nil { + writer.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String())) + } else { + writer.Close() + } + }() + + return reader, nil +} + +func detectPigz() string { + path, err := exec.LookPath("unpigz") + if err != nil { + log.L.WithError(err).Debug("unpigz not found, falling back to go gzip") + return "" + } + + // Check if pigz disabled via CONTAINERD_DISABLE_PIGZ env variable + value := os.Getenv(disablePigzEnv) + if value == "" { + return path + } + + disable, err := strconv.ParseBool(value) + if err != nil { + log.L.WithError(err).Warnf("could not parse %s: %s", disablePigzEnv, value) + return path + } + + if disable { + return "" + } + + return path +} diff --git a/vendor/github.com/containerd/containerd/archive/tar_unix.go b/vendor/github.com/containerd/containerd/archive/tar_unix.go index e8721875..d081351f 100644 --- a/vendor/github.com/containerd/containerd/archive/tar_unix.go +++ b/vendor/github.com/containerd/containerd/archive/tar_unix.go @@ -176,7 +176,10 @@ func copyDirInfo(fi os.FileInfo, path string) error { return errors.Wrapf(err, "failed to chmod %s", path) } - timespec := 
[]unix.Timespec{unix.Timespec(fs.StatAtime(st)), unix.Timespec(fs.StatMtime(st))} + timespec := []unix.Timespec{ + unix.NsecToTimespec(syscall.TimespecToNsec(fs.StatAtime(st))), + unix.NsecToTimespec(syscall.TimespecToNsec(fs.StatMtime(st))), + } if err := unix.UtimesNanoAt(unix.AT_FDCWD, path, timespec, unix.AT_SYMLINK_NOFOLLOW); err != nil { return errors.Wrapf(err, "failed to utime %s", path) } diff --git a/vendor/github.com/containerd/containerd/containers/containers.go b/vendor/github.com/containerd/containerd/containers/containers.go new file mode 100644 index 00000000..7174bbd6 --- /dev/null +++ b/vendor/github.com/containerd/containerd/containers/containers.go @@ -0,0 +1,112 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package containers + +import ( + "context" + "time" + + "github.com/gogo/protobuf/types" +) + +// Container represents the set of data pinned by a container. Unless otherwise +// noted, the resources here are considered in use by the container. +// +// The resources specified in this object are used to create tasks from the container. +type Container struct { + // ID uniquely identifies the container in a namespace. + // + // This property is required and cannot be changed after creation. + ID string + + // Labels provide metadata extension for a container. + // + // These are optional and fully mutable. + Labels map[string]string + + // Image specifies the image reference used for a container. + // + // This property is optional and mutable. + Image string + + // Runtime specifies which runtime should be used when launching container + // tasks. + // + // This property is required and immutable. + Runtime RuntimeInfo + + // Spec should carry the runtime specification used to implement the + // container. + // + // This field is required but mutable. + Spec *types.Any + + // SnapshotKey specifies the snapshot key to use for the container's root + // filesystem. When starting a task from this container, a caller should + // look up the mounts from the snapshot service and include those on the + // task create request. + // + // This field is not required but mutable. + SnapshotKey string + + // Snapshotter specifies the snapshotter name used for rootfs + // + // This field is not required but immutable. + Snapshotter string + + // CreatedAt is the time at which the container was created. + CreatedAt time.Time + + // UpdatedAt is the time at which the container was updated. + UpdatedAt time.Time + + // Extensions stores client-specified metadata + Extensions map[string]types.Any +} + +// RuntimeInfo holds runtime specific information +type RuntimeInfo struct { + Name string + Options *types.Any +} + +// Store interacts with the underlying container storage +type Store interface { + // Get a container using the id. + // + // Container object is returned on success. If the id is not known to the + // store, an error will be returned. 
+	Get(ctx context.Context, id string) (Container, error)
+
+	// List returns containers that match one or more of the provided filters.
+	List(ctx context.Context, filters ...string) ([]Container, error)
+
+	// Create a container in the store from the provided container.
+	Create(ctx context.Context, container Container) (Container, error)
+
+	// Update the container with the provided container object. ID must be set.
+	//
+	// If one or more fieldpaths are provided, only the field corresponding to
+	// the fieldpaths will be mutated.
+	Update(ctx context.Context, container Container, fieldpaths ...string) (Container, error)
+
+	// Delete a container using the id.
+	//
+	// nil will be returned on success. If the container is not known to the
+	// store, ErrNotFound will be returned.
+	Delete(ctx context.Context, id string) error
+}
diff --git a/vendor/github.com/containerd/containerd/content/content.go b/vendor/github.com/containerd/containerd/content/content.go
new file mode 100644
index 00000000..d8141a68
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/content/content.go
@@ -0,0 +1,182 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package content
+
+import (
+	"context"
+	"io"
+	"time"
+
+	"github.com/opencontainers/go-digest"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// ReaderAt extends the standard io.ReaderAt interface with reporting of Size and io.Closer
+type ReaderAt interface {
+	io.ReaderAt
+	io.Closer
+	Size() int64
+}
+
+// Provider provides a reader interface for specific content
+type Provider interface {
+	// ReaderAt only requires desc.Digest to be set.
+	// Other fields in the descriptor may be used internally for resolving
+	// the location of the actual data.
+	ReaderAt(ctx context.Context, desc ocispec.Descriptor) (ReaderAt, error)
+}
+
+// Ingester writes content
+type Ingester interface {
+	// Some implementations require WithRef to be included in opts.
+	Writer(ctx context.Context, opts ...WriterOpt) (Writer, error)
+}
+
+// Info holds content specific information
+//
+// TODO(stevvooe): Consider a very different name for this struct. Info is way
+// too general. It also reads very weird in certain contexts, like pluralization.
+type Info struct {
+	Digest    digest.Digest
+	Size      int64
+	CreatedAt time.Time
+	UpdatedAt time.Time
+	Labels    map[string]string
+}
+
+// Status of a content operation
+type Status struct {
+	Ref       string
+	Offset    int64
+	Total     int64
+	Expected  digest.Digest
+	StartedAt time.Time
+	UpdatedAt time.Time
+}
+
+// WalkFunc defines the callback for a blob walk.
+type WalkFunc func(Info) error
+
+// Manager provides methods for inspecting, listing and removing content.
+type Manager interface {
+	// Info will return metadata about content available in the content store.
+	//
+	// If the content is not present, ErrNotFound will be returned.
+	Info(ctx context.Context, dgst digest.Digest) (Info, error)
+
+	// Update updates mutable information related to content.
+	// If one or more fieldpaths are provided, only those
+	// fields will be updated.
+	// Mutable fields:
+	//  labels.*
+	Update(ctx context.Context, info Info, fieldpaths ...string) (Info, error)
+
+	// Walk will call fn for each item in the content store which
+	// matches the provided filters. If no filters are given, all
+	// items will be walked.
+	Walk(ctx context.Context, fn WalkFunc, filters ...string) error
+
+	// Delete removes the content from the store.
+	Delete(ctx context.Context, dgst digest.Digest) error
+}
+
+// IngestManager provides methods for managing ingests.
+type IngestManager interface {
+	// Status returns the status of the provided ref.
+	Status(ctx context.Context, ref string) (Status, error)
+
+	// ListStatuses returns the status of any active ingestions whose ref matches the
+	// provided regular expression. If empty, all active ingestions will be
+	// returned.
+	ListStatuses(ctx context.Context, filters ...string) ([]Status, error)
+
+	// Abort completely cancels the ingest operation targeted by ref.
+	Abort(ctx context.Context, ref string) error
+}
+
+// Writer handles the write of content into a content store
+type Writer interface {
+	// Close closes the writer, if the writer has not been
+	// committed this allows resuming or aborting.
+	// Calling Close on a closed writer will not error.
+	io.WriteCloser
+
+	// Digest may return an empty digest or panic until committed.
+	Digest() digest.Digest
+
+	// Commit commits the blob (but no roll-back is guaranteed on an error).
+	// size and expected can be zero-value when unknown.
+	// Commit always closes the writer, even on error.
+	// ErrAlreadyExists aborts the writer.
+	Commit(ctx context.Context, size int64, expected digest.Digest, opts ...Opt) error
+
+	// Status returns the current state of the write
+	Status() (Status, error)
+
+	// Truncate updates the size of the target blob
+	Truncate(size int64) error
+}
+
+// Store combines the methods of content-oriented interfaces into a set that
+// are commonly provided by complete implementations.
+type Store interface {
+	Manager
+	Provider
+	IngestManager
+	Ingester
+}
+
+// Opt is used to alter the mutable properties of content
+type Opt func(*Info) error
+
+// WithLabels allows labels to be set on content
+func WithLabels(labels map[string]string) Opt {
+	return func(info *Info) error {
+		info.Labels = labels
+		return nil
+	}
+}
+
+// WriterOpts is internally used by WriterOpt.
+type WriterOpts struct {
+	Ref  string
+	Desc ocispec.Descriptor
+}
+
+// WriterOpt is used for passing options to Ingester.Writer.
+type WriterOpt func(*WriterOpts) error
+
+// WithDescriptor specifies an OCI descriptor.
+// Writer may optionally use the descriptor internally for resolving
+// the location of the actual data.
+// Write does not require any field of desc to be set.
+// If the data size is unknown, desc.Size should be set to 0.
+// Some implementations may also accept negative values as "unknown".
+func WithDescriptor(desc ocispec.Descriptor) WriterOpt {
+	return func(opts *WriterOpts) error {
+		opts.Desc = desc
+		return nil
+	}
+}
+
+// WithRef specifies a ref string.
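+//
+// A typical call pairs it with WithDescriptor when opening a writer
+// (editor's sketch; ing and desc are assumed to exist):
+//
+//	w, err := ing.Writer(ctx, content.WithRef("upload-layer-0"), content.WithDescriptor(desc))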
+func WithRef(ref string) WriterOpt {
+	return func(opts *WriterOpts) error {
+		opts.Ref = ref
+		return nil
+	}
+}
diff --git a/vendor/github.com/containerd/containerd/content/helpers.go b/vendor/github.com/containerd/containerd/content/helpers.go
new file mode 100644
index 00000000..c1c20461
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/content/helpers.go
@@ -0,0 +1,237 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package content
+
+import (
+	"context"
+	"io"
+	"io/ioutil"
+	"math/rand"
+	"sync"
+	"time"
+
+	"github.com/containerd/containerd/errdefs"
+	"github.com/opencontainers/go-digest"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+)
+
+var bufPool = sync.Pool{
+	New: func() interface{} {
+		buffer := make([]byte, 1<<20)
+		return &buffer
+	},
+}
+
+// NewReader returns an io.Reader from a ReaderAt
+func NewReader(ra ReaderAt) io.Reader {
+	rd := io.NewSectionReader(ra, 0, ra.Size())
+	return rd
+}
+
+// ReadBlob retrieves the entire contents of the blob from the provider.
+//
+// Avoid using this for large blobs, such as layers.
+func ReadBlob(ctx context.Context, provider Provider, desc ocispec.Descriptor) ([]byte, error) {
+	ra, err := provider.ReaderAt(ctx, desc)
+	if err != nil {
+		return nil, err
+	}
+	defer ra.Close()
+
+	p := make([]byte, ra.Size())
+
+	n, err := ra.ReadAt(p, 0)
+	if err == io.EOF {
+		if int64(n) != ra.Size() {
+			err = io.ErrUnexpectedEOF
+		} else {
+			err = nil
+		}
+	}
+	return p, err
+}
+
+// WriteBlob writes data with the expected digest into the content store. If
+// expected already exists, the method returns immediately and the reader will
+// not be consumed.
+//
+// This is useful when the digest and size are known beforehand.
+//
+// Copy is buffered, so no need to wrap reader in buffered io.
+func WriteBlob(ctx context.Context, cs Ingester, ref string, r io.Reader, desc ocispec.Descriptor, opts ...Opt) error {
+	cw, err := OpenWriter(ctx, cs, WithRef(ref), WithDescriptor(desc))
+	if err != nil {
+		if !errdefs.IsAlreadyExists(err) {
+			return errors.Wrap(err, "failed to open writer")
+		}
+
+		return nil // already present
+	}
+	defer cw.Close()
+
+	return Copy(ctx, cw, r, desc.Size, desc.Digest, opts...)
+}
+
+// OpenWriter opens a new writer for the given reference, retrying while the
+// writer is locked, until the reference becomes available or an error occurs.
+func OpenWriter(ctx context.Context, cs Ingester, opts ...WriterOpt) (Writer, error) {
+	var (
+		cw    Writer
+		err   error
+		retry = 16
+	)
+	for {
+		cw, err = cs.Writer(ctx, opts...)
+		if err != nil {
+			if !errdefs.IsUnavailable(err) {
+				return nil, err
+			}
+
+			// TODO: Check status to determine if the writer is active,
+			// continue waiting while active, otherwise return lock
+			// error or abort. 
Requires asserting for an ingest manager + + select { + case <-time.After(time.Millisecond * time.Duration(rand.Intn(retry))): + if retry < 2048 { + retry = retry << 1 + } + continue + case <-ctx.Done(): + // Propagate lock error + return nil, err + } + + } + break + } + + return cw, err +} + +// Copy copies data with the expected digest from the reader into the +// provided content store writer. This copy commits the writer. +// +// This is useful when the digest and size are known beforehand. When +// the size or digest is unknown, these values may be empty. +// +// Copy is buffered, so no need to wrap reader in buffered io. +func Copy(ctx context.Context, cw Writer, r io.Reader, size int64, expected digest.Digest, opts ...Opt) error { + ws, err := cw.Status() + if err != nil { + return errors.Wrap(err, "failed to get status") + } + + if ws.Offset > 0 { + r, err = seekReader(r, ws.Offset, size) + if err != nil { + return errors.Wrapf(err, "unable to resume write to %v", ws.Ref) + } + } + + if _, err := copyWithBuffer(cw, r); err != nil { + return errors.Wrap(err, "failed to copy") + } + + if err := cw.Commit(ctx, size, expected, opts...); err != nil { + if !errdefs.IsAlreadyExists(err) { + return errors.Wrapf(err, "failed commit on ref %q", ws.Ref) + } + } + + return nil +} + +// CopyReaderAt copies to a writer from a given reader at for the given +// number of bytes. This copy does not commit the writer. +func CopyReaderAt(cw Writer, ra ReaderAt, n int64) error { + ws, err := cw.Status() + if err != nil { + return err + } + + _, err = copyWithBuffer(cw, io.NewSectionReader(ra, ws.Offset, n)) + return err +} + +// CopyReader copies to a writer from a given reader, returning +// the number of bytes copied. +// Note: if the writer has a non-zero offset, the total number +// of bytes read may be greater than those copied if the reader +// is not an io.Seeker. +// This copy does not commit the writer. +func CopyReader(cw Writer, r io.Reader) (int64, error) { + ws, err := cw.Status() + if err != nil { + return 0, errors.Wrap(err, "failed to get status") + } + + if ws.Offset > 0 { + r, err = seekReader(r, ws.Offset, 0) + if err != nil { + return 0, errors.Wrapf(err, "unable to resume write to %v", ws.Ref) + } + } + + return copyWithBuffer(cw, r) +} + +// seekReader attempts to seek the reader to the given offset, either by +// resolving `io.Seeker`, by detecting `io.ReaderAt`, or discarding +// up to the given offset. +func seekReader(r io.Reader, offset, size int64) (io.Reader, error) { + // attempt to resolve r as a seeker and setup the offset. + seeker, ok := r.(io.Seeker) + if ok { + nn, err := seeker.Seek(offset, io.SeekStart) + if nn != offset { + return nil, errors.Wrapf(err, "failed to seek to offset %v", offset) + } + + if err != nil { + return nil, err + } + + return r, nil + } + + // ok, let's try io.ReaderAt! 
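+	// (Editor's note: the section reader below serves reads starting at
+	// offset without consuming the underlying stream, so the already-written
+	// prefix is skipped cheaply when random access is available.)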
+	readerAt, ok := r.(io.ReaderAt)
+	if ok && size > offset {
+		sr := io.NewSectionReader(readerAt, offset, size)
+		return sr, nil
+	}
+
+	// well then, let's just discard up to the offset
+	n, err := copyWithBuffer(ioutil.Discard, io.LimitReader(r, offset))
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to discard to offset")
+	}
+	if n != offset {
+		return nil, errors.Errorf("unable to discard to offset")
+	}
+
+	return r, nil
+}
+
+func copyWithBuffer(dst io.Writer, src io.Reader) (written int64, err error) {
+	buf := bufPool.Get().(*[]byte)
+	written, err = io.CopyBuffer(dst, src, *buf)
+	bufPool.Put(buf)
+	return
+}
diff --git a/vendor/github.com/containerd/containerd/content/local/locks.go b/vendor/github.com/containerd/containerd/content/local/locks.go
new file mode 100644
index 00000000..bc3bd18e
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/content/local/locks.go
@@ -0,0 +1,51 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package local
+
+import (
+	"sync"
+
+	"github.com/containerd/containerd/errdefs"
+	"github.com/pkg/errors"
+)
+
+// Handles locking references
+
+var (
+	// locks lets us lock in process
+	locks   = map[string]struct{}{}
+	locksMu sync.Mutex
+)
+
+func tryLock(ref string) error {
+	locksMu.Lock()
+	defer locksMu.Unlock()
+
+	if _, ok := locks[ref]; ok {
+		return errors.Wrapf(errdefs.ErrUnavailable, "ref %s locked", ref)
+	}
+
+	locks[ref] = struct{}{}
+	return nil
+}
+
+func unlock(ref string) {
+	locksMu.Lock()
+	defer locksMu.Unlock()
+
+	delete(locks, ref)
+}
diff --git a/vendor/github.com/containerd/containerd/content/local/readerat.go b/vendor/github.com/containerd/containerd/content/local/readerat.go
new file mode 100644
index 00000000..42b99dc4
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/content/local/readerat.go
@@ -0,0 +1,40 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package local
+
+import (
+	"os"
+)
+
+// sizeReaderAt pairs an already-opened blob file with its known size,
+// serving ReadAt calls against that open file handle.
+type sizeReaderAt struct { + size int64 + fp *os.File +} + +func (ra sizeReaderAt) ReadAt(p []byte, offset int64) (int, error) { + return ra.fp.ReadAt(p, offset) +} + +func (ra sizeReaderAt) Size() int64 { + return ra.size +} + +func (ra sizeReaderAt) Close() error { + return ra.fp.Close() +} diff --git a/vendor/github.com/containerd/containerd/content/local/store.go b/vendor/github.com/containerd/containerd/content/local/store.go new file mode 100644 index 00000000..efc58ea7 --- /dev/null +++ b/vendor/github.com/containerd/containerd/content/local/store.go @@ -0,0 +1,678 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package local + +import ( + "context" + "fmt" + "io" + "io/ioutil" + "math/rand" + "os" + "path/filepath" + "strconv" + "strings" + "sync" + "time" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/filters" + "github.com/containerd/containerd/log" + "github.com/sirupsen/logrus" + + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +var bufPool = sync.Pool{ + New: func() interface{} { + buffer := make([]byte, 1<<20) + return &buffer + }, +} + +// LabelStore is used to store mutable labels for digests +type LabelStore interface { + // Get returns all the labels for the given digest + Get(digest.Digest) (map[string]string, error) + + // Set sets all the labels for a given digest + Set(digest.Digest, map[string]string) error + + // Update replaces the given labels for a digest, + // a key with an empty value removes a label. + Update(digest.Digest, map[string]string) (map[string]string, error) +} + +// Store is digest-keyed store for content. All data written into the store is +// stored under a verifiable digest. +// +// Store can generally support multi-reader, single-writer ingest of data, +// including resumable ingest. +type store struct { + root string + ls LabelStore +} + +// NewStore returns a local content store +func NewStore(root string) (content.Store, error) { + return NewLabeledStore(root, nil) +} + +// NewLabeledStore returns a new content store using the provided label store +// +// Note: content stores which are used underneath a metadata store may not +// require labels and should use `NewStore`. `NewLabeledStore` is primarily +// useful for tests or standalone implementations. 
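+//
+// For example (editor's sketch; the root path is illustrative), a plain
+// store created via NewStore can be fed through content.WriteBlob:
+//
+//	cs, err := local.NewStore("/var/lib/example/content")
+//	if err != nil { ... }
+//	err = content.WriteBlob(ctx, cs, "example-ref", r, desc)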
+func NewLabeledStore(root string, ls LabelStore) (content.Store, error) { + if err := os.MkdirAll(filepath.Join(root, "ingest"), 0777); err != nil { + return nil, err + } + + return &store{ + root: root, + ls: ls, + }, nil +} + +func (s *store) Info(ctx context.Context, dgst digest.Digest) (content.Info, error) { + p := s.blobPath(dgst) + fi, err := os.Stat(p) + if err != nil { + if os.IsNotExist(err) { + err = errors.Wrapf(errdefs.ErrNotFound, "content %v", dgst) + } + + return content.Info{}, err + } + var labels map[string]string + if s.ls != nil { + labels, err = s.ls.Get(dgst) + if err != nil { + return content.Info{}, err + } + } + return s.info(dgst, fi, labels), nil +} + +func (s *store) info(dgst digest.Digest, fi os.FileInfo, labels map[string]string) content.Info { + return content.Info{ + Digest: dgst, + Size: fi.Size(), + CreatedAt: fi.ModTime(), + UpdatedAt: getATime(fi), + Labels: labels, + } +} + +// ReaderAt returns an io.ReaderAt for the blob. +func (s *store) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) { + p := s.blobPath(desc.Digest) + fi, err := os.Stat(p) + if err != nil { + if !os.IsNotExist(err) { + return nil, err + } + + return nil, errors.Wrapf(errdefs.ErrNotFound, "blob %s expected at %s", desc.Digest, p) + } + + fp, err := os.Open(p) + if err != nil { + if !os.IsNotExist(err) { + return nil, err + } + + return nil, errors.Wrapf(errdefs.ErrNotFound, "blob %s expected at %s", desc.Digest, p) + } + + return sizeReaderAt{size: fi.Size(), fp: fp}, nil +} + +// Delete removes a blob by its digest. +// +// While this is safe to do concurrently, safe exist-removal logic must hold +// some global lock on the store. +func (s *store) Delete(ctx context.Context, dgst digest.Digest) error { + if err := os.RemoveAll(s.blobPath(dgst)); err != nil { + if !os.IsNotExist(err) { + return err + } + + return errors.Wrapf(errdefs.ErrNotFound, "content %v", dgst) + } + + return nil +} + +func (s *store) Update(ctx context.Context, info content.Info, fieldpaths ...string) (content.Info, error) { + if s.ls == nil { + return content.Info{}, errors.Wrapf(errdefs.ErrFailedPrecondition, "update not supported on immutable content store") + } + + p := s.blobPath(info.Digest) + fi, err := os.Stat(p) + if err != nil { + if os.IsNotExist(err) { + err = errors.Wrapf(errdefs.ErrNotFound, "content %v", info.Digest) + } + + return content.Info{}, err + } + + var ( + all bool + labels map[string]string + ) + if len(fieldpaths) > 0 { + for _, path := range fieldpaths { + if strings.HasPrefix(path, "labels.") { + if labels == nil { + labels = map[string]string{} + } + + key := strings.TrimPrefix(path, "labels.") + labels[key] = info.Labels[key] + continue + } + + switch path { + case "labels": + all = true + labels = info.Labels + default: + return content.Info{}, errors.Wrapf(errdefs.ErrInvalidArgument, "cannot update %q field on content info %q", path, info.Digest) + } + } + } else { + all = true + labels = info.Labels + } + + if all { + err = s.ls.Set(info.Digest, labels) + } else { + labels, err = s.ls.Update(info.Digest, labels) + } + if err != nil { + return content.Info{}, err + } + + info = s.info(info.Digest, fi, labels) + info.UpdatedAt = time.Now() + + if err := os.Chtimes(p, info.UpdatedAt, info.CreatedAt); err != nil { + log.G(ctx).WithError(err).Warnf("could not change access time for %s", info.Digest) + } + + return info, nil +} + +func (s *store) Walk(ctx context.Context, fn content.WalkFunc, filters ...string) error { + // TODO: Support filters + 
root := filepath.Join(s.root, "blobs") + var alg digest.Algorithm + return filepath.Walk(root, func(path string, fi os.FileInfo, err error) error { + if err != nil { + return err + } + if !fi.IsDir() && !alg.Available() { + return nil + } + + // TODO(stevvooe): There are few more cases with subdirs that should be + // handled in case the layout gets corrupted. This isn't strict enough + // and may spew bad data. + + if path == root { + return nil + } + if filepath.Dir(path) == root { + alg = digest.Algorithm(filepath.Base(path)) + + if !alg.Available() { + alg = "" + return filepath.SkipDir + } + + // descending into a hash directory + return nil + } + + dgst := digest.NewDigestFromHex(alg.String(), filepath.Base(path)) + if err := dgst.Validate(); err != nil { + // log error but don't report + log.L.WithError(err).WithField("path", path).Error("invalid digest for blob path") + // if we see this, it could mean some sort of corruption of the + // store or extra paths not expected previously. + } + + var labels map[string]string + if s.ls != nil { + labels, err = s.ls.Get(dgst) + if err != nil { + return err + } + } + return fn(s.info(dgst, fi, labels)) + }) +} + +func (s *store) Status(ctx context.Context, ref string) (content.Status, error) { + return s.status(s.ingestRoot(ref)) +} + +func (s *store) ListStatuses(ctx context.Context, fs ...string) ([]content.Status, error) { + fp, err := os.Open(filepath.Join(s.root, "ingest")) + if err != nil { + return nil, err + } + + defer fp.Close() + + fis, err := fp.Readdir(-1) + if err != nil { + return nil, err + } + + filter, err := filters.ParseAll(fs...) + if err != nil { + return nil, err + } + + var active []content.Status + for _, fi := range fis { + p := filepath.Join(s.root, "ingest", fi.Name()) + stat, err := s.status(p) + if err != nil { + if !os.IsNotExist(err) { + return nil, err + } + + // TODO(stevvooe): This is a common error if uploads are being + // completed while making this listing. Need to consider taking a + // lock on the whole store to coordinate this aspect. + // + // Another option is to cleanup downloads asynchronously and + // coordinate this method with the cleanup process. + // + // For now, we just skip them, as they really don't exist. + continue + } + + if filter.Match(adaptStatus(stat)) { + active = append(active, stat) + } + } + + return active, nil +} + +// WalkStatusRefs is used to walk all status references +// Failed status reads will be logged and ignored, if +// this function is called while references are being altered, +// these error messages may be produced. +func (s *store) WalkStatusRefs(ctx context.Context, fn func(string) error) error { + fp, err := os.Open(filepath.Join(s.root, "ingest")) + if err != nil { + return err + } + + defer fp.Close() + + fis, err := fp.Readdir(-1) + if err != nil { + return err + } + + for _, fi := range fis { + rf := filepath.Join(s.root, "ingest", fi.Name(), "ref") + + ref, err := readFileString(rf) + if err != nil { + log.G(ctx).WithError(err).WithField("path", rf).Error("failed to read ingest ref") + continue + } + + if err := fn(ref); err != nil { + return err + } + } + + return nil +} + +// status works like stat above except uses the path to the ingest. 
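+// Per ref, the ingest directory it inspects holds (as written by
+// (*store).writer below; sketch of the on-disk layout):
+//
+//	ingest/<sha256-of-ref>/
+//	  data       - bytes received so far
+//	  ref        - the original ref string
+//	  startedat  - start timestamp
+//	  updatedat  - update timestamp
+//	  total      - expected total size, when known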
+func (s *store) status(ingestPath string) (content.Status, error) {
+	dp := filepath.Join(ingestPath, "data")
+	fi, err := os.Stat(dp)
+	if err != nil {
+		if os.IsNotExist(err) {
+			err = errors.Wrap(errdefs.ErrNotFound, err.Error())
+		}
+		return content.Status{}, err
+	}
+
+	ref, err := readFileString(filepath.Join(ingestPath, "ref"))
+	if err != nil {
+		if os.IsNotExist(err) {
+			err = errors.Wrap(errdefs.ErrNotFound, err.Error())
+		}
+		return content.Status{}, err
+	}
+
+	startedAt, err := readFileTimestamp(filepath.Join(ingestPath, "startedat"))
+	if err != nil {
+		return content.Status{}, errors.Wrapf(err, "could not read startedat")
+	}
+
+	updatedAt, err := readFileTimestamp(filepath.Join(ingestPath, "updatedat"))
+	if err != nil {
+		return content.Status{}, errors.Wrapf(err, "could not read updatedat")
+	}
+
+	// because we don't write updatedat on every write, the mod time may
+	// actually be more up to date.
+	if fi.ModTime().After(updatedAt) {
+		updatedAt = fi.ModTime()
+	}
+
+	return content.Status{
+		Ref:       ref,
+		Offset:    fi.Size(),
+		Total:     s.total(ingestPath),
+		UpdatedAt: updatedAt,
+		StartedAt: startedAt,
+	}, nil
+}
+
+func adaptStatus(status content.Status) filters.Adaptor {
+	return filters.AdapterFunc(func(fieldpath []string) (string, bool) {
+		if len(fieldpath) == 0 {
+			return "", false
+		}
+		switch fieldpath[0] {
+		case "ref":
+			return status.Ref, true
+		}
+
+		return "", false
+	})
+}
+
+// total attempts to resolve the total expected size for the write.
+func (s *store) total(ingestPath string) int64 {
+	totalS, err := readFileString(filepath.Join(ingestPath, "total"))
+	if err != nil {
+		return 0
+	}
+
+	total, err := strconv.ParseInt(totalS, 10, 64)
+	if err != nil {
+		// represents a corrupted file, should probably remove.
+		return 0
+	}
+
+	return total
+}
+
+// Writer begins or resumes the active writer identified by ref. If the writer
+// is already in use, an error is returned. Only one writer may be in use per
+// ref at a time.
+//
+// The argument `ref` is used to uniquely identify a long-lived writer transaction.
+func (s *store) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) {
+	var wOpts content.WriterOpts
+	for _, opt := range opts {
+		if err := opt(&wOpts); err != nil {
+			return nil, err
+		}
+	}
+	// TODO(AkihiroSuda): we could create a random string or one calculated based on the context
+	// https://github.com/containerd/containerd/issues/2129#issuecomment-380255019
+	if wOpts.Ref == "" {
+		return nil, errors.Wrap(errdefs.ErrInvalidArgument, "ref must not be empty")
+	}
+	var lockErr error
+	for count := uint64(0); count < 10; count++ {
+		time.Sleep(time.Millisecond * time.Duration(rand.Intn(1<<count)))
+		lockErr = nil
+		if err := tryLock(wOpts.Ref); err != nil {
+			if !errdefs.IsUnavailable(err) {
+				return nil, err
+			}
+
+			lockErr = err
+		} else {
+			break
+		}
+	}
+
+	if lockErr != nil {
+		return nil, lockErr
+	}
+
+	w, err := s.writer(ctx, wOpts.Ref, wOpts.Desc.Size, wOpts.Desc.Digest)
+	if err != nil {
+		unlock(wOpts.Ref)
+		return nil, err
+	}
+
+	return w, nil // lock is now held by w.
+}
+
+func (s *store) resumeStatus(ref string, total int64, digester digest.Digester) (content.Status, error) {
+	path, _, data := s.ingestPaths(ref)
+	status, err := s.status(path)
+	if err != nil {
+		return status, errors.Wrap(err, "failed reading status of resume write")
+	}
+	if ref != status.Ref {
+		// NOTE(stevvooe): This is fairly catastrophic. Either we have some
+		// layout corruption or a hash collision for the ref key.
+		return status, errors.Errorf("ref key does not match: %v != %v", ref, status.Ref)
+	}
+
+	if total > 0 && status.Total > 0 && total != status.Total {
+		return status, errors.Errorf("provided total differs from status: %v != %v", total, status.Total)
+	}
+
+	// TODO(stevvooe): slow slow slow!!, send to goroutine or use resumable hashes
+	fp, err := os.Open(data)
+	if err != nil {
+		return status, err
+	}
+
+	p := bufPool.Get().(*[]byte)
+	status.Offset, err = io.CopyBuffer(digester.Hash(), fp, *p)
+	bufPool.Put(p)
+	fp.Close()
+	return status, err
+}
+
+// writer provides the main implementation of the Writer method. The caller
+// must hold the lock correctly and release on error if there is a problem.
+func (s *store) writer(ctx context.Context, ref string, total int64, expected digest.Digest) (content.Writer, error) {
+	// TODO(stevvooe): Need to actually store expected here. 
We have + // code in the service that shouldn't be dealing with this. + if expected != "" { + p := s.blobPath(expected) + if _, err := os.Stat(p); err == nil { + return nil, errors.Wrapf(errdefs.ErrAlreadyExists, "content %v", expected) + } + } + + path, refp, data := s.ingestPaths(ref) + + var ( + digester = digest.Canonical.Digester() + offset int64 + startedAt time.Time + updatedAt time.Time + ) + + foundValidIngest := false + // ensure that the ingest path has been created. + if err := os.Mkdir(path, 0755); err != nil { + if !os.IsExist(err) { + return nil, err + } + status, err := s.resumeStatus(ref, total, digester) + if err == nil { + foundValidIngest = true + updatedAt = status.UpdatedAt + startedAt = status.StartedAt + total = status.Total + offset = status.Offset + } else { + logrus.Infof("failed to resume the status from path %s: %s. will recreate them", path, err.Error()) + } + } + + if !foundValidIngest { + startedAt = time.Now() + updatedAt = startedAt + + // the ingest is new, we need to setup the target location. + // write the ref to a file for later use + if err := ioutil.WriteFile(refp, []byte(ref), 0666); err != nil { + return nil, err + } + + if err := writeTimestampFile(filepath.Join(path, "startedat"), startedAt); err != nil { + return nil, err + } + + if err := writeTimestampFile(filepath.Join(path, "updatedat"), startedAt); err != nil { + return nil, err + } + + if total > 0 { + if err := ioutil.WriteFile(filepath.Join(path, "total"), []byte(fmt.Sprint(total)), 0666); err != nil { + return nil, err + } + } + } + + fp, err := os.OpenFile(data, os.O_WRONLY|os.O_CREATE, 0666) + if err != nil { + return nil, errors.Wrap(err, "failed to open data file") + } + + if _, err := fp.Seek(offset, io.SeekStart); err != nil { + return nil, errors.Wrap(err, "could not seek to current write offset") + } + + return &writer{ + s: s, + fp: fp, + ref: ref, + path: path, + offset: offset, + total: total, + digester: digester, + startedAt: startedAt, + updatedAt: updatedAt, + }, nil +} + +// Abort an active transaction keyed by ref. If the ingest is active, it will +// be cancelled. Any resources associated with the ingest will be cleaned. +func (s *store) Abort(ctx context.Context, ref string) error { + root := s.ingestRoot(ref) + if err := os.RemoveAll(root); err != nil { + if os.IsNotExist(err) { + return errors.Wrapf(errdefs.ErrNotFound, "ingest ref %q", ref) + } + + return err + } + + return nil +} + +func (s *store) blobPath(dgst digest.Digest) string { + return filepath.Join(s.root, "blobs", dgst.Algorithm().String(), dgst.Hex()) +} + +func (s *store) ingestRoot(ref string) string { + dgst := digest.FromString(ref) + return filepath.Join(s.root, "ingest", dgst.Hex()) +} + +// ingestPaths are returned. The paths are the following: +// +// - root: entire ingest directory +// - ref: name of the starting ref, must be unique +// - data: file where data is written +// +func (s *store) ingestPaths(ref string) (string, string, string) { + var ( + fp = s.ingestRoot(ref) + rp = filepath.Join(fp, "ref") + dp = filepath.Join(fp, "data") + ) + + return fp, rp, dp +} + +func readFileString(path string) (string, error) { + p, err := ioutil.ReadFile(path) + return string(p), err +} + +// readFileTimestamp reads a file with just a timestamp present. 
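+// The value is stored via time.Time's MarshalText (RFC 3339), so a round
+// trip looks like (editor's sketch; the path is illustrative):
+//
+//	_ = writeTimestampFile("/tmp/ingest/startedat", time.Now())
+//	t, err := readFileTimestamp("/tmp/ingest/startedat")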
+func readFileTimestamp(p string) (time.Time, error) { + b, err := ioutil.ReadFile(p) + if err != nil { + if os.IsNotExist(err) { + err = errors.Wrap(errdefs.ErrNotFound, err.Error()) + } + return time.Time{}, err + } + + var t time.Time + if err := t.UnmarshalText(b); err != nil { + return time.Time{}, errors.Wrapf(err, "could not parse timestamp file %v", p) + } + + return t, nil +} + +func writeTimestampFile(p string, t time.Time) error { + b, err := t.MarshalText() + if err != nil { + return err + } + return atomicWrite(p, b, 0666) +} + +func atomicWrite(path string, data []byte, mode os.FileMode) error { + tmp := fmt.Sprintf("%s.tmp", path) + f, err := os.OpenFile(tmp, os.O_RDWR|os.O_CREATE|os.O_TRUNC|os.O_SYNC, mode) + if err != nil { + return errors.Wrap(err, "create tmp file") + } + _, err = f.Write(data) + f.Close() + if err != nil { + return errors.Wrap(err, "write atomic data") + } + return os.Rename(tmp, path) +} diff --git a/vendor/github.com/containerd/containerd/sys/filesys.go b/vendor/github.com/containerd/containerd/content/local/store_unix.go similarity index 61% rename from vendor/github.com/containerd/containerd/sys/filesys.go rename to vendor/github.com/containerd/containerd/content/local/store_unix.go index 825d21d1..f5f34fd0 100644 --- a/vendor/github.com/containerd/containerd/sys/filesys.go +++ b/vendor/github.com/containerd/containerd/content/local/store_unix.go @@ -1,3 +1,5 @@ +// +build linux solaris darwin freebsd + /* Copyright The containerd Authors. @@ -14,22 +16,20 @@ limitations under the License. */ -package sys +package local -import "os" +import ( + "os" + "syscall" + "time" -// IsFifo checks if a file is a (named pipe) fifo -// if the file does not exist then it returns false -func IsFifo(path string) (bool, error) { - stat, err := os.Stat(path) - if err != nil { - if os.IsNotExist(err) { - return false, nil - } - return false, err + "github.com/containerd/containerd/sys" +) + +func getATime(fi os.FileInfo) time.Time { + if st, ok := fi.Sys().(*syscall.Stat_t); ok { + return sys.StatATimeAsTime(st) } - if stat.Mode()&os.ModeNamedPipe == os.ModeNamedPipe { - return true, nil - } - return false, nil + + return fi.ModTime() } diff --git a/vendor/github.com/containerd/containerd/content/local/store_windows.go b/vendor/github.com/containerd/containerd/content/local/store_windows.go new file mode 100644 index 00000000..bce84997 --- /dev/null +++ b/vendor/github.com/containerd/containerd/content/local/store_windows.go @@ -0,0 +1,26 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package local + +import ( + "os" + "time" +) + +func getATime(fi os.FileInfo) time.Time { + return fi.ModTime() +} diff --git a/vendor/github.com/containerd/containerd/content/local/writer.go b/vendor/github.com/containerd/containerd/content/local/writer.go new file mode 100644 index 00000000..3a94744e --- /dev/null +++ b/vendor/github.com/containerd/containerd/content/local/writer.go @@ -0,0 +1,207 @@ +/* + Copyright The containerd Authors. 
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package local
+
+import (
+	"context"
+	"io"
+	"os"
+	"path/filepath"
+	"runtime"
+	"time"
+
+	"github.com/containerd/containerd/content"
+	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/containerd/log"
+	"github.com/opencontainers/go-digest"
+	"github.com/pkg/errors"
+)
+
+// writer represents a write transaction against the blob store.
+type writer struct {
+	s         *store
+	fp        *os.File // opened data file
+	path      string   // path to writer dir
+	ref       string   // ref key
+	offset    int64
+	total     int64
+	digester  digest.Digester
+	startedAt time.Time
+	updatedAt time.Time
+}
+
+func (w *writer) Status() (content.Status, error) {
+	return content.Status{
+		Ref:       w.ref,
+		Offset:    w.offset,
+		Total:     w.total,
+		StartedAt: w.startedAt,
+		UpdatedAt: w.updatedAt,
+	}, nil
+}
+
+// Digest returns the current digest of the content, up to the current write.
+//
+// Cannot be called concurrently with `Write`.
+func (w *writer) Digest() digest.Digest {
+	return w.digester.Digest()
+}
+
+// Write p to the transaction.
+//
+// Note that writes are unbuffered to the backing file. When writing, it is
+// recommended to wrap in a bufio.Writer or, preferably, use io.CopyBuffer.
+func (w *writer) Write(p []byte) (n int, err error) {
+	n, err = w.fp.Write(p)
+	w.digester.Hash().Write(p[:n])
+	w.offset += int64(len(p))
+	w.updatedAt = time.Now()
+	return n, err
+}
+
+func (w *writer) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error {
+	// Ensure even on error the writer is fully closed
+	defer unlock(w.ref)
+
+	var base content.Info
+	for _, opt := range opts {
+		if err := opt(&base); err != nil {
+			return err
+		}
+	}
+
+	fp := w.fp
+	w.fp = nil
+
+	if fp == nil {
+		return errors.Wrap(errdefs.ErrFailedPrecondition, "cannot commit on closed writer")
+	}
+
+	if err := fp.Sync(); err != nil {
+		fp.Close()
+		return errors.Wrap(err, "sync failed")
+	}
+
+	fi, err := fp.Stat()
+	closeErr := fp.Close()
+	if err != nil {
+		return errors.Wrap(err, "stat on ingest file failed")
+	}
+	if closeErr != nil {
+		return errors.Wrap(closeErr, "failed to close ingest file")
+	}
+
+	if size > 0 && size != fi.Size() {
+		return errors.Wrapf(errdefs.ErrFailedPrecondition, "unexpected commit size %d, expected %d", fi.Size(), size)
+	}
+
+	dgst := w.digester.Digest()
+	if expected != "" && expected != dgst {
+		return errors.Wrapf(errdefs.ErrFailedPrecondition, "unexpected commit digest %s, expected %s", dgst, expected)
+	}
+
+	var (
+		ingest = filepath.Join(w.path, "data")
+		target = w.s.blobPath(dgst)
+	)
+
+	// make sure parent directories of blob exist
+	if err := os.MkdirAll(filepath.Dir(target), 0755); err != nil {
+		return err
+	}
+
+	if _, err := os.Stat(target); err == nil {
+		// collision with the target file!
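+		// (Editor's note: because blobs are content addressed, an existing
+		// target with this digest already holds identical bytes, so the
+		// ingest can be discarded and AlreadyExists reported.)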
+		if err := os.RemoveAll(w.path); err != nil {
+			log.G(ctx).WithField("ref", w.ref).WithField("path", w.path).Errorf("failed to remove ingest directory")
+		}
+		return errors.Wrapf(errdefs.ErrAlreadyExists, "content %v", dgst)
+	}
+
+	if err := os.Rename(ingest, target); err != nil {
+		return err
+	}
+
+	// Ingest has now been made available in the content store, attempt to complete
+	// setting metadata but errors should only be logged and not returned since
+	// the content store cannot be cleanly rolled back.
+
+	commitTime := time.Now()
+	if err := os.Chtimes(target, commitTime, commitTime); err != nil {
+		log.G(ctx).WithField("digest", dgst).Errorf("failed to change file time to commit time")
+	}
+
+	// clean up!!
+	if err := os.RemoveAll(w.path); err != nil {
+		log.G(ctx).WithField("ref", w.ref).WithField("path", w.path).Errorf("failed to remove ingest directory")
+	}
+
+	if w.s.ls != nil && base.Labels != nil {
+		if err := w.s.ls.Set(dgst, base.Labels); err != nil {
+			log.G(ctx).WithField("digest", dgst).Errorf("failed to set labels")
+		}
+	}
+
+	// change to readonly, more important for read, but provides _some_
+	// protection from this point on. We use the existing perms with a mask
+	// only allowing reads, honoring the umask on creation.
+	//
+	// This removes write and exec, only allowing read per the creation umask.
+	//
+	// NOTE: Windows does not support this operation
+	if runtime.GOOS != "windows" {
+		if err := os.Chmod(target, (fi.Mode()&os.ModePerm)&^0333); err != nil {
+			log.G(ctx).WithField("ref", w.ref).Errorf("failed to make readonly")
+		}
+	}
+
+	return nil
+}
+
+// Close the writer, flushing any unwritten data and leaving the progress
+// intact.
+//
+// If one needs to resume the transaction, a new writer can be obtained from
+// `Ingester.Writer` using the same key. The write can then be continued
+// from where it left off.
+//
+// To abandon a transaction completely, first call Close, then `IngestManager.Abort` to
+// clean up the associated resources.
+func (w *writer) Close() (err error) {
+	if w.fp != nil {
+		w.fp.Sync()
+		err = w.fp.Close()
+		writeTimestampFile(filepath.Join(w.path, "updatedat"), w.updatedAt)
+		w.fp = nil
+		unlock(w.ref)
+		return
+	}
+
+	return nil
+}
+
+func (w *writer) Truncate(size int64) error {
+	if size != 0 {
+		return errors.New("Truncate: unsupported size")
+	}
+	w.offset = 0
+	w.digester.Hash().Reset()
+	if _, err := w.fp.Seek(0, io.SeekStart); err != nil {
+		return err
+	}
+	return w.fp.Truncate(0)
+}
diff --git a/vendor/github.com/containerd/containerd/content/proxy/content_reader.go b/vendor/github.com/containerd/containerd/content/proxy/content_reader.go
new file mode 100644
index 00000000..b06e48fa
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/content/proxy/content_reader.go
@@ -0,0 +1,65 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/ + +package proxy + +import ( + "context" + + contentapi "github.com/containerd/containerd/api/services/content/v1" + digest "github.com/opencontainers/go-digest" +) + +type remoteReaderAt struct { + ctx context.Context + digest digest.Digest + size int64 + client contentapi.ContentClient +} + +func (ra *remoteReaderAt) Size() int64 { + return ra.size +} + +func (ra *remoteReaderAt) ReadAt(p []byte, off int64) (n int, err error) { + rr := &contentapi.ReadContentRequest{ + Digest: ra.digest, + Offset: off, + Size_: int64(len(p)), + } + rc, err := ra.client.Read(ra.ctx, rr) + if err != nil { + return 0, err + } + + for len(p) > 0 { + var resp *contentapi.ReadContentResponse + // fill our buffer up until we can fill p. + resp, err = rc.Recv() + if err != nil { + return n, err + } + + copied := copy(p, resp.Data) + n += copied + p = p[copied:] + } + return n, nil +} + +func (ra *remoteReaderAt) Close() error { + return nil +} diff --git a/vendor/github.com/containerd/containerd/content/proxy/content_store.go b/vendor/github.com/containerd/containerd/content/proxy/content_store.go new file mode 100644 index 00000000..217b7465 --- /dev/null +++ b/vendor/github.com/containerd/containerd/content/proxy/content_store.go @@ -0,0 +1,234 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package proxy + +import ( + "context" + "io" + + contentapi "github.com/containerd/containerd/api/services/content/v1" + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" + protobuftypes "github.com/gogo/protobuf/types" + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +type proxyContentStore struct { + client contentapi.ContentClient +} + +// NewContentStore returns a new content store which communicates over a GRPC +// connection using the containerd content GRPC API. 
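
Each ReadAt call above opens a fresh Read stream and drains responses until p is full, so random access works but sequential consumption is simpler through a plain reader. A sketch under the assumption that desc describes a blob already present in the store; content.NewReader is the containerd helper that adapts a ReaderAt into an io.Reader:

    package example

    import (
        "context"
        "io/ioutil"

        "github.com/containerd/containerd/content"
        ocispec "github.com/opencontainers/image-spec/specs-go/v1"
    )

    // readBlob drains one blob through the store's ReaderAt. content.NewReader
    // adapts the ReaderAt into a sequential io.Reader, so each chunk arrives
    // via the streaming loop in ReadAt above.
    func readBlob(ctx context.Context, store content.Store, desc ocispec.Descriptor) ([]byte, error) {
        ra, err := store.ReaderAt(ctx, desc)
        if err != nil {
            return nil, err
        }
        defer ra.Close()
        return ioutil.ReadAll(content.NewReader(ra))
    }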
+func NewContentStore(client contentapi.ContentClient) content.Store { + return &proxyContentStore{ + client: client, + } +} + +func (pcs *proxyContentStore) Info(ctx context.Context, dgst digest.Digest) (content.Info, error) { + resp, err := pcs.client.Info(ctx, &contentapi.InfoRequest{ + Digest: dgst, + }) + if err != nil { + return content.Info{}, errdefs.FromGRPC(err) + } + + return infoFromGRPC(resp.Info), nil +} + +func (pcs *proxyContentStore) Walk(ctx context.Context, fn content.WalkFunc, filters ...string) error { + session, err := pcs.client.List(ctx, &contentapi.ListContentRequest{ + Filters: filters, + }) + if err != nil { + return errdefs.FromGRPC(err) + } + + for { + msg, err := session.Recv() + if err != nil { + if err != io.EOF { + return errdefs.FromGRPC(err) + } + + break + } + + for _, info := range msg.Info { + if err := fn(infoFromGRPC(info)); err != nil { + return err + } + } + } + + return nil +} + +func (pcs *proxyContentStore) Delete(ctx context.Context, dgst digest.Digest) error { + if _, err := pcs.client.Delete(ctx, &contentapi.DeleteContentRequest{ + Digest: dgst, + }); err != nil { + return errdefs.FromGRPC(err) + } + + return nil +} + +// ReaderAt ignores MediaType. +func (pcs *proxyContentStore) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) { + i, err := pcs.Info(ctx, desc.Digest) + if err != nil { + return nil, err + } + + return &remoteReaderAt{ + ctx: ctx, + digest: desc.Digest, + size: i.Size, + client: pcs.client, + }, nil +} + +func (pcs *proxyContentStore) Status(ctx context.Context, ref string) (content.Status, error) { + resp, err := pcs.client.Status(ctx, &contentapi.StatusRequest{ + Ref: ref, + }) + if err != nil { + return content.Status{}, errdefs.FromGRPC(err) + } + + status := resp.Status + return content.Status{ + Ref: status.Ref, + StartedAt: status.StartedAt, + UpdatedAt: status.UpdatedAt, + Offset: status.Offset, + Total: status.Total, + Expected: status.Expected, + }, nil +} + +func (pcs *proxyContentStore) Update(ctx context.Context, info content.Info, fieldpaths ...string) (content.Info, error) { + resp, err := pcs.client.Update(ctx, &contentapi.UpdateRequest{ + Info: infoToGRPC(info), + UpdateMask: &protobuftypes.FieldMask{ + Paths: fieldpaths, + }, + }) + if err != nil { + return content.Info{}, errdefs.FromGRPC(err) + } + return infoFromGRPC(resp.Info), nil +} + +func (pcs *proxyContentStore) ListStatuses(ctx context.Context, filters ...string) ([]content.Status, error) { + resp, err := pcs.client.ListStatuses(ctx, &contentapi.ListStatusesRequest{ + Filters: filters, + }) + if err != nil { + return nil, errdefs.FromGRPC(err) + } + + var statuses []content.Status + for _, status := range resp.Statuses { + statuses = append(statuses, content.Status{ + Ref: status.Ref, + StartedAt: status.StartedAt, + UpdatedAt: status.UpdatedAt, + Offset: status.Offset, + Total: status.Total, + Expected: status.Expected, + }) + } + + return statuses, nil +} + +// Writer ignores MediaType. +func (pcs *proxyContentStore) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) { + var wOpts content.WriterOpts + for _, opt := range opts { + if err := opt(&wOpts); err != nil { + return nil, err + } + } + wrclient, offset, err := pcs.negotiate(ctx, wOpts.Ref, wOpts.Desc.Size, wOpts.Desc.Digest) + if err != nil { + return nil, errdefs.FromGRPC(err) + } + + return &remoteWriter{ + ref: wOpts.Ref, + client: wrclient, + offset: offset, + }, nil +} + +// Abort implements asynchronous abort. 
+func (pcs *proxyContentStore) Abort(ctx context.Context, ref string) error {
+ if _, err := pcs.client.Abort(ctx, &contentapi.AbortRequest{
+ Ref: ref,
+ }); err != nil {
+ return errdefs.FromGRPC(err)
+ }
+
+ return nil
+}
+
+// negotiate starts a new write session on the ref, returning the stream
+// client and the offset at which the write can resume.
+func (pcs *proxyContentStore) negotiate(ctx context.Context, ref string, size int64, expected digest.Digest) (contentapi.Content_WriteClient, int64, error) {
+ wrclient, err := pcs.client.Write(ctx)
+ if err != nil {
+ return nil, 0, err
+ }
+
+ if err := wrclient.Send(&contentapi.WriteContentRequest{
+ Action: contentapi.WriteActionStat,
+ Ref: ref,
+ Total: size,
+ Expected: expected,
+ }); err != nil {
+ return nil, 0, err
+ }
+
+ resp, err := wrclient.Recv()
+ if err != nil {
+ return nil, 0, err
+ }
+
+ return wrclient, resp.Offset, nil
+}
+
+func infoToGRPC(info content.Info) contentapi.Info {
+ return contentapi.Info{
+ Digest: info.Digest,
+ Size_: info.Size,
+ CreatedAt: info.CreatedAt,
+ UpdatedAt: info.UpdatedAt,
+ Labels: info.Labels,
+ }
+}
+
+func infoFromGRPC(info contentapi.Info) content.Info {
+ return content.Info{
+ Digest: info.Digest,
+ Size: info.Size_,
+ CreatedAt: info.CreatedAt,
+ UpdatedAt: info.UpdatedAt,
+ Labels: info.Labels,
+ }
+}
diff --git a/vendor/github.com/containerd/containerd/content/proxy/content_writer.go b/vendor/github.com/containerd/containerd/content/proxy/content_writer.go
new file mode 100644
index 00000000..5434a156
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/content/proxy/content_writer.go
@@ -0,0 +1,139 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package proxy
+
+import (
+ "context"
+ "io"
+
+ contentapi "github.com/containerd/containerd/api/services/content/v1"
+ "github.com/containerd/containerd/content"
+ "github.com/containerd/containerd/errdefs"
+ digest "github.com/opencontainers/go-digest"
+ "github.com/pkg/errors"
+)
+
+type remoteWriter struct {
+ ref string
+ client contentapi.Content_WriteClient
+ offset int64
+ digest digest.Digest
+}
+
+// send performs a synchronous req-resp cycle on the client.
+func (rw *remoteWriter) send(req *contentapi.WriteContentRequest) (*contentapi.WriteContentResponse, error) { + if err := rw.client.Send(req); err != nil { + return nil, err + } + + resp, err := rw.client.Recv() + + if err == nil { + // try to keep these in sync + if resp.Digest != "" { + rw.digest = resp.Digest + } + } + + return resp, err +} + +func (rw *remoteWriter) Status() (content.Status, error) { + resp, err := rw.send(&contentapi.WriteContentRequest{ + Action: contentapi.WriteActionStat, + }) + if err != nil { + return content.Status{}, errors.Wrap(errdefs.FromGRPC(err), "error getting writer status") + } + + return content.Status{ + Ref: rw.ref, + Offset: resp.Offset, + Total: resp.Total, + StartedAt: resp.StartedAt, + UpdatedAt: resp.UpdatedAt, + }, nil +} + +func (rw *remoteWriter) Digest() digest.Digest { + return rw.digest +} + +func (rw *remoteWriter) Write(p []byte) (n int, err error) { + offset := rw.offset + + resp, err := rw.send(&contentapi.WriteContentRequest{ + Action: contentapi.WriteActionWrite, + Offset: offset, + Data: p, + }) + if err != nil { + return 0, errors.Wrap(errdefs.FromGRPC(err), "failed to send write") + } + + n = int(resp.Offset - offset) + if n < len(p) { + err = io.ErrShortWrite + } + + rw.offset += int64(n) + if resp.Digest != "" { + rw.digest = resp.Digest + } + return +} + +func (rw *remoteWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error { + var base content.Info + for _, opt := range opts { + if err := opt(&base); err != nil { + return err + } + } + resp, err := rw.send(&contentapi.WriteContentRequest{ + Action: contentapi.WriteActionCommit, + Total: size, + Offset: rw.offset, + Expected: expected, + Labels: base.Labels, + }) + if err != nil { + return errors.Wrap(errdefs.FromGRPC(err), "commit failed") + } + + if size != 0 && resp.Offset != size { + return errors.Errorf("unexpected size: %v != %v", resp.Offset, size) + } + + if expected != "" && resp.Digest != expected { + return errors.Errorf("unexpected digest: %v != %v", resp.Digest, expected) + } + + rw.digest = resp.Digest + rw.offset = resp.Offset + return nil +} + +func (rw *remoteWriter) Truncate(size int64) error { + // This truncation won't actually be validated until a write is issued. + rw.offset = size + return nil +} + +func (rw *remoteWriter) Close() error { + return rw.client.CloseSend() +} diff --git a/vendor/github.com/containerd/containerd/contrib/seccomp/seccomp.go b/vendor/github.com/containerd/containerd/contrib/seccomp/seccomp.go new file mode 100644 index 00000000..b7cf1765 --- /dev/null +++ b/vendor/github.com/containerd/containerd/contrib/seccomp/seccomp.go @@ -0,0 +1,54 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
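
The remote writer above negotiates its session with WriteActionStat and resumes from the server-reported offset. A hedged sketch of wiring it up end to end: dial a containerd endpoint, wrap the generated content client in the proxy store, and read back the resume offset for a partially uploaded ref. The socket address, dial options, and ref are placeholders; containerd's own client normally supplies its own dialer:

    package example

    import (
        "context"

        contentapi "github.com/containerd/containerd/api/services/content/v1"
        "github.com/containerd/containerd/content"
        "github.com/containerd/containerd/content/proxy"
        "google.golang.org/grpc"
    )

    // resumeOffset reports how many bytes the server already holds for ref,
    // so the caller can seek the source and continue a partial upload.
    func resumeOffset(ctx context.Context, ref string) (int64, error) {
        conn, err := grpc.DialContext(ctx, "unix:///run/containerd/containerd.sock", grpc.WithInsecure())
        if err != nil {
            return 0, err
        }
        store := proxy.NewContentStore(contentapi.NewContentClient(conn))

        cw, err := store.Writer(ctx, content.WithRef(ref)) // negotiates via WriteActionStat
        if err != nil {
            return 0, err
        }
        defer cw.Close()

        st, err := cw.Status()
        if err != nil {
            return 0, err
        }
        return st.Offset, nil // bytes already received by the server
    }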
+*/ + +package seccomp + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + + "github.com/containerd/containerd/containers" + "github.com/containerd/containerd/oci" + "github.com/opencontainers/runtime-spec/specs-go" +) + +// WithProfile receives the name of a file stored on disk comprising a json +// formatted seccomp profile, as specified by the opencontainers/runtime-spec. +// The profile is read from the file, unmarshaled, and set to the spec. +func WithProfile(profile string) oci.SpecOpts { + return func(_ context.Context, _ oci.Client, _ *containers.Container, s *specs.Spec) error { + s.Linux.Seccomp = &specs.LinuxSeccomp{} + f, err := ioutil.ReadFile(profile) + if err != nil { + return fmt.Errorf("cannot load seccomp profile %q: %v", profile, err) + } + if err := json.Unmarshal(f, s.Linux.Seccomp); err != nil { + return fmt.Errorf("decoding seccomp profile failed %q: %v", profile, err) + } + return nil + } +} + +// WithDefaultProfile sets the default seccomp profile to the spec. +// Note: must follow the setting of process capabilities +func WithDefaultProfile() oci.SpecOpts { + return func(_ context.Context, _ oci.Client, _ *containers.Container, s *specs.Spec) error { + s.Linux.Seccomp = DefaultProfile(s) + return nil + } +} diff --git a/vendor/github.com/containerd/containerd/contrib/seccomp/seccomp_default.go b/vendor/github.com/containerd/containerd/contrib/seccomp/seccomp_default.go new file mode 100644 index 00000000..af40395d --- /dev/null +++ b/vendor/github.com/containerd/containerd/contrib/seccomp/seccomp_default.go @@ -0,0 +1,585 @@ +// +build linux + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package seccomp + +import ( + "runtime" + + "golang.org/x/sys/unix" + + "github.com/opencontainers/runtime-spec/specs-go" +) + +func arches() []specs.Arch { + switch runtime.GOARCH { + case "amd64": + return []specs.Arch{specs.ArchX86_64, specs.ArchX86, specs.ArchX32} + case "arm64": + return []specs.Arch{specs.ArchARM, specs.ArchAARCH64} + case "mips64": + return []specs.Arch{specs.ArchMIPS, specs.ArchMIPS64, specs.ArchMIPS64N32} + case "mips64n32": + return []specs.Arch{specs.ArchMIPS, specs.ArchMIPS64, specs.ArchMIPS64N32} + case "mipsel64": + return []specs.Arch{specs.ArchMIPSEL, specs.ArchMIPSEL64, specs.ArchMIPSEL64N32} + case "mipsel64n32": + return []specs.Arch{specs.ArchMIPSEL, specs.ArchMIPSEL64, specs.ArchMIPSEL64N32} + case "s390x": + return []specs.Arch{specs.ArchS390, specs.ArchS390X} + default: + return []specs.Arch{} + } +} + +// DefaultProfile defines the whitelist for the default seccomp profile. 
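
Because WithDefaultProfile consults the bounding capability set already present in the spec, it must run after the option that populates capabilities. A sketch of typical usage through the containerd client; the container and snapshot IDs are illustrative:

    package example

    import (
        "context"

        "github.com/containerd/containerd"
        "github.com/containerd/containerd/contrib/seccomp"
        "github.com/containerd/containerd/oci"
    )

    // newSandboxedContainer builds a container whose spec carries the default
    // seccomp profile.
    func newSandboxedContainer(ctx context.Context, client *containerd.Client, image containerd.Image) (containerd.Container, error) {
        return client.NewContainer(ctx, "example",
            containerd.WithNewSnapshot("example-snapshot", image),
            containerd.WithNewSpec(
                oci.WithImageConfig(image),   // populates process capabilities first,
                seccomp.WithDefaultProfile(), // so the profile sees the final bounding set
            ),
        )
    }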
+func DefaultProfile(sp *specs.Spec) *specs.LinuxSeccomp { + syscalls := []specs.LinuxSyscall{ + { + Names: []string{ + "accept", + "accept4", + "access", + "alarm", + "alarm", + "bind", + "brk", + "capget", + "capset", + "chdir", + "chmod", + "chown", + "chown32", + "clock_getres", + "clock_gettime", + "clock_nanosleep", + "close", + "connect", + "copy_file_range", + "creat", + "dup", + "dup2", + "dup3", + "epoll_create", + "epoll_create1", + "epoll_ctl", + "epoll_ctl_old", + "epoll_pwait", + "epoll_wait", + "epoll_wait_old", + "eventfd", + "eventfd2", + "execve", + "execveat", + "exit", + "exit_group", + "faccessat", + "fadvise64", + "fadvise64_64", + "fallocate", + "fanotify_mark", + "fchdir", + "fchmod", + "fchmodat", + "fchown", + "fchown32", + "fchownat", + "fcntl", + "fcntl64", + "fdatasync", + "fgetxattr", + "flistxattr", + "flock", + "fork", + "fremovexattr", + "fsetxattr", + "fstat", + "fstat64", + "fstatat64", + "fstatfs", + "fstatfs64", + "fsync", + "ftruncate", + "ftruncate64", + "futex", + "futimesat", + "getcpu", + "getcwd", + "getdents", + "getdents64", + "getegid", + "getegid32", + "geteuid", + "geteuid32", + "getgid", + "getgid32", + "getgroups", + "getgroups32", + "getitimer", + "getpeername", + "getpgid", + "getpgrp", + "getpid", + "getppid", + "getpriority", + "getrandom", + "getresgid", + "getresgid32", + "getresuid", + "getresuid32", + "getrlimit", + "get_robust_list", + "getrusage", + "getsid", + "getsockname", + "getsockopt", + "get_thread_area", + "gettid", + "gettimeofday", + "getuid", + "getuid32", + "getxattr", + "inotify_add_watch", + "inotify_init", + "inotify_init1", + "inotify_rm_watch", + "io_cancel", + "ioctl", + "io_destroy", + "io_getevents", + "io_pgetevents", + "ioprio_get", + "ioprio_set", + "io_setup", + "io_submit", + "ipc", + "kill", + "lchown", + "lchown32", + "lgetxattr", + "link", + "linkat", + "listen", + "listxattr", + "llistxattr", + "_llseek", + "lremovexattr", + "lseek", + "lsetxattr", + "lstat", + "lstat64", + "madvise", + "memfd_create", + "mincore", + "mkdir", + "mkdirat", + "mknod", + "mknodat", + "mlock", + "mlock2", + "mlockall", + "mmap", + "mmap2", + "mprotect", + "mq_getsetattr", + "mq_notify", + "mq_open", + "mq_timedreceive", + "mq_timedsend", + "mq_unlink", + "mremap", + "msgctl", + "msgget", + "msgrcv", + "msgsnd", + "msync", + "munlock", + "munlockall", + "munmap", + "nanosleep", + "newfstatat", + "_newselect", + "open", + "openat", + "pause", + "pipe", + "pipe2", + "poll", + "ppoll", + "prctl", + "pread64", + "preadv", + "prlimit64", + "pselect6", + "pwrite64", + "pwritev", + "read", + "readahead", + "readlink", + "readlinkat", + "readv", + "recv", + "recvfrom", + "recvmmsg", + "recvmsg", + "remap_file_pages", + "removexattr", + "rename", + "renameat", + "renameat2", + "restart_syscall", + "rmdir", + "rt_sigaction", + "rt_sigpending", + "rt_sigprocmask", + "rt_sigqueueinfo", + "rt_sigreturn", + "rt_sigsuspend", + "rt_sigtimedwait", + "rt_tgsigqueueinfo", + "sched_getaffinity", + "sched_getattr", + "sched_getparam", + "sched_get_priority_max", + "sched_get_priority_min", + "sched_getscheduler", + "sched_rr_get_interval", + "sched_setaffinity", + "sched_setattr", + "sched_setparam", + "sched_setscheduler", + "sched_yield", + "seccomp", + "select", + "semctl", + "semget", + "semop", + "semtimedop", + "send", + "sendfile", + "sendfile64", + "sendmmsg", + "sendmsg", + "sendto", + "setfsgid", + "setfsgid32", + "setfsuid", + "setfsuid32", + "setgid", + "setgid32", + "setgroups", + "setgroups32", + "setitimer", + "setpgid", + 
"setpriority", + "setregid", + "setregid32", + "setresgid", + "setresgid32", + "setresuid", + "setresuid32", + "setreuid", + "setreuid32", + "setrlimit", + "set_robust_list", + "setsid", + "setsockopt", + "set_thread_area", + "set_tid_address", + "setuid", + "setuid32", + "setxattr", + "shmat", + "shmctl", + "shmdt", + "shmget", + "shutdown", + "sigaltstack", + "signalfd", + "signalfd4", + "sigprocmask", + "sigreturn", + "socket", + "socketcall", + "socketpair", + "splice", + "stat", + "stat64", + "statfs", + "statfs64", + "statx", + "symlink", + "symlinkat", + "sync", + "sync_file_range", + "syncfs", + "sysinfo", + "syslog", + "tee", + "tgkill", + "time", + "timer_create", + "timer_delete", + "timerfd_create", + "timerfd_gettime", + "timerfd_settime", + "timer_getoverrun", + "timer_gettime", + "timer_settime", + "times", + "tkill", + "truncate", + "truncate64", + "ugetrlimit", + "umask", + "uname", + "unlink", + "unlinkat", + "utime", + "utimensat", + "utimes", + "vfork", + "vmsplice", + "wait4", + "waitid", + "waitpid", + "write", + "writev", + }, + Action: specs.ActAllow, + Args: []specs.LinuxSeccompArg{}, + }, + { + Names: []string{"personality"}, + Action: specs.ActAllow, + Args: []specs.LinuxSeccompArg{ + { + Index: 0, + Value: 0x0, + Op: specs.OpEqualTo, + }, + }, + }, + { + Names: []string{"personality"}, + Action: specs.ActAllow, + Args: []specs.LinuxSeccompArg{ + { + Index: 0, + Value: 0x0008, + Op: specs.OpEqualTo, + }, + }, + }, + { + Names: []string{"personality"}, + Action: specs.ActAllow, + Args: []specs.LinuxSeccompArg{ + { + Index: 0, + Value: 0xffffffff, + Op: specs.OpEqualTo, + }, + }, + }, + } + + s := &specs.LinuxSeccomp{ + DefaultAction: specs.ActErrno, + Architectures: arches(), + Syscalls: syscalls, + } + + // include by arch + switch runtime.GOARCH { + case "arm", "arm64": + s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{ + Names: []string{ + "arm_fadvise64_64", + "arm_sync_file_range", + "breakpoint", + "cacheflush", + "set_tls", + }, + Action: specs.ActAllow, + Args: []specs.LinuxSeccompArg{}, + }) + case "amd64": + s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{ + Names: []string{ + "arch_prctl", + "modify_ldt", + }, + Action: specs.ActAllow, + Args: []specs.LinuxSeccompArg{}, + }) + case "386": + s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{ + Names: []string{ + "modify_ldt", + }, + Action: specs.ActAllow, + Args: []specs.LinuxSeccompArg{}, + }) + case "s390", "s390x": + s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{ + Names: []string{ + "s390_pci_mmio_read", + "s390_pci_mmio_write", + "s390_runtime_instr", + }, + Action: specs.ActAllow, + Args: []specs.LinuxSeccompArg{}, + }) + } + + admin := false + for _, c := range sp.Process.Capabilities.Bounding { + switch c { + case "CAP_DAC_READ_SEARCH": + s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{ + Names: []string{"open_by_handle_at"}, + Action: specs.ActAllow, + Args: []specs.LinuxSeccompArg{}, + }) + case "CAP_SYS_ADMIN": + admin = true + s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{ + Names: []string{ + "bpf", + "clone", + "fanotify_init", + "lookup_dcookie", + "mount", + "name_to_handle_at", + "perf_event_open", + "setdomainname", + "sethostname", + "setns", + "umount", + "umount2", + "unshare", + }, + Action: specs.ActAllow, + Args: []specs.LinuxSeccompArg{}, + }) + case "CAP_SYS_BOOT": + s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{ + Names: []string{"reboot"}, + Action: specs.ActAllow, + Args: []specs.LinuxSeccompArg{}, + }) + case "CAP_SYS_CHROOT": + s.Syscalls = 
append(s.Syscalls, specs.LinuxSyscall{ + Names: []string{"chroot"}, + Action: specs.ActAllow, + Args: []specs.LinuxSeccompArg{}, + }) + case "CAP_SYS_MODULE": + s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{ + Names: []string{ + "delete_module", + "init_module", + "finit_module", + "query_module", + }, + Action: specs.ActAllow, + Args: []specs.LinuxSeccompArg{}, + }) + case "CAP_SYS_PACCT": + s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{ + Names: []string{"acct"}, + Action: specs.ActAllow, + Args: []specs.LinuxSeccompArg{}, + }) + case "CAP_SYS_PTRACE": + s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{ + Names: []string{ + "kcmp", + "process_vm_readv", + "process_vm_writev", + "ptrace", + }, + Action: specs.ActAllow, + Args: []specs.LinuxSeccompArg{}, + }) + case "CAP_SYS_RAWIO": + s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{ + Names: []string{ + "iopl", + "ioperm", + }, + Action: specs.ActAllow, + Args: []specs.LinuxSeccompArg{}, + }) + case "CAP_SYS_TIME": + s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{ + Names: []string{ + "settimeofday", + "stime", + "adjtimex", + }, + Action: specs.ActAllow, + Args: []specs.LinuxSeccompArg{}, + }) + case "CAP_SYS_TTY_CONFIG": + s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{ + Names: []string{"vhangup"}, + Action: specs.ActAllow, + Args: []specs.LinuxSeccompArg{}, + }) + } + } + + if !admin { + switch runtime.GOARCH { + case "s390", "s390x": + s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{ + Names: []string{ + "clone", + }, + Action: specs.ActAllow, + Args: []specs.LinuxSeccompArg{ + { + Index: 1, + Value: unix.CLONE_NEWNS | unix.CLONE_NEWUTS | unix.CLONE_NEWIPC | unix.CLONE_NEWUSER | unix.CLONE_NEWPID | unix.CLONE_NEWNET | unix.CLONE_NEWCGROUP, + ValueTwo: 0, + Op: specs.OpMaskedEqual, + }, + }, + }) + default: + s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{ + Names: []string{ + "clone", + }, + Action: specs.ActAllow, + Args: []specs.LinuxSeccompArg{ + { + Index: 0, + Value: unix.CLONE_NEWNS | unix.CLONE_NEWUTS | unix.CLONE_NEWIPC | unix.CLONE_NEWUSER | unix.CLONE_NEWPID | unix.CLONE_NEWNET | unix.CLONE_NEWCGROUP, + ValueTwo: 0, + Op: specs.OpMaskedEqual, + }, + }, + }) + } + } + + return s +} diff --git a/vendor/github.com/containerd/containerd/contrib/seccomp/seccomp_default_unsupported.go b/vendor/github.com/containerd/containerd/contrib/seccomp/seccomp_default_unsupported.go new file mode 100644 index 00000000..14d7b75e --- /dev/null +++ b/vendor/github.com/containerd/containerd/contrib/seccomp/seccomp_default_unsupported.go @@ -0,0 +1,26 @@ +// +build !linux + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package seccomp + +import specs "github.com/opencontainers/runtime-spec/specs-go" + +// DefaultProfile defines the whitelist for the default seccomp profile. 
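
The clone rules appended above when CAP_SYS_ADMIN is absent use OpMaskedEqual: the kernel masks the flags argument with Value and compares the result to ValueTwo (zero), so clone is allowed only when no namespace-creating flag is set. A standalone illustration of that predicate with the same flag mask; the function and sample inputs are invented for the demonstration:

    package main

    import (
        "fmt"

        "golang.org/x/sys/unix"
    )

    // allowedClone mirrors the OpMaskedEqual rule: the kernel computes
    // flags & mask and compares it to ValueTwo (0). Any namespace-creating
    // flag therefore falls through to the profile's default action, ActErrno.
    func allowedClone(flags uint64) bool {
        const nsFlags = unix.CLONE_NEWNS | unix.CLONE_NEWUTS | unix.CLONE_NEWIPC |
            unix.CLONE_NEWUSER | unix.CLONE_NEWPID | unix.CLONE_NEWNET | unix.CLONE_NEWCGROUP
        return flags&nsFlags == 0
    }

    func main() {
        fmt.Println(allowedClone(unix.CLONE_VM))      // true: plain thread creation
        fmt.Println(allowedClone(unix.CLONE_NEWUSER)) // false: requests a user namespace
    }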
+func DefaultProfile(sp *specs.Spec) *specs.LinuxSeccomp { + return &specs.LinuxSeccomp{} +} diff --git a/vendor/github.com/containerd/containerd/diff/apply/apply.go b/vendor/github.com/containerd/containerd/diff/apply/apply.go new file mode 100644 index 00000000..ce89daaf --- /dev/null +++ b/vendor/github.com/containerd/containerd/diff/apply/apply.go @@ -0,0 +1,131 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package apply + +import ( + "context" + "io" + "io/ioutil" + "time" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/diff" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/mount" + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// NewFileSystemApplier returns an applier which simply mounts +// and applies diff onto the mounted filesystem. +func NewFileSystemApplier(cs content.Provider) diff.Applier { + return &fsApplier{ + store: cs, + } +} + +type fsApplier struct { + store content.Provider +} + +var emptyDesc = ocispec.Descriptor{} + +// Apply applies the content associated with the provided digests onto the +// provided mounts. Archive content will be extracted and decompressed if +// necessary. 
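
A short sketch of driving this applier, assuming the content.Provider, descriptor, and mounts of a prepared snapshot come from elsewhere; the applier picks a decompression path from desc.MediaType via the stream-processor chain that appears later in this patch:

    package example

    import (
        "context"

        "github.com/containerd/containerd/content"
        "github.com/containerd/containerd/diff/apply"
        "github.com/containerd/containerd/mount"
        ocispec "github.com/opencontainers/image-spec/specs-go/v1"
    )

    // applyLayer unpacks a single layer blob onto the mounts of a prepared
    // snapshot and returns the uncompressed (DiffID) descriptor.
    func applyLayer(ctx context.Context, cs content.Provider, desc ocispec.Descriptor, mounts []mount.Mount) (ocispec.Descriptor, error) {
        applier := apply.NewFileSystemApplier(cs)
        return applier.Apply(ctx, desc, mounts)
    }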
+func (s *fsApplier) Apply(ctx context.Context, desc ocispec.Descriptor, mounts []mount.Mount, opts ...diff.ApplyOpt) (d ocispec.Descriptor, err error) { + t1 := time.Now() + defer func() { + if err == nil { + log.G(ctx).WithFields(logrus.Fields{ + "d": time.Since(t1), + "dgst": desc.Digest, + "size": desc.Size, + "media": desc.MediaType, + }).Debugf("diff applied") + } + }() + + var config diff.ApplyConfig + for _, o := range opts { + if err := o(ctx, desc, &config); err != nil { + return emptyDesc, errors.Wrap(err, "failed to apply config opt") + } + } + + ra, err := s.store.ReaderAt(ctx, desc) + if err != nil { + return emptyDesc, errors.Wrap(err, "failed to get reader from content store") + } + defer ra.Close() + + var processors []diff.StreamProcessor + processor := diff.NewProcessorChain(desc.MediaType, content.NewReader(ra)) + processors = append(processors, processor) + for { + if processor, err = diff.GetProcessor(ctx, processor, config.ProcessorPayloads); err != nil { + return emptyDesc, errors.Wrapf(err, "failed to get stream processor for %s", desc.MediaType) + } + processors = append(processors, processor) + if processor.MediaType() == ocispec.MediaTypeImageLayer { + break + } + } + defer processor.Close() + + digester := digest.Canonical.Digester() + rc := &readCounter{ + r: io.TeeReader(processor, digester.Hash()), + } + + if err := apply(ctx, mounts, rc); err != nil { + return emptyDesc, err + } + + // Read any trailing data + if _, err := io.Copy(ioutil.Discard, rc); err != nil { + return emptyDesc, err + } + + for _, p := range processors { + if ep, ok := p.(interface { + Err() error + }); ok { + if err := ep.Err(); err != nil { + return emptyDesc, err + } + } + } + return ocispec.Descriptor{ + MediaType: ocispec.MediaTypeImageLayer, + Size: rc.c, + Digest: digester.Digest(), + }, nil +} + +type readCounter struct { + r io.Reader + c int64 +} + +func (rc *readCounter) Read(p []byte) (n int, err error) { + n, err = rc.r.Read(p) + rc.c += int64(n) + return +} diff --git a/vendor/github.com/containerd/containerd/diff/apply/apply_linux.go b/vendor/github.com/containerd/containerd/diff/apply/apply_linux.go new file mode 100644 index 00000000..bbe9c17d --- /dev/null +++ b/vendor/github.com/containerd/containerd/diff/apply/apply_linux.go @@ -0,0 +1,134 @@ +// +build linux + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package apply + +import ( + "context" + "io" + "strings" + + "github.com/containerd/containerd/archive" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/mount" + "github.com/opencontainers/runc/libcontainer/system" + "github.com/pkg/errors" +) + +func apply(ctx context.Context, mounts []mount.Mount, r io.Reader) error { + switch { + case len(mounts) == 1 && mounts[0].Type == "overlay": + // OverlayConvertWhiteout (mknod c 0 0) doesn't work in userns. 
+ // https://github.com/containerd/containerd/issues/3762 + if system.RunningInUserNS() { + break + } + path, parents, err := getOverlayPath(mounts[0].Options) + if err != nil { + if errdefs.IsInvalidArgument(err) { + break + } + return err + } + opts := []archive.ApplyOpt{ + archive.WithConvertWhiteout(archive.OverlayConvertWhiteout), + } + if len(parents) > 0 { + opts = append(opts, archive.WithParents(parents)) + } + _, err = archive.Apply(ctx, path, r, opts...) + return err + case len(mounts) == 1 && mounts[0].Type == "aufs": + path, parents, err := getAufsPath(mounts[0].Options) + if err != nil { + if errdefs.IsInvalidArgument(err) { + break + } + return err + } + opts := []archive.ApplyOpt{ + archive.WithConvertWhiteout(archive.AufsConvertWhiteout), + } + if len(parents) > 0 { + opts = append(opts, archive.WithParents(parents)) + } + _, err = archive.Apply(ctx, path, r, opts...) + return err + } + return mount.WithTempMount(ctx, mounts, func(root string) error { + _, err := archive.Apply(ctx, root, r) + return err + }) +} + +func getOverlayPath(options []string) (upper string, lower []string, err error) { + const upperdirPrefix = "upperdir=" + const lowerdirPrefix = "lowerdir=" + + for _, o := range options { + if strings.HasPrefix(o, upperdirPrefix) { + upper = strings.TrimPrefix(o, upperdirPrefix) + } else if strings.HasPrefix(o, lowerdirPrefix) { + lower = strings.Split(strings.TrimPrefix(o, lowerdirPrefix), ":") + } + } + if upper == "" { + return "", nil, errors.Wrap(errdefs.ErrInvalidArgument, "upperdir not found") + } + + return +} + +// getAufsPath handles options as given by the containerd aufs package only, +// formatted as "br:=rw[:=ro+wh]*" +func getAufsPath(options []string) (upper string, lower []string, err error) { + const ( + sep = ":" + brPrefix = "br:" + rwSuffix = "=rw" + roSuffix = "=ro+wh" + ) + for _, o := range options { + if strings.HasPrefix(o, brPrefix) { + o = strings.TrimPrefix(o, brPrefix) + } else { + continue + } + + for _, b := range strings.Split(o, sep) { + if strings.HasSuffix(b, rwSuffix) { + if upper != "" { + return "", nil, errors.Wrap(errdefs.ErrInvalidArgument, "multiple rw branch found") + } + upper = strings.TrimSuffix(b, rwSuffix) + } else if strings.HasSuffix(b, roSuffix) { + if upper == "" { + return "", nil, errors.Wrap(errdefs.ErrInvalidArgument, "rw branch be first") + } + lower = append(lower, strings.TrimSuffix(b, roSuffix)) + } else { + return "", nil, errors.Wrap(errdefs.ErrInvalidArgument, "unhandled aufs suffix") + } + + } + } + if upper == "" { + return "", nil, errors.Wrap(errdefs.ErrInvalidArgument, "rw branch not found") + } + return +} diff --git a/vendor/github.com/containerd/containerd/diff/apply/apply_other.go b/vendor/github.com/containerd/containerd/diff/apply/apply_other.go new file mode 100644 index 00000000..01e0f11b --- /dev/null +++ b/vendor/github.com/containerd/containerd/diff/apply/apply_other.go @@ -0,0 +1,34 @@ +// +build !linux + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
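
getOverlayPath above simply scans the overlay mount options for the upperdir= and lowerdir= entries. A self-contained illustration of the expected parse, re-implemented here because the real function is unexported; the paths are made up:

    package main

    import (
        "fmt"
        "strings"
    )

    // parseOverlay mirrors getOverlayPath: the last upperdir= entry wins, and
    // lowerdir= is a colon-separated list ordered from uppermost to lowest.
    func parseOverlay(options []string) (upper string, lower []string) {
        for _, o := range options {
            if strings.HasPrefix(o, "upperdir=") {
                upper = strings.TrimPrefix(o, "upperdir=")
            } else if strings.HasPrefix(o, "lowerdir=") {
                lower = strings.Split(strings.TrimPrefix(o, "lowerdir="), ":")
            }
        }
        return upper, lower
    }

    func main() {
        up, low := parseOverlay([]string{"index=off", "lowerdir=/l2:/l1", "upperdir=/u", "workdir=/w"})
        fmt.Println(up, low) // /u [/l2 /l1]
    }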
+*/ + +package apply + +import ( + "context" + "io" + + "github.com/containerd/containerd/archive" + "github.com/containerd/containerd/mount" +) + +func apply(ctx context.Context, mounts []mount.Mount, r io.Reader) error { + return mount.WithTempMount(ctx, mounts, func(root string) error { + _, err := archive.Apply(ctx, root, r) + return err + }) +} diff --git a/vendor/github.com/containerd/containerd/diff/diff.go b/vendor/github.com/containerd/containerd/diff/diff.go new file mode 100644 index 00000000..17aab616 --- /dev/null +++ b/vendor/github.com/containerd/containerd/diff/diff.go @@ -0,0 +1,107 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package diff + +import ( + "context" + + "github.com/containerd/containerd/mount" + "github.com/gogo/protobuf/types" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// Config is used to hold parameters needed for a diff operation +type Config struct { + // MediaType is the type of diff to generate + // Default depends on the differ, + // i.e. application/vnd.oci.image.layer.v1.tar+gzip + MediaType string + + // Reference is the content upload reference + // Default will use a random reference string + Reference string + + // Labels are the labels to apply to the generated content + Labels map[string]string +} + +// Opt is used to configure a diff operation +type Opt func(*Config) error + +// Comparer allows creation of filesystem diffs between mounts +type Comparer interface { + // Compare computes the difference between two mounts and returns a + // descriptor for the computed diff. The options can provide + // a ref which can be used to track the content creation of the diff. + // The media type which is used to determine the format of the created + // content can also be provided as an option. + Compare(ctx context.Context, lower, upper []mount.Mount, opts ...Opt) (ocispec.Descriptor, error) +} + +// ApplyConfig is used to hold parameters needed for a apply operation +type ApplyConfig struct { + // ProcessorPayloads specifies the payload sent to various processors + ProcessorPayloads map[string]*types.Any +} + +// ApplyOpt is used to configure an Apply operation +type ApplyOpt func(context.Context, ocispec.Descriptor, *ApplyConfig) error + +// Applier allows applying diffs between mounts +type Applier interface { + // Apply applies the content referred to by the given descriptor to + // the provided mount. The method of applying is based on the + // implementation and content descriptor. For example, in the common + // case the descriptor is a file system difference in tar format, + // that tar would be applied on top of the mounts. + Apply(ctx context.Context, desc ocispec.Descriptor, mount []mount.Mount, opts ...ApplyOpt) (ocispec.Descriptor, error) +} + +// WithMediaType sets the media type to use for creating the diff, without +// specifying the differ will choose a default. 
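
Config and Opt follow the usual functional-options pattern. A sketch of a Compare call combining the helpers defined below; the Comparer and mount sets are assumed to come from a snapshotter elsewhere, and the upload ref and label key are illustrative:

    package example

    import (
        "context"

        "github.com/containerd/containerd/diff"
        "github.com/containerd/containerd/mount"
        ocispec "github.com/opencontainers/image-spec/specs-go/v1"
    )

    // snapshotDiff captures the changes between two mount sets as a gzipped
    // OCI layer, tagging the resulting content.
    func snapshotDiff(ctx context.Context, cmp diff.Comparer, lower, upper []mount.Mount) (ocispec.Descriptor, error) {
        return cmp.Compare(ctx, lower, upper,
            diff.WithMediaType(ocispec.MediaTypeImageLayerGzip),
            diff.WithReference("example-diff-upload"),
            diff.WithLabels(map[string]string{"example.org/source": "snapshot"}),
        )
    }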
+func WithMediaType(m string) Opt { + return func(c *Config) error { + c.MediaType = m + return nil + } +} + +// WithReference is used to set the content upload reference used by +// the diff operation. This allows the caller to track the upload through +// the content store. +func WithReference(ref string) Opt { + return func(c *Config) error { + c.Reference = ref + return nil + } +} + +// WithLabels is used to set content labels on the created diff content. +func WithLabels(labels map[string]string) Opt { + return func(c *Config) error { + c.Labels = labels + return nil + } +} + +// WithPayloads sets the apply processor payloads to the config +func WithPayloads(payloads map[string]*types.Any) ApplyOpt { + return func(_ context.Context, _ ocispec.Descriptor, c *ApplyConfig) error { + c.ProcessorPayloads = payloads + return nil + } +} diff --git a/vendor/github.com/containerd/containerd/diff/stream.go b/vendor/github.com/containerd/containerd/diff/stream.go new file mode 100644 index 00000000..1b625fea --- /dev/null +++ b/vendor/github.com/containerd/containerd/diff/stream.go @@ -0,0 +1,187 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package diff + +import ( + "context" + "io" + "os" + + "github.com/containerd/containerd/archive/compression" + "github.com/containerd/containerd/images" + "github.com/gogo/protobuf/types" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +var ( + handlers []Handler + + // ErrNoProcessor is returned when no stream processor is available for a media-type + ErrNoProcessor = errors.New("no processor for media-type") +) + +func init() { + // register the default compression handler + RegisterProcessor(compressedHandler) +} + +// RegisterProcessor registers a stream processor for media-types +func RegisterProcessor(handler Handler) { + handlers = append(handlers, handler) +} + +// GetProcessor returns the processor for a media-type +func GetProcessor(ctx context.Context, stream StreamProcessor, payloads map[string]*types.Any) (StreamProcessor, error) { + // reverse this list so that user configured handlers come up first + for i := len(handlers) - 1; i >= 0; i-- { + processor, ok := handlers[i](ctx, stream.MediaType()) + if ok { + return processor(ctx, stream, payloads) + } + } + return nil, ErrNoProcessor +} + +// Handler checks a media-type and initializes the processor +type Handler func(ctx context.Context, mediaType string) (StreamProcessorInit, bool) + +// StaticHandler returns the processor init func for a static media-type +func StaticHandler(expectedMediaType string, fn StreamProcessorInit) Handler { + return func(ctx context.Context, mediaType string) (StreamProcessorInit, bool) { + if mediaType == expectedMediaType { + return fn, true + } + return nil, false + } +} + +// StreamProcessorInit returns the initialized stream processor +type StreamProcessorInit func(ctx context.Context, stream StreamProcessor, payloads map[string]*types.Any) (StreamProcessor, error) + +// 
RawProcessor provides access to direct fd for processing +type RawProcessor interface { + // File returns the fd for the read stream of the underlying processor + File() *os.File +} + +// StreamProcessor handles processing a content stream and transforming it into a different media-type +type StreamProcessor interface { + io.ReadCloser + + // MediaType is the resulting media-type that the processor processes the stream into + MediaType() string +} + +func compressedHandler(ctx context.Context, mediaType string) (StreamProcessorInit, bool) { + compressed, err := images.DiffCompression(ctx, mediaType) + if err != nil { + return nil, false + } + if compressed != "" { + return func(ctx context.Context, stream StreamProcessor, payloads map[string]*types.Any) (StreamProcessor, error) { + ds, err := compression.DecompressStream(stream) + if err != nil { + return nil, err + } + + return &compressedProcessor{ + rc: ds, + }, nil + }, true + } + return func(ctx context.Context, stream StreamProcessor, payloads map[string]*types.Any) (StreamProcessor, error) { + return &stdProcessor{ + rc: stream, + }, nil + }, true +} + +// NewProcessorChain initialized the root StreamProcessor +func NewProcessorChain(mt string, r io.Reader) StreamProcessor { + return &processorChain{ + mt: mt, + rc: r, + } +} + +type processorChain struct { + mt string + rc io.Reader +} + +func (c *processorChain) MediaType() string { + return c.mt +} + +func (c *processorChain) Read(p []byte) (int, error) { + return c.rc.Read(p) +} + +func (c *processorChain) Close() error { + return nil +} + +type stdProcessor struct { + rc StreamProcessor +} + +func (c *stdProcessor) MediaType() string { + return ocispec.MediaTypeImageLayer +} + +func (c *stdProcessor) Read(p []byte) (int, error) { + return c.rc.Read(p) +} + +func (c *stdProcessor) Close() error { + return nil +} + +type compressedProcessor struct { + rc io.ReadCloser +} + +func (c *compressedProcessor) MediaType() string { + return ocispec.MediaTypeImageLayer +} + +func (c *compressedProcessor) Read(p []byte) (int, error) { + return c.rc.Read(p) +} + +func (c *compressedProcessor) Close() error { + return c.rc.Close() +} + +func BinaryHandler(id, returnsMediaType string, mediaTypes []string, path string, args []string) Handler { + set := make(map[string]struct{}, len(mediaTypes)) + for _, m := range mediaTypes { + set[m] = struct{}{} + } + return func(_ context.Context, mediaType string) (StreamProcessorInit, bool) { + if _, ok := set[mediaType]; ok { + return func(ctx context.Context, stream StreamProcessor, payloads map[string]*types.Any) (StreamProcessor, error) { + payload := payloads[id] + return NewBinaryProcessor(ctx, mediaType, returnsMediaType, stream, path, args, payload) + }, true + } + return nil, false + } +} + +const mediaTypeEnvVar = "STREAM_PROCESSOR_MEDIATYPE" diff --git a/vendor/github.com/containerd/containerd/diff/stream_unix.go b/vendor/github.com/containerd/containerd/diff/stream_unix.go new file mode 100644 index 00000000..28f38d99 --- /dev/null +++ b/vendor/github.com/containerd/containerd/diff/stream_unix.go @@ -0,0 +1,146 @@ +// +build !windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
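
GetProcessor walks handlers in reverse registration order, so a handler registered by user code takes precedence over the built-in compressedHandler. A sketch registering a pass-through processor for a hypothetical media type (the media type string is invented):

    package example

    import (
        "context"

        "github.com/containerd/containerd/diff"
        "github.com/gogo/protobuf/types"
        ocispec "github.com/opencontainers/image-spec/specs-go/v1"
    )

    func init() {
        // Registered after the built-ins, so GetProcessor consults it first.
        diff.RegisterProcessor(diff.StaticHandler(
            "application/vnd.example.layer.v1.tar", // hypothetical media type
            func(ctx context.Context, stream diff.StreamProcessor, payloads map[string]*types.Any) (diff.StreamProcessor, error) {
                return passThrough{stream}, nil
            },
        ))
    }

    // passThrough relabels the stream as a plain tar layer without changing
    // it; a real processor would transform the bytes as it reads.
    type passThrough struct {
        diff.StreamProcessor
    }

    func (passThrough) MediaType() string { return ocispec.MediaTypeImageLayer }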
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package diff + +import ( + "bytes" + "context" + "fmt" + "io" + "os" + "os/exec" + "sync" + + "github.com/gogo/protobuf/proto" + "github.com/gogo/protobuf/types" + "github.com/pkg/errors" +) + +// NewBinaryProcessor returns a binary processor for use with processing content streams +func NewBinaryProcessor(ctx context.Context, imt, rmt string, stream StreamProcessor, name string, args []string, payload *types.Any) (StreamProcessor, error) { + cmd := exec.CommandContext(ctx, name, args...) + cmd.Env = os.Environ() + + var payloadC io.Closer + if payload != nil { + data, err := proto.Marshal(payload) + if err != nil { + return nil, err + } + r, w, err := os.Pipe() + if err != nil { + return nil, err + } + go func() { + io.Copy(w, bytes.NewReader(data)) + w.Close() + }() + + cmd.ExtraFiles = append(cmd.ExtraFiles, r) + payloadC = r + } + cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", mediaTypeEnvVar, imt)) + var ( + stdin io.Reader + closer func() error + err error + ) + if f, ok := stream.(RawProcessor); ok { + stdin = f.File() + closer = f.File().Close + } else { + stdin = stream + } + cmd.Stdin = stdin + r, w, err := os.Pipe() + if err != nil { + return nil, err + } + cmd.Stdout = w + + stderr := bytes.NewBuffer(nil) + cmd.Stderr = stderr + + if err := cmd.Start(); err != nil { + return nil, err + } + p := &binaryProcessor{ + cmd: cmd, + r: r, + mt: rmt, + stderr: stderr, + } + go p.wait() + + // close after start and dup + w.Close() + if closer != nil { + closer() + } + if payloadC != nil { + payloadC.Close() + } + return p, nil +} + +type binaryProcessor struct { + cmd *exec.Cmd + r *os.File + mt string + stderr *bytes.Buffer + + mu sync.Mutex + err error +} + +func (c *binaryProcessor) Err() error { + c.mu.Lock() + defer c.mu.Unlock() + return c.err +} + +func (c *binaryProcessor) wait() { + if err := c.cmd.Wait(); err != nil { + if _, ok := err.(*exec.ExitError); ok { + c.mu.Lock() + c.err = errors.New(c.stderr.String()) + c.mu.Unlock() + } + } +} + +func (c *binaryProcessor) File() *os.File { + return c.r +} + +func (c *binaryProcessor) MediaType() string { + return c.mt +} + +func (c *binaryProcessor) Read(p []byte) (int, error) { + return c.r.Read(p) +} + +func (c *binaryProcessor) Close() error { + err := c.r.Close() + if kerr := c.cmd.Process.Kill(); err == nil { + err = kerr + } + return err +} diff --git a/vendor/github.com/containerd/containerd/diff/stream_windows.go b/vendor/github.com/containerd/containerd/diff/stream_windows.go new file mode 100644 index 00000000..8dadd72c --- /dev/null +++ b/vendor/github.com/containerd/containerd/diff/stream_windows.go @@ -0,0 +1,165 @@ +// +build windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. +*/ + +package diff + +import ( + "bytes" + "context" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "sync" + + winio "github.com/Microsoft/go-winio" + "github.com/gogo/protobuf/proto" + "github.com/gogo/protobuf/types" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +const processorPipe = "STREAM_PROCESSOR_PIPE" + +// NewBinaryProcessor returns a binary processor for use with processing content streams +func NewBinaryProcessor(ctx context.Context, imt, rmt string, stream StreamProcessor, name string, args []string, payload *types.Any) (StreamProcessor, error) { + cmd := exec.CommandContext(ctx, name, args...) + cmd.Env = os.Environ() + + if payload != nil { + data, err := proto.Marshal(payload) + if err != nil { + return nil, err + } + up, err := getUiqPath() + if err != nil { + return nil, err + } + path := fmt.Sprintf("\\\\.\\pipe\\containerd-processor-%s-pipe", up) + l, err := winio.ListenPipe(path, nil) + if err != nil { + return nil, err + } + go func() { + defer l.Close() + conn, err := l.Accept() + if err != nil { + logrus.WithError(err).Error("accept npipe connection") + return + } + io.Copy(conn, bytes.NewReader(data)) + conn.Close() + }() + cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", processorPipe, path)) + } + cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", mediaTypeEnvVar, imt)) + var ( + stdin io.Reader + closer func() error + err error + ) + if f, ok := stream.(RawProcessor); ok { + stdin = f.File() + closer = f.File().Close + } else { + stdin = stream + } + cmd.Stdin = stdin + r, w, err := os.Pipe() + if err != nil { + return nil, err + } + cmd.Stdout = w + stderr := bytes.NewBuffer(nil) + cmd.Stderr = stderr + + if err := cmd.Start(); err != nil { + return nil, err + } + p := &binaryProcessor{ + cmd: cmd, + r: r, + mt: rmt, + stderr: stderr, + } + go p.wait() + + // close after start and dup + w.Close() + if closer != nil { + closer() + } + return p, nil +} + +type binaryProcessor struct { + cmd *exec.Cmd + r *os.File + mt string + stderr *bytes.Buffer + + mu sync.Mutex + err error +} + +func (c *binaryProcessor) Err() error { + c.mu.Lock() + defer c.mu.Unlock() + return c.err +} + +func (c *binaryProcessor) wait() { + if err := c.cmd.Wait(); err != nil { + if _, ok := err.(*exec.ExitError); ok { + c.mu.Lock() + c.err = errors.New(c.stderr.String()) + c.mu.Unlock() + } + } +} + +func (c *binaryProcessor) File() *os.File { + return c.r +} + +func (c *binaryProcessor) MediaType() string { + return c.mt +} + +func (c *binaryProcessor) Read(p []byte) (int, error) { + return c.r.Read(p) +} + +func (c *binaryProcessor) Close() error { + err := c.r.Close() + if kerr := c.cmd.Process.Kill(); err == nil { + err = kerr + } + return err +} + +func getUiqPath() (string, error) { + dir, err := ioutil.TempDir("", "") + if err != nil { + return "", err + } + os.Remove(dir) + return filepath.Base(dir), nil +} diff --git a/vendor/github.com/containerd/containerd/diff/walking/differ.go b/vendor/github.com/containerd/containerd/diff/walking/differ.go new file mode 100644 index 00000000..5ce35910 --- /dev/null +++ b/vendor/github.com/containerd/containerd/diff/walking/differ.go @@ -0,0 +1,184 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
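
On both platforms the contract for the external processor binary is the same: the input media type is delivered in the STREAM_PROCESSOR_MEDIATYPE environment variable, content arrives on stdin, the transformed stream goes to stdout, and any payload arrives on an extra fd (Unix) or the named pipe from STREAM_PROCESSOR_PIPE (Windows). A minimal sketch of such a binary, doing an identity transform:

    package main

    import (
        "io"
        "os"
    )

    // A minimal external stream processor: BinaryHandler on the containerd
    // side maps a media type to this binary; it must transform stdin to
    // stdout. Here the transform is the identity.
    func main() {
        _ = os.Getenv("STREAM_PROCESSOR_MEDIATYPE") // input media type; a real processor switches on it
        if _, err := io.Copy(os.Stdout, os.Stdin); err != nil {
            os.Exit(1) // a non-zero exit surfaces stderr through binaryProcessor.Err
        }
    }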
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package walking + +import ( + "context" + "encoding/base64" + "fmt" + "io" + "math/rand" + "time" + + "github.com/containerd/containerd/archive" + "github.com/containerd/containerd/archive/compression" + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/diff" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/mount" + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +type walkingDiff struct { + store content.Store +} + +var emptyDesc = ocispec.Descriptor{} +var uncompressed = "containerd.io/uncompressed" + +// NewWalkingDiff is a generic implementation of diff.Comparer. The diff is +// calculated by mounting both the upper and lower mount sets and walking the +// mounted directories concurrently. Changes are calculated by comparing files +// against each other or by comparing file existence between directories. +// NewWalkingDiff uses no special characteristics of the mount sets and is +// expected to work with any filesystem. +func NewWalkingDiff(store content.Store) diff.Comparer { + return &walkingDiff{ + store: store, + } +} + +// Compare creates a diff between the given mounts and uploads the result +// to the content store. +func (s *walkingDiff) Compare(ctx context.Context, lower, upper []mount.Mount, opts ...diff.Opt) (d ocispec.Descriptor, err error) { + var config diff.Config + for _, opt := range opts { + if err := opt(&config); err != nil { + return emptyDesc, err + } + } + + if config.MediaType == "" { + config.MediaType = ocispec.MediaTypeImageLayerGzip + } + + var isCompressed bool + switch config.MediaType { + case ocispec.MediaTypeImageLayer: + case ocispec.MediaTypeImageLayerGzip: + isCompressed = true + default: + return emptyDesc, errors.Wrapf(errdefs.ErrNotImplemented, "unsupported diff media type: %v", config.MediaType) + } + + var ocidesc ocispec.Descriptor + if err := mount.WithTempMount(ctx, lower, func(lowerRoot string) error { + return mount.WithTempMount(ctx, upper, func(upperRoot string) error { + var newReference bool + if config.Reference == "" { + newReference = true + config.Reference = uniqueRef() + } + + cw, err := s.store.Writer(ctx, + content.WithRef(config.Reference), + content.WithDescriptor(ocispec.Descriptor{ + MediaType: config.MediaType, // most contentstore implementations just ignore this + })) + if err != nil { + return errors.Wrap(err, "failed to open writer") + } + defer func() { + if err != nil { + cw.Close() + if newReference { + if err := s.store.Abort(ctx, config.Reference); err != nil { + log.G(ctx).WithField("ref", config.Reference).Warnf("failed to delete diff upload") + } + } + } + }() + if !newReference { + if err = cw.Truncate(0); err != nil { + return err + } + } + + if isCompressed { + dgstr := digest.SHA256.Digester() + var compressed io.WriteCloser + compressed, err = compression.CompressStream(cw, compression.Gzip) + if err != nil { + return errors.Wrap(err, "failed to get compressed stream") + } + err = archive.WriteDiff(ctx, 
io.MultiWriter(compressed, dgstr.Hash()), lowerRoot, upperRoot) + compressed.Close() + if err != nil { + return errors.Wrap(err, "failed to write compressed diff") + } + + if config.Labels == nil { + config.Labels = map[string]string{} + } + config.Labels[uncompressed] = dgstr.Digest().String() + } else { + if err = archive.WriteDiff(ctx, cw, lowerRoot, upperRoot); err != nil { + return errors.Wrap(err, "failed to write diff") + } + } + + var commitopts []content.Opt + if config.Labels != nil { + commitopts = append(commitopts, content.WithLabels(config.Labels)) + } + + dgst := cw.Digest() + if err := cw.Commit(ctx, 0, dgst, commitopts...); err != nil { + if !errdefs.IsAlreadyExists(err) { + return errors.Wrap(err, "failed to commit") + } + } + + info, err := s.store.Info(ctx, dgst) + if err != nil { + return errors.Wrap(err, "failed to get info from content store") + } + if info.Labels == nil { + info.Labels = make(map[string]string) + } + // Set uncompressed label if digest already existed without label + if _, ok := info.Labels[uncompressed]; !ok { + info.Labels[uncompressed] = config.Labels[uncompressed] + if _, err := s.store.Update(ctx, info, "labels."+uncompressed); err != nil { + return errors.Wrap(err, "error setting uncompressed label") + } + } + + ocidesc = ocispec.Descriptor{ + MediaType: config.MediaType, + Size: info.Size, + Digest: info.Digest, + } + return nil + }) + }); err != nil { + return emptyDesc, err + } + + return ocidesc, nil +} + +func uniqueRef() string { + t := time.Now() + var b [3]byte + // Ignore read failures, just decreases uniqueness + rand.Read(b[:]) + return fmt.Sprintf("%d-%s", t.UnixNano(), base64.URLEncoding.EncodeToString(b[:])) +} diff --git a/vendor/github.com/containerd/containerd/events/events.go b/vendor/github.com/containerd/containerd/events/events.go new file mode 100644 index 00000000..b7eb86f1 --- /dev/null +++ b/vendor/github.com/containerd/containerd/events/events.go @@ -0,0 +1,81 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package events + +import ( + "context" + "time" + + "github.com/containerd/typeurl" + "github.com/gogo/protobuf/types" +) + +// Envelope provides the packaging for an event. +type Envelope struct { + Timestamp time.Time + Namespace string + Topic string + Event *types.Any +} + +// Field returns the value for the given fieldpath as a string, if defined. +// If the value is not defined, the second value will be false. 
+func (e *Envelope) Field(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + + switch fieldpath[0] { + // unhandled: timestamp + case "namespace": + return e.Namespace, len(e.Namespace) > 0 + case "topic": + return e.Topic, len(e.Topic) > 0 + case "event": + decoded, err := typeurl.UnmarshalAny(e.Event) + if err != nil { + return "", false + } + + adaptor, ok := decoded.(interface { + Field([]string) (string, bool) + }) + if !ok { + return "", false + } + return adaptor.Field(fieldpath[1:]) + } + return "", false +} + +// Event is a generic interface for any type of event +type Event interface{} + +// Publisher posts the event. +type Publisher interface { + Publish(ctx context.Context, topic string, event Event) error +} + +// Forwarder forwards an event to the underlying event bus +type Forwarder interface { + Forward(ctx context.Context, envelope *Envelope) error +} + +// Subscriber allows callers to subscribe to events +type Subscriber interface { + Subscribe(ctx context.Context, filters ...string) (ch <-chan *Envelope, errs <-chan error) +} diff --git a/vendor/github.com/containerd/containerd/events/exchange/exchange.go b/vendor/github.com/containerd/containerd/events/exchange/exchange.go new file mode 100644 index 00000000..eb27bf29 --- /dev/null +++ b/vendor/github.com/containerd/containerd/events/exchange/exchange.go @@ -0,0 +1,251 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package exchange + +import ( + "context" + "strings" + "time" + + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/events" + "github.com/containerd/containerd/filters" + "github.com/containerd/containerd/identifiers" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/namespaces" + "github.com/containerd/typeurl" + goevents "github.com/docker/go-events" + "github.com/gogo/protobuf/types" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// Exchange broadcasts events +type Exchange struct { + broadcaster *goevents.Broadcaster +} + +// NewExchange returns a new event Exchange +func NewExchange() *Exchange { + return &Exchange{ + broadcaster: goevents.NewBroadcaster(), + } +} + +var _ events.Publisher = &Exchange{} +var _ events.Forwarder = &Exchange{} +var _ events.Subscriber = &Exchange{} + +// Forward accepts an envelope to be directly distributed on the exchange. +// +// This is useful when an event is forwarded on behalf of another namespace or +// when the event is propagated on behalf of another publisher. 
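+//
+// A minimal usage sketch (editor's illustration; the field values are
+// assumptions, not requirements beyond what validateEnvelope enforces):
+//
+//	ex := NewExchange()
+//	encoded, err := typeurl.MarshalAny(event) // any typeurl-registered type
+//	if err != nil {
+//		// handle marshal error
+//	}
+//	err = ex.Forward(ctx, &events.Envelope{
+//		Timestamp: time.Now().UTC(),
+//		Namespace: "default",
+//		Topic:     "/tasks/create",
+//		Event:     encoded,
+//	})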
+func (e *Exchange) Forward(ctx context.Context, envelope *events.Envelope) (err error) { + if err := validateEnvelope(envelope); err != nil { + return err + } + + defer func() { + logger := log.G(ctx).WithFields(logrus.Fields{ + "topic": envelope.Topic, + "ns": envelope.Namespace, + "type": envelope.Event.TypeUrl, + }) + + if err != nil { + logger.WithError(err).Error("error forwarding event") + } else { + logger.Debug("event forwarded") + } + }() + + return e.broadcaster.Write(envelope) +} + +// Publish packages and sends an event. The caller will be considered the +// initial publisher of the event. This means the timestamp will be calculated +// at this point and this method may read from the calling context. +func (e *Exchange) Publish(ctx context.Context, topic string, event events.Event) (err error) { + var ( + namespace string + encoded *types.Any + envelope events.Envelope + ) + + namespace, err = namespaces.NamespaceRequired(ctx) + if err != nil { + return errors.Wrapf(err, "failed publishing event") + } + if err := validateTopic(topic); err != nil { + return errors.Wrapf(err, "envelope topic %q", topic) + } + + encoded, err = typeurl.MarshalAny(event) + if err != nil { + return err + } + + envelope.Timestamp = time.Now().UTC() + envelope.Namespace = namespace + envelope.Topic = topic + envelope.Event = encoded + + defer func() { + logger := log.G(ctx).WithFields(logrus.Fields{ + "topic": envelope.Topic, + "ns": envelope.Namespace, + "type": envelope.Event.TypeUrl, + }) + + if err != nil { + logger.WithError(err).Error("error publishing event") + } else { + logger.Debug("event published") + } + }() + + return e.broadcaster.Write(&envelope) +} + +// Subscribe to events on the exchange. Events are sent through the returned +// channel ch. If an error is encountered, it will be sent on channel errs and +// errs will be closed. To end the subscription, cancel the provided context. +// +// Zero or more filters may be provided as strings. Only events that match +// *any* of the provided filters will be sent on the channel. The filters use +// the standard containerd filters package syntax. +func (e *Exchange) Subscribe(ctx context.Context, fs ...string) (ch <-chan *events.Envelope, errs <-chan error) { + var ( + evch = make(chan *events.Envelope) + errq = make(chan error, 1) + channel = goevents.NewChannel(0) + queue = goevents.NewQueue(channel) + dst goevents.Sink = queue + ) + + closeAll := func() { + channel.Close() + queue.Close() + e.broadcaster.Remove(dst) + close(errq) + } + + ch = evch + errs = errq + + if len(fs) > 0 { + filter, err := filters.ParseAll(fs...) + if err != nil { + errq <- errors.Wrapf(err, "failed parsing subscription filters") + closeAll() + return + } + + dst = goevents.NewFilter(queue, goevents.MatcherFunc(func(gev goevents.Event) bool { + return filter.Match(adapt(gev)) + })) + } + + e.broadcaster.Add(dst) + + go func() { + defer closeAll() + + var err error + loop: + for { + select { + case ev := <-channel.C: + env, ok := ev.(*events.Envelope) + if !ok { + // TODO(stevvooe): For the most part, we are well protected + // from this condition. Both Forward and Publish protect + // from this. 
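+					// Editor's note: Forward and Publish only ever write
+					// *events.Envelope values to the broadcaster, so reaching
+					// this branch indicates a bug in the exchange itself
+					// rather than bad caller input.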
+ err = errors.Errorf("invalid envelope encountered %#v; please file a bug", ev) + break + } + + select { + case evch <- env: + case <-ctx.Done(): + break loop + } + case <-ctx.Done(): + break loop + } + } + + if err == nil { + if cerr := ctx.Err(); cerr != context.Canceled { + err = cerr + } + } + + errq <- err + }() + + return +} + +func validateTopic(topic string) error { + if topic == "" { + return errors.Wrap(errdefs.ErrInvalidArgument, "must not be empty") + } + + if topic[0] != '/' { + return errors.Wrapf(errdefs.ErrInvalidArgument, "must start with '/'") + } + + if len(topic) == 1 { + return errors.Wrapf(errdefs.ErrInvalidArgument, "must have at least one component") + } + + components := strings.Split(topic[1:], "/") + for _, component := range components { + if err := identifiers.Validate(component); err != nil { + return errors.Wrapf(err, "failed validation on component %q", component) + } + } + + return nil +} + +func validateEnvelope(envelope *events.Envelope) error { + if err := identifiers.Validate(envelope.Namespace); err != nil { + return errors.Wrapf(err, "event envelope has invalid namespace") + } + + if err := validateTopic(envelope.Topic); err != nil { + return errors.Wrapf(err, "envelope topic %q", envelope.Topic) + } + + if envelope.Timestamp.IsZero() { + return errors.Wrapf(errdefs.ErrInvalidArgument, "timestamp must be set on forwarded event") + } + + return nil +} + +func adapt(ev interface{}) filters.Adaptor { + if adaptor, ok := ev.(filters.Adaptor); ok { + return adaptor + } + + return filters.AdapterFunc(func(fieldpath []string) (string, bool) { + return "", false + }) +} diff --git a/vendor/github.com/containerd/containerd/filters/adaptor.go b/vendor/github.com/containerd/containerd/filters/adaptor.go new file mode 100644 index 00000000..5a9c559c --- /dev/null +++ b/vendor/github.com/containerd/containerd/filters/adaptor.go @@ -0,0 +1,33 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package filters + +// Adaptor specifies the mapping of fieldpaths to a type. For the given field +// path, the value and whether it is present should be returned. The mapping of +// the fieldpath to a field is deferred to the adaptor implementation, but +// should generally follow protobuf field path/mask semantics. +type Adaptor interface { + Field(fieldpath []string) (value string, present bool) +} + +// AdapterFunc allows implementation specific matching of fieldpaths +type AdapterFunc func(fieldpath []string) (string, bool) + +// Field returns the field name and true if it exists +func (fn AdapterFunc) Field(fieldpath []string) (string, bool) { + return fn(fieldpath) +} diff --git a/vendor/github.com/containerd/containerd/filters/filter.go b/vendor/github.com/containerd/containerd/filters/filter.go new file mode 100644 index 00000000..cf09d8d9 --- /dev/null +++ b/vendor/github.com/containerd/containerd/filters/filter.go @@ -0,0 +1,179 @@ +/* + Copyright The containerd Authors. 
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+// Package filters defines a syntax and parser that can be used for the
+// filtration of items across the containerd API. The core is built on the
+// concept of protobuf field paths, with quoting. Several operators allow the
+// user to flexibly select items based on field presence, equality, inequality
+// and regular expressions. Flexible adaptors support working with any type.
+//
+// The syntax is fairly familiar, if you've used container ecosystem
+// projects. At the core, we base it on the concept of protobuf field
+// paths, augmenting with the ability to quote portions of the field path
+// to match arbitrary labels. These "selectors" come in the following
+// syntax:
+//
+// ```
+// <fieldpath>[<operator><value>]
+// ```
+//
+// A basic example is as follows:
+//
+// ```
+// name==foo
+// ```
+//
+// This would match all objects that have a field `name` with the value
+// `foo`. If we only want to test if the field is present, we can omit the
+// operator. This is most useful for matching labels in containerd. The
+// following will match objects that have the field "labels" and have the
+// label "foo" defined:
+//
+// ```
+// labels.foo
+// ```
+//
+// We also allow for quoting of parts of the field path to allow matching
+// of arbitrary items:
+//
+// ```
+// labels."very complex label"==something
+// ```
+//
+// We also define `!=` and `~=` as operators. The `!=` will match all
+// objects that don't match the value for a field and `~=` will compile the
+// target value as a regular expression and match the field value against that.
+//
+// Selectors can be combined using a comma, such that the resulting
+// selector will require all selectors are matched for the object to match.
+// The following example will match objects that are named `foo` and have
+// the label `bar`:
+//
+// ```
+// name==foo,labels.bar
+// ```
+//
+package filters
+
+import (
+	"regexp"
+
+	"github.com/containerd/containerd/log"
+)
+
+// Filter matches specific resources based on the provided filter
+type Filter interface {
+	Match(adaptor Adaptor) bool
+}
+
+// FilterFunc is a function that handles matching with an adaptor
+type FilterFunc func(Adaptor) bool
+
+// Match matches the FilterFunc returning true if the object matches the filter
+func (fn FilterFunc) Match(adaptor Adaptor) bool {
+	return fn(adaptor)
+}
+
+// Always is a filter that always returns true for any type of object
+var Always FilterFunc = func(adaptor Adaptor) bool {
+	return true
+}
+
+// Any allows multiple filters to be matched against the object
+type Any []Filter
+
+// Match returns true if any of the provided filters are true
+func (m Any) Match(adaptor Adaptor) bool {
+	for _, m := range m {
+		if m.Match(adaptor) {
+			return true
+		}
+	}
+
+	return false
+}
+
+// All allows multiple filters to be matched against the object
+type All []Filter
+
+// Match only returns true if all filters match the object
+func (m All) Match(adaptor Adaptor) bool {
+	for _, m := range m {
+		if !m.Match(adaptor) {
+			return false
+		}
+	}
+
+	return true
+}
+
+type operator int
+
+const (
+	operatorPresent = iota
+	operatorEqual
+	operatorNotEqual
+	operatorMatches
+)
+
+func (op operator) String() string {
+	switch op {
+	case operatorPresent:
+		return "?"
+	case operatorEqual:
+		return "=="
+	case operatorNotEqual:
+		return "!="
+	case operatorMatches:
+		return "~="
+	}
+
+	return "unknown"
+}
+
+type selector struct {
+	fieldpath []string
+	operator  operator
+	value     string
+	re        *regexp.Regexp
+}
+
+func (m selector) Match(adaptor Adaptor) bool {
+	value, present := adaptor.Field(m.fieldpath)
+
+	switch m.operator {
+	case operatorPresent:
+		return present
+	case operatorEqual:
+		return present && value == m.value
+	case operatorNotEqual:
+		return value != m.value
+	case operatorMatches:
+		if m.re == nil {
+			r, err := regexp.Compile(m.value)
+			if err != nil {
+				log.L.Errorf("error compiling regexp %q", m.value)
+				return false
+			}
+
+			m.re = r
+		}
+
+		return m.re.MatchString(value)
+	default:
+		return false
+	}
+}
diff --git a/vendor/github.com/containerd/containerd/filters/parser.go b/vendor/github.com/containerd/containerd/filters/parser.go
new file mode 100644
index 00000000..0825d668
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/filters/parser.go
@@ -0,0 +1,292 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package filters
+
+import (
+	"fmt"
+	"io"
+
+	"github.com/containerd/containerd/errdefs"
+	"github.com/pkg/errors"
+)
+
+/*
+Parse the strings into a filter that may be used with an adaptor.
+
+The filter is made up of zero or more selectors.
+
+The format is a comma separated list of expressions, in the form of
+`<fieldpath><op><value>`, known as selectors. All selectors must match the
+target object for the filter to be true.
+
+We define the operators "==" for equality, "!=" for not equal and "~=" for a
+regular expression. If the operator and value are not present, the matcher will
+test for the presence of a value, as defined by the target object.
+
+The formal grammar is as follows:
+
+selectors := selector ("," selector)*
+selector  := fieldpath (operator value)
+fieldpath := field ('.' field)*
+field     := quoted | [A-Za-z] [A-Za-z0-9_]+
+operator  := "==" | "!=" | "~="
+value     := quoted | [^\s,]+
+quoted    := <go string syntax>
+*/
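+// For example (editor's sketch; the field names are illustrative, not
+// defined by this package):
+//
+//	f, err := Parse("name==foo,labels.bar")
+//	if err != nil {
+//		// handle parse error
+//	}
+//	ok := f.Match(AdapterFunc(func(fieldpath []string) (string, bool) {
+//		// resolve fieldpath against the object being filtered
+//		return "", false
+//	}))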
+func Parse(s string) (Filter, error) {
+	// special case empty to match all
+	if s == "" {
+		return Always, nil
+	}
+
+	p := parser{input: s}
+	return p.parse()
+}
+
+// ParseAll parses each filter in ss and returns a filter that will return true
+// if any filter matches the expression.
+//
+// If no filters are provided, the filter will match anything.
+func ParseAll(ss ...string) (Filter, error) {
+	if len(ss) == 0 {
+		return Always, nil
+	}
+
+	var fs []Filter
+	for _, s := range ss {
+		f, err := Parse(s)
+		if err != nil {
+			return nil, errors.Wrap(errdefs.ErrInvalidArgument, err.Error())
+		}
+
+		fs = append(fs, f)
+	}
+
+	return Any(fs), nil
+}
+
+type parser struct {
+	input   string
+	scanner scanner
+}
+
+func (p *parser) parse() (Filter, error) {
+	p.scanner.init(p.input)
+
+	ss, err := p.selectors()
+	if err != nil {
+		return nil, errors.Wrap(err, "filters")
+	}
+
+	return ss, nil
+}
+
+func (p *parser) selectors() (Filter, error) {
+	s, err := p.selector()
+	if err != nil {
+		return nil, err
+	}
+
+	ss := All{s}
+
+loop:
+	for {
+		tok := p.scanner.peek()
+		switch tok {
+		case ',':
+			pos, tok, _ := p.scanner.scan()
+			if tok != tokenSeparator {
+				return nil, p.mkerr(pos, "expected a separator")
+			}
+
+			s, err := p.selector()
+			if err != nil {
+				return nil, err
+			}
+
+			ss = append(ss, s)
+		case tokenEOF:
+			break loop
+		default:
+			return nil, p.mkerr(p.scanner.ppos, "unexpected input: %v", string(tok))
+		}
+	}
+
+	return ss, nil
+}
+
+func (p *parser) selector() (selector, error) {
+	fieldpath, err := p.fieldpath()
+	if err != nil {
+		return selector{}, err
+	}
+
+	switch p.scanner.peek() {
+	case ',', tokenSeparator, tokenEOF:
+		return selector{
+			fieldpath: fieldpath,
+			operator:  operatorPresent,
+		}, nil
+	}
+
+	op, err := p.operator()
+	if err != nil {
+		return selector{}, err
+	}
+
+	var allowAltQuotes bool
+	if op == operatorMatches {
+		allowAltQuotes = true
+	}
+
+	value, err := p.value(allowAltQuotes)
+	if err != nil {
+		if err == io.EOF {
+			return selector{}, io.ErrUnexpectedEOF
+		}
+		return selector{}, err
+	}
+
+	return selector{
+		fieldpath: fieldpath,
+		value:     value,
+		operator:  op,
+	}, nil
+}
+
+func (p *parser) fieldpath() ([]string, error) {
+	f, err := p.field()
+	if err != nil {
+		return nil, err
+	}
+
+	fs := []string{f}
+loop:
+	for {
+		tok := p.scanner.peek() // lookahead to consume field separator
+
+		switch tok {
+		case '.':
+			pos, tok, _ := p.scanner.scan() // consume separator
+			if tok != tokenSeparator {
+				return nil, p.mkerr(pos, "expected a field separator (`.`)")
+			}
+
+			f, err := p.field()
+			if err != nil {
+				return nil, err
+			}
+
+			fs = append(fs, f)
+		default:
+			// let the layer above handle the other bad cases.
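+			// (an operator, separator, or EOF legitimately ends the
+			// fieldpath; selector() decides whether what follows is valid)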
+ break loop + } + } + + return fs, nil +} + +func (p *parser) field() (string, error) { + pos, tok, s := p.scanner.scan() + switch tok { + case tokenField: + return s, nil + case tokenQuoted: + return p.unquote(pos, s, false) + case tokenIllegal: + return "", p.mkerr(pos, p.scanner.err) + } + + return "", p.mkerr(pos, "expected field or quoted") +} + +func (p *parser) operator() (operator, error) { + pos, tok, s := p.scanner.scan() + switch tok { + case tokenOperator: + switch s { + case "==": + return operatorEqual, nil + case "!=": + return operatorNotEqual, nil + case "~=": + return operatorMatches, nil + default: + return 0, p.mkerr(pos, "unsupported operator %q", s) + } + case tokenIllegal: + return 0, p.mkerr(pos, p.scanner.err) + } + + return 0, p.mkerr(pos, `expected an operator ("=="|"!="|"~=")`) +} + +func (p *parser) value(allowAltQuotes bool) (string, error) { + pos, tok, s := p.scanner.scan() + + switch tok { + case tokenValue, tokenField: + return s, nil + case tokenQuoted: + return p.unquote(pos, s, allowAltQuotes) + case tokenIllegal: + return "", p.mkerr(pos, p.scanner.err) + } + + return "", p.mkerr(pos, "expected value or quoted") +} + +func (p *parser) unquote(pos int, s string, allowAlts bool) (string, error) { + if !allowAlts && s[0] != '\'' && s[0] != '"' { + return "", p.mkerr(pos, "invalid quote encountered") + } + + uq, err := unquote(s) + if err != nil { + return "", p.mkerr(pos, "unquoting failed: %v", err) + } + + return uq, nil +} + +type parseError struct { + input string + pos int + msg string +} + +func (pe parseError) Error() string { + if pe.pos < len(pe.input) { + before := pe.input[:pe.pos] + location := pe.input[pe.pos : pe.pos+1] // need to handle end + after := pe.input[pe.pos+1:] + + return fmt.Sprintf("[%s >|%s|< %s]: %v", before, location, after, pe.msg) + } + + return fmt.Sprintf("[%s]: %v", pe.input, pe.msg) +} + +func (p *parser) mkerr(pos int, format string, args ...interface{}) error { + return errors.Wrap(parseError{ + input: p.input, + pos: pos, + msg: fmt.Sprintf(format, args...), + }, "parse error") +} diff --git a/vendor/github.com/containerd/containerd/filters/quote.go b/vendor/github.com/containerd/containerd/filters/quote.go new file mode 100644 index 00000000..2d64e23a --- /dev/null +++ b/vendor/github.com/containerd/containerd/filters/quote.go @@ -0,0 +1,253 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package filters + +import ( + "unicode/utf8" + + "github.com/pkg/errors" +) + +// NOTE(stevvooe): Most of this code in this file is copied from the stdlib +// strconv package and modified to be able to handle quoting with `/` and `|` +// as delimiters. The copyright is held by the Go authors. + +var errQuoteSyntax = errors.New("quote syntax error") + +// UnquoteChar decodes the first character or byte in the escaped string +// or character literal represented by the string s. 
+// It returns four values: +// +// 1) value, the decoded Unicode code point or byte value; +// 2) multibyte, a boolean indicating whether the decoded character requires a multibyte UTF-8 representation; +// 3) tail, the remainder of the string after the character; and +// 4) an error that will be nil if the character is syntactically valid. +// +// The second argument, quote, specifies the type of literal being parsed +// and therefore which escaped quote character is permitted. +// If set to a single quote, it permits the sequence \' and disallows unescaped '. +// If set to a double quote, it permits \" and disallows unescaped ". +// If set to zero, it does not permit either escape and allows both quote characters to appear unescaped. +// +// This is from Go strconv package, modified to support `|` and `/` as double +// quotes for use with regular expressions. +func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) { + // easy cases + switch c := s[0]; { + case c == quote && (quote == '\'' || quote == '"' || quote == '/' || quote == '|'): + err = errQuoteSyntax + return + case c >= utf8.RuneSelf: + r, size := utf8.DecodeRuneInString(s) + return r, true, s[size:], nil + case c != '\\': + return rune(s[0]), false, s[1:], nil + } + + // hard case: c is backslash + if len(s) <= 1 { + err = errQuoteSyntax + return + } + c := s[1] + s = s[2:] + + switch c { + case 'a': + value = '\a' + case 'b': + value = '\b' + case 'f': + value = '\f' + case 'n': + value = '\n' + case 'r': + value = '\r' + case 't': + value = '\t' + case 'v': + value = '\v' + case 'x', 'u', 'U': + n := 0 + switch c { + case 'x': + n = 2 + case 'u': + n = 4 + case 'U': + n = 8 + } + var v rune + if len(s) < n { + err = errQuoteSyntax + return + } + for j := 0; j < n; j++ { + x, ok := unhex(s[j]) + if !ok { + err = errQuoteSyntax + return + } + v = v<<4 | x + } + s = s[n:] + if c == 'x' { + // single-byte string, possibly not UTF-8 + value = v + break + } + if v > utf8.MaxRune { + err = errQuoteSyntax + return + } + value = v + multibyte = true + case '0', '1', '2', '3', '4', '5', '6', '7': + v := rune(c) - '0' + if len(s) < 2 { + err = errQuoteSyntax + return + } + for j := 0; j < 2; j++ { // one digit already; two more + x := rune(s[j]) - '0' + if x < 0 || x > 7 { + err = errQuoteSyntax + return + } + v = (v << 3) | x + } + s = s[2:] + if v > 255 { + err = errQuoteSyntax + return + } + value = v + case '\\': + value = '\\' + case '\'', '"', '|', '/': + if c != quote { + err = errQuoteSyntax + return + } + value = rune(c) + default: + err = errQuoteSyntax + return + } + tail = s + return +} + +// unquote interprets s as a single-quoted, double-quoted, +// or backquoted Go string literal, returning the string value +// that s quotes. (If s is single-quoted, it would be a Go +// character literal; Unquote returns the corresponding +// one-character string.) +// +// This is modified from the standard library to support `|` and `/` as quote +// characters for use with regular expressions. +func unquote(s string) (string, error) { + n := len(s) + if n < 2 { + return "", errQuoteSyntax + } + quote := s[0] + if quote != s[n-1] { + return "", errQuoteSyntax + } + s = s[1 : n-1] + + if quote == '`' { + if contains(s, '`') { + return "", errQuoteSyntax + } + if contains(s, '\r') { + // -1 because we know there is at least one \r to remove. 
+ buf := make([]byte, 0, len(s)-1) + for i := 0; i < len(s); i++ { + if s[i] != '\r' { + buf = append(buf, s[i]) + } + } + return string(buf), nil + } + return s, nil + } + if quote != '"' && quote != '\'' && quote != '|' && quote != '/' { + return "", errQuoteSyntax + } + if contains(s, '\n') { + return "", errQuoteSyntax + } + + // Is it trivial? Avoid allocation. + if !contains(s, '\\') && !contains(s, quote) { + switch quote { + case '"', '/', '|': // pipe and slash are treated like double quote + return s, nil + case '\'': + r, size := utf8.DecodeRuneInString(s) + if size == len(s) && (r != utf8.RuneError || size != 1) { + return s, nil + } + } + } + + var runeTmp [utf8.UTFMax]byte + buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations. + for len(s) > 0 { + c, multibyte, ss, err := unquoteChar(s, quote) + if err != nil { + return "", err + } + s = ss + if c < utf8.RuneSelf || !multibyte { + buf = append(buf, byte(c)) + } else { + n := utf8.EncodeRune(runeTmp[:], c) + buf = append(buf, runeTmp[:n]...) + } + if quote == '\'' && len(s) != 0 { + // single-quoted must be single character + return "", errQuoteSyntax + } + } + return string(buf), nil +} + +// contains reports whether the string contains the byte c. +func contains(s string, c byte) bool { + for i := 0; i < len(s); i++ { + if s[i] == c { + return true + } + } + return false +} + +func unhex(b byte) (v rune, ok bool) { + c := rune(b) + switch { + case '0' <= c && c <= '9': + return c - '0', true + case 'a' <= c && c <= 'f': + return c - 'a' + 10, true + case 'A' <= c && c <= 'F': + return c - 'A' + 10, true + } + return +} diff --git a/vendor/github.com/containerd/containerd/filters/scanner.go b/vendor/github.com/containerd/containerd/filters/scanner.go new file mode 100644 index 00000000..6a485467 --- /dev/null +++ b/vendor/github.com/containerd/containerd/filters/scanner.go @@ -0,0 +1,297 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package filters + +import ( + "unicode" + "unicode/utf8" +) + +const ( + tokenEOF = -(iota + 1) + tokenQuoted + tokenValue + tokenField + tokenSeparator + tokenOperator + tokenIllegal +) + +type token rune + +func (t token) String() string { + switch t { + case tokenEOF: + return "EOF" + case tokenQuoted: + return "Quoted" + case tokenValue: + return "Value" + case tokenField: + return "Field" + case tokenSeparator: + return "Separator" + case tokenOperator: + return "Operator" + case tokenIllegal: + return "Illegal" + } + + return string(t) +} + +func (t token) GoString() string { + return "token" + t.String() +} + +type scanner struct { + input string + pos int + ppos int // bounds the current rune in the string + value bool + err string +} + +func (s *scanner) init(input string) { + s.input = input + s.pos = 0 + s.ppos = 0 +} + +func (s *scanner) next() rune { + if s.pos >= len(s.input) { + return tokenEOF + } + s.pos = s.ppos + + r, w := utf8.DecodeRuneInString(s.input[s.ppos:]) + s.ppos += w + if r == utf8.RuneError { + if w > 0 { + s.error("rune error") + return tokenIllegal + } + return tokenEOF + } + + if r == 0 { + s.error("unexpected null") + return tokenIllegal + } + + return r +} + +func (s *scanner) peek() rune { + pos := s.pos + ppos := s.ppos + ch := s.next() + s.pos = pos + s.ppos = ppos + return ch +} + +func (s *scanner) scan() (nextp int, tk token, text string) { + var ( + ch = s.next() + pos = s.pos + ) + +chomp: + switch { + case ch == tokenEOF: + case ch == tokenIllegal: + case isQuoteRune(ch): + if !s.scanQuoted(ch) { + return pos, tokenIllegal, s.input[pos:s.ppos] + } + return pos, tokenQuoted, s.input[pos:s.ppos] + case isSeparatorRune(ch): + s.value = false + return pos, tokenSeparator, s.input[pos:s.ppos] + case isOperatorRune(ch): + s.scanOperator() + s.value = true + return pos, tokenOperator, s.input[pos:s.ppos] + case unicode.IsSpace(ch): + // chomp + ch = s.next() + pos = s.pos + goto chomp + case s.value: + s.scanValue() + s.value = false + return pos, tokenValue, s.input[pos:s.ppos] + case isFieldRune(ch): + s.scanField() + return pos, tokenField, s.input[pos:s.ppos] + } + + return s.pos, token(ch), "" +} + +func (s *scanner) scanField() { + for { + ch := s.peek() + if !isFieldRune(ch) { + break + } + s.next() + } +} + +func (s *scanner) scanOperator() { + for { + ch := s.peek() + switch ch { + case '=', '!', '~': + s.next() + default: + return + } + } +} + +func (s *scanner) scanValue() { + for { + ch := s.peek() + if !isValueRune(ch) { + break + } + s.next() + } +} + +func (s *scanner) scanQuoted(quote rune) bool { + var illegal bool + ch := s.next() // read character after quote + for ch != quote { + if ch == '\n' || ch < 0 { + s.error("quoted literal not terminated") + return false + } + if ch == '\\' { + var legal bool + ch, legal = s.scanEscape(quote) + if !legal { + illegal = true + } + } else { + ch = s.next() + } + } + return !illegal +} + +func (s *scanner) scanEscape(quote rune) (ch rune, legal bool) { + ch = s.next() // read character after '/' + switch ch { + case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', quote: + // nothing to do + ch = s.next() + legal = true + case '0', '1', '2', '3', '4', '5', '6', '7': + ch, legal = s.scanDigits(ch, 8, 3) + case 'x': + ch, legal = s.scanDigits(s.next(), 16, 2) + case 'u': + ch, legal = s.scanDigits(s.next(), 16, 4) + case 'U': + ch, legal = s.scanDigits(s.next(), 16, 8) + default: + s.error("illegal escape sequence") + } + return +} + +func (s *scanner) scanDigits(ch rune, base, n int) (rune, bool) 
{ + for n > 0 && digitVal(ch) < base { + ch = s.next() + n-- + } + if n > 0 { + s.error("illegal numeric escape sequence") + return ch, false + } + return ch, true +} + +func (s *scanner) error(msg string) { + if s.err == "" { + s.err = msg + } +} + +func digitVal(ch rune) int { + switch { + case '0' <= ch && ch <= '9': + return int(ch - '0') + case 'a' <= ch && ch <= 'f': + return int(ch - 'a' + 10) + case 'A' <= ch && ch <= 'F': + return int(ch - 'A' + 10) + } + return 16 // larger than any legal digit val +} + +func isFieldRune(r rune) bool { + return (r == '_' || isAlphaRune(r) || isDigitRune(r)) +} + +func isAlphaRune(r rune) bool { + return r >= 'A' && r <= 'Z' || r >= 'a' && r <= 'z' +} + +func isDigitRune(r rune) bool { + return r >= '0' && r <= '9' +} + +func isOperatorRune(r rune) bool { + switch r { + case '=', '!', '~': + return true + } + + return false +} + +func isQuoteRune(r rune) bool { + switch r { + case '/', '|', '"': // maybe add single quoting? + return true + } + + return false +} + +func isSeparatorRune(r rune) bool { + switch r { + case ',', '.': + return true + } + + return false +} + +func isValueRune(r rune) bool { + return r != ',' && !unicode.IsSpace(r) && + (unicode.IsLetter(r) || + unicode.IsDigit(r) || + unicode.IsNumber(r) || + unicode.IsGraphic(r) || + unicode.IsPunct(r)) +} diff --git a/vendor/github.com/containerd/containerd/gc/gc.go b/vendor/github.com/containerd/containerd/gc/gc.go new file mode 100644 index 00000000..4f71cb30 --- /dev/null +++ b/vendor/github.com/containerd/containerd/gc/gc.go @@ -0,0 +1,189 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// Package gc experiments with providing central gc tooling to ensure +// deterministic resource removal within containerd. +// +// For now, we just have a single exported implementation that can be used +// under certain use cases. +package gc + +import ( + "context" + "sync" + "time" +) + +// ResourceType represents type of resource at a node +type ResourceType uint8 + +// ResourceMax represents the max resource. +// Upper bits are stripped out during the mark phase, allowing the upper 3 bits +// to be used by the caller reference function. +const ResourceMax = ResourceType(0x1F) + +// Node presents a resource which has a type and key, +// this node can be used to lookup other nodes. +type Node struct { + Type ResourceType + Namespace string + Key string +} + +// Stats about a garbage collection run +type Stats interface { + Elapsed() time.Duration +} + +// Tricolor implements basic, single-thread tri-color GC. Given the roots, the +// complete set and a refs function, this function returns a map of all +// reachable objects. +// +// Correct usage requires that the caller not allow the arguments to change +// until the result is used to delete objects in the system. +// +// It will allocate memory proportional to the size of the reachable set. 
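+//
+// A minimal calling sketch (editor's illustration; refsOf is a hypothetical
+// caller-supplied lookup):
+//
+//	reachable, err := Tricolor(roots, func(n Node) ([]Node, error) {
+//		return refsOf(n), nil
+//	})
+//	if err != nil {
+//		// handle lookup error
+//	}
+//	// anything absent from reachable may be passed to Sweep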
+// +// We can probably use this to inform a design for incremental GC by injecting +// callbacks to the set modification algorithms. +func Tricolor(roots []Node, refs func(ref Node) ([]Node, error)) (map[Node]struct{}, error) { + var ( + grays []Node // maintain a gray "stack" + seen = map[Node]struct{}{} // or not "white", basically "seen" + reachable = map[Node]struct{}{} // or "black", in tri-color parlance + ) + + grays = append(grays, roots...) + + for len(grays) > 0 { + // Pick any gray object + id := grays[len(grays)-1] // effectively "depth first" because first element + grays = grays[:len(grays)-1] + seen[id] = struct{}{} // post-mark this as not-white + rs, err := refs(id) + if err != nil { + return nil, err + } + + // mark all the referenced objects as gray + for _, target := range rs { + if _, ok := seen[target]; !ok { + grays = append(grays, target) + } + } + + // strip bits above max resource type + id.Type = id.Type & ResourceMax + // mark as black when done + reachable[id] = struct{}{} + } + + return reachable, nil +} + +// ConcurrentMark implements simple, concurrent GC. All the roots are scanned +// and the complete set of references is formed by calling the refs function +// for each seen object. This function returns a map of all object reachable +// from a root. +// +// Correct usage requires that the caller not allow the arguments to change +// until the result is used to delete objects in the system. +// +// It will allocate memory proportional to the size of the reachable set. +func ConcurrentMark(ctx context.Context, root <-chan Node, refs func(context.Context, Node, func(Node)) error) (map[Node]struct{}, error) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + var ( + grays = make(chan Node) + seen = map[Node]struct{}{} // or not "white", basically "seen" + wg sync.WaitGroup + + errOnce sync.Once + refErr error + ) + + go func() { + for gray := range grays { + if _, ok := seen[gray]; ok { + wg.Done() + continue + } + seen[gray] = struct{}{} // post-mark this as non-white + + go func(gray Node) { + defer wg.Done() + + send := func(n Node) { + wg.Add(1) + select { + case grays <- n: + case <-ctx.Done(): + wg.Done() + } + } + + if err := refs(ctx, gray, send); err != nil { + errOnce.Do(func() { + refErr = err + cancel() + }) + } + + }(gray) + } + }() + + for r := range root { + wg.Add(1) + select { + case grays <- r: + case <-ctx.Done(): + wg.Done() + } + + } + + // Wait for outstanding grays to be processed + wg.Wait() + + close(grays) + + if refErr != nil { + return nil, refErr + } + if cErr := ctx.Err(); cErr != nil { + return nil, cErr + } + + return seen, nil +} + +// Sweep removes all nodes returned through the slice which are not in +// the reachable set by calling the provided remove function. +func Sweep(reachable map[Node]struct{}, all []Node, remove func(Node) error) error { + // All black objects are now reachable, and all white objects are + // unreachable. Free those that are white! + for _, node := range all { + if _, ok := reachable[node]; !ok { + if err := remove(node); err != nil { + return err + } + } + } + + return nil +} diff --git a/vendor/github.com/containerd/containerd/identifiers/validate.go b/vendor/github.com/containerd/containerd/identifiers/validate.go new file mode 100644 index 00000000..f52317b4 --- /dev/null +++ b/vendor/github.com/containerd/containerd/identifiers/validate.go @@ -0,0 +1,73 @@ +/* + Copyright The containerd Authors. 
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+// Package identifiers provides common validation for identifiers and keys
+// across containerd.
+//
+// Identifiers in containerd must be alphanumeric, allowing limited
+// underscores, dashes and dots.
+//
+// While the character set may be expanded in the future, identifiers
+// are guaranteed to be safely used as filesystem path components.
package identifiers
+
+import (
+	"regexp"
+
+	"github.com/containerd/containerd/errdefs"
+	"github.com/pkg/errors"
+)
+
+const (
+	maxLength  = 76
+	alphanum   = `[A-Za-z0-9]+`
+	separators = `[._-]`
+)
+
+var (
+	// identifierRe defines the pattern for valid identifiers.
+	identifierRe = regexp.MustCompile(reAnchor(alphanum + reGroup(separators+reGroup(alphanum)) + "*"))
+)
+
+// Validate returns nil if the string s is a valid identifier.
+//
+// Identifiers are similar to the domain name rules according to RFC 1035, section 2.3.1. However,
+// rules in this package are relaxed to allow numerals to follow period (".") and mixed case is
+// allowed.
+//
+// In general identifiers that pass this validation should be safe for use as filesystem path components.
+func Validate(s string) error {
+	if len(s) == 0 {
+		return errors.Wrapf(errdefs.ErrInvalidArgument, "identifier must not be empty")
+	}
+
+	if len(s) > maxLength {
+		return errors.Wrapf(errdefs.ErrInvalidArgument, "identifier %q greater than maximum length (%d characters)", s, maxLength)
+	}
+
+	if !identifierRe.MatchString(s) {
+		return errors.Wrapf(errdefs.ErrInvalidArgument, "identifier %q must match %v", s, identifierRe)
+	}
+	return nil
+}
+
+func reGroup(s string) string {
+	return `(?:` + s + `)`
+}
+
+func reAnchor(s string) string {
+	return `^` + s + `$`
+}
diff --git a/vendor/github.com/containerd/containerd/images/annotations.go b/vendor/github.com/containerd/containerd/images/annotations.go
new file mode 100644
index 00000000..47d92104
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/images/annotations.go
@@ -0,0 +1,23 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package images
+
+const (
+	// AnnotationImageName is an annotation on a Descriptor in an index.json
+	// containing the `Name` value as used by an `Image` struct
+	AnnotationImageName = "io.containerd.image.name"
+)
diff --git a/vendor/github.com/containerd/containerd/images/archive/exporter.go b/vendor/github.com/containerd/containerd/images/archive/exporter.go
new file mode 100644
index 00000000..c9d3f6ec
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/images/archive/exporter.go
@@ -0,0 +1,468 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package archive
+
+import (
+	"archive/tar"
+	"context"
+	"encoding/json"
+	"io"
+	"path"
+	"sort"
+
+	"github.com/containerd/containerd/content"
+	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/containerd/images"
+	"github.com/containerd/containerd/platforms"
+	digest "github.com/opencontainers/go-digest"
+	ocispecs "github.com/opencontainers/image-spec/specs-go"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+)
+
+type exportOptions struct {
+	manifests          []ocispec.Descriptor
+	platform           platforms.MatchComparer
+	allPlatforms       bool
+	skipDockerManifest bool
+}
+
+// ExportOpt defines options for configuring exported descriptors
+type ExportOpt func(context.Context, *exportOptions) error
+
+// WithPlatform defines the platform used to filter manifest list entries
+// when not exporting all platforms.
+// Additionally, the platform is used to resolve image configs for
+// Docker v1.1, v1.2 format compatibility.
+func WithPlatform(p platforms.MatchComparer) ExportOpt {
+	return func(ctx context.Context, o *exportOptions) error {
+		o.platform = p
+		return nil
+	}
+}
+
+// WithAllPlatforms exports all manifests from a manifest list.
+// Missing content will fail the export.
+func WithAllPlatforms() ExportOpt {
+	return func(ctx context.Context, o *exportOptions) error {
+		o.allPlatforms = true
+		return nil
+	}
+}
+
+// WithSkipDockerManifest skips creation of the Docker compatible
+// manifest.json file.
+func WithSkipDockerManifest() ExportOpt {
+	return func(ctx context.Context, o *exportOptions) error {
+		o.skipDockerManifest = true
+		return nil
+	}
+}
+
+// WithImage adds the provided image to the exported archive.
+func WithImage(is images.Store, name string) ExportOpt {
+	return func(ctx context.Context, o *exportOptions) error {
+		img, err := is.Get(ctx, name)
+		if err != nil {
+			return err
+		}
+
+		img.Target.Annotations = addNameAnnotation(name, img.Target.Annotations)
+		o.manifests = append(o.manifests, img.Target)
+
+		return nil
+	}
+}
+
+// WithManifest adds a manifest to the exported archive.
+// When names are given they will be set on the manifest in the
+// exported archive, creating an index record for each name.
+// When no names are provided, it is up to the caller to put a name
+// annotation on the manifest descriptor if needed.
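+//
+// A combined sketch (editor's illustration; cs is a content.Provider, w is
+// an io.Writer, and the image reference is an assumption):
+//
+//	err := Export(ctx, cs, w,
+//		WithPlatform(platforms.Default()),
+//		WithManifest(manifestDesc, "docker.io/library/alpine:latest"))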
+func WithManifest(manifest ocispec.Descriptor, names ...string) ExportOpt { + return func(ctx context.Context, o *exportOptions) error { + if len(names) == 0 { + o.manifests = append(o.manifests, manifest) + } + for _, name := range names { + mc := manifest + mc.Annotations = addNameAnnotation(name, manifest.Annotations) + o.manifests = append(o.manifests, mc) + } + + return nil + } +} + +func addNameAnnotation(name string, base map[string]string) map[string]string { + annotations := map[string]string{} + for k, v := range base { + annotations[k] = v + } + + annotations[images.AnnotationImageName] = name + annotations[ocispec.AnnotationRefName] = ociReferenceName(name) + + return annotations +} + +// Export implements Exporter. +func Export(ctx context.Context, store content.Provider, writer io.Writer, opts ...ExportOpt) error { + var eo exportOptions + for _, opt := range opts { + if err := opt(ctx, &eo); err != nil { + return err + } + } + + records := []tarRecord{ + ociLayoutFile(""), + ociIndexRecord(eo.manifests), + } + + algorithms := map[string]struct{}{} + dManifests := map[digest.Digest]*exportManifest{} + resolvedIndex := map[digest.Digest]digest.Digest{} + for _, desc := range eo.manifests { + switch desc.MediaType { + case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest: + mt, ok := dManifests[desc.Digest] + if !ok { + // TODO(containerd): Skip if already added + r, err := getRecords(ctx, store, desc, algorithms) + if err != nil { + return err + } + records = append(records, r...) + + mt = &exportManifest{ + manifest: desc, + } + dManifests[desc.Digest] = mt + } + + name := desc.Annotations[images.AnnotationImageName] + if name != "" && !eo.skipDockerManifest { + mt.names = append(mt.names, name) + } + case images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex: + d, ok := resolvedIndex[desc.Digest] + if !ok { + records = append(records, blobRecord(store, desc)) + + p, err := content.ReadBlob(ctx, store, desc) + if err != nil { + return err + } + + var index ocispec.Index + if err := json.Unmarshal(p, &index); err != nil { + return err + } + + var manifests []ocispec.Descriptor + for _, m := range index.Manifests { + if eo.platform != nil { + if m.Platform == nil || eo.platform.Match(*m.Platform) { + manifests = append(manifests, m) + } else if !eo.allPlatforms { + continue + } + } + + r, err := getRecords(ctx, store, m, algorithms) + if err != nil { + return err + } + + records = append(records, r...) 
+ } + + if !eo.skipDockerManifest { + if len(manifests) >= 1 { + if len(manifests) > 1 { + sort.SliceStable(manifests, func(i, j int) bool { + if manifests[i].Platform == nil { + return false + } + if manifests[j].Platform == nil { + return true + } + return eo.platform.Less(*manifests[i].Platform, *manifests[j].Platform) + }) + } + d = manifests[0].Digest + dManifests[d] = &exportManifest{ + manifest: manifests[0], + } + } else if eo.platform != nil { + return errors.Wrap(errdefs.ErrNotFound, "no manifest found for platform") + } + } + resolvedIndex[desc.Digest] = d + } + if d != "" { + if name := desc.Annotations[images.AnnotationImageName]; name != "" { + mt := dManifests[d] + mt.names = append(mt.names, name) + } + + } + default: + return errors.Wrap(errdefs.ErrInvalidArgument, "only manifests may be exported") + } + } + + if len(dManifests) > 0 { + tr, err := manifestsRecord(ctx, store, dManifests) + if err != nil { + return errors.Wrap(err, "unable to create manifests file") + } + + records = append(records, tr) + } + + if len(algorithms) > 0 { + records = append(records, directoryRecord("blobs/", 0755)) + for alg := range algorithms { + records = append(records, directoryRecord("blobs/"+alg+"/", 0755)) + } + } + + tw := tar.NewWriter(writer) + defer tw.Close() + return writeTar(ctx, tw, records) +} + +func getRecords(ctx context.Context, store content.Provider, desc ocispec.Descriptor, algorithms map[string]struct{}) ([]tarRecord, error) { + var records []tarRecord + exportHandler := func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + records = append(records, blobRecord(store, desc)) + algorithms[desc.Digest.Algorithm().String()] = struct{}{} + return nil, nil + } + + childrenHandler := images.ChildrenHandler(store) + + handlers := images.Handlers( + childrenHandler, + images.HandlerFunc(exportHandler), + ) + + // Walk sequentially since the number of fetches is likely one and doing in + // parallel requires locking the export handler + if err := images.Walk(ctx, handlers, desc); err != nil { + return nil, err + } + + return records, nil +} + +type tarRecord struct { + Header *tar.Header + CopyTo func(context.Context, io.Writer) (int64, error) +} + +func blobRecord(cs content.Provider, desc ocispec.Descriptor) tarRecord { + path := path.Join("blobs", desc.Digest.Algorithm().String(), desc.Digest.Encoded()) + return tarRecord{ + Header: &tar.Header{ + Name: path, + Mode: 0444, + Size: desc.Size, + Typeflag: tar.TypeReg, + }, + CopyTo: func(ctx context.Context, w io.Writer) (int64, error) { + r, err := cs.ReaderAt(ctx, desc) + if err != nil { + return 0, errors.Wrap(err, "failed to get reader") + } + defer r.Close() + + // Verify digest + dgstr := desc.Digest.Algorithm().Digester() + + n, err := io.Copy(io.MultiWriter(w, dgstr.Hash()), content.NewReader(r)) + if err != nil { + return 0, errors.Wrap(err, "failed to copy to tar") + } + if dgstr.Digest() != desc.Digest { + return 0, errors.Errorf("unexpected digest %s copied", dgstr.Digest()) + } + return n, nil + }, + } +} + +func directoryRecord(name string, mode int64) tarRecord { + return tarRecord{ + Header: &tar.Header{ + Name: name, + Mode: mode, + Typeflag: tar.TypeDir, + }, + } +} + +func ociLayoutFile(version string) tarRecord { + if version == "" { + version = ocispec.ImageLayoutVersion + } + layout := ocispec.ImageLayout{ + Version: version, + } + + b, err := json.Marshal(layout) + if err != nil { + panic(err) + } + + return tarRecord{ + Header: &tar.Header{ + Name: ocispec.ImageLayoutFile, + 
Mode: 0444, + Size: int64(len(b)), + Typeflag: tar.TypeReg, + }, + CopyTo: func(ctx context.Context, w io.Writer) (int64, error) { + n, err := w.Write(b) + return int64(n), err + }, + } + +} + +func ociIndexRecord(manifests []ocispec.Descriptor) tarRecord { + index := ocispec.Index{ + Versioned: ocispecs.Versioned{ + SchemaVersion: 2, + }, + Manifests: manifests, + } + + b, err := json.Marshal(index) + if err != nil { + panic(err) + } + + return tarRecord{ + Header: &tar.Header{ + Name: "index.json", + Mode: 0644, + Size: int64(len(b)), + Typeflag: tar.TypeReg, + }, + CopyTo: func(ctx context.Context, w io.Writer) (int64, error) { + n, err := w.Write(b) + return int64(n), err + }, + } +} + +type exportManifest struct { + manifest ocispec.Descriptor + names []string +} + +func manifestsRecord(ctx context.Context, store content.Provider, manifests map[digest.Digest]*exportManifest) (tarRecord, error) { + mfsts := make([]struct { + Config string + RepoTags []string + Layers []string + }, len(manifests)) + + var i int + for _, m := range manifests { + p, err := content.ReadBlob(ctx, store, m.manifest) + if err != nil { + return tarRecord{}, err + } + + var manifest ocispec.Manifest + if err := json.Unmarshal(p, &manifest); err != nil { + return tarRecord{}, err + } + if err := manifest.Config.Digest.Validate(); err != nil { + return tarRecord{}, errors.Wrapf(err, "invalid manifest %q", m.manifest.Digest) + } + + dgst := manifest.Config.Digest + mfsts[i].Config = path.Join("blobs", dgst.Algorithm().String(), dgst.Encoded()) + for _, l := range manifest.Layers { + path := path.Join("blobs", l.Digest.Algorithm().String(), l.Digest.Encoded()) + mfsts[i].Layers = append(mfsts[i].Layers, path) + } + + for _, name := range m.names { + nname, err := familiarizeReference(name) + if err != nil { + return tarRecord{}, err + } + + mfsts[i].RepoTags = append(mfsts[i].RepoTags, nname) + } + + i++ + } + + b, err := json.Marshal(mfsts) + if err != nil { + return tarRecord{}, err + } + + return tarRecord{ + Header: &tar.Header{ + Name: "manifest.json", + Mode: 0644, + Size: int64(len(b)), + Typeflag: tar.TypeReg, + }, + CopyTo: func(ctx context.Context, w io.Writer) (int64, error) { + n, err := w.Write(b) + return int64(n), err + }, + }, nil +} + +func writeTar(ctx context.Context, tw *tar.Writer, records []tarRecord) error { + sort.Slice(records, func(i, j int) bool { + return records[i].Header.Name < records[j].Header.Name + }) + + var last string + for _, record := range records { + if record.Header.Name == last { + continue + } + last = record.Header.Name + if err := tw.WriteHeader(record.Header); err != nil { + return err + } + if record.CopyTo != nil { + n, err := record.CopyTo(ctx, tw) + if err != nil { + return err + } + if n != record.Header.Size { + return errors.Errorf("unexpected copy size for %s", record.Header.Name) + } + } else if record.Header.Size > 0 { + return errors.Errorf("no content to write to record with non-zero size for %s", record.Header.Name) + } + } + return nil +} diff --git a/vendor/github.com/containerd/containerd/images/archive/importer.go b/vendor/github.com/containerd/containerd/images/archive/importer.go new file mode 100644 index 00000000..2d046589 --- /dev/null +++ b/vendor/github.com/containerd/containerd/images/archive/importer.go @@ -0,0 +1,371 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// Package archive provides a Docker and OCI compatible importer +package archive + +import ( + "archive/tar" + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "path" + + "github.com/containerd/containerd/archive/compression" + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/log" + digest "github.com/opencontainers/go-digest" + specs "github.com/opencontainers/image-spec/specs-go" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +type importOpts struct { + compress bool +} + +// ImportOpt is an option for importing an OCI index +type ImportOpt func(*importOpts) error + +// WithImportCompression compresses uncompressed layers on import. +// This is used for import formats which do not include the manifest. +func WithImportCompression() ImportOpt { + return func(io *importOpts) error { + io.compress = true + return nil + } +} + +// ImportIndex imports an index from a tar archive image bundle +// - implements Docker v1.1, v1.2 and OCI v1. +// - prefers OCI v1 when provided +// - creates OCI index for Docker formats +// - normalizes Docker references and adds as OCI ref name +// e.g. alpine:latest -> docker.io/library/alpine:latest +// - existing OCI reference names are untouched +func ImportIndex(ctx context.Context, store content.Store, reader io.Reader, opts ...ImportOpt) (ocispec.Descriptor, error) { + var ( + tr = tar.NewReader(reader) + + ociLayout ocispec.ImageLayout + mfsts []struct { + Config string + RepoTags []string + Layers []string + } + symlinks = make(map[string]string) + blobs = make(map[string]ocispec.Descriptor) + iopts importOpts + ) + + for _, o := range opts { + if err := o(&iopts); err != nil { + return ocispec.Descriptor{}, err + } + } + + for { + hdr, err := tr.Next() + if err == io.EOF { + break + } + if err != nil { + return ocispec.Descriptor{}, err + } + if hdr.Typeflag == tar.TypeSymlink { + symlinks[hdr.Name] = path.Join(path.Dir(hdr.Name), hdr.Linkname) + } + + if hdr.Typeflag != tar.TypeReg && hdr.Typeflag != tar.TypeRegA { + if hdr.Typeflag != tar.TypeDir { + log.G(ctx).WithField("file", hdr.Name).Debug("file type ignored") + } + continue + } + + hdrName := path.Clean(hdr.Name) + if hdrName == ocispec.ImageLayoutFile { + if err = onUntarJSON(tr, &ociLayout); err != nil { + return ocispec.Descriptor{}, errors.Wrapf(err, "untar oci layout %q", hdr.Name) + } + } else if hdrName == "manifest.json" { + if err = onUntarJSON(tr, &mfsts); err != nil { + return ocispec.Descriptor{}, errors.Wrapf(err, "untar manifest %q", hdr.Name) + } + } else { + dgst, err := onUntarBlob(ctx, tr, store, hdr.Size, "tar-"+hdrName) + if err != nil { + return ocispec.Descriptor{}, errors.Wrapf(err, "failed to ingest %q", hdr.Name) + } + + blobs[hdrName] = ocispec.Descriptor{ + Digest: dgst, + Size: hdr.Size, + } + } + } + + // If OCI layout was given, interpret the tar as an OCI layout. + // When not provided, the layout of the tar will be interpreted + // as Docker v1.1 or v1.2. 
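+	// (Editor's note: the two paths are mutually exclusive; a valid OCI
+	// layout returns early below, and manifest.json is consulted only
+	// otherwise.)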
+ if ociLayout.Version != "" { + if ociLayout.Version != ocispec.ImageLayoutVersion { + return ocispec.Descriptor{}, errors.Errorf("unsupported OCI version %s", ociLayout.Version) + } + + idx, ok := blobs["index.json"] + if !ok { + return ocispec.Descriptor{}, errors.Errorf("missing index.json in OCI layout %s", ocispec.ImageLayoutVersion) + } + + idx.MediaType = ocispec.MediaTypeImageIndex + return idx, nil + } + + if mfsts == nil { + return ocispec.Descriptor{}, errors.Errorf("unrecognized image format") + } + + for name, linkname := range symlinks { + desc, ok := blobs[linkname] + if !ok { + return ocispec.Descriptor{}, errors.Errorf("no target for symlink layer from %q to %q", name, linkname) + } + blobs[name] = desc + } + + idx := ocispec.Index{ + Versioned: specs.Versioned{ + SchemaVersion: 2, + }, + } + for _, mfst := range mfsts { + config, ok := blobs[mfst.Config] + if !ok { + return ocispec.Descriptor{}, errors.Errorf("image config %q not found", mfst.Config) + } + config.MediaType = images.MediaTypeDockerSchema2Config + + layers, err := resolveLayers(ctx, store, mfst.Layers, blobs, iopts.compress) + if err != nil { + return ocispec.Descriptor{}, errors.Wrap(err, "failed to resolve layers") + } + + manifest := struct { + SchemaVersion int `json:"schemaVersion"` + MediaType string `json:"mediaType"` + Config ocispec.Descriptor `json:"config"` + Layers []ocispec.Descriptor `json:"layers"` + }{ + SchemaVersion: 2, + MediaType: images.MediaTypeDockerSchema2Manifest, + Config: config, + Layers: layers, + } + + desc, err := writeManifest(ctx, store, manifest, manifest.MediaType) + if err != nil { + return ocispec.Descriptor{}, errors.Wrap(err, "write docker manifest") + } + + platforms, err := images.Platforms(ctx, store, desc) + if err != nil { + return ocispec.Descriptor{}, errors.Wrap(err, "unable to resolve platform") + } + if len(platforms) > 0 { + // Only one platform can be resolved from a non-index manifest. + // The platform can only come from the config included above; + // if the config has no platform it can be safely omitted.
+ desc.Platform = &platforms[0] + } + + if len(mfst.RepoTags) == 0 { + idx.Manifests = append(idx.Manifests, desc) + } else { + // Add descriptor per tag + for _, ref := range mfst.RepoTags { + mfstdesc := desc + + normalized, err := normalizeReference(ref) + if err != nil { + return ocispec.Descriptor{}, err + } + + mfstdesc.Annotations = map[string]string{ + images.AnnotationImageName: normalized, + ocispec.AnnotationRefName: ociReferenceName(normalized), + } + + idx.Manifests = append(idx.Manifests, mfstdesc) + } + } + } + + return writeManifest(ctx, store, idx, ocispec.MediaTypeImageIndex) +} + +func onUntarJSON(r io.Reader, j interface{}) error { + b, err := ioutil.ReadAll(r) + if err != nil { + return err + } + return json.Unmarshal(b, j) +} + +func onUntarBlob(ctx context.Context, r io.Reader, store content.Ingester, size int64, ref string) (digest.Digest, error) { + dgstr := digest.Canonical.Digester() + + if err := content.WriteBlob(ctx, store, ref, io.TeeReader(r, dgstr.Hash()), ocispec.Descriptor{Size: size}); err != nil { + return "", err + } + + return dgstr.Digest(), nil +} + +func resolveLayers(ctx context.Context, store content.Store, layerFiles []string, blobs map[string]ocispec.Descriptor, compress bool) ([]ocispec.Descriptor, error) { + layers := make([]ocispec.Descriptor, len(layerFiles)) + descs := map[digest.Digest]*ocispec.Descriptor{} + filters := []string{} + for i, f := range layerFiles { + desc, ok := blobs[f] + if !ok { + return nil, errors.Errorf("layer %q not found", f) + } + layers[i] = desc + descs[desc.Digest] = &layers[i] + filters = append(filters, "labels.\"containerd.io/uncompressed\"=="+desc.Digest.String()) + } + + err := store.Walk(ctx, func(info content.Info) error { + dgst, ok := info.Labels["containerd.io/uncompressed"] + if ok { + desc := descs[digest.Digest(dgst)] + if desc != nil { + desc.MediaType = images.MediaTypeDockerSchema2LayerGzip + desc.Digest = info.Digest + desc.Size = info.Size + } + } + return nil + }, filters...) 
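+ // At this point, any layer whose digest matched an existing blob's "containerd.io/uncompressed" label has been rewritten to reference that compressed blob; layers still missing a media type are resolved below by sniffing their content.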
+ if err != nil { + return nil, errors.Wrap(err, "failure checking for compressed blobs") + } + + for i, desc := range layers { + if desc.MediaType != "" { + continue + } + // Open blob, resolve media type + ra, err := store.ReaderAt(ctx, desc) + if err != nil { + return nil, errors.Wrapf(err, "failed to open %q (%s)", layerFiles[i], desc.Digest) + } + s, err := compression.DecompressStream(content.NewReader(ra)) + if err != nil { + return nil, errors.Wrapf(err, "failed to detect compression for %q", layerFiles[i]) + } + if s.GetCompression() == compression.Uncompressed { + if compress { + ref := fmt.Sprintf("compress-blob-%s-%s", desc.Digest.Algorithm().String(), desc.Digest.Encoded()) + labels := map[string]string{ + "containerd.io/uncompressed": desc.Digest.String(), + } + layers[i], err = compressBlob(ctx, store, s, ref, content.WithLabels(labels)) + if err != nil { + s.Close() + return nil, err + } + layers[i].MediaType = images.MediaTypeDockerSchema2LayerGzip + } else { + layers[i].MediaType = images.MediaTypeDockerSchema2Layer + } + } else { + layers[i].MediaType = images.MediaTypeDockerSchema2LayerGzip + } + s.Close() + + } + return layers, nil +} + +func compressBlob(ctx context.Context, cs content.Store, r io.Reader, ref string, opts ...content.Opt) (desc ocispec.Descriptor, err error) { + w, err := content.OpenWriter(ctx, cs, content.WithRef(ref)) + if err != nil { + return ocispec.Descriptor{}, errors.Wrap(err, "failed to open writer") + } + + defer func() { + w.Close() + if err != nil { + cs.Abort(ctx, ref) + } + }() + if err := w.Truncate(0); err != nil { + return ocispec.Descriptor{}, errors.Wrap(err, "failed to truncate writer") + } + + cw, err := compression.CompressStream(w, compression.Gzip) + if err != nil { + return ocispec.Descriptor{}, err + } + + if _, err := io.Copy(cw, r); err != nil { + return ocispec.Descriptor{}, err + } + if err := cw.Close(); err != nil { + return ocispec.Descriptor{}, err + } + + cst, err := w.Status() + if err != nil { + return ocispec.Descriptor{}, errors.Wrap(err, "failed to get writer status") + } + + desc.Digest = w.Digest() + desc.Size = cst.Offset + + if err := w.Commit(ctx, desc.Size, desc.Digest, opts...); err != nil { + if !errdefs.IsAlreadyExists(err) { + return ocispec.Descriptor{}, errors.Wrap(err, "failed to commit") + } + } + + return desc, nil +} + +func writeManifest(ctx context.Context, cs content.Ingester, manifest interface{}, mediaType string) (ocispec.Descriptor, error) { + manifestBytes, err := json.Marshal(manifest) + if err != nil { + return ocispec.Descriptor{}, err + } + + desc := ocispec.Descriptor{ + MediaType: mediaType, + Digest: digest.FromBytes(manifestBytes), + Size: int64(len(manifestBytes)), + } + if err := content.WriteBlob(ctx, cs, "manifest-"+desc.Digest.String(), bytes.NewReader(manifestBytes), desc); err != nil { + return ocispec.Descriptor{}, err + } + + return desc, nil +} diff --git a/vendor/github.com/containerd/containerd/images/archive/reference.go b/vendor/github.com/containerd/containerd/images/archive/reference.go new file mode 100644 index 00000000..ce9fe98f --- /dev/null +++ b/vendor/github.com/containerd/containerd/images/archive/reference.go @@ -0,0 +1,112 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package archive + +import ( + "strings" + + "github.com/containerd/containerd/reference" + distref "github.com/containerd/containerd/reference/docker" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +// FilterRefPrefix restricts references to having the given image +// prefix. Tag-only references will have the prefix prepended. +func FilterRefPrefix(image string) func(string) string { + return refTranslator(image, true) +} + +// AddRefPrefix prepends the given image prefix to tag-only references, +// while leaving full references unmodified. +func AddRefPrefix(image string) func(string) string { + return refTranslator(image, false) +} + +// refTranslator creates a reference which only has a tag or verifies +// a full reference. +func refTranslator(image string, checkPrefix bool) func(string) string { + return func(ref string) string { + // Check if ref is full reference + if strings.ContainsAny(ref, "/:@") { + // If not prefixed, don't include image + if checkPrefix && !isImagePrefix(ref, image) { + return "" + } + return ref + } + return image + ":" + ref + } +} + +func isImagePrefix(s, prefix string) bool { + if !strings.HasPrefix(s, prefix) { + return false + } + if len(s) > len(prefix) { + switch s[len(prefix)] { + case '/', ':', '@': + // Prevent matching partial namespaces + default: + return false + } + } + return true +} + +func normalizeReference(ref string) (string, error) { + // TODO: Replace this function to not depend on reference package + normalized, err := distref.ParseDockerRef(ref) + if err != nil { + return "", errors.Wrapf(err, "normalize image ref %q", ref) + } + + return normalized.String(), nil +} + +func familiarizeReference(ref string) (string, error) { + named, err := distref.ParseNormalizedNamed(ref) + if err != nil { + return "", errors.Wrapf(err, "failed to parse %q", ref) + } + named = distref.TagNameOnly(named) + + return distref.FamiliarString(named), nil +} + +func ociReferenceName(name string) string { + // OCI defines the reference name as only a tag excluding the + // repository. The containerd annotation contains the full image name + // since the tag is insufficient for correctly naming and referring to an + // image + var ociRef string + if spec, err := reference.Parse(name); err == nil { + ociRef = spec.Object + } else { + ociRef = name + } + + return ociRef +} + +// DigestTranslator creates a digest reference by adding the +// digest to an image name +func DigestTranslator(prefix string) func(digest.Digest) string { + return func(dgst digest.Digest) string { + return prefix + "@" + dgst.String() + } +} diff --git a/vendor/github.com/containerd/containerd/images/handlers.go b/vendor/github.com/containerd/containerd/images/handlers.go new file mode 100644 index 00000000..8eb86a3f --- /dev/null +++ b/vendor/github.com/containerd/containerd/images/handlers.go @@ -0,0 +1,263 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package images + +import ( + "context" + "fmt" + "sort" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/platforms" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "golang.org/x/sync/errgroup" + "golang.org/x/sync/semaphore" +) + +var ( + // ErrSkipDesc is used to skip processing of a descriptor and + // its descendants. + ErrSkipDesc = fmt.Errorf("skip descriptor") + + // ErrStopHandler is used to signify that the descriptor + // has been handled and should not be handled further. + // This applies only to a single descriptor in a handler + // chain and does not apply to descendant descriptors. + ErrStopHandler = fmt.Errorf("stop handler") +) + +// Handler handles image manifests +type Handler interface { + Handle(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error) +} + +// HandlerFunc function implementing the Handler interface +type HandlerFunc func(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error) + +// Handle image manifests +func (fn HandlerFunc) Handle(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error) { + return fn(ctx, desc) +} + +// Handlers returns a handler that will run the handlers in sequence. +// +// A handler may return `ErrStopHandler` to stop calling additional handlers. +func Handlers(handlers ...Handler) HandlerFunc { + return func(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error) { + var children []ocispec.Descriptor + for _, handler := range handlers { + ch, err := handler.Handle(ctx, desc) + if err != nil { + if errors.Cause(err) == ErrStopHandler { + break + } + return nil, err + } + + children = append(children, ch...) + } + + return children, nil + } +} + +// Walk the resources of an image and call the handler for each. If the handler +// decodes the sub-resources for each image, they will be walked as well. +// +// This differs from dispatch in that each sibling resource is considered +// synchronously. +func Walk(ctx context.Context, handler Handler, descs ...ocispec.Descriptor) error { + for _, desc := range descs { + + children, err := handler.Handle(ctx, desc) + if err != nil { + if errors.Cause(err) == ErrSkipDesc { + continue // don't traverse the children. + } + return err + } + + if len(children) > 0 { + if err := Walk(ctx, handler, children...); err != nil { + return err + } + } + } + + return nil +} + +// Dispatch runs the provided handler for content specified by the descriptors. +// If the handler decodes subresources, they will be visited as well. +// +// Handlers for siblings are run in parallel on the provided descriptors. A +// handler may return `ErrSkipDesc` to signal to the dispatcher to not traverse +// any children. +// +// A concurrency limiter can be passed in to limit the number of concurrent +// handlers running. When limiter is nil, there is no limit. +// +// Typically, this function will be used with `FetchHandler`, often composed +// with other handlers.
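+// +// A minimal wiring sketch (illustrative only; fetchHandler and roots are assumed to exist as a HandlerFunc and a slice of root descriptors): +// +// limiter := semaphore.NewWeighted(8) // at most 8 concurrent handlers +// err := images.Dispatch(ctx, fetchHandler, limiter, roots...)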
+// +// If any handler returns an error, the dispatch session will be canceled. +func Dispatch(ctx context.Context, handler Handler, limiter *semaphore.Weighted, descs ...ocispec.Descriptor) error { + eg, ctx2 := errgroup.WithContext(ctx) + for _, desc := range descs { + desc := desc + + if limiter != nil { + if err := limiter.Acquire(ctx, 1); err != nil { + return err + } + } + + eg.Go(func() error { + desc := desc + + children, err := handler.Handle(ctx2, desc) + if limiter != nil { + limiter.Release(1) + } + if err != nil { + if errors.Cause(err) == ErrSkipDesc { + return nil // don't traverse the children. + } + return err + } + + if len(children) > 0 { + return Dispatch(ctx2, handler, limiter, children...) + } + + return nil + }) + } + + return eg.Wait() +} + +// ChildrenHandler decodes well-known manifest types and returns their children. +// +// This is useful for supporting recursive fetch and other use cases where you +// want to do a full walk of resources. +// +// One can also replace this with another implementation to allow descending of +// arbitrary types. +func ChildrenHandler(provider content.Provider) HandlerFunc { + return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + return Children(ctx, provider, desc) + } +} + +// SetChildrenLabels is a handler wrapper which sets labels for the content on +// the children returned by the handler and passes through the children. +// Must follow a handler that returns the children to be labeled. +func SetChildrenLabels(manager content.Manager, f HandlerFunc) HandlerFunc { + return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + children, err := f(ctx, desc) + if err != nil { + return children, err + } + + if len(children) > 0 { + info := content.Info{ + Digest: desc.Digest, + Labels: map[string]string{}, + } + fields := []string{} + for i, ch := range children { + info.Labels[fmt.Sprintf("containerd.io/gc.ref.content.%d", i)] = ch.Digest.String() + fields = append(fields, fmt.Sprintf("labels.containerd.io/gc.ref.content.%d", i)) + } + + _, err := manager.Update(ctx, info, fields...) + if err != nil { + return nil, err + } + } + + return children, err + } +} + +// FilterPlatforms is a handler wrapper which limits the descriptors returned +// based on matching the specified platform matcher. +func FilterPlatforms(f HandlerFunc, m platforms.Matcher) HandlerFunc { + return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + children, err := f(ctx, desc) + if err != nil { + return children, err + } + + var descs []ocispec.Descriptor + + if m == nil { + descs = children + } else { + for _, d := range children { + if d.Platform == nil || m.Match(*d.Platform) { + descs = append(descs, d) + } + } + } + + return descs, nil + } +} + +// LimitManifests is a handler wrapper which filters the manifest descriptors +// returned using the provided platform. +// The results will be ordered according to the comparison operator and +// use the ordering in the manifests for equal matches. +// A limit of 0 or less is considered no limit. +// A not found error is returned if no manifest is matched. 
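+// +// A usage sketch, assuming provider is a content.Provider and target an image or index descriptor: limiting a walk to the single best manifest for the local platform: +// +// matcher := platforms.Default() +// handler := images.LimitManifests(images.ChildrenHandler(provider), matcher, 1) +// err := images.Walk(ctx, handler, target)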
+func LimitManifests(f HandlerFunc, m platforms.MatchComparer, n int) HandlerFunc { + return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + children, err := f(ctx, desc) + if err != nil { + return children, err + } + + switch desc.MediaType { + case ocispec.MediaTypeImageIndex, MediaTypeDockerSchema2ManifestList: + sort.SliceStable(children, func(i, j int) bool { + if children[i].Platform == nil { + return false + } + if children[j].Platform == nil { + return true + } + return m.Less(*children[i].Platform, *children[j].Platform) + }) + + if n > 0 { + if len(children) == 0 { + return children, errors.Wrap(errdefs.ErrNotFound, "no match for platform in manifest") + } + if len(children) > n { + children = children[:n] + } + } + default: + // only limit manifests from an index + } + return children, nil + } +} diff --git a/vendor/github.com/containerd/containerd/images/image.go b/vendor/github.com/containerd/containerd/images/image.go new file mode 100644 index 00000000..ee5778d2 --- /dev/null +++ b/vendor/github.com/containerd/containerd/images/image.go @@ -0,0 +1,386 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package images + +import ( + "context" + "encoding/json" + "sort" + "time" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/platforms" + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +// Image provides the model for how containerd views container images. +type Image struct { + // Name of the image. + // + // To be pulled, it must be a reference compatible with resolvers. + // + // This field is required. + Name string + + // Labels provide runtime decoration for the image record. + // + // There is no default behavior for how these labels are propagated. They + // only decorate the static metadata object. + // + // This field is optional. + Labels map[string]string + + // Target describes the root content for this image. Typically, this is + // a manifest, index or manifest list. + Target ocispec.Descriptor + + CreatedAt, UpdatedAt time.Time +} + +// DeleteOptions provide options on image delete +type DeleteOptions struct { + Synchronous bool +} + +// DeleteOpt allows configuring a delete operation +type DeleteOpt func(context.Context, *DeleteOptions) error + +// SynchronousDelete is used to indicate that an image deletion and removal of +// the image resources should occur synchronously before returning a result. 
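+// For example (illustrative; st is assumed to be an images.Store): +// +// err := st.Delete(ctx, "docker.io/library/alpine:latest", images.SynchronousDelete())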
+func SynchronousDelete() DeleteOpt { + return func(ctx context.Context, o *DeleteOptions) error { + o.Synchronous = true + return nil + } +} + +// Store and interact with images +type Store interface { + Get(ctx context.Context, name string) (Image, error) + List(ctx context.Context, filters ...string) ([]Image, error) + Create(ctx context.Context, image Image) (Image, error) + + // Update will replace the data in the store with the provided image. If + // one or more fieldpaths are provided, only those fields will be updated. + Update(ctx context.Context, image Image, fieldpaths ...string) (Image, error) + + Delete(ctx context.Context, name string, opts ...DeleteOpt) error +} + +// TODO(stevvooe): Many of these functions make strong platform assumptions, +// which are untrue in a lot of cases. More refactoring must be done here to +// make this work in all cases. + +// Config resolves the image configuration descriptor. +// +// The caller can then use the descriptor to resolve and process the +// configuration of the image. +func (image *Image) Config(ctx context.Context, provider content.Provider, platform platforms.MatchComparer) (ocispec.Descriptor, error) { + return Config(ctx, provider, image.Target, platform) +} + +// RootFS returns the unpacked diffids that make up an image's rootfs. +// +// These are used to verify that a set of layers unpacked to the expected +// values. +func (image *Image) RootFS(ctx context.Context, provider content.Provider, platform platforms.MatchComparer) ([]digest.Digest, error) { + desc, err := image.Config(ctx, provider, platform) + if err != nil { + return nil, err + } + return RootFS(ctx, provider, desc) +} + +// Size returns the total size of an image's packed resources. +func (image *Image) Size(ctx context.Context, provider content.Provider, platform platforms.MatchComparer) (int64, error) { + var size int64 + return size, Walk(ctx, Handlers(HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + if desc.Size < 0 { + return nil, errors.Errorf("invalid size %v in %v (%v)", desc.Size, desc.Digest, desc.MediaType) + } + size += desc.Size + return nil, nil + }), LimitManifests(FilterPlatforms(ChildrenHandler(provider), platform), platform, 1)), image.Target) +} + +type platformManifest struct { + p *ocispec.Platform + m *ocispec.Manifest +} + +// Manifest resolves a manifest from the image for the given platform. +// +// When a manifest descriptor inside of a manifest index does not have +// a platform defined, the platform from the image config is considered. +// +// If the descriptor points to a non-index manifest, then the manifest is +// unmarshalled and returned without considering the platform inside of the +// config. +// +// TODO(stevvooe): This violates the current platform agnostic approach to this +// package by returning a specific manifest type.
We'll need to refactor this +// to return a manifest descriptor or decide that we want to bring the API in +// this direction because this abstraction is not needed. +func Manifest(ctx context.Context, provider content.Provider, image ocispec.Descriptor, platform platforms.MatchComparer) (ocispec.Manifest, error) { + var ( + limit = 1 + m []platformManifest + wasIndex bool + ) + + if err := Walk(ctx, HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + switch desc.MediaType { + case MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest: + p, err := content.ReadBlob(ctx, provider, desc) + if err != nil { + return nil, err + } + + var manifest ocispec.Manifest + if err := json.Unmarshal(p, &manifest); err != nil { + return nil, err + } + + if desc.Digest != image.Digest && platform != nil { + if desc.Platform != nil && !platform.Match(*desc.Platform) { + return nil, nil + } + + if desc.Platform == nil { + p, err := content.ReadBlob(ctx, provider, manifest.Config) + if err != nil { + return nil, err + } + + var image ocispec.Image + if err := json.Unmarshal(p, &image); err != nil { + return nil, err + } + + if !platform.Match(platforms.Normalize(ocispec.Platform{OS: image.OS, Architecture: image.Architecture})) { + return nil, nil + } + + } + } + + m = append(m, platformManifest{ + p: desc.Platform, + m: &manifest, + }) + + return nil, nil + case MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex: + p, err := content.ReadBlob(ctx, provider, desc) + if err != nil { + return nil, err + } + + var idx ocispec.Index + if err := json.Unmarshal(p, &idx); err != nil { + return nil, err + } + + if platform == nil { + return idx.Manifests, nil + } + + var descs []ocispec.Descriptor + for _, d := range idx.Manifests { + if d.Platform == nil || platform.Match(*d.Platform) { + descs = append(descs, d) + } + } + + sort.SliceStable(descs, func(i, j int) bool { + if descs[i].Platform == nil { + return false + } + if descs[j].Platform == nil { + return true + } + return platform.Less(*descs[i].Platform, *descs[j].Platform) + }) + + wasIndex = true + + if len(descs) > limit { + return descs[:limit], nil + } + return descs, nil + } + return nil, errors.Wrapf(errdefs.ErrNotFound, "unexpected media type %v for %v", desc.MediaType, desc.Digest) + }), image); err != nil { + return ocispec.Manifest{}, err + } + + if len(m) == 0 { + err := errors.Wrapf(errdefs.ErrNotFound, "manifest %v", image.Digest) + if wasIndex { + err = errors.Wrapf(errdefs.ErrNotFound, "no match for platform in manifest %v", image.Digest) + } + return ocispec.Manifest{}, err + } + return *m[0].m, nil +} + +// Config resolves the image configuration descriptor using a content provider +// to resolve child resources on the image. +// +// The caller can then use the descriptor to resolve and process the +// configuration of the image. +func Config(ctx context.Context, provider content.Provider, image ocispec.Descriptor, platform platforms.MatchComparer) (ocispec.Descriptor, error) { + manifest, err := Manifest(ctx, provider, image, platform) + if err != nil { + return ocispec.Descriptor{}, err + } + return manifest.Config, err +} + +// Platforms returns one or more platforms supported by the image.
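+// +// An illustrative call, assuming provider is a content.Provider and desc the image's target descriptor: +// +// specs, err := images.Platforms(ctx, provider, desc) +// // each entry can be rendered with platforms.Format(specs[i])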
+func Platforms(ctx context.Context, provider content.Provider, image ocispec.Descriptor) ([]ocispec.Platform, error) { + var platformSpecs []ocispec.Platform + return platformSpecs, Walk(ctx, Handlers(HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + if desc.Platform != nil { + platformSpecs = append(platformSpecs, *desc.Platform) + return nil, ErrSkipDesc + } + + switch desc.MediaType { + case MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig: + p, err := content.ReadBlob(ctx, provider, desc) + if err != nil { + return nil, err + } + + var image ocispec.Image + if err := json.Unmarshal(p, &image); err != nil { + return nil, err + } + + platformSpecs = append(platformSpecs, + platforms.Normalize(ocispec.Platform{OS: image.OS, Architecture: image.Architecture})) + } + return nil, nil + }), ChildrenHandler(provider)), image) +} + +// Check returns nil if all components of an image are available in the +// provider for the specified platform. +// +// If available is true, the caller can assume that required represents the +// complete set of content required for the image. +// +// missing will have the components that are part of required but not available +// in the provider. +// +// If there is a problem resolving content, an error will be returned. +func Check(ctx context.Context, provider content.Provider, image ocispec.Descriptor, platform platforms.MatchComparer) (available bool, required, present, missing []ocispec.Descriptor, err error) { + mfst, err := Manifest(ctx, provider, image, platform) + if err != nil { + if errdefs.IsNotFound(err) { + return false, []ocispec.Descriptor{image}, nil, []ocispec.Descriptor{image}, nil + } + + return false, nil, nil, nil, errors.Wrapf(err, "failed to check image %v", image.Digest) + } + + // TODO(stevvooe): It is possible that referenced components could have + // children, but this is rare. For now, we ignore this and only verify + // that manifest components are present. + required = append([]ocispec.Descriptor{mfst.Config}, mfst.Layers...) + + for _, desc := range required { + ra, err := provider.ReaderAt(ctx, desc) + if err != nil { + if errdefs.IsNotFound(err) { + missing = append(missing, desc) + continue + } else { + return false, nil, nil, nil, errors.Wrapf(err, "failed to check image %v", desc.Digest) + } + } + ra.Close() + present = append(present, desc) + + } + + return true, required, present, missing, nil +} + +// Children returns the immediate children of content described by the descriptor. +func Children(ctx context.Context, provider content.Provider, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + var descs []ocispec.Descriptor + switch desc.MediaType { + case MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest: + p, err := content.ReadBlob(ctx, provider, desc) + if err != nil { + return nil, err + } + + // TODO(stevvooe): We just assume oci manifest, for now. There may be + // subtle differences from the docker version. + var manifest ocispec.Manifest + if err := json.Unmarshal(p, &manifest); err != nil { + return nil, err + } + + descs = append(descs, manifest.Config) + descs = append(descs, manifest.Layers...) + case MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex: + p, err := content.ReadBlob(ctx, provider, desc) + if err != nil { + return nil, err + } + + var index ocispec.Index + if err := json.Unmarshal(p, &index); err != nil { + return nil, err + } + + descs = append(descs, index.Manifests...)
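+ // For an index, the children are the manifest descriptors exactly as listed, so callers observe them in index order.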
+ default: + if IsLayerType(desc.MediaType) || IsKnownConfig(desc.MediaType) { + // childless data types. + return nil, nil + } + log.G(ctx).Warnf("encountered unknown type %v; children may not be fetched", desc.MediaType) + } + + return descs, nil +} + +// RootFS returns the unpacked diffids that make up an image's rootfs. +// +// These are used to verify that a set of layers unpacked to the expected +// values. +func RootFS(ctx context.Context, provider content.Provider, configDesc ocispec.Descriptor) ([]digest.Digest, error) { + p, err := content.ReadBlob(ctx, provider, configDesc) + if err != nil { + return nil, err + } + + var config ocispec.Image + if err := json.Unmarshal(p, &config); err != nil { + return nil, err + } + return config.RootFS.DiffIDs, nil +} diff --git a/vendor/github.com/containerd/containerd/images/importexport.go b/vendor/github.com/containerd/containerd/images/importexport.go new file mode 100644 index 00000000..843adcad --- /dev/null +++ b/vendor/github.com/containerd/containerd/images/importexport.go @@ -0,0 +1,37 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package images + +import ( + "context" + "io" + + "github.com/containerd/containerd/content" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// Importer is the interface for image importer. +type Importer interface { + // Import imports an image from a tar stream. + Import(ctx context.Context, store content.Store, reader io.Reader) (ocispec.Descriptor, error) +} + +// Exporter is the interface for image exporter. +type Exporter interface { + // Export exports an image to a tar stream. + Export(ctx context.Context, store content.Provider, desc ocispec.Descriptor, writer io.Writer) error +} diff --git a/vendor/github.com/containerd/containerd/images/mediatypes.go b/vendor/github.com/containerd/containerd/images/mediatypes.go new file mode 100644 index 00000000..2f47b0e6 --- /dev/null +++ b/vendor/github.com/containerd/containerd/images/mediatypes.go @@ -0,0 +1,126 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package images + +import ( + "context" + "sort" + "strings" + + "github.com/containerd/containerd/errdefs" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// mediatype definitions for image components handled in containerd. +// +// oci components are generally referenced directly, although we may centralize +// here for clarity.
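+// +// Media types may carry "+"-separated suffixes: parseMediaTypes below splits, for example, "application/vnd.oci.image.layer.v1.tar+gzip" into the base type "application/vnd.oci.image.layer.v1.tar" and the sorted extension list ["gzip"], which is how DiffCompression recognizes gzip-compressed layers.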
+const ( + MediaTypeDockerSchema2Layer = "application/vnd.docker.image.rootfs.diff.tar" + MediaTypeDockerSchema2LayerForeign = "application/vnd.docker.image.rootfs.foreign.diff.tar" + MediaTypeDockerSchema2LayerGzip = "application/vnd.docker.image.rootfs.diff.tar.gzip" + MediaTypeDockerSchema2LayerForeignGzip = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip" + MediaTypeDockerSchema2Config = "application/vnd.docker.container.image.v1+json" + MediaTypeDockerSchema2Manifest = "application/vnd.docker.distribution.manifest.v2+json" + MediaTypeDockerSchema2ManifestList = "application/vnd.docker.distribution.manifest.list.v2+json" + // Checkpoint/Restore Media Types + MediaTypeContainerd1Checkpoint = "application/vnd.containerd.container.criu.checkpoint.criu.tar" + MediaTypeContainerd1CheckpointPreDump = "application/vnd.containerd.container.criu.checkpoint.predump.tar" + MediaTypeContainerd1Resource = "application/vnd.containerd.container.resource.tar" + MediaTypeContainerd1RW = "application/vnd.containerd.container.rw.tar" + MediaTypeContainerd1CheckpointConfig = "application/vnd.containerd.container.checkpoint.config.v1+proto" + MediaTypeContainerd1CheckpointOptions = "application/vnd.containerd.container.checkpoint.options.v1+proto" + MediaTypeContainerd1CheckpointRuntimeName = "application/vnd.containerd.container.checkpoint.runtime.name" + MediaTypeContainerd1CheckpointRuntimeOptions = "application/vnd.containerd.container.checkpoint.runtime.options+proto" + // Legacy Docker schema1 manifest + MediaTypeDockerSchema1Manifest = "application/vnd.docker.distribution.manifest.v1+prettyjws" +) + +// DiffCompression returns the compression as defined by the layer diff media +// type. For Docker media types without compression, "unknown" is returned to +// indicate that the media type may be compressed. If the media type is not +// recognized as a layer diff, then it returns errdefs.ErrNotImplemented +func DiffCompression(ctx context.Context, mediaType string) (string, error) { + base, ext := parseMediaTypes(mediaType) + switch base { + case MediaTypeDockerSchema2Layer, MediaTypeDockerSchema2LayerForeign: + if len(ext) > 0 { + // Type is wrapped + return "", nil + } + // These media types may have been compressed but failed to + // use the correct media type. The decompression function + // should detect and handle this case. 
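+ // In other words, "unknown" tells the caller to sniff the stream rather than trust the declared media type.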
+ return "unknown", nil + case MediaTypeDockerSchema2LayerGzip, MediaTypeDockerSchema2LayerForeignGzip: + if len(ext) > 0 { + // Type is wrapped + return "", nil + } + return "gzip", nil + case ocispec.MediaTypeImageLayer, ocispec.MediaTypeImageLayerNonDistributable: + if len(ext) > 0 { + switch ext[len(ext)-1] { + case "gzip": + return "gzip", nil + } + } + return "", nil + default: + return "", errdefs.ErrNotImplemented + } +} + +// parseMediaTypes splits the media type into the base type and +// an array of sorted extensions +func parseMediaTypes(mt string) (string, []string) { + if mt == "" { + return "", []string{} + } + + s := strings.Split(mt, "+") + ext := s[1:] + sort.Strings(ext) + + return s[0], ext +} + +// IsLayerTypes returns true if the media type is a layer +func IsLayerType(mt string) bool { + if strings.HasPrefix(mt, "application/vnd.oci.image.layer.") { + return true + } + + // Parse Docker media types, strip off any + suffixes first + base, _ := parseMediaTypes(mt) + switch base { + case MediaTypeDockerSchema2Layer, MediaTypeDockerSchema2LayerGzip, + MediaTypeDockerSchema2LayerForeign, MediaTypeDockerSchema2LayerForeignGzip: + return true + } + return false +} + +// IsKnownConfig returns true if the media type is a known config type +func IsKnownConfig(mt string) bool { + switch mt { + case MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig, + MediaTypeContainerd1Checkpoint, MediaTypeContainerd1CheckpointConfig: + return true + } + return false +} diff --git a/vendor/github.com/containerd/containerd/labels/validate.go b/vendor/github.com/containerd/containerd/labels/validate.go new file mode 100644 index 00000000..0de46166 --- /dev/null +++ b/vendor/github.com/containerd/containerd/labels/validate.go @@ -0,0 +1,37 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package labels + +import ( + "github.com/containerd/containerd/errdefs" + "github.com/pkg/errors" +) + +const ( + maxSize = 4096 +) + +// Validate a label's key and value are under 4096 bytes +func Validate(k, v string) error { + if (len(k) + len(v)) > maxSize { + if len(k) > 10 { + k = k[:10] + } + return errors.Wrapf(errdefs.ErrInvalidArgument, "label key and value greater than maximum size (%d bytes), key: %s", maxSize, k) + } + return nil +} diff --git a/vendor/github.com/containerd/containerd/leases/context.go b/vendor/github.com/containerd/containerd/leases/context.go new file mode 100644 index 00000000..599c549d --- /dev/null +++ b/vendor/github.com/containerd/containerd/leases/context.go @@ -0,0 +1,40 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package leases + +import "context" + +type leaseKey struct{} + +// WithLease sets a given lease on the context +func WithLease(ctx context.Context, lid string) context.Context { + ctx = context.WithValue(ctx, leaseKey{}, lid) + + // also store on the grpc headers so it gets picked up by any clients that + // are using this. + return withGRPCLeaseHeader(ctx, lid) +} + +// FromContext returns the lease from the context. +func FromContext(ctx context.Context) (string, bool) { + lid, ok := ctx.Value(leaseKey{}).(string) + if !ok { + return fromGRPCHeader(ctx) + } + + return lid, ok +} diff --git a/vendor/github.com/containerd/containerd/leases/grpc.go b/vendor/github.com/containerd/containerd/leases/grpc.go new file mode 100644 index 00000000..22f287a8 --- /dev/null +++ b/vendor/github.com/containerd/containerd/leases/grpc.go @@ -0,0 +1,58 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package leases + +import ( + "context" + + "google.golang.org/grpc/metadata" +) + +const ( + // GRPCHeader defines the header name for specifying a containerd lease. + GRPCHeader = "containerd-lease" +) + +func withGRPCLeaseHeader(ctx context.Context, lid string) context.Context { + // also store on the grpc headers so it gets picked up by any clients + // that are using this. + txheader := metadata.Pairs(GRPCHeader, lid) + md, ok := metadata.FromOutgoingContext(ctx) // merge with outgoing context. + if !ok { + md = txheader + } else { + // order ensures the latest is first in this list. + md = metadata.Join(txheader, md) + } + + return metadata.NewOutgoingContext(ctx, md) +} + +func fromGRPCHeader(ctx context.Context) (string, bool) { + // try to extract for use in grpc servers. + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + return "", false + } + + values := md[GRPCHeader] + if len(values) == 0 { + return "", false + } + + return values[0], true +} diff --git a/vendor/github.com/containerd/containerd/leases/id.go b/vendor/github.com/containerd/containerd/leases/id.go new file mode 100644 index 00000000..8781a1d7 --- /dev/null +++ b/vendor/github.com/containerd/containerd/leases/id.go @@ -0,0 +1,43 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package leases + +import ( + "encoding/base64" + "fmt" + "math/rand" + "time" +) + +// WithRandomID sets the lease ID to a random unique value +func WithRandomID() Opt { + return func(l *Lease) error { + t := time.Now() + var b [3]byte + rand.Read(b[:]) + l.ID = fmt.Sprintf("%d-%s", t.Nanosecond(), base64.URLEncoding.EncodeToString(b[:])) + return nil + } +} + +// WithID sets the ID for the lease +func WithID(id string) Opt { + return func(l *Lease) error { + l.ID = id + return nil + } +} diff --git a/vendor/github.com/containerd/containerd/leases/lease.go b/vendor/github.com/containerd/containerd/leases/lease.go new file mode 100644 index 00000000..058d0655 --- /dev/null +++ b/vendor/github.com/containerd/containerd/leases/lease.go @@ -0,0 +1,86 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package leases + +import ( + "context" + "time" +) + +// Opt is used to set options on a lease +type Opt func(*Lease) error + +// DeleteOpt allows configuring a delete operation +type DeleteOpt func(context.Context, *DeleteOptions) error + +// Manager is used to create, list, and remove leases +type Manager interface { + Create(context.Context, ...Opt) (Lease, error) + Delete(context.Context, Lease, ...DeleteOpt) error + List(context.Context, ...string) ([]Lease, error) + AddResource(context.Context, Lease, Resource) error + DeleteResource(context.Context, Lease, Resource) error + ListResources(context.Context, Lease) ([]Resource, error) +} + +// Lease retains resources to prevent cleanup before +// the resources can be fully referenced. +type Lease struct { + ID string + CreatedAt time.Time + Labels map[string]string +} + +// Resource represents low level resource of image, like content, ingest and +// snapshotter. +type Resource struct { + ID string + Type string +} + +// DeleteOptions provide options on image delete +type DeleteOptions struct { + Synchronous bool +} + +// SynchronousDelete is used to indicate that a lease deletion and removal of +// any unreferenced resources should occur synchronously before returning the +// result. 
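+// +// Illustrative use, assuming manager is a leases.Manager and l an existing Lease; note that SynchronousDelete is itself a DeleteOpt and is passed by name: +// +// err := manager.Delete(ctx, l, leases.SynchronousDelete)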
+func SynchronousDelete(ctx context.Context, o *DeleteOptions) error { + o.Synchronous = true + return nil +} + +// WithLabels sets labels on a lease +func WithLabels(labels map[string]string) Opt { + return func(l *Lease) error { + l.Labels = labels + return nil + } +} + +// WithExpiration sets an expiration on the lease +func WithExpiration(d time.Duration) Opt { + return func(l *Lease) error { + if l.Labels == nil { + l.Labels = map[string]string{} + } + l.Labels["containerd.io/gc.expire"] = time.Now().Add(d).Format(time.RFC3339) + + return nil + } +} diff --git a/vendor/github.com/containerd/containerd/log/context.go b/vendor/github.com/containerd/containerd/log/context.go index 31f1a3ac..21599c4f 100644 --- a/vendor/github.com/containerd/containerd/log/context.go +++ b/vendor/github.com/containerd/containerd/log/context.go @@ -18,7 +18,6 @@ package log import ( "context" - "sync/atomic" "github.com/sirupsen/logrus" ) @@ -38,23 +37,10 @@ type ( loggerKey struct{} ) -// TraceLevel is the log level for tracing. Trace level is lower than debug level, -// and is usually used to trace detailed behavior of the program. -const TraceLevel = logrus.Level(uint32(logrus.DebugLevel + 1)) - // RFC3339NanoFixed is time.RFC3339Nano with nanoseconds padded using zeros to // ensure the formatted time is always the same number of characters. const RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00" -// ParseLevel takes a string level and returns the Logrus log level constant. -// It supports trace level. -func ParseLevel(lvl string) (logrus.Level, error) { - if lvl == "trace" { - return TraceLevel, nil - } - return logrus.ParseLevel(lvl) -} - // WithLogger returns a new context with the provided logger. Use in // combination with logger.WithField(s) for great effect. func WithLogger(ctx context.Context, logger *logrus.Entry) context.Context { @@ -72,19 +58,3 @@ func GetLogger(ctx context.Context) *logrus.Entry { return logger.(*logrus.Entry) } - -// Trace logs a message at level Trace with the log entry passed-in. -func Trace(e *logrus.Entry, args ...interface{}) { - level := logrus.Level(atomic.LoadUint32((*uint32)(&e.Logger.Level))) - if level >= TraceLevel { - e.Debug(args...) - } -} - -// Tracef logs a message at level Trace with the log entry passed-in. -func Tracef(e *logrus.Entry, format string, args ...interface{}) { - level := logrus.Level(atomic.LoadUint32((*uint32)(&e.Logger.Level))) - if level >= TraceLevel { - e.Debugf(format, args...) - } -} diff --git a/vendor/github.com/containerd/containerd/metadata/adaptors.go b/vendor/github.com/containerd/containerd/metadata/adaptors.go new file mode 100644 index 00000000..b165c38e --- /dev/null +++ b/vendor/github.com/containerd/containerd/metadata/adaptors.go @@ -0,0 +1,178 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package metadata + +import ( + "strings" + + "github.com/containerd/containerd/containers" + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/filters" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/leases" + "github.com/containerd/containerd/snapshots" +) + +func adaptImage(o interface{}) filters.Adaptor { + obj := o.(images.Image) + return filters.AdapterFunc(func(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + + switch fieldpath[0] { + case "name": + return obj.Name, len(obj.Name) > 0 + case "target": + if len(fieldpath) < 2 { + return "", false + } + + switch fieldpath[1] { + case "digest": + return obj.Target.Digest.String(), len(obj.Target.Digest) > 0 + case "mediatype": + return obj.Target.MediaType, len(obj.Target.MediaType) > 0 + } + case "labels": + return checkMap(fieldpath[1:], obj.Labels) + // TODO(stevvooe): Greater/Less than filters would be awesome for + // size. Let's do it! + case "annotations": + return checkMap(fieldpath[1:], obj.Target.Annotations) + } + + return "", false + }) +} +func adaptContainer(o interface{}) filters.Adaptor { + obj := o.(containers.Container) + return filters.AdapterFunc(func(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + + switch fieldpath[0] { + case "id": + return obj.ID, len(obj.ID) > 0 + case "runtime": + if len(fieldpath) <= 1 { + return "", false + } + + switch fieldpath[1] { + case "name": + return obj.Runtime.Name, len(obj.Runtime.Name) > 0 + default: + return "", false + } + case "image": + return obj.Image, len(obj.Image) > 0 + case "labels": + return checkMap(fieldpath[1:], obj.Labels) + } + + return "", false + }) +} + +func adaptContentInfo(info content.Info) filters.Adaptor { + return filters.AdapterFunc(func(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + + switch fieldpath[0] { + case "digest": + return info.Digest.String(), true + case "size": + // TODO: support size based filtering + case "labels": + return checkMap(fieldpath[1:], info.Labels) + } + + return "", false + }) +} + +func adaptContentStatus(status content.Status) filters.Adaptor { + return filters.AdapterFunc(func(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + switch fieldpath[0] { + case "ref": + return status.Ref, true + } + + return "", false + }) +} + +func adaptLease(lease leases.Lease) filters.Adaptor { + return filters.AdapterFunc(func(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + + switch fieldpath[0] { + case "id": + return lease.ID, len(lease.ID) > 0 + case "labels": + return checkMap(fieldpath[1:], lease.Labels) + } + + return "", false + }) +} + +func adaptSnapshot(info snapshots.Info) filters.Adaptor { + return filters.AdapterFunc(func(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + + switch fieldpath[0] { + case "kind": + switch info.Kind { + case snapshots.KindActive: + return "active", true + case snapshots.KindView: + return "view", true + case snapshots.KindCommitted: + return "committed", true + } + case "name": + return info.Name, true + case "parent": + return info.Parent, true + case "labels": + return checkMap(fieldpath[1:], info.Labels) + } + + return "", false + }) +} + +func checkMap(fieldpath []string, m map[string]string) (string, bool) { + if len(m) == 0 { + return "", false + } + + value, ok := m[strings.Join(fieldpath, 
".")] + return value, ok +} diff --git a/vendor/github.com/containerd/containerd/metadata/bolt.go b/vendor/github.com/containerd/containerd/metadata/bolt.go new file mode 100644 index 00000000..6ea46086 --- /dev/null +++ b/vendor/github.com/containerd/containerd/metadata/bolt.go @@ -0,0 +1,61 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package metadata + +import ( + "context" + + "github.com/pkg/errors" + bolt "go.etcd.io/bbolt" +) + +type transactionKey struct{} + +// WithTransactionContext returns a new context holding the provided +// bolt transaction. Functions which require a bolt transaction will +// first check to see if a transaction is already created on the +// context before creating their own. +func WithTransactionContext(ctx context.Context, tx *bolt.Tx) context.Context { + return context.WithValue(ctx, transactionKey{}, tx) +} + +type transactor interface { + View(fn func(*bolt.Tx) error) error + Update(fn func(*bolt.Tx) error) error +} + +// view gets a bolt db transaction either from the context +// or starts a new one with the provided bolt database. +func view(ctx context.Context, db transactor, fn func(*bolt.Tx) error) error { + tx, ok := ctx.Value(transactionKey{}).(*bolt.Tx) + if !ok { + return db.View(fn) + } + return fn(tx) +} + +// update gets a writable bolt db transaction either from the context +// or starts a new one with the provided bolt database. +func update(ctx context.Context, db transactor, fn func(*bolt.Tx) error) error { + tx, ok := ctx.Value(transactionKey{}).(*bolt.Tx) + if !ok { + return db.Update(fn) + } else if !tx.Writable() { + return errors.Wrap(bolt.ErrTxNotWritable, "unable to use transaction from context") + } + return fn(tx) +} diff --git a/vendor/github.com/containerd/containerd/metadata/boltutil/helpers.go b/vendor/github.com/containerd/containerd/metadata/boltutil/helpers.go new file mode 100644 index 00000000..94af315f --- /dev/null +++ b/vendor/github.com/containerd/containerd/metadata/boltutil/helpers.go @@ -0,0 +1,147 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package boltutil + +import ( + "time" + + "github.com/pkg/errors" + bolt "go.etcd.io/bbolt" +) + +var ( + bucketKeyAnnotations = []byte("annotations") + bucketKeyLabels = []byte("labels") + bucketKeyCreatedAt = []byte("createdat") + bucketKeyUpdatedAt = []byte("updatedat") +) + +// ReadLabels reads the labels key from the bucket +// Uses the key "labels" +func ReadLabels(bkt *bolt.Bucket) (map[string]string, error) { + return readMap(bkt, bucketKeyLabels) +} + +// ReadAnnotations reads the OCI Descriptor Annotations key from the bucket +// Uses the key "annotations" +func ReadAnnotations(bkt *bolt.Bucket) (map[string]string, error) { + return readMap(bkt, bucketKeyAnnotations) +} + +func readMap(bkt *bolt.Bucket, bucketName []byte) (map[string]string, error) { + lbkt := bkt.Bucket(bucketName) + if lbkt == nil { + return nil, nil + } + labels := map[string]string{} + if err := lbkt.ForEach(func(k, v []byte) error { + labels[string(k)] = string(v) + return nil + }); err != nil { + return nil, err + } + return labels, nil +} + +// WriteLabels will write a new labels bucket to the provided bucket at key +// bucketKeyLabels, replacing the contents of the bucket with the provided map. +// +// The provided map labels will be modified to have the final contents of the +// bucket. Typically, this removes zero-value entries. +// Uses the key "labels" +func WriteLabels(bkt *bolt.Bucket, labels map[string]string) error { + return writeMap(bkt, bucketKeyLabels, labels) +} + +// WriteAnnotations writes the OCI Descriptor Annotations +func WriteAnnotations(bkt *bolt.Bucket, labels map[string]string) error { + return writeMap(bkt, bucketKeyAnnotations, labels) +} + +func writeMap(bkt *bolt.Bucket, bucketName []byte, labels map[string]string) error { + // Remove existing labels to keep from merging + if lbkt := bkt.Bucket(bucketName); lbkt != nil { + if err := bkt.DeleteBucket(bucketName); err != nil { + return err + } + } + + if len(labels) == 0 { + return nil + } + + lbkt, err := bkt.CreateBucket(bucketName) + if err != nil { + return err + } + + for k, v := range labels { + if v == "" { + delete(labels, k) // remove since we don't actually set it + continue + } + + if err := lbkt.Put([]byte(k), []byte(v)); err != nil { + return errors.Wrapf(err, "failed to set label %q=%q", k, v) + } + } + + return nil +} + +// ReadTimestamps reads created and updated timestamps from a bucket. +// Uses keys "createdat" and "updatedat" +func ReadTimestamps(bkt *bolt.Bucket, created, updated *time.Time) error { + for _, f := range []struct { + b []byte + t *time.Time + }{ + {bucketKeyCreatedAt, created}, + {bucketKeyUpdatedAt, updated}, + } { + v := bkt.Get(f.b) + if v != nil { + if err := f.t.UnmarshalBinary(v); err != nil { + return err + } + } + } + return nil +} + +// WriteTimestamps writes created and updated timestamps to a bucket.
+// Uses keys "createdat" and "updatedat" +func WriteTimestamps(bkt *bolt.Bucket, created, updated time.Time) error { + createdAt, err := created.MarshalBinary() + if err != nil { + return err + } + updatedAt, err := updated.MarshalBinary() + if err != nil { + return err + } + for _, v := range [][2][]byte{ + {bucketKeyCreatedAt, createdAt}, + {bucketKeyUpdatedAt, updatedAt}, + } { + if err := bkt.Put(v[0], v[1]); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/containerd/containerd/metadata/buckets.go b/vendor/github.com/containerd/containerd/metadata/buckets.go new file mode 100644 index 00000000..fa947fb2 --- /dev/null +++ b/vendor/github.com/containerd/containerd/metadata/buckets.go @@ -0,0 +1,272 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// Package metadata stores all labels and object specific metadata by namespace. +// This package also contains the main garbage collection logic for cleaning up +// resources consistently and atomically. Resources used by backends will be +// tracked in the metadata store to be exposed to consumers of this package. +// +// The layout where a "/" delineates a bucket is described in the following +// section. Please try to follow this as closely as possible when adding +// functionality. We can bolster this with helpers and more structure if that +// becomes an issue. +// +// Generically, we try to do the following: +// +// <version>/<namespace>/<object>/<key> -> <field> +// +// version: Currently, this is "v1". Additions can be made to v1 in a backwards +// compatible way. If the layout changes, a new version must be made, along +// with a migration. +// +// namespace: the namespace to which this object belongs. +// +// object: defines which object set is stored in the bucket. There are two +// special objects, "labels" and "indexes". The "labels" bucket stores the +// labels for the parent namespace. The "indexes" object is reserved for +// indexing objects, if we require in the future. +// +// key: object-specific key identifying the storage bucket for the objects +// contents. +// +// Below is the current database schema. This should be updated each time +// the structure is changed in addition to adding a migration and incrementing +// the database version. Note that `╘══*...*` refers to maps with arbitrary +// keys.
+// ├──version : <varint> - Latest version, see migrations +// └──v1 - Schema version bucket +// ╘══*namespace* +// ├──labels +// │ ╘══*key* : <string> - Label value +// ├──image +// │ ╘══*image name* +// │ ├──createdat : <binary time> - Created at +// │ ├──updatedat : <binary time> - Updated at +// │ ├──target +// │ │ ├──digest : <digest> - Descriptor digest +// │ │ ├──mediatype : <string> - Descriptor media type +// │ │ └──size : <varint> - Descriptor size +// │ └──labels +// │ ╘══*key* : <string> - Label value +// ├──containers +// │ ╘══*container id* +// │ ├──createdat : <binary time> - Created at +// │ ├──updatedat : <binary time> - Updated at +// │ ├──spec : <binary> - Proto marshaled spec +// │ ├──image : <string> - Image name +// │ ├──snapshotter : <string> - Snapshotter name +// │ ├──snapshotKey : <string> - Snapshot key +// │ ├──runtime +// │ │ ├──name : <string> - Runtime name +// │ │ ├──extensions +// │ │ │ ╘══*name* : <binary> - Proto marshaled extension +// │ │ └──options : <binary> - Proto marshaled options +// │ └──labels +// │ ╘══*key* : <string> - Label value +// ├──snapshots +// │ ╘══*snapshotter* +// │ ╘══*snapshot key* +// │ ├──name : <string> - Snapshot name in backend +// │ ├──createdat : <binary time> - Created at +// │ ├──updatedat : <binary time> - Updated at +// │ ├──parent : <string> - Parent snapshot name +// │ ├──children +// │ │ ╘══*snapshot key* : <nil> - Child snapshot reference +// │ └──labels +// │ ╘══*key* : <string> - Label value +// ├──content +// │ ├──blob +// │ │ ╘══*blob digest* +// │ │ ├──createdat : <binary time> - Created at +// │ │ ├──updatedat : <binary time> - Updated at +// │ │ ├──size : <varint> - Blob size +// │ │ └──labels +// │ │ ╘══*key* : <string> - Label value +// │ └──ingests +// │ ╘══*ingest reference* +// │ ├──ref : <string> - Ingest reference in backend +// │ ├──expireat : <binary time> - Time to expire ingest +// │ └──expected : <digest> - Expected commit digest +// └──leases +// ╘══*lease id* +// ├──createdat : <binary time> - Created at +// ├──labels +// │ ╘══*key* : <string> - Label value +// ├──snapshots +// │ ╘══*snapshotter* +// │ ╘══*snapshot key* : <nil> - Snapshot reference +// ├──content +// │ ╘══*blob digest* : <nil> - Content blob reference +// └──ingests +// ╘══*ingest reference* : <nil> - Content ingest reference +package metadata + +import ( + digest "github.com/opencontainers/go-digest" + bolt "go.etcd.io/bbolt" +) + +var ( + bucketKeyVersion = []byte(schemaVersion) + bucketKeyDBVersion = []byte("version") // stores the version of the schema + bucketKeyObjectLabels = []byte("labels") // stores the labels for a namespace.
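+ // A sketch of how the helpers below compose these keys (assuming an
+ // open transaction tx, a namespace ns and a digest dgst; see the schema
+ // tree above):
+ //
+ //	blobs := getBucket(tx, bucketKeyVersion, []byte(ns),
+ //		bucketKeyObjectContent, bucketKeyObjectBlob)
+ //
+ // resolves the bucket at v1/<namespace>/content/blob and returns nil if
+ // any element of the path does not exist yet.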
+ bucketKeyObjectImages = []byte("images") // stores image objects + bucketKeyObjectContainers = []byte("containers") // stores container objects + bucketKeyObjectSnapshots = []byte("snapshots") // stores snapshot references + bucketKeyObjectContent = []byte("content") // stores content references + bucketKeyObjectBlob = []byte("blob") // stores content links + bucketKeyObjectIngests = []byte("ingests") // stores ingest objects + bucketKeyObjectLeases = []byte("leases") // stores leases + + bucketKeyDigest = []byte("digest") + bucketKeyMediaType = []byte("mediatype") + bucketKeySize = []byte("size") + bucketKeyImage = []byte("image") + bucketKeyRuntime = []byte("runtime") + bucketKeyName = []byte("name") + bucketKeyParent = []byte("parent") + bucketKeyChildren = []byte("children") + bucketKeyOptions = []byte("options") + bucketKeySpec = []byte("spec") + bucketKeySnapshotKey = []byte("snapshotKey") + bucketKeySnapshotter = []byte("snapshotter") + bucketKeyTarget = []byte("target") + bucketKeyExtensions = []byte("extensions") + bucketKeyCreatedAt = []byte("createdat") + bucketKeyExpected = []byte("expected") + bucketKeyRef = []byte("ref") + bucketKeyExpireAt = []byte("expireat") + + deprecatedBucketKeyObjectIngest = []byte("ingest") // stores ingest links, deprecated in v1.2 +) + +func getBucket(tx *bolt.Tx, keys ...[]byte) *bolt.Bucket { + bkt := tx.Bucket(keys[0]) + + for _, key := range keys[1:] { + if bkt == nil { + break + } + bkt = bkt.Bucket(key) + } + + return bkt +} + +func createBucketIfNotExists(tx *bolt.Tx, keys ...[]byte) (*bolt.Bucket, error) { + bkt, err := tx.CreateBucketIfNotExists(keys[0]) + if err != nil { + return nil, err + } + + for _, key := range keys[1:] { + bkt, err = bkt.CreateBucketIfNotExists(key) + if err != nil { + return nil, err + } + } + + return bkt, nil +} + +func namespaceLabelsBucketPath(namespace string) [][]byte { + return [][]byte{bucketKeyVersion, []byte(namespace), bucketKeyObjectLabels} +} + +func withNamespacesLabelsBucket(tx *bolt.Tx, namespace string, fn func(bkt *bolt.Bucket) error) error { + bkt, err := createBucketIfNotExists(tx, namespaceLabelsBucketPath(namespace)...) + if err != nil { + return err + } + + return fn(bkt) +} + +func getNamespaceLabelsBucket(tx *bolt.Tx, namespace string) *bolt.Bucket { + return getBucket(tx, namespaceLabelsBucketPath(namespace)...) +} + +func imagesBucketPath(namespace string) [][]byte { + return [][]byte{bucketKeyVersion, []byte(namespace), bucketKeyObjectImages} +} + +func createImagesBucket(tx *bolt.Tx, namespace string) (*bolt.Bucket, error) { + return createBucketIfNotExists(tx, imagesBucketPath(namespace)...) +} + +func getImagesBucket(tx *bolt.Tx, namespace string) *bolt.Bucket { + return getBucket(tx, imagesBucketPath(namespace)...) 
+} + +func createContainersBucket(tx *bolt.Tx, namespace string) (*bolt.Bucket, error) { + return createBucketIfNotExists(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectContainers) +} + +func getContainersBucket(tx *bolt.Tx, namespace string) *bolt.Bucket { + return getBucket(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectContainers) +} + +func getContainerBucket(tx *bolt.Tx, namespace, id string) *bolt.Bucket { + return getBucket(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectContainers, []byte(id)) +} + +func createSnapshotterBucket(tx *bolt.Tx, namespace, snapshotter string) (*bolt.Bucket, error) { + bkt, err := createBucketIfNotExists(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectSnapshots, []byte(snapshotter)) + if err != nil { + return nil, err + } + return bkt, nil +} + +func getSnapshottersBucket(tx *bolt.Tx, namespace string) *bolt.Bucket { + return getBucket(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectSnapshots) +} + +func getSnapshotterBucket(tx *bolt.Tx, namespace, snapshotter string) *bolt.Bucket { + return getBucket(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectSnapshots, []byte(snapshotter)) +} + +func createBlobBucket(tx *bolt.Tx, namespace string, dgst digest.Digest) (*bolt.Bucket, error) { + bkt, err := createBucketIfNotExists(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectContent, bucketKeyObjectBlob) + if err != nil { + return nil, err + } + return bkt.CreateBucket([]byte(dgst.String())) +} + +func getBlobsBucket(tx *bolt.Tx, namespace string) *bolt.Bucket { + return getBucket(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectContent, bucketKeyObjectBlob) +} + +func getBlobBucket(tx *bolt.Tx, namespace string, dgst digest.Digest) *bolt.Bucket { + return getBucket(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectContent, bucketKeyObjectBlob, []byte(dgst.String())) +} + +func getIngestsBucket(tx *bolt.Tx, namespace string) *bolt.Bucket { + return getBucket(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectContent, bucketKeyObjectIngests) +} + +func createIngestBucket(tx *bolt.Tx, namespace, ref string) (*bolt.Bucket, error) { + bkt, err := createBucketIfNotExists(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectContent, bucketKeyObjectIngests, []byte(ref)) + if err != nil { + return nil, err + } + return bkt, nil +} + +func getIngestBucket(tx *bolt.Tx, namespace, ref string) *bolt.Bucket { + return getBucket(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectContent, bucketKeyObjectIngests, []byte(ref)) +} diff --git a/vendor/github.com/containerd/containerd/metadata/containers.go b/vendor/github.com/containerd/containerd/metadata/containers.go new file mode 100644 index 00000000..09b0d203 --- /dev/null +++ b/vendor/github.com/containerd/containerd/metadata/containers.go @@ -0,0 +1,457 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package metadata + +import ( + "context" + "strings" + "sync/atomic" + "time" + + "github.com/containerd/containerd/containers" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/filters" + "github.com/containerd/containerd/identifiers" + "github.com/containerd/containerd/labels" + "github.com/containerd/containerd/metadata/boltutil" + "github.com/containerd/containerd/namespaces" + "github.com/gogo/protobuf/proto" + "github.com/gogo/protobuf/types" + "github.com/pkg/errors" + bolt "go.etcd.io/bbolt" +) + +type containerStore struct { + db *DB +} + +// NewContainerStore returns a Store backed by an underlying bolt DB +func NewContainerStore(db *DB) containers.Store { + return &containerStore{ + db: db, + } +} + +func (s *containerStore) Get(ctx context.Context, id string) (containers.Container, error) { + namespace, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return containers.Container{}, err + } + + container := containers.Container{ID: id} + + if err := view(ctx, s.db, func(tx *bolt.Tx) error { + bkt := getContainerBucket(tx, namespace, id) + if bkt == nil { + return errors.Wrapf(errdefs.ErrNotFound, "container %q in namespace %q", id, namespace) + } + + if err := readContainer(&container, bkt); err != nil { + return errors.Wrapf(err, "failed to read container %q", id) + } + + return nil + }); err != nil { + return containers.Container{}, err + } + + return container, nil +} + +func (s *containerStore) List(ctx context.Context, fs ...string) ([]containers.Container, error) { + namespace, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return nil, err + } + + filter, err := filters.ParseAll(fs...) + if err != nil { + return nil, errors.Wrap(errdefs.ErrInvalidArgument, err.Error()) + } + + var m []containers.Container + + if err := view(ctx, s.db, func(tx *bolt.Tx) error { + bkt := getContainersBucket(tx, namespace) + if bkt == nil { + return nil // empty store + } + + return bkt.ForEach(func(k, v []byte) error { + cbkt := bkt.Bucket(k) + if cbkt == nil { + return nil + } + container := containers.Container{ID: string(k)} + + if err := readContainer(&container, cbkt); err != nil { + return errors.Wrapf(err, "failed to read container %q", string(k)) + } + + if filter.Match(adaptContainer(container)) { + m = append(m, container) + } + return nil + }) + }); err != nil { + return nil, err + } + + return m, nil +} + +func (s *containerStore) Create(ctx context.Context, container containers.Container) (containers.Container, error) { + namespace, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return containers.Container{}, err + } + + if err := validateContainer(&container); err != nil { + return containers.Container{}, errors.Wrap(err, "create container failed validation") + } + + if err := update(ctx, s.db, func(tx *bolt.Tx) error { + bkt, err := createContainersBucket(tx, namespace) + if err != nil { + return err + } + + cbkt, err := bkt.CreateBucket([]byte(container.ID)) + if err != nil { + if err == bolt.ErrBucketExists { + err = errors.Wrapf(errdefs.ErrAlreadyExists, "container %q", container.ID) + } + return err + } + + container.CreatedAt = time.Now().UTC() + container.UpdatedAt = container.CreatedAt + if err := writeContainer(cbkt, &container); err != nil { + return errors.Wrapf(err, "failed to write container %q", container.ID) + } + + return nil + }); err != nil { + return containers.Container{}, err + } + + return container, nil +} + +func (s *containerStore) Update(ctx context.Context, container 
containers.Container, fieldpaths ...string) (containers.Container, error) { + namespace, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return containers.Container{}, err + } + + if container.ID == "" { + return containers.Container{}, errors.Wrapf(errdefs.ErrInvalidArgument, "must specify a container id") + } + + var updated containers.Container + if err := update(ctx, s.db, func(tx *bolt.Tx) error { + bkt := getContainersBucket(tx, namespace) + if bkt == nil { + return errors.Wrapf(errdefs.ErrNotFound, "cannot update container %q in namespace %q", container.ID, namespace) + } + + cbkt := bkt.Bucket([]byte(container.ID)) + if cbkt == nil { + return errors.Wrapf(errdefs.ErrNotFound, "container %q", container.ID) + } + + if err := readContainer(&updated, cbkt); err != nil { + return errors.Wrapf(err, "failed to read container %q", container.ID) + } + createdat := updated.CreatedAt + updated.ID = container.ID + + if len(fieldpaths) == 0 { + // only allow updates to these fields on full replace. + fieldpaths = []string{"labels", "spec", "extensions", "image", "snapshotkey"} + + // Fields that are immutable must cause an error when no field paths + // are provided. This allows these fields to become mutable in the + // future. + if updated.Snapshotter != container.Snapshotter { + return errors.Wrapf(errdefs.ErrInvalidArgument, "container.Snapshotter field is immutable") + } + + if updated.Runtime.Name != container.Runtime.Name { + return errors.Wrapf(errdefs.ErrInvalidArgument, "container.Runtime.Name field is immutable") + } + } + + // apply the field mask. If you update this code, you better follow the + // field mask rules in field_mask.proto. If you don't know what this + // is, do not update this code. + for _, path := range fieldpaths { + if strings.HasPrefix(path, "labels.") { + if updated.Labels == nil { + updated.Labels = map[string]string{} + } + key := strings.TrimPrefix(path, "labels.") + updated.Labels[key] = container.Labels[key] + continue + } + + if strings.HasPrefix(path, "extensions.") { + if updated.Extensions == nil { + updated.Extensions = map[string]types.Any{} + } + key := strings.TrimPrefix(path, "extensions.") + updated.Extensions[key] = container.Extensions[key] + continue + } + + switch path { + case "labels": + updated.Labels = container.Labels + case "spec": + updated.Spec = container.Spec + case "extensions": + updated.Extensions = container.Extensions + case "image": + updated.Image = container.Image + case "snapshotkey": + updated.SnapshotKey = container.SnapshotKey + default: + return errors.Wrapf(errdefs.ErrInvalidArgument, "cannot update %q field on %q", path, container.ID) + } + } + + if err := validateContainer(&updated); err != nil { + return errors.Wrap(err, "update failed validation") + } + + updated.CreatedAt = createdat + updated.UpdatedAt = time.Now().UTC() + if err := writeContainer(cbkt, &updated); err != nil { + return errors.Wrapf(err, "failed to write container %q", container.ID) + } + + return nil + }); err != nil { + return containers.Container{}, err + } + + return updated, nil +} + +func (s *containerStore) Delete(ctx context.Context, id string) error { + namespace, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return err + } + + return update(ctx, s.db, func(tx *bolt.Tx) error { + bkt := getContainersBucket(tx, namespace) + if bkt == nil { + return errors.Wrapf(errdefs.ErrNotFound, "cannot delete container %q in namespace %q", id, namespace) + } + + if err := bkt.DeleteBucket([]byte(id)); err != nil { + if err ==
bolt.ErrBucketNotFound { + err = errors.Wrapf(errdefs.ErrNotFound, "container %v", id) + } + return err + } + + atomic.AddUint32(&s.db.dirty, 1) + + return nil + }) +} + +func validateContainer(container *containers.Container) error { + if err := identifiers.Validate(container.ID); err != nil { + return errors.Wrap(err, "container.ID") + } + + for k := range container.Extensions { + if k == "" { + return errors.Wrapf(errdefs.ErrInvalidArgument, "container.Extension keys must not be zero-length") + } + } + + // image has no validation + for k, v := range container.Labels { + if err := labels.Validate(k, v); err != nil { + return errors.Wrapf(err, "containers.Labels") + } + } + + if container.Runtime.Name == "" { + return errors.Wrapf(errdefs.ErrInvalidArgument, "container.Runtime.Name must be set") + } + + if container.Spec == nil { + return errors.Wrapf(errdefs.ErrInvalidArgument, "container.Spec must be set") + } + + if container.SnapshotKey != "" && container.Snapshotter == "" { + return errors.Wrapf(errdefs.ErrInvalidArgument, "container.Snapshotter must be set if container.SnapshotKey is set") + } + + return nil +} + +func readContainer(container *containers.Container, bkt *bolt.Bucket) error { + labels, err := boltutil.ReadLabels(bkt) + if err != nil { + return err + } + container.Labels = labels + + if err := boltutil.ReadTimestamps(bkt, &container.CreatedAt, &container.UpdatedAt); err != nil { + return err + } + + return bkt.ForEach(func(k, v []byte) error { + switch string(k) { + case string(bucketKeyImage): + container.Image = string(v) + case string(bucketKeyRuntime): + rbkt := bkt.Bucket(bucketKeyRuntime) + if rbkt == nil { + return nil // skip runtime. should be an error? + } + + n := rbkt.Get(bucketKeyName) + if n != nil { + container.Runtime.Name = string(n) + } + + obkt := rbkt.Get(bucketKeyOptions) + if obkt == nil { + return nil + } + + var any types.Any + if err := proto.Unmarshal(obkt, &any); err != nil { + return err + } + container.Runtime.Options = &any + case string(bucketKeySpec): + var any types.Any + if err := proto.Unmarshal(v, &any); err != nil { + return err + } + container.Spec = &any + case string(bucketKeySnapshotKey): + container.SnapshotKey = string(v) + case string(bucketKeySnapshotter): + container.Snapshotter = string(v) + case string(bucketKeyExtensions): + ebkt := bkt.Bucket(bucketKeyExtensions) + if ebkt == nil { + return nil + } + + extensions := make(map[string]types.Any) + if err := ebkt.ForEach(func(k, v []byte) error { + var a types.Any + if err := proto.Unmarshal(v, &a); err != nil { + return err + } + + extensions[string(k)] = a + return nil + }); err != nil { + return err + } + + container.Extensions = extensions + } + + return nil + }) +} + +func writeContainer(bkt *bolt.Bucket, container *containers.Container) error { + if err := boltutil.WriteTimestamps(bkt, container.CreatedAt, container.UpdatedAt); err != nil { + return err + } + + if container.Spec != nil { + spec, err := container.Spec.Marshal() + if err != nil { + return err + } + + if err := bkt.Put(bucketKeySpec, spec); err != nil { + return err + } + } + + for _, v := range [][2][]byte{ + {bucketKeyImage, []byte(container.Image)}, + {bucketKeySnapshotter, []byte(container.Snapshotter)}, + {bucketKeySnapshotKey, []byte(container.SnapshotKey)}, + } { + if err := bkt.Put(v[0], v[1]); err != nil { + return err + } + } + + if rbkt := bkt.Bucket(bucketKeyRuntime); rbkt != nil { + if err := bkt.DeleteBucket(bucketKeyRuntime); err != nil { + return err + } + } + + rbkt, err :=
bkt.CreateBucket(bucketKeyRuntime) + if err != nil { + return err + } + + if err := rbkt.Put(bucketKeyName, []byte(container.Runtime.Name)); err != nil { + return err + } + + if len(container.Extensions) > 0 { + ebkt, err := bkt.CreateBucketIfNotExists(bucketKeyExtensions) + if err != nil { + return err + } + + for name, ext := range container.Extensions { + p, err := proto.Marshal(&ext) + if err != nil { + return err + } + + if err := ebkt.Put([]byte(name), p); err != nil { + return err + } + } + } + + if container.Runtime.Options != nil { + data, err := proto.Marshal(container.Runtime.Options) + if err != nil { + return err + } + + if err := rbkt.Put(bucketKeyOptions, data); err != nil { + return err + } + } + + return boltutil.WriteLabels(bkt, container.Labels) +} diff --git a/vendor/github.com/containerd/containerd/metadata/content.go b/vendor/github.com/containerd/containerd/metadata/content.go new file mode 100644 index 00000000..268a9b1b --- /dev/null +++ b/vendor/github.com/containerd/containerd/metadata/content.go @@ -0,0 +1,891 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package metadata + +import ( + "context" + "encoding/binary" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/filters" + "github.com/containerd/containerd/labels" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/metadata/boltutil" + "github.com/containerd/containerd/namespaces" + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + bolt "go.etcd.io/bbolt" +) + +type contentStore struct { + content.Store + db *DB + shared bool + l sync.RWMutex +} + +// newContentStore returns a namespaced content store using an existing +// content store interface. +// policy defines the sharing behavior for content between namespaces. Both +// modes will result in shared storage in the backend for committed content. +// Choose "shared" to prevent separate namespaces from having to pull the same +// content twice. Choose "isolated" if the content must not be shared between +// namespaces. +// +// If the policy is "shared", writes will try to resolve the "expected" digest +// against the backend, allowing imports of content from other namespaces. In +// "isolated" mode, the client must prove it has the content by providing +// the entire blob before the content can be added to another namespace. +// +// Since we have only two policies right now, it's simpler to use a bool to +// represent it internally.
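+// A minimal construction sketch (bdb, cs and ss stand in for an open bbolt
+// handle, a backing content store and a snapshotter map, as accepted by
+// NewDB in db.go):
+//
+//	shared := NewDB(bdb, cs, ss).ContentStore()                       // "shared" policy (default)
+//	isolated := NewDB(bdb, cs, ss, WithPolicyIsolated).ContentStore() // "isolated" policy
+//
+// With the shared store, committing a blob whose digest already exists in
+// the backend links it into the namespace without transferring the bytes
+// again; the isolated store requires the full blob to be provided.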
+func newContentStore(db *DB, shared bool, cs content.Store) *contentStore { + return &contentStore{ + Store: cs, + db: db, + shared: shared, + } +} + +func (cs *contentStore) Info(ctx context.Context, dgst digest.Digest) (content.Info, error) { + ns, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return content.Info{}, err + } + + var info content.Info + if err := view(ctx, cs.db, func(tx *bolt.Tx) error { + bkt := getBlobBucket(tx, ns, dgst) + if bkt == nil { + return errors.Wrapf(errdefs.ErrNotFound, "content digest %v", dgst) + } + + info.Digest = dgst + return readInfo(&info, bkt) + }); err != nil { + return content.Info{}, err + } + + return info, nil +} + +func (cs *contentStore) Update(ctx context.Context, info content.Info, fieldpaths ...string) (content.Info, error) { + ns, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return content.Info{}, err + } + + cs.l.RLock() + defer cs.l.RUnlock() + + updated := content.Info{ + Digest: info.Digest, + } + if err := update(ctx, cs.db, func(tx *bolt.Tx) error { + bkt := getBlobBucket(tx, ns, info.Digest) + if bkt == nil { + return errors.Wrapf(errdefs.ErrNotFound, "content digest %v", info.Digest) + } + + if err := readInfo(&updated, bkt); err != nil { + return errors.Wrapf(err, "info %q", info.Digest) + } + + if len(fieldpaths) > 0 { + for _, path := range fieldpaths { + if strings.HasPrefix(path, "labels.") { + if updated.Labels == nil { + updated.Labels = map[string]string{} + } + + key := strings.TrimPrefix(path, "labels.") + updated.Labels[key] = info.Labels[key] + continue + } + + switch path { + case "labels": + updated.Labels = info.Labels + default: + return errors.Wrapf(errdefs.ErrInvalidArgument, "cannot update %q field on content info %q", path, info.Digest) + } + } + } else { + // Set mutable fields + updated.Labels = info.Labels + } + if err := validateInfo(&updated); err != nil { + return err + } + + updated.UpdatedAt = time.Now().UTC() + return writeInfo(&updated, bkt) + }); err != nil { + return content.Info{}, err + } + return updated, nil +} + +func (cs *contentStore) Walk(ctx context.Context, fn content.WalkFunc, fs ...string) error { + ns, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return err + } + + filter, err := filters.ParseAll(fs...) 
+ if err != nil { + return err + } + + // TODO: Batch results to keep from reading all info into memory + var infos []content.Info + if err := view(ctx, cs.db, func(tx *bolt.Tx) error { + bkt := getBlobsBucket(tx, ns) + if bkt == nil { + return nil + } + + return bkt.ForEach(func(k, v []byte) error { + dgst, err := digest.Parse(string(k)) + if err != nil { + // Not a digest, skip + return nil + } + bbkt := bkt.Bucket(k) + if bbkt == nil { + return nil + } + info := content.Info{ + Digest: dgst, + } + if err := readInfo(&info, bkt.Bucket(k)); err != nil { + return err + } + if filter.Match(adaptContentInfo(info)) { + infos = append(infos, info) + } + return nil + }) + }); err != nil { + return err + } + + for _, info := range infos { + if err := fn(info); err != nil { + return err + } + } + + return nil +} + +func (cs *contentStore) Delete(ctx context.Context, dgst digest.Digest) error { + ns, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return err + } + + cs.l.RLock() + defer cs.l.RUnlock() + + return update(ctx, cs.db, func(tx *bolt.Tx) error { + bkt := getBlobBucket(tx, ns, dgst) + if bkt == nil { + return errors.Wrapf(errdefs.ErrNotFound, "content digest %v", dgst) + } + + if err := getBlobsBucket(tx, ns).DeleteBucket([]byte(dgst.String())); err != nil { + return err + } + if err := removeContentLease(ctx, tx, dgst); err != nil { + return err + } + + // Mark content store as dirty for triggering garbage collection + atomic.AddUint32(&cs.db.dirty, 1) + cs.db.dirtyCS = true + + return nil + }) +} + +func (cs *contentStore) ListStatuses(ctx context.Context, fs ...string) ([]content.Status, error) { + ns, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return nil, err + } + + filter, err := filters.ParseAll(fs...) + if err != nil { + return nil, err + } + + brefs := map[string]string{} + if err := view(ctx, cs.db, func(tx *bolt.Tx) error { + bkt := getIngestsBucket(tx, ns) + if bkt == nil { + return nil + } + + return bkt.ForEach(func(k, v []byte) error { + if v == nil { + // TODO(dmcgowan): match name and potentially labels here + brefs[string(k)] = string(bkt.Bucket(k).Get(bucketKeyRef)) + } + return nil + }) + }); err != nil { + return nil, err + } + + statuses := make([]content.Status, 0, len(brefs)) + for k, bref := range brefs { + status, err := cs.Store.Status(ctx, bref) + if err != nil { + if errdefs.IsNotFound(err) { + continue + } + return nil, err + } + status.Ref = k + + if filter.Match(adaptContentStatus(status)) { + statuses = append(statuses, status) + } + } + + return statuses, nil + +} + +func getRef(tx *bolt.Tx, ns, ref string) string { + bkt := getIngestBucket(tx, ns, ref) + if bkt == nil { + return "" + } + v := bkt.Get(bucketKeyRef) + if len(v) == 0 { + return "" + } + return string(v) +} + +func (cs *contentStore) Status(ctx context.Context, ref string) (content.Status, error) { + ns, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return content.Status{}, err + } + + var bref string + if err := view(ctx, cs.db, func(tx *bolt.Tx) error { + bref = getRef(tx, ns, ref) + if bref == "" { + return errors.Wrapf(errdefs.ErrNotFound, "reference %v", ref) + } + + return nil + }); err != nil { + return content.Status{}, err + } + + st, err := cs.Store.Status(ctx, bref) + if err != nil { + return content.Status{}, err + } + st.Ref = ref + return st, nil +} + +func (cs *contentStore) Abort(ctx context.Context, ref string) error { + ns, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return err + } + + cs.l.RLock() + defer 
cs.l.RUnlock() + + return update(ctx, cs.db, func(tx *bolt.Tx) error { + ibkt := getIngestsBucket(tx, ns) + if ibkt == nil { + return errors.Wrapf(errdefs.ErrNotFound, "reference %v", ref) + } + bkt := ibkt.Bucket([]byte(ref)) + if bkt == nil { + return errors.Wrapf(errdefs.ErrNotFound, "reference %v", ref) + } + bref := string(bkt.Get(bucketKeyRef)) + if bref == "" { + return errors.Wrapf(errdefs.ErrNotFound, "reference %v", ref) + } + expected := string(bkt.Get(bucketKeyExpected)) + if err := ibkt.DeleteBucket([]byte(ref)); err != nil { + return err + } + + if err := removeIngestLease(ctx, tx, ref); err != nil { + return err + } + + // if not shared content, delete active ingest on backend + if expected == "" { + return cs.Store.Abort(ctx, bref) + } + + return nil + }) +} + +func (cs *contentStore) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) { + var wOpts content.WriterOpts + for _, opt := range opts { + if err := opt(&wOpts); err != nil { + return nil, err + } + } + // TODO(AkihiroSuda): we could create a random string or one calculated based on the context + // https://github.com/containerd/containerd/issues/2129#issuecomment-380255019 + if wOpts.Ref == "" { + return nil, errors.Wrap(errdefs.ErrInvalidArgument, "ref must not be empty") + } + ns, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return nil, err + } + + cs.l.RLock() + defer cs.l.RUnlock() + + var ( + w content.Writer + exists bool + bref string + ) + if err := update(ctx, cs.db, func(tx *bolt.Tx) error { + var shared bool + if wOpts.Desc.Digest != "" { + cbkt := getBlobBucket(tx, ns, wOpts.Desc.Digest) + if cbkt != nil { + // Add content to lease to prevent other reference removals + // from affecting this object during a provided lease + if err := addContentLease(ctx, tx, wOpts.Desc.Digest); err != nil { + return errors.Wrap(err, "unable to lease content") + } + // Return error outside of transaction to ensure + // commit succeeds with the lease. + exists = true + return nil + } + + if cs.shared { + if st, err := cs.Store.Info(ctx, wOpts.Desc.Digest); err == nil { + // Ensure the expected size is the same, it is likely + // an error if the size is mismatched but the caller + // must resolve this on commit + if wOpts.Desc.Size == 0 || wOpts.Desc.Size == st.Size { + shared = true + wOpts.Desc.Size = st.Size + } + } + } + } + + bkt, err := createIngestBucket(tx, ns, wOpts.Ref) + if err != nil { + return err + } + + leased, err := addIngestLease(ctx, tx, wOpts.Ref) + if err != nil { + return err + } + + brefb := bkt.Get(bucketKeyRef) + if brefb == nil { + sid, err := bkt.NextSequence() + if err != nil { + return err + } + + bref = createKey(sid, ns, wOpts.Ref) + if err := bkt.Put(bucketKeyRef, []byte(bref)); err != nil { + return err + } + } else { + bref = string(brefb) + } + if !leased { + // Add timestamp to allow aborting once stale + // When a lease is set the ingest should be aborted + // after the lease it belonged to is deleted. + // Expiration can be configurable in the future to + // give more control to the daemon, however leases + // already give users more control of expiration. + expireAt := time.Now().UTC().Add(24 * time.Hour) + if err := writeExpireAt(expireAt, bkt); err != nil { + return err + } + } + + if shared { + if err := bkt.Put(bucketKeyExpected, []byte(wOpts.Desc.Digest)); err != nil { + return err + } + } else { + // Do not use the passed in expected value here since it was + // already checked against the user metadata.
The content must + // be committed in the namespace before it will be seen as + // available in the current namespace. + desc := wOpts.Desc + desc.Digest = "" + w, err = cs.Store.Writer(ctx, content.WithRef(bref), content.WithDescriptor(desc)) + } + return err + }); err != nil { + return nil, err + } + if exists { + return nil, errors.Wrapf(errdefs.ErrAlreadyExists, "content %v", wOpts.Desc.Digest) + } + + return &namespacedWriter{ + ctx: ctx, + ref: wOpts.Ref, + namespace: ns, + db: cs.db, + provider: cs.Store, + l: &cs.l, + w: w, + bref: bref, + started: time.Now(), + desc: wOpts.Desc, + }, nil +} + +type namespacedWriter struct { + ctx context.Context + ref string + namespace string + db transactor + provider interface { + content.Provider + content.Ingester + } + l *sync.RWMutex + + w content.Writer + + bref string + started time.Time + desc ocispec.Descriptor +} + +func (nw *namespacedWriter) Close() error { + if nw.w != nil { + return nw.w.Close() + } + return nil +} + +func (nw *namespacedWriter) Write(p []byte) (int, error) { + // if no writer, first copy and unshare before performing write + if nw.w == nil { + if len(p) == 0 { + return 0, nil + } + + if err := nw.createAndCopy(nw.ctx, nw.desc); err != nil { + return 0, err + } + } + + return nw.w.Write(p) +} + +func (nw *namespacedWriter) Digest() digest.Digest { + if nw.w != nil { + return nw.w.Digest() + } + return nw.desc.Digest +} + +func (nw *namespacedWriter) Truncate(size int64) error { + if nw.w != nil { + return nw.w.Truncate(size) + } + desc := nw.desc + desc.Size = size + desc.Digest = "" + return nw.createAndCopy(nw.ctx, desc) +} + +func (nw *namespacedWriter) createAndCopy(ctx context.Context, desc ocispec.Descriptor) error { + nwDescWithoutDigest := desc + nwDescWithoutDigest.Digest = "" + w, err := nw.provider.Writer(ctx, content.WithRef(nw.bref), content.WithDescriptor(nwDescWithoutDigest)) + if err != nil { + return err + } + + if desc.Size > 0 { + ra, err := nw.provider.ReaderAt(ctx, nw.desc) + if err != nil { + return err + } + defer ra.Close() + + if err := content.CopyReaderAt(w, ra, desc.Size); err != nil { + w.Close() // nw.w is still nil here; close the writer just opened + return err + } + } + nw.w = w + + return nil +} + +func (nw *namespacedWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error { + ctx = namespaces.WithNamespace(ctx, nw.namespace) + + nw.l.RLock() + defer nw.l.RUnlock() + + var innerErr error + + if err := update(ctx, nw.db, func(tx *bolt.Tx) error { + dgst, err := nw.commit(ctx, tx, size, expected, opts...)
+ if err != nil { + if !errdefs.IsAlreadyExists(err) { + return err + } + innerErr = err + } + bkt := getIngestsBucket(tx, nw.namespace) + if bkt != nil { + if err := bkt.DeleteBucket([]byte(nw.ref)); err != nil && err != bolt.ErrBucketNotFound { + return err + } + } + if err := removeIngestLease(ctx, tx, nw.ref); err != nil { + return err + } + return addContentLease(ctx, tx, dgst) + }); err != nil { + return err + } + + return innerErr +} + +func (nw *namespacedWriter) commit(ctx context.Context, tx *bolt.Tx, size int64, expected digest.Digest, opts ...content.Opt) (digest.Digest, error) { + var base content.Info + for _, opt := range opts { + if err := opt(&base); err != nil { + if nw.w != nil { + nw.w.Close() + } + return "", err + } + } + if err := validateInfo(&base); err != nil { + if nw.w != nil { + nw.w.Close() + } + return "", err + } + + var actual digest.Digest + if nw.w == nil { + if size != 0 && size != nw.desc.Size { + return "", errors.Wrapf(errdefs.ErrFailedPrecondition, "%q failed size validation: %v != %v", nw.ref, nw.desc.Size, size) + } + if expected != "" && expected != nw.desc.Digest { + return "", errors.Wrapf(errdefs.ErrFailedPrecondition, "%q unexpected digest", nw.ref) + } + size = nw.desc.Size + actual = nw.desc.Digest + } else { + status, err := nw.w.Status() + if err != nil { + nw.w.Close() + return "", err + } + if size != 0 && size != status.Offset { + nw.w.Close() + return "", errors.Wrapf(errdefs.ErrFailedPrecondition, "%q failed size validation: %v != %v", nw.ref, status.Offset, size) + } + size = status.Offset + + if err := nw.w.Commit(ctx, size, expected); err != nil && !errdefs.IsAlreadyExists(err) { + return "", err + } + actual = nw.w.Digest() + } + + bkt, err := createBlobBucket(tx, nw.namespace, actual) + if err != nil { + if err == bolt.ErrBucketExists { + return actual, errors.Wrapf(errdefs.ErrAlreadyExists, "content %v", actual) + } + return "", err + } + + commitTime := time.Now().UTC() + + sizeEncoded, err := encodeInt(size) + if err != nil { + return "", err + } + + if err := boltutil.WriteTimestamps(bkt, commitTime, commitTime); err != nil { + return "", err + } + if err := boltutil.WriteLabels(bkt, base.Labels); err != nil { + return "", err + } + return actual, bkt.Put(bucketKeySize, sizeEncoded) +} + +func (nw *namespacedWriter) Status() (st content.Status, err error) { + if nw.w != nil { + st, err = nw.w.Status() + } else { + st.Offset = nw.desc.Size + st.Total = nw.desc.Size + st.StartedAt = nw.started + st.UpdatedAt = nw.started + st.Expected = nw.desc.Digest + } + if err == nil { + st.Ref = nw.ref + } + return +} + +func (cs *contentStore) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) { + if err := cs.checkAccess(ctx, desc.Digest); err != nil { + return nil, err + } + return cs.Store.ReaderAt(ctx, desc) +} + +func (cs *contentStore) checkAccess(ctx context.Context, dgst digest.Digest) error { + ns, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return err + } + + return view(ctx, cs.db, func(tx *bolt.Tx) error { + bkt := getBlobBucket(tx, ns, dgst) + if bkt == nil { + return errors.Wrapf(errdefs.ErrNotFound, "content digest %v", dgst) + } + return nil + }) +} + +func validateInfo(info *content.Info) error { + for k, v := range info.Labels { + if err := labels.Validate(k, v); err != nil { + return errors.Wrapf(err, "info.Labels") + } + } + + return nil +} + +func readInfo(info *content.Info, bkt *bolt.Bucket) error { + if err := boltutil.ReadTimestamps(bkt, &info.CreatedAt,
&info.UpdatedAt); err != nil { + return err + } + + labels, err := boltutil.ReadLabels(bkt) + if err != nil { + return err + } + info.Labels = labels + + if v := bkt.Get(bucketKeySize); len(v) > 0 { + info.Size, _ = binary.Varint(v) + } + + return nil +} + +func writeInfo(info *content.Info, bkt *bolt.Bucket) error { + if err := boltutil.WriteTimestamps(bkt, info.CreatedAt, info.UpdatedAt); err != nil { + return err + } + + if err := boltutil.WriteLabels(bkt, info.Labels); err != nil { + return errors.Wrapf(err, "writing labels for info %v", info.Digest) + } + + // Write size + sizeEncoded, err := encodeInt(info.Size) + if err != nil { + return err + } + + return bkt.Put(bucketKeySize, sizeEncoded) +} + +func readExpireAt(bkt *bolt.Bucket) (*time.Time, error) { + v := bkt.Get(bucketKeyExpireAt) + if v == nil { + return nil, nil + } + t := &time.Time{} + if err := t.UnmarshalBinary(v); err != nil { + return nil, err + } + return t, nil +} + +func writeExpireAt(expire time.Time, bkt *bolt.Bucket) error { + expireAt, err := expire.MarshalBinary() + if err != nil { + return err + } + return bkt.Put(bucketKeyExpireAt, expireAt) +} + +func (cs *contentStore) garbageCollect(ctx context.Context) (d time.Duration, err error) { + cs.l.Lock() + t1 := time.Now() + defer func() { + if err == nil { + d = time.Since(t1) + } + cs.l.Unlock() + }() + + contentSeen := map[string]struct{}{} + ingestSeen := map[string]struct{}{} + if err := cs.db.View(func(tx *bolt.Tx) error { + v1bkt := tx.Bucket(bucketKeyVersion) + if v1bkt == nil { + return nil + } + + // iterate through each namespace + v1c := v1bkt.Cursor() + + for k, v := v1c.First(); k != nil; k, v = v1c.Next() { + if v != nil { + continue + } + + cbkt := v1bkt.Bucket(k).Bucket(bucketKeyObjectContent) + if cbkt == nil { + continue + } + bbkt := cbkt.Bucket(bucketKeyObjectBlob) + if bbkt != nil { + if err := bbkt.ForEach(func(ck, cv []byte) error { + if cv == nil { + contentSeen[string(ck)] = struct{}{} + } + return nil + }); err != nil { + return err + } + } + + ibkt := cbkt.Bucket(bucketKeyObjectIngests) + if ibkt != nil { + if err := ibkt.ForEach(func(ref, v []byte) error { + if v == nil { + bkt := ibkt.Bucket(ref) + // expected here may be from a different namespace + // so must be explicitly retained from the ingest + // in case it was removed from the other namespace + expected := bkt.Get(bucketKeyExpected) + if len(expected) > 0 { + contentSeen[string(expected)] = struct{}{} + } + bref := bkt.Get(bucketKeyRef) + if len(bref) > 0 { + ingestSeen[string(bref)] = struct{}{} + } + } + return nil + }); err != nil { + return err + } + } + } + + return nil + }); err != nil { + return 0, err + } + + err = cs.Store.Walk(ctx, func(info content.Info) error { + if _, ok := contentSeen[info.Digest.String()]; !ok { + if err := cs.Store.Delete(ctx, info.Digest); err != nil { + return err + } + log.G(ctx).WithField("digest", info.Digest).Debug("removed content") + } + return nil + }) + if err != nil { + return + } + + // If the content store has implemented a more efficient walk function + // then use that; otherwise fall back to reading all statuses, which may + // cause reading of unneeded metadata.
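+ // The type assertion below is the usual optional-interface upgrade
+ // pattern. A sketch of a store advertising the faster path (refs() is a
+ // hypothetical accessor; the method signature matches the interface
+ // checked below):
+ //
+ //	func (s *refStore) WalkStatusRefs(ctx context.Context, fn func(string) error) error {
+ //		for _, ref := range s.refs() { // visit refs only, no full status reads
+ //			if err := fn(ref); err != nil {
+ //				return err
+ //			}
+ //		}
+ //		return nil
+ //	}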
+ type statusWalker interface { + WalkStatusRefs(context.Context, func(string) error) error + } + if w, ok := cs.Store.(statusWalker); ok { + err = w.WalkStatusRefs(ctx, func(ref string) error { + if _, ok := ingestSeen[ref]; !ok { + if err := cs.Store.Abort(ctx, ref); err != nil { + return err + } + log.G(ctx).WithField("ref", ref).Debug("cleanup aborting ingest") + } + return nil + }) + } else { + var statuses []content.Status + statuses, err = cs.Store.ListStatuses(ctx) + if err != nil { + return 0, err + } + for _, status := range statuses { + if _, ok := ingestSeen[status.Ref]; !ok { + if err = cs.Store.Abort(ctx, status.Ref); err != nil { + return + } + log.G(ctx).WithField("ref", status.Ref).Debug("cleanup aborting ingest") + } + } + } + return +} diff --git a/vendor/github.com/containerd/containerd/metadata/db.go b/vendor/github.com/containerd/containerd/metadata/db.go new file mode 100644 index 00000000..40d045f0 --- /dev/null +++ b/vendor/github.com/containerd/containerd/metadata/db.go @@ -0,0 +1,444 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package metadata + +import ( + "context" + "encoding/binary" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/gc" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/snapshots" + "github.com/pkg/errors" + bolt "go.etcd.io/bbolt" +) + +const ( + // schemaVersion represents the schema version of + // the database. This schema version represents the + // structure of the data in the database. The schema + // can evolve at any time but any backwards + // incompatible changes or structural changes require + // bumping the schema version. + schemaVersion = "v1" + + // dbVersion represents updates to the schema + // version which are additions and compatible with + // prior versions of the same schema. + dbVersion = 3 +) + +// DBOpt configures how we set up the DB +type DBOpt func(*dbOptions) + +// WithPolicyIsolated isolates contents between namespaces +func WithPolicyIsolated(o *dbOptions) { + o.shared = false +} + +// dbOptions configure db options. +type dbOptions struct { + shared bool +} + +// DB represents a metadata database backed by a bolt +// database. The database is fully namespaced and stores +// image, container, namespace, snapshot, and content data +// while proxying data shared across namespaces to backend +// datastores for content and snapshots. +type DB struct { + db *bolt.DB + ss map[string]*snapshotter + cs *contentStore + + // wlock is used to protect access to the data structures during garbage + // collection. While the wlock is held no writable transactions can be + // opened, preventing changes from occurring between the mark and + // sweep phases without preventing read transactions. + wlock sync.RWMutex + + // dirty flag indicates that references have been removed which require + // a garbage collection to ensure the database is clean.
This tracks + // the number of dirty operations. This should be updated and read + // atomically if outside of wlock.Lock. + dirty uint32 + + // dirtySS and dirtyCS flags keep track of datastores which have had + // deletions since the last garbage collection. These datastores will + // be garbage collected during the next garbage collection. These + // should only be updated inside of a write transaction or wlock.Lock. + dirtySS map[string]struct{} + dirtyCS bool + + // mutationCallbacks are called after each mutation with the flag + // set indicating whether any dirty flags are set + mutationCallbacks []func(bool) + + dbopts dbOptions +} + +// NewDB creates a new metadata database using the provided +// bolt database, content store, and snapshotters. +func NewDB(db *bolt.DB, cs content.Store, ss map[string]snapshots.Snapshotter, opts ...DBOpt) *DB { + m := &DB{ + db: db, + ss: make(map[string]*snapshotter, len(ss)), + dirtySS: map[string]struct{}{}, + dbopts: dbOptions{ + shared: true, + }, + } + + for _, opt := range opts { + opt(&m.dbopts) + } + + // Initialize data stores + m.cs = newContentStore(m, m.dbopts.shared, cs) + for name, sn := range ss { + m.ss[name] = newSnapshotter(m, name, sn) + } + + return m +} + +// Init ensures the database is at the correct version +// and performs any needed migrations. +func (m *DB) Init(ctx context.Context) error { + // errSkip is used when no migration or version needs to be written + // to the database and the transaction can be immediately rolled + // back rather than performing a much slower and unnecessary commit. + var errSkip = errors.New("skip update") + + err := m.db.Update(func(tx *bolt.Tx) error { + var ( + // current schema and version + schema = "v0" + version = 0 + ) + + // i represents the index of the first migration + // which must be run to get the database up to date. + // The migration's version will be checked in reverse + // order, decrementing i for each migration which + // represents a version newer than the current + // database version + i := len(migrations) + + for ; i > 0; i-- { + migration := migrations[i-1] + + bkt := tx.Bucket([]byte(migration.schema)) + if bkt == nil { + // Hasn't encountered another schema, go to next migration + if schema == "v0" { + continue + } + break + } + if schema == "v0" { + schema = migration.schema + vb := bkt.Get(bucketKeyDBVersion) + if vb != nil { + v, _ := binary.Varint(vb) + version = int(v) + } + } + + if version >= migration.version { + break + } + } + + // Previous version of database found + if schema != "v0" { + updates := migrations[i:] + + // No migration updates, return immediately + if len(updates) == 0 { + return errSkip + } + + for _, m := range updates { + t0 := time.Now() + if err := m.migrate(tx); err != nil { + return errors.Wrapf(err, "failed to migrate to %s.%d", m.schema, m.version) + } + log.G(ctx).WithField("d", time.Since(t0)).Debugf("finished database migration to %s.%d", m.schema, m.version) + } + } + + bkt, err := tx.CreateBucketIfNotExists(bucketKeyVersion) + if err != nil { + return err + } + + versionEncoded, err := encodeInt(dbVersion) + if err != nil { + return err + } + + return bkt.Put(bucketKeyDBVersion, versionEncoded) + }) + if err == errSkip { + err = nil + } + return err +} + +// ContentStore returns a namespaced content store +// proxied to a content store.
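+// A minimal initialization sketch (the file path and 0644 mode are
+// illustrative; cs and ss are assumed stores as passed to NewDB):
+//
+//	bdb, err := bolt.Open("/var/lib/containerd/meta.db", 0644, nil)
+//	if err != nil {
+//		return err
+//	}
+//	mdb := NewDB(bdb, cs, ss)
+//	if err := mdb.Init(ctx); err != nil { // applies any pending migrations
+//		return err
+//	}
+//	store := mdb.ContentStore()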
+func (m *DB) ContentStore() content.Store { + if m.cs == nil { + return nil + } + return m.cs +} + +// Snapshotter returns a namespaced content store for +// the requested snapshotter name proxied to a snapshotter. +func (m *DB) Snapshotter(name string) snapshots.Snapshotter { + sn, ok := m.ss[name] + if !ok { + return nil + } + return sn +} + +// Snapshotters returns all available snapshotters. +func (m *DB) Snapshotters() map[string]snapshots.Snapshotter { + ss := make(map[string]snapshots.Snapshotter, len(m.ss)) + for n, sn := range m.ss { + ss[n] = sn + } + return ss +} + +// View runs a readonly transaction on the metadata store. +func (m *DB) View(fn func(*bolt.Tx) error) error { + return m.db.View(fn) +} + +// Update runs a writable transaction on the metadata store. +func (m *DB) Update(fn func(*bolt.Tx) error) error { + m.wlock.RLock() + defer m.wlock.RUnlock() + err := m.db.Update(fn) + if err == nil { + dirty := atomic.LoadUint32(&m.dirty) > 0 + for _, fn := range m.mutationCallbacks { + fn(dirty) + } + } + + return err +} + +// RegisterMutationCallback registers a function to be called after a metadata +// mutation has been performed. +// +// The callback function receives a boolean argument indicating whether a +// deletion has occurred since the last garbage collection. +func (m *DB) RegisterMutationCallback(fn func(bool)) { + m.wlock.Lock() + m.mutationCallbacks = append(m.mutationCallbacks, fn) + m.wlock.Unlock() +} + +// GCStats holds the duration for the different phases of the garbage collector +type GCStats struct { + MetaD time.Duration + ContentD time.Duration + SnapshotD map[string]time.Duration +} + +// Elapsed returns the duration which elapsed during a collection +func (s GCStats) Elapsed() time.Duration { + return s.MetaD +} + +// GarbageCollect starts garbage collection +func (m *DB) GarbageCollect(ctx context.Context) (gc.Stats, error) { + m.wlock.Lock() + t1 := time.Now() + + marked, err := m.getMarked(ctx) + if err != nil { + m.wlock.Unlock() + return nil, err + } + + if err := m.db.Update(func(tx *bolt.Tx) error { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + rm := func(ctx context.Context, n gc.Node) error { + if _, ok := marked[n]; ok { + return nil + } + + if n.Type == ResourceSnapshot { + if idx := strings.IndexRune(n.Key, '/'); idx > 0 { + m.dirtySS[n.Key[:idx]] = struct{}{} + } + } else if n.Type == ResourceContent || n.Type == ResourceIngest { + m.dirtyCS = true + } + return remove(ctx, tx, n) + } + + if err := scanAll(ctx, tx, rm); err != nil { + return errors.Wrap(err, "failed to scan and remove") + } + + return nil + }); err != nil { + m.wlock.Unlock() + return nil, err + } + + var stats GCStats + var wg sync.WaitGroup + + // reset dirty, no need for atomic inside of wlock.Lock + m.dirty = 0 + + if len(m.dirtySS) > 0 { + var sl sync.Mutex + stats.SnapshotD = map[string]time.Duration{} + wg.Add(len(m.dirtySS)) + for snapshotterName := range m.dirtySS { + log.G(ctx).WithField("snapshotter", snapshotterName).Debug("schedule snapshotter cleanup") + go func(snapshotterName string) { + st1 := time.Now() + m.cleanupSnapshotter(snapshotterName) + + sl.Lock() + stats.SnapshotD[snapshotterName] = time.Since(st1) + sl.Unlock() + + wg.Done() + }(snapshotterName) + } + m.dirtySS = map[string]struct{}{} + } + + if m.dirtyCS { + wg.Add(1) + log.G(ctx).Debug("schedule content cleanup") + go func() { + ct1 := time.Now() + m.cleanupContent() + stats.ContentD = time.Since(ct1) + wg.Done() + }() + m.dirtyCS = false + } + + stats.MetaD = time.Since(t1) + m.wlock.Unlock()
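+ // The write lock is released before waiting on the cleanup goroutines
+ // scheduled above: they only touch the backend snapshotters and content
+ // store, so metadata transactions can resume while cleanup proceeds.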
+ + wg.Wait() + + return stats, err +} + +func (m *DB) getMarked(ctx context.Context) (map[gc.Node]struct{}, error) { + var marked map[gc.Node]struct{} + if err := m.db.View(func(tx *bolt.Tx) error { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + var ( + nodes []gc.Node + wg sync.WaitGroup + roots = make(chan gc.Node) + ) + wg.Add(1) + go func() { + defer wg.Done() + for n := range roots { + nodes = append(nodes, n) + } + }() + // Call roots + if err := scanRoots(ctx, tx, roots); err != nil { + cancel() + return err + } + close(roots) + wg.Wait() + + refs := func(n gc.Node) ([]gc.Node, error) { + var sn []gc.Node + if err := references(ctx, tx, n, func(nn gc.Node) { + sn = append(sn, nn) + }); err != nil { + return nil, err + } + return sn, nil + } + + reachable, err := gc.Tricolor(nodes, refs) + if err != nil { + return err + } + marked = reachable + return nil + }); err != nil { + return nil, err + } + return marked, nil +} + +func (m *DB) cleanupSnapshotter(name string) (time.Duration, error) { + ctx := context.Background() + sn, ok := m.ss[name] + if !ok { + return 0, nil + } + + d, err := sn.garbageCollect(ctx) + logger := log.G(ctx).WithField("snapshotter", name) + if err != nil { + logger.WithError(err).Warn("snapshot garbage collection failed") + } else { + logger.WithField("d", d).Debugf("snapshot garbage collected") + } + return d, err +} + +func (m *DB) cleanupContent() (time.Duration, error) { + ctx := context.Background() + if m.cs == nil { + return 0, nil + } + + d, err := m.cs.garbageCollect(ctx) + if err != nil { + log.G(ctx).WithError(err).Warn("content garbage collection failed") + } else { + log.G(ctx).WithField("d", d).Debugf("content garbage collected") + } + + return d, err +} diff --git a/vendor/github.com/containerd/containerd/metadata/gc.go b/vendor/github.com/containerd/containerd/metadata/gc.go new file mode 100644 index 00000000..afe16c92 --- /dev/null +++ b/vendor/github.com/containerd/containerd/metadata/gc.go @@ -0,0 +1,513 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package metadata + +import ( + "bytes" + "context" + "fmt" + "strings" + "time" + + "github.com/containerd/containerd/gc" + "github.com/containerd/containerd/log" + "github.com/pkg/errors" + bolt "go.etcd.io/bbolt" +) + +const ( + // ResourceUnknown specifies an unknown resource + ResourceUnknown gc.ResourceType = iota + // ResourceContent specifies a content resource + ResourceContent + // ResourceSnapshot specifies a snapshot resource + ResourceSnapshot + // ResourceContainer specifies a container resource + ResourceContainer + // ResourceTask specifies a task resource + ResourceTask + // ResourceLease specifies a lease + ResourceLease + // ResourceIngest specifies a content ingest + ResourceIngest +) + +const ( + resourceContentFlat = ResourceContent | 0x20 + resourceSnapshotFlat = ResourceSnapshot | 0x20 +) + +var ( + labelGCRoot = []byte("containerd.io/gc.root") + labelGCSnapRef = []byte("containerd.io/gc.ref.snapshot.") + labelGCContentRef = []byte("containerd.io/gc.ref.content") + labelGCExpire = []byte("containerd.io/gc.expire") + labelGCFlat = []byte("containerd.io/gc.flat") +) + +func scanRoots(ctx context.Context, tx *bolt.Tx, nc chan<- gc.Node) error { + v1bkt := tx.Bucket(bucketKeyVersion) + if v1bkt == nil { + return nil + } + + expThreshold := time.Now() + + // iterate through each namespace + v1c := v1bkt.Cursor() + + // cerr indicates the scan did not successfully send all + // the roots. The scan does not need to be cancelled but + // must return error at the end. + var cerr error + fn := func(n gc.Node) { + select { + case nc <- n: + case <-ctx.Done(): + cerr = ctx.Err() + } + } + + for k, v := v1c.First(); k != nil; k, v = v1c.Next() { + if v != nil { + continue + } + nbkt := v1bkt.Bucket(k) + ns := string(k) + + lbkt := nbkt.Bucket(bucketKeyObjectLeases) + if lbkt != nil { + if err := lbkt.ForEach(func(k, v []byte) error { + if v != nil { + return nil + } + libkt := lbkt.Bucket(k) + var flat bool + + if lblbkt := libkt.Bucket(bucketKeyObjectLabels); lblbkt != nil { + if expV := lblbkt.Get(labelGCExpire); expV != nil { + exp, err := time.Parse(time.RFC3339, string(expV)) + if err != nil { + // label not used, log and continue to use lease + log.G(ctx).WithError(err).WithField("lease", string(k)).Infof("ignoring invalid expiration value %q", string(expV)) + } else if expThreshold.After(exp) { + // lease has expired, skip + return nil + } + } + + if flatV := lblbkt.Get(labelGCFlat); flatV != nil { + flat = true + } + } + + fn(gcnode(ResourceLease, ns, string(k))) + + // Emit content and snapshots as roots instead of implementing + // in references. Since leases cannot be referenced there is + // no need to allow the lookup to be recursive, handling here + // therefore reduces the number of database seeks. 
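+				//
+				// For orientation, the lease labels handled above follow the
+				// GC label conventions declared at the top of this file; a
+				// lease carrying (hypothetical values)
+				//
+				//	containerd.io/gc.expire = "2021-01-20T00:00:00Z"
+				//	containerd.io/gc.flat   = "true"
+				//
+				// is skipped entirely once expired, while a flat lease pins
+				// its content and snapshots without expanding their own GC
+				// label references.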
+ + ctype := ResourceContent + if flat { + ctype = resourceContentFlat + } + + cbkt := libkt.Bucket(bucketKeyObjectContent) + if cbkt != nil { + if err := cbkt.ForEach(func(k, v []byte) error { + fn(gcnode(ctype, ns, string(k))) + return nil + }); err != nil { + return err + } + } + + stype := ResourceSnapshot + if flat { + stype = resourceSnapshotFlat + } + + sbkt := libkt.Bucket(bucketKeyObjectSnapshots) + if sbkt != nil { + if err := sbkt.ForEach(func(sk, sv []byte) error { + if sv != nil { + return nil + } + snbkt := sbkt.Bucket(sk) + + return snbkt.ForEach(func(k, v []byte) error { + fn(gcnode(stype, ns, fmt.Sprintf("%s/%s", sk, k))) + return nil + }) + }); err != nil { + return err + } + } + + ibkt := libkt.Bucket(bucketKeyObjectIngests) + if ibkt != nil { + if err := ibkt.ForEach(func(k, v []byte) error { + fn(gcnode(ResourceIngest, ns, string(k))) + return nil + }); err != nil { + return err + } + } + + return nil + }); err != nil { + return err + } + } + + ibkt := nbkt.Bucket(bucketKeyObjectImages) + if ibkt != nil { + if err := ibkt.ForEach(func(k, v []byte) error { + if v != nil { + return nil + } + + target := ibkt.Bucket(k).Bucket(bucketKeyTarget) + if target != nil { + contentKey := string(target.Get(bucketKeyDigest)) + fn(gcnode(ResourceContent, ns, contentKey)) + } + return sendLabelRefs(ns, ibkt.Bucket(k), fn) + }); err != nil { + return err + } + } + + cbkt := nbkt.Bucket(bucketKeyObjectContent) + if cbkt != nil { + ibkt := cbkt.Bucket(bucketKeyObjectIngests) + if ibkt != nil { + if err := ibkt.ForEach(func(k, v []byte) error { + if v != nil { + return nil + } + ea, err := readExpireAt(ibkt.Bucket(k)) + if err != nil { + return err + } + if ea == nil || expThreshold.After(*ea) { + return nil + } + fn(gcnode(ResourceIngest, ns, string(k))) + return nil + }); err != nil { + return err + } + } + cbkt = cbkt.Bucket(bucketKeyObjectBlob) + if cbkt != nil { + if err := cbkt.ForEach(func(k, v []byte) error { + if v != nil { + return nil + } + + if isRootRef(cbkt.Bucket(k)) { + fn(gcnode(ResourceContent, ns, string(k))) + } + + return nil + }); err != nil { + return err + } + } + } + + cbkt = nbkt.Bucket(bucketKeyObjectContainers) + if cbkt != nil { + if err := cbkt.ForEach(func(k, v []byte) error { + if v != nil { + return nil + } + + cibkt := cbkt.Bucket(k) + snapshotter := string(cibkt.Get(bucketKeySnapshotter)) + if snapshotter != "" { + ss := string(cibkt.Get(bucketKeySnapshotKey)) + fn(gcnode(ResourceSnapshot, ns, fmt.Sprintf("%s/%s", snapshotter, ss))) + } + + return sendLabelRefs(ns, cibkt, fn) + }); err != nil { + return err + } + } + + sbkt := nbkt.Bucket(bucketKeyObjectSnapshots) + if sbkt != nil { + if err := sbkt.ForEach(func(sk, sv []byte) error { + if sv != nil { + return nil + } + snbkt := sbkt.Bucket(sk) + + return snbkt.ForEach(func(k, v []byte) error { + if v != nil { + return nil + } + if isRootRef(snbkt.Bucket(k)) { + fn(gcnode(ResourceSnapshot, ns, fmt.Sprintf("%s/%s", sk, k))) + } + return nil + }) + }); err != nil { + return err + } + } + } + return cerr +} + +func references(ctx context.Context, tx *bolt.Tx, node gc.Node, fn func(gc.Node)) error { + switch node.Type { + case ResourceContent: + bkt := getBucket(tx, bucketKeyVersion, []byte(node.Namespace), bucketKeyObjectContent, bucketKeyObjectBlob, []byte(node.Key)) + if bkt == nil { + // Node may be created from dead edge + return nil + } + + return sendLabelRefs(node.Namespace, bkt, fn) + case ResourceSnapshot, resourceSnapshotFlat: + parts := strings.SplitN(node.Key, "/", 2) + if len(parts) != 2 { + 
return errors.Errorf("invalid snapshot gc key %s", node.Key) + } + ss := parts[0] + name := parts[1] + + bkt := getBucket(tx, bucketKeyVersion, []byte(node.Namespace), bucketKeyObjectSnapshots, []byte(ss), []byte(name)) + if bkt == nil { + // Node may be created from dead edge + return nil + } + + if pv := bkt.Get(bucketKeyParent); len(pv) > 0 { + fn(gcnode(node.Type, node.Namespace, fmt.Sprintf("%s/%s", ss, pv))) + } + + // Do not send labeled references for flat snapshot refs + if node.Type == resourceSnapshotFlat { + return nil + } + + return sendLabelRefs(node.Namespace, bkt, fn) + case ResourceIngest: + // Send expected value + bkt := getBucket(tx, bucketKeyVersion, []byte(node.Namespace), bucketKeyObjectContent, bucketKeyObjectIngests, []byte(node.Key)) + if bkt == nil { + // Node may be created from dead edge + return nil + } + // Load expected + expected := bkt.Get(bucketKeyExpected) + if len(expected) > 0 { + fn(gcnode(ResourceContent, node.Namespace, string(expected))) + } + return nil + } + + return nil +} + +func scanAll(ctx context.Context, tx *bolt.Tx, fn func(ctx context.Context, n gc.Node) error) error { + v1bkt := tx.Bucket(bucketKeyVersion) + if v1bkt == nil { + return nil + } + + // iterate through each namespace + v1c := v1bkt.Cursor() + + for k, v := v1c.First(); k != nil; k, v = v1c.Next() { + if v != nil { + continue + } + nbkt := v1bkt.Bucket(k) + ns := string(k) + + lbkt := nbkt.Bucket(bucketKeyObjectLeases) + if lbkt != nil { + if err := lbkt.ForEach(func(k, v []byte) error { + if v != nil { + return nil + } + return fn(ctx, gcnode(ResourceLease, ns, string(k))) + }); err != nil { + return err + } + } + + sbkt := nbkt.Bucket(bucketKeyObjectSnapshots) + if sbkt != nil { + if err := sbkt.ForEach(func(sk, sv []byte) error { + if sv != nil { + return nil + } + snbkt := sbkt.Bucket(sk) + return snbkt.ForEach(func(k, v []byte) error { + if v != nil { + return nil + } + node := gcnode(ResourceSnapshot, ns, fmt.Sprintf("%s/%s", sk, k)) + return fn(ctx, node) + }) + }); err != nil { + return err + } + } + + cbkt := nbkt.Bucket(bucketKeyObjectContent) + if cbkt != nil { + ibkt := cbkt.Bucket(bucketKeyObjectIngests) + if ibkt != nil { + if err := ibkt.ForEach(func(k, v []byte) error { + if v != nil { + return nil + } + node := gcnode(ResourceIngest, ns, string(k)) + return fn(ctx, node) + }); err != nil { + return err + } + } + + cbkt = cbkt.Bucket(bucketKeyObjectBlob) + if cbkt != nil { + if err := cbkt.ForEach(func(k, v []byte) error { + if v != nil { + return nil + } + node := gcnode(ResourceContent, ns, string(k)) + return fn(ctx, node) + }); err != nil { + return err + } + } + } + } + + return nil +} + +func remove(ctx context.Context, tx *bolt.Tx, node gc.Node) error { + v1bkt := tx.Bucket(bucketKeyVersion) + if v1bkt == nil { + return nil + } + + nsbkt := v1bkt.Bucket([]byte(node.Namespace)) + if nsbkt == nil { + return nil + } + + switch node.Type { + case ResourceContent: + cbkt := nsbkt.Bucket(bucketKeyObjectContent) + if cbkt != nil { + cbkt = cbkt.Bucket(bucketKeyObjectBlob) + } + if cbkt != nil { + log.G(ctx).WithField("key", node.Key).Debug("remove content") + return cbkt.DeleteBucket([]byte(node.Key)) + } + case ResourceSnapshot: + sbkt := nsbkt.Bucket(bucketKeyObjectSnapshots) + if sbkt != nil { + parts := strings.SplitN(node.Key, "/", 2) + if len(parts) != 2 { + return errors.Errorf("invalid snapshot gc key %s", node.Key) + } + ssbkt := sbkt.Bucket([]byte(parts[0])) + if ssbkt != nil { + log.G(ctx).WithField("key", parts[1]).WithField("snapshotter", 
parts[0]).Debug("remove snapshot") + return ssbkt.DeleteBucket([]byte(parts[1])) + } + } + case ResourceLease: + lbkt := nsbkt.Bucket(bucketKeyObjectLeases) + if lbkt != nil { + return lbkt.DeleteBucket([]byte(node.Key)) + } + case ResourceIngest: + ibkt := nsbkt.Bucket(bucketKeyObjectContent) + if ibkt != nil { + ibkt = ibkt.Bucket(bucketKeyObjectIngests) + } + if ibkt != nil { + log.G(ctx).WithField("ref", node.Key).Debug("remove ingest") + return ibkt.DeleteBucket([]byte(node.Key)) + } + } + + return nil +} + +// sendLabelRefs sends all snapshot and content references referred to by the labels in the bkt +func sendLabelRefs(ns string, bkt *bolt.Bucket, fn func(gc.Node)) error { + lbkt := bkt.Bucket(bucketKeyObjectLabels) + if lbkt != nil { + lc := lbkt.Cursor() + + labelRef := string(labelGCContentRef) + for k, v := lc.Seek(labelGCContentRef); k != nil && strings.HasPrefix(string(k), labelRef); k, v = lc.Next() { + if ks := string(k); ks != labelRef { + // Allow reference naming separated by . or /, ignore names + if ks[len(labelRef)] != '.' && ks[len(labelRef)] != '/' { + continue + } + } + + fn(gcnode(ResourceContent, ns, string(v))) + } + + for k, v := lc.Seek(labelGCSnapRef); k != nil && strings.HasPrefix(string(k), string(labelGCSnapRef)); k, v = lc.Next() { + snapshotter := k[len(labelGCSnapRef):] + if i := bytes.IndexByte(snapshotter, '/'); i >= 0 { + snapshotter = snapshotter[:i] + } + fn(gcnode(ResourceSnapshot, ns, fmt.Sprintf("%s/%s", snapshotter, v))) + } + + } + return nil +} + +func isRootRef(bkt *bolt.Bucket) bool { + lbkt := bkt.Bucket(bucketKeyObjectLabels) + if lbkt != nil { + rv := lbkt.Get(labelGCRoot) + if rv != nil { + // TODO: interpret rv as a timestamp and skip if expired + return true + } + } + return false +} + +func gcnode(t gc.ResourceType, ns, key string) gc.Node { + return gc.Node{ + Type: t, + Namespace: ns, + Key: key, + } +} diff --git a/vendor/github.com/containerd/containerd/metadata/images.go b/vendor/github.com/containerd/containerd/metadata/images.go new file mode 100644 index 00000000..cace4e18 --- /dev/null +++ b/vendor/github.com/containerd/containerd/metadata/images.go @@ -0,0 +1,386 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package metadata + +import ( + "context" + "encoding/binary" + "fmt" + "strings" + "sync/atomic" + "time" + + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/filters" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/labels" + "github.com/containerd/containerd/metadata/boltutil" + "github.com/containerd/containerd/namespaces" + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + bolt "go.etcd.io/bbolt" +) + +type imageStore struct { + db *DB +} + +// NewImageStore returns a store backed by a bolt DB +func NewImageStore(db *DB) images.Store { + return &imageStore{db: db} +} + +func (s *imageStore) Get(ctx context.Context, name string) (images.Image, error) { + var image images.Image + + namespace, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return images.Image{}, err + } + + if err := view(ctx, s.db, func(tx *bolt.Tx) error { + bkt := getImagesBucket(tx, namespace) + if bkt == nil { + return errors.Wrapf(errdefs.ErrNotFound, "image %q", name) + } + + ibkt := bkt.Bucket([]byte(name)) + if ibkt == nil { + return errors.Wrapf(errdefs.ErrNotFound, "image %q", name) + } + + image.Name = name + if err := readImage(&image, ibkt); err != nil { + return errors.Wrapf(err, "image %q", name) + } + + return nil + }); err != nil { + return images.Image{}, err + } + + return image, nil +} + +func (s *imageStore) List(ctx context.Context, fs ...string) ([]images.Image, error) { + namespace, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return nil, err + } + + filter, err := filters.ParseAll(fs...) + if err != nil { + return nil, errors.Wrap(errdefs.ErrInvalidArgument, err.Error()) + } + + var m []images.Image + if err := view(ctx, s.db, func(tx *bolt.Tx) error { + bkt := getImagesBucket(tx, namespace) + if bkt == nil { + return nil // empty store + } + + return bkt.ForEach(func(k, v []byte) error { + var ( + image = images.Image{ + Name: string(k), + } + kbkt = bkt.Bucket(k) + ) + + if err := readImage(&image, kbkt); err != nil { + return err + } + + if filter.Match(adaptImage(image)) { + m = append(m, image) + } + return nil + }) + }); err != nil { + return nil, err + } + + return m, nil +} + +func (s *imageStore) Create(ctx context.Context, image images.Image) (images.Image, error) { + namespace, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return images.Image{}, err + } + + if err := update(ctx, s.db, func(tx *bolt.Tx) error { + if err := validateImage(&image); err != nil { + return err + } + + bkt, err := createImagesBucket(tx, namespace) + if err != nil { + return err + } + + ibkt, err := bkt.CreateBucket([]byte(image.Name)) + if err != nil { + if err != bolt.ErrBucketExists { + return err + } + + return errors.Wrapf(errdefs.ErrAlreadyExists, "image %q", image.Name) + } + + image.CreatedAt = time.Now().UTC() + image.UpdatedAt = image.CreatedAt + return writeImage(ibkt, &image) + }); err != nil { + return images.Image{}, err + } + + return image, nil +} + +func (s *imageStore) Update(ctx context.Context, image images.Image, fieldpaths ...string) (images.Image, error) { + namespace, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return images.Image{}, err + } + + if image.Name == "" { + return images.Image{}, errors.Wrapf(errdefs.ErrInvalidArgument, "image name is required for update") + } + + var updated images.Image + + if err := update(ctx, s.db, func(tx *bolt.Tx) error { + bkt, err := 
createImagesBucket(tx, namespace) + if err != nil { + return err + } + + ibkt := bkt.Bucket([]byte(image.Name)) + if ibkt == nil { + return errors.Wrapf(errdefs.ErrNotFound, "image %q", image.Name) + } + + if err := readImage(&updated, ibkt); err != nil { + return errors.Wrapf(err, "image %q", image.Name) + } + createdat := updated.CreatedAt + updated.Name = image.Name + + if len(fieldpaths) > 0 { + for _, path := range fieldpaths { + if strings.HasPrefix(path, "labels.") { + if updated.Labels == nil { + updated.Labels = map[string]string{} + } + + key := strings.TrimPrefix(path, "labels.") + updated.Labels[key] = image.Labels[key] + continue + } else if strings.HasPrefix(path, "annotations.") { + if updated.Target.Annotations == nil { + updated.Target.Annotations = map[string]string{} + } + + key := strings.TrimPrefix(path, "annotations.") + updated.Target.Annotations[key] = image.Target.Annotations[key] + continue + } + + switch path { + case "labels": + updated.Labels = image.Labels + case "target": + // NOTE(stevvooe): While we allow setting individual labels, we + // only support replacing the target as a unit, since that is + // commonly pulled as a unit from other sources. It often doesn't + // make sense to modify the size or digest without touching the + // mediatype, as well, for example. + updated.Target = image.Target + case "annotations": + updated.Target.Annotations = image.Target.Annotations + default: + return errors.Wrapf(errdefs.ErrInvalidArgument, "cannot update %q field on image %q", path, image.Name) + } + } + } else { + updated = image + } + + if err := validateImage(&updated); err != nil { + return err + } + + updated.CreatedAt = createdat + updated.UpdatedAt = time.Now().UTC() + return writeImage(ibkt, &updated) + }); err != nil { + return images.Image{}, err + } + + return updated, nil + +} + +func (s *imageStore) Delete(ctx context.Context, name string, opts ...images.DeleteOpt) error { + namespace, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return err + } + + return update(ctx, s.db, func(tx *bolt.Tx) error { + bkt := getImagesBucket(tx, namespace) + if bkt == nil { + return errors.Wrapf(errdefs.ErrNotFound, "image %q", name) + } + + if err = bkt.DeleteBucket([]byte(name)); err != nil { + if err == bolt.ErrBucketNotFound { + err = errors.Wrapf(errdefs.ErrNotFound, "image %q", name) + } + return err + } + + atomic.AddUint32(&s.db.dirty, 1) + + return nil + }) +} + +func validateImage(image *images.Image) error { + if image.Name == "" { + return errors.Wrapf(errdefs.ErrInvalidArgument, "image name must not be empty") + } + + for k, v := range image.Labels { + if err := labels.Validate(k, v); err != nil { + return errors.Wrapf(err, "image.Labels") + } + } + + return validateTarget(&image.Target) +} + +func validateTarget(target *ocispec.Descriptor) error { + // NOTE(stevvooe): Only validate fields we actually store. 
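+	//
+	// A minimal descriptor that passes these checks might look like this
+	// (illustrative values only):
+	//
+	//	ocispec.Descriptor{
+	//		MediaType: ocispec.MediaTypeImageManifest,
+	//		Digest:    "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+	//		Size:      7023,
+	//	}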
+ + if err := target.Digest.Validate(); err != nil { + return errors.Wrapf(errdefs.ErrInvalidArgument, "Target.Digest %q invalid: %v", target.Digest, err) + } + + if target.Size <= 0 { + return errors.Wrapf(errdefs.ErrInvalidArgument, "Target.Size must be greater than zero") + } + + if target.MediaType == "" { + return errors.Wrapf(errdefs.ErrInvalidArgument, "Target.MediaType must be set") + } + + return nil +} + +func readImage(image *images.Image, bkt *bolt.Bucket) error { + if err := boltutil.ReadTimestamps(bkt, &image.CreatedAt, &image.UpdatedAt); err != nil { + return err + } + + labels, err := boltutil.ReadLabels(bkt) + if err != nil { + return err + } + image.Labels = labels + + image.Target.Annotations, err = boltutil.ReadAnnotations(bkt) + if err != nil { + return err + } + + tbkt := bkt.Bucket(bucketKeyTarget) + if tbkt == nil { + return errors.New("unable to read target bucket") + } + return tbkt.ForEach(func(k, v []byte) error { + if v == nil { + return nil // skip it? a bkt maybe? + } + + // TODO(stevvooe): This is why we need to use byte values for + // keys, rather than full arrays. + switch string(k) { + case string(bucketKeyDigest): + image.Target.Digest = digest.Digest(v) + case string(bucketKeyMediaType): + image.Target.MediaType = string(v) + case string(bucketKeySize): + image.Target.Size, _ = binary.Varint(v) + } + + return nil + }) +} + +func writeImage(bkt *bolt.Bucket, image *images.Image) error { + if err := boltutil.WriteTimestamps(bkt, image.CreatedAt, image.UpdatedAt); err != nil { + return err + } + + if err := boltutil.WriteLabels(bkt, image.Labels); err != nil { + return errors.Wrapf(err, "writing labels for image %v", image.Name) + } + + if err := boltutil.WriteAnnotations(bkt, image.Target.Annotations); err != nil { + return errors.Wrapf(err, "writing Annotations for image %v", image.Name) + } + + // write the target bucket + tbkt, err := bkt.CreateBucketIfNotExists(bucketKeyTarget) + if err != nil { + return err + } + + sizeEncoded, err := encodeInt(image.Target.Size) + if err != nil { + return err + } + + for _, v := range [][2][]byte{ + {bucketKeyDigest, []byte(image.Target.Digest)}, + {bucketKeyMediaType, []byte(image.Target.MediaType)}, + {bucketKeySize, sizeEncoded}, + } { + if err := tbkt.Put(v[0], v[1]); err != nil { + return err + } + } + + return nil +} + +func encodeInt(i int64) ([]byte, error) { + var ( + buf [binary.MaxVarintLen64]byte + iEncoded = buf[:] + ) + iEncoded = iEncoded[:binary.PutVarint(iEncoded, i)] + + if len(iEncoded) == 0 { + return nil, fmt.Errorf("failed encoding integer = %v", i) + } + return iEncoded, nil +} diff --git a/vendor/github.com/containerd/containerd/metadata/leases.go b/vendor/github.com/containerd/containerd/metadata/leases.go new file mode 100644 index 00000000..60da06b0 --- /dev/null +++ b/vendor/github.com/containerd/containerd/metadata/leases.go @@ -0,0 +1,495 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/
+
+package metadata
+
+import (
+	"context"
+	"fmt"
+	"strings"
+	"sync/atomic"
+	"time"
+
+	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/containerd/filters"
+	"github.com/containerd/containerd/leases"
+	"github.com/containerd/containerd/metadata/boltutil"
+	"github.com/containerd/containerd/namespaces"
+	digest "github.com/opencontainers/go-digest"
+	"github.com/pkg/errors"
+	bolt "go.etcd.io/bbolt"
+)
+
+// LeaseManager manages the create/delete lifecycle of leases
+// and also returns existing leases
+type LeaseManager struct {
+	db *DB
+}
+
+// NewLeaseManager creates a new lease manager for managing leases using
+// the provided database.
+func NewLeaseManager(db *DB) *LeaseManager {
+	return &LeaseManager{
+		db: db,
+	}
+}
+
+// Create creates a new lease using the provided options
+func (lm *LeaseManager) Create(ctx context.Context, opts ...leases.Opt) (leases.Lease, error) {
+	var l leases.Lease
+	for _, opt := range opts {
+		if err := opt(&l); err != nil {
+			return leases.Lease{}, err
+		}
+	}
+	if l.ID == "" {
+		return leases.Lease{}, errors.New("lease id must be provided")
+	}
+
+	namespace, err := namespaces.NamespaceRequired(ctx)
+	if err != nil {
+		return leases.Lease{}, err
+	}
+
+	if err := update(ctx, lm.db, func(tx *bolt.Tx) error {
+		topbkt, err := createBucketIfNotExists(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectLeases)
+		if err != nil {
+			return err
+		}
+
+		txbkt, err := topbkt.CreateBucket([]byte(l.ID))
+		if err != nil {
+			if err == bolt.ErrBucketExists {
+				err = errdefs.ErrAlreadyExists
+			}
+			return errors.Wrapf(err, "lease %q", l.ID)
+		}
+
+		t := time.Now().UTC()
+		createdAt, err := t.MarshalBinary()
+		if err != nil {
+			return err
+		}
+		if err := txbkt.Put(bucketKeyCreatedAt, createdAt); err != nil {
+			return err
+		}
+
+		if l.Labels != nil {
+			if err := boltutil.WriteLabels(txbkt, l.Labels); err != nil {
+				return err
+			}
+		}
+		l.CreatedAt = t
+
+		return nil
+	}); err != nil {
+		return leases.Lease{}, err
+	}
+	return l, nil
+}
+
+// Delete deletes the lease with the provided lease ID
+func (lm *LeaseManager) Delete(ctx context.Context, lease leases.Lease, _ ...leases.DeleteOpt) error {
+	namespace, err := namespaces.NamespaceRequired(ctx)
+	if err != nil {
+		return err
+	}
+
+	return update(ctx, lm.db, func(tx *bolt.Tx) error {
+		topbkt := getBucket(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectLeases)
+		if topbkt == nil {
+			return errors.Wrapf(errdefs.ErrNotFound, "lease %q", lease.ID)
+		}
+		if err := topbkt.DeleteBucket([]byte(lease.ID)); err != nil {
+			if err == bolt.ErrBucketNotFound {
+				err = errors.Wrapf(errdefs.ErrNotFound, "lease %q", lease.ID)
+			}
+			return err
+		}
+
+		atomic.AddUint32(&lm.db.dirty, 1)
+
+		return nil
+	})
+}
+
+// List lists all active leases
+func (lm *LeaseManager) List(ctx context.Context, fs ...string) ([]leases.Lease, error) {
+	namespace, err := namespaces.NamespaceRequired(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	filter, err := filters.ParseAll(fs...)
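+	// Filter strings here use the containerd filter grammar; plausible
+	// examples against the lease adaptor (values hypothetical):
+	//
+	//	id==my-lease
+	//	labels."containerd.io/gc.expire"~=".+"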
+ if err != nil { + return nil, errors.Wrap(errdefs.ErrInvalidArgument, err.Error()) + } + + var ll []leases.Lease + + if err := view(ctx, lm.db, func(tx *bolt.Tx) error { + topbkt := getBucket(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectLeases) + if topbkt == nil { + return nil + } + + return topbkt.ForEach(func(k, v []byte) error { + if v != nil { + return nil + } + txbkt := topbkt.Bucket(k) + + l := leases.Lease{ + ID: string(k), + } + + if v := txbkt.Get(bucketKeyCreatedAt); v != nil { + t := &l.CreatedAt + if err := t.UnmarshalBinary(v); err != nil { + return err + } + } + + labels, err := boltutil.ReadLabels(txbkt) + if err != nil { + return err + } + l.Labels = labels + + if filter.Match(adaptLease(l)) { + ll = append(ll, l) + } + + return nil + }) + }); err != nil { + return nil, err + } + + return ll, nil +} + +// AddResource references the resource by the provided lease. +func (lm *LeaseManager) AddResource(ctx context.Context, lease leases.Lease, r leases.Resource) error { + namespace, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return err + } + + return update(ctx, lm.db, func(tx *bolt.Tx) error { + topbkt := getBucket(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectLeases, []byte(lease.ID)) + if topbkt == nil { + return errors.Wrapf(errdefs.ErrNotFound, "lease %q", lease.ID) + } + + keys, ref, err := parseLeaseResource(r) + if err != nil { + return err + } + + bkt := topbkt + for _, key := range keys { + bkt, err = bkt.CreateBucketIfNotExists([]byte(key)) + if err != nil { + return err + } + } + return bkt.Put([]byte(ref), nil) + }) +} + +// DeleteResource dereferences the resource by the provided lease. +func (lm *LeaseManager) DeleteResource(ctx context.Context, lease leases.Lease, r leases.Resource) error { + namespace, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return err + } + + return update(ctx, lm.db, func(tx *bolt.Tx) error { + topbkt := getBucket(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectLeases, []byte(lease.ID)) + if topbkt == nil { + return errors.Wrapf(errdefs.ErrNotFound, "lease %q", lease.ID) + } + + keys, ref, err := parseLeaseResource(r) + if err != nil { + return err + } + + bkt := topbkt + for _, key := range keys { + if bkt == nil { + break + } + bkt = bkt.Bucket([]byte(key)) + } + + if bkt != nil { + if err := bkt.Delete([]byte(ref)); err != nil { + return err + } + } + + atomic.AddUint32(&lm.db.dirty, 1) + + return nil + }) +} + +// ListResources lists all the resources referenced by the lease. 
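+// The Type of each returned resource mirrors the bucket layout walked below:
+// "content", "ingests", or "snapshots/<snapshotter name>".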
+func (lm *LeaseManager) ListResources(ctx context.Context, lease leases.Lease) ([]leases.Resource, error) { + namespace, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return nil, err + } + + var rs []leases.Resource + + if err := view(ctx, lm.db, func(tx *bolt.Tx) error { + + topbkt := getBucket(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectLeases, []byte(lease.ID)) + if topbkt == nil { + return errors.Wrapf(errdefs.ErrNotFound, "lease %q", lease.ID) + } + + // content resources + if cbkt := topbkt.Bucket(bucketKeyObjectContent); cbkt != nil { + if err := cbkt.ForEach(func(k, _ []byte) error { + rs = append(rs, leases.Resource{ + ID: string(k), + Type: string(bucketKeyObjectContent), + }) + + return nil + }); err != nil { + return err + } + } + + // ingest resources + if lbkt := topbkt.Bucket(bucketKeyObjectIngests); lbkt != nil { + if err := lbkt.ForEach(func(k, _ []byte) error { + rs = append(rs, leases.Resource{ + ID: string(k), + Type: string(bucketKeyObjectIngests), + }) + + return nil + }); err != nil { + return err + } + } + + // snapshot resources + if sbkt := topbkt.Bucket(bucketKeyObjectSnapshots); sbkt != nil { + if err := sbkt.ForEach(func(sk, sv []byte) error { + if sv != nil { + return nil + } + + snbkt := sbkt.Bucket(sk) + return snbkt.ForEach(func(k, _ []byte) error { + rs = append(rs, leases.Resource{ + ID: string(k), + Type: fmt.Sprintf("%s/%s", bucketKeyObjectSnapshots, sk), + }) + return nil + }) + }); err != nil { + return err + } + } + + return nil + }); err != nil { + return nil, err + } + return rs, nil +} + +func addSnapshotLease(ctx context.Context, tx *bolt.Tx, snapshotter, key string) error { + lid, ok := leases.FromContext(ctx) + if !ok { + return nil + } + + namespace, ok := namespaces.Namespace(ctx) + if !ok { + panic("namespace must already be checked") + } + + bkt := getBucket(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectLeases, []byte(lid)) + if bkt == nil { + return errors.Wrap(errdefs.ErrNotFound, "lease does not exist") + } + + bkt, err := bkt.CreateBucketIfNotExists(bucketKeyObjectSnapshots) + if err != nil { + return err + } + + bkt, err = bkt.CreateBucketIfNotExists([]byte(snapshotter)) + if err != nil { + return err + } + + return bkt.Put([]byte(key), nil) +} + +func removeSnapshotLease(ctx context.Context, tx *bolt.Tx, snapshotter, key string) error { + lid, ok := leases.FromContext(ctx) + if !ok { + return nil + } + + namespace, ok := namespaces.Namespace(ctx) + if !ok { + panic("namespace must already be checked") + } + + bkt := getBucket(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectLeases, []byte(lid), bucketKeyObjectSnapshots, []byte(snapshotter)) + if bkt == nil { + // Key does not exist so we return nil + return nil + } + + return bkt.Delete([]byte(key)) +} + +func addContentLease(ctx context.Context, tx *bolt.Tx, dgst digest.Digest) error { + lid, ok := leases.FromContext(ctx) + if !ok { + return nil + } + + namespace, ok := namespaces.Namespace(ctx) + if !ok { + panic("namespace must already be required") + } + + bkt := getBucket(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectLeases, []byte(lid)) + if bkt == nil { + return errors.Wrap(errdefs.ErrNotFound, "lease does not exist") + } + + bkt, err := bkt.CreateBucketIfNotExists(bucketKeyObjectContent) + if err != nil { + return err + } + + return bkt.Put([]byte(dgst.String()), nil) +} + +func removeContentLease(ctx context.Context, tx *bolt.Tx, dgst digest.Digest) error { + lid, ok := leases.FromContext(ctx) + if !ok { + return nil + } 
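+	// As in the other helpers here, lid is the lease bound to the calling
+	// context; callers typically opt in with something like (sketch):
+	//
+	//	ctx = leases.WithLease(ctx, lease.ID)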
+ + namespace, ok := namespaces.Namespace(ctx) + if !ok { + panic("namespace must already be checked") + } + + bkt := getBucket(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectLeases, []byte(lid), bucketKeyObjectContent) + if bkt == nil { + // Key does not exist so we return nil + return nil + } + + return bkt.Delete([]byte(dgst.String())) +} + +func addIngestLease(ctx context.Context, tx *bolt.Tx, ref string) (bool, error) { + lid, ok := leases.FromContext(ctx) + if !ok { + return false, nil + } + + namespace, ok := namespaces.Namespace(ctx) + if !ok { + panic("namespace must already be required") + } + + bkt := getBucket(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectLeases, []byte(lid)) + if bkt == nil { + return false, errors.Wrap(errdefs.ErrNotFound, "lease does not exist") + } + + bkt, err := bkt.CreateBucketIfNotExists(bucketKeyObjectIngests) + if err != nil { + return false, err + } + + if err := bkt.Put([]byte(ref), nil); err != nil { + return false, err + } + + return true, nil +} + +func removeIngestLease(ctx context.Context, tx *bolt.Tx, ref string) error { + lid, ok := leases.FromContext(ctx) + if !ok { + return nil + } + + namespace, ok := namespaces.Namespace(ctx) + if !ok { + panic("namespace must already be checked") + } + + bkt := getBucket(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectLeases, []byte(lid), bucketKeyObjectIngests) + if bkt == nil { + // Key does not exist so we return nil + return nil + } + + return bkt.Delete([]byte(ref)) +} + +func parseLeaseResource(r leases.Resource) ([]string, string, error) { + var ( + ref = r.ID + typ = r.Type + keys = strings.Split(typ, "/") + ) + + switch k := keys[0]; k { + case string(bucketKeyObjectContent), + string(bucketKeyObjectIngests): + + if len(keys) != 1 { + return nil, "", errors.Wrapf(errdefs.ErrInvalidArgument, "invalid resource type %s", typ) + } + + if k == string(bucketKeyObjectContent) { + dgst, err := digest.Parse(ref) + if err != nil { + return nil, "", errors.Wrapf(errdefs.ErrInvalidArgument, "invalid content resource id %s: %v", ref, err) + } + ref = dgst.String() + } + case string(bucketKeyObjectSnapshots): + if len(keys) != 2 { + return nil, "", errors.Wrapf(errdefs.ErrInvalidArgument, "invalid snapshot resource type %s", typ) + } + default: + return nil, "", errors.Wrapf(errdefs.ErrNotImplemented, "resource type %s not supported yet", typ) + } + + return keys, ref, nil +} diff --git a/vendor/github.com/containerd/containerd/metadata/migrations.go b/vendor/github.com/containerd/containerd/metadata/migrations.go new file mode 100644 index 00000000..34febdd1 --- /dev/null +++ b/vendor/github.com/containerd/containerd/metadata/migrations.go @@ -0,0 +1,168 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package metadata + +import bolt "go.etcd.io/bbolt" + +type migration struct { + schema string + version int + migrate func(*bolt.Tx) error +} + +// migrations stores the list of database migrations +// for each update to the database schema. 
The migrations +// array MUST be ordered by version from least to greatest. +// The last entry in the array should correspond to the +// schemaVersion and dbVersion constants. +// A migration test MUST be added for each migration in +// the array. +// The migrate function can safely assume the version +// of the data it is migrating from is the previous version +// of the database. +var migrations = []migration{ + { + schema: "v1", + version: 1, + migrate: addChildLinks, + }, + { + schema: "v1", + version: 2, + migrate: migrateIngests, + }, + { + schema: "v1", + version: 3, + migrate: noOpMigration, + }, +} + +// addChildLinks Adds children key to the snapshotters to enforce snapshot +// entries cannot be removed which have children +func addChildLinks(tx *bolt.Tx) error { + v1bkt := tx.Bucket(bucketKeyVersion) + if v1bkt == nil { + return nil + } + + // iterate through each namespace + v1c := v1bkt.Cursor() + + for k, v := v1c.First(); k != nil; k, v = v1c.Next() { + if v != nil { + continue + } + nbkt := v1bkt.Bucket(k) + + sbkt := nbkt.Bucket(bucketKeyObjectSnapshots) + if sbkt != nil { + // Iterate through each snapshotter + if err := sbkt.ForEach(func(sk, sv []byte) error { + if sv != nil { + return nil + } + snbkt := sbkt.Bucket(sk) + + // Iterate through each snapshot + return snbkt.ForEach(func(k, v []byte) error { + if v != nil { + return nil + } + parent := snbkt.Bucket(k).Get(bucketKeyParent) + if len(parent) > 0 { + pbkt := snbkt.Bucket(parent) + if pbkt == nil { + // Not enforcing consistency during migration, skip + return nil + } + cbkt, err := pbkt.CreateBucketIfNotExists(bucketKeyChildren) + if err != nil { + return err + } + if err := cbkt.Put(k, nil); err != nil { + return err + } + } + + return nil + }) + }); err != nil { + return err + } + } + } + + return nil +} + +// migrateIngests moves ingests from the key/value ingest bucket +// to a structured ingest bucket for storing additional state about +// an ingest. +func migrateIngests(tx *bolt.Tx) error { + v1bkt := tx.Bucket(bucketKeyVersion) + if v1bkt == nil { + return nil + } + + // iterate through each namespace + v1c := v1bkt.Cursor() + + for k, v := v1c.First(); k != nil; k, v = v1c.Next() { + if v != nil { + continue + } + bkt := v1bkt.Bucket(k).Bucket(bucketKeyObjectContent) + if bkt == nil { + continue + } + + dbkt := bkt.Bucket(deprecatedBucketKeyObjectIngest) + if dbkt == nil { + continue + } + + // Create new ingests bucket + nbkt, err := bkt.CreateBucketIfNotExists(bucketKeyObjectIngests) + if err != nil { + return err + } + + if err := dbkt.ForEach(func(ref, bref []byte) error { + ibkt, err := nbkt.CreateBucketIfNotExists(ref) + if err != nil { + return err + } + return ibkt.Put(bucketKeyRef, bref) + }); err != nil { + return err + } + + if err := bkt.DeleteBucket(deprecatedBucketKeyObjectIngest); err != nil { + return err + } + } + + return nil +} + +// noOpMigration was for a database change from boltdb/bolt which is no +// longer being supported, to go.etcd.io/bbolt which is the currently +// maintained repo for boltdb. +func noOpMigration(tx *bolt.Tx) error { + return nil +} diff --git a/vendor/github.com/containerd/containerd/metadata/namespaces.go b/vendor/github.com/containerd/containerd/metadata/namespaces.go new file mode 100644 index 00000000..23615e48 --- /dev/null +++ b/vendor/github.com/containerd/containerd/metadata/namespaces.go @@ -0,0 +1,195 @@ +/* + Copyright The containerd Authors. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package metadata + +import ( + "context" + + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/identifiers" + l "github.com/containerd/containerd/labels" + "github.com/containerd/containerd/namespaces" + "github.com/pkg/errors" + bolt "go.etcd.io/bbolt" +) + +type namespaceStore struct { + tx *bolt.Tx +} + +// NewNamespaceStore returns a store backed by a bolt DB +func NewNamespaceStore(tx *bolt.Tx) namespaces.Store { + return &namespaceStore{tx: tx} +} + +func (s *namespaceStore) Create(ctx context.Context, namespace string, labels map[string]string) error { + topbkt, err := createBucketIfNotExists(s.tx, bucketKeyVersion) + if err != nil { + return err + } + + if err := identifiers.Validate(namespace); err != nil { + return err + } + + for k, v := range labels { + if err := l.Validate(k, v); err != nil { + return errors.Wrapf(err, "namespace.Labels") + } + } + + // provides the already exists error. + bkt, err := topbkt.CreateBucket([]byte(namespace)) + if err != nil { + if err == bolt.ErrBucketExists { + return errors.Wrapf(errdefs.ErrAlreadyExists, "namespace %q", namespace) + } + + return err + } + + lbkt, err := bkt.CreateBucketIfNotExists(bucketKeyObjectLabels) + if err != nil { + return err + } + + for k, v := range labels { + if err := lbkt.Put([]byte(k), []byte(v)); err != nil { + return err + } + } + + return nil +} + +func (s *namespaceStore) Labels(ctx context.Context, namespace string) (map[string]string, error) { + labels := map[string]string{} + + bkt := getNamespaceLabelsBucket(s.tx, namespace) + if bkt == nil { + return labels, nil + } + + if err := bkt.ForEach(func(k, v []byte) error { + labels[string(k)] = string(v) + return nil + }); err != nil { + return nil, err + } + + return labels, nil +} + +func (s *namespaceStore) SetLabel(ctx context.Context, namespace, key, value string) error { + if err := l.Validate(key, value); err != nil { + return errors.Wrapf(err, "namespace.Labels") + } + + return withNamespacesLabelsBucket(s.tx, namespace, func(bkt *bolt.Bucket) error { + if value == "" { + return bkt.Delete([]byte(key)) + } + + return bkt.Put([]byte(key), []byte(value)) + }) + +} + +func (s *namespaceStore) List(ctx context.Context) ([]string, error) { + bkt := getBucket(s.tx, bucketKeyVersion) + if bkt == nil { + return nil, nil // no namespaces! 
+ } + + var namespaces []string + if err := bkt.ForEach(func(k, v []byte) error { + if v != nil { + return nil // not a bucket + } + + namespaces = append(namespaces, string(k)) + return nil + }); err != nil { + return nil, err + } + + return namespaces, nil +} + +func (s *namespaceStore) Delete(ctx context.Context, namespace string, opts ...namespaces.DeleteOpts) error { + i := &namespaces.DeleteInfo{ + Name: namespace, + } + for _, o := range opts { + if err := o(ctx, i); err != nil { + return err + } + } + bkt := getBucket(s.tx, bucketKeyVersion) + if empty, err := s.namespaceEmpty(ctx, namespace); err != nil { + return err + } else if !empty { + return errors.Wrapf(errdefs.ErrFailedPrecondition, "namespace %q must be empty", namespace) + } + + if err := bkt.DeleteBucket([]byte(namespace)); err != nil { + if err == bolt.ErrBucketNotFound { + return errors.Wrapf(errdefs.ErrNotFound, "namespace %q", namespace) + } + + return err + } + + return nil +} + +func (s *namespaceStore) namespaceEmpty(ctx context.Context, namespace string) (bool, error) { + // Get all data buckets + buckets := []*bolt.Bucket{ + getImagesBucket(s.tx, namespace), + getBlobsBucket(s.tx, namespace), + getContainersBucket(s.tx, namespace), + } + if snbkt := getSnapshottersBucket(s.tx, namespace); snbkt != nil { + if err := snbkt.ForEach(func(k, v []byte) error { + if v == nil { + buckets = append(buckets, snbkt.Bucket(k)) + } + return nil + }); err != nil { + return false, err + } + } + + // Ensure data buckets are empty + for _, bkt := range buckets { + if !isBucketEmpty(bkt) { + return false, nil + } + } + + return true, nil +} + +func isBucketEmpty(bkt *bolt.Bucket) bool { + if bkt == nil { + return true + } + + k, _ := bkt.Cursor().First() + return k == nil +} diff --git a/vendor/github.com/containerd/containerd/metadata/snapshot.go b/vendor/github.com/containerd/containerd/metadata/snapshot.go new file mode 100644 index 00000000..389aeda4 --- /dev/null +++ b/vendor/github.com/containerd/containerd/metadata/snapshot.go @@ -0,0 +1,940 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package metadata + +import ( + "context" + "fmt" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/filters" + "github.com/containerd/containerd/labels" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/metadata/boltutil" + "github.com/containerd/containerd/mount" + "github.com/containerd/containerd/namespaces" + "github.com/containerd/containerd/snapshots" + "github.com/pkg/errors" + bolt "go.etcd.io/bbolt" +) + +const ( + inheritedLabelsPrefix = "containerd.io/snapshot/" + labelSnapshotRef = "containerd.io/snapshot.ref" +) + +type snapshotter struct { + snapshots.Snapshotter + name string + db *DB + l sync.RWMutex +} + +// newSnapshotter returns a new Snapshotter which namespaces the given snapshot +// using the provided name and database. 
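+// Backend snapshot keys are generated from a namespace-scoped sequence via
+// createKey below, e.g. "default/42/mykey" for namespace "default"
+// (illustrative values).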
+func newSnapshotter(db *DB, name string, sn snapshots.Snapshotter) *snapshotter { + return &snapshotter{ + Snapshotter: sn, + name: name, + db: db, + } +} + +func createKey(id uint64, namespace, key string) string { + return fmt.Sprintf("%s/%d/%s", namespace, id, key) +} + +func getKey(tx *bolt.Tx, ns, name, key string) string { + bkt := getSnapshotterBucket(tx, ns, name) + if bkt == nil { + return "" + } + bkt = bkt.Bucket([]byte(key)) + if bkt == nil { + return "" + } + v := bkt.Get(bucketKeyName) + if len(v) == 0 { + return "" + } + return string(v) +} + +func (s *snapshotter) resolveKey(ctx context.Context, key string) (string, error) { + ns, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return "", err + } + + var id string + if err := view(ctx, s.db, func(tx *bolt.Tx) error { + id = getKey(tx, ns, s.name, key) + if id == "" { + return errors.Wrapf(errdefs.ErrNotFound, "snapshot %v does not exist", key) + } + return nil + }); err != nil { + return "", err + } + + return id, nil +} + +func (s *snapshotter) Stat(ctx context.Context, key string) (snapshots.Info, error) { + ns, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return snapshots.Info{}, err + } + + var ( + bkey string + local = snapshots.Info{ + Name: key, + } + ) + if err := view(ctx, s.db, func(tx *bolt.Tx) error { + bkt := getSnapshotterBucket(tx, ns, s.name) + if bkt == nil { + return errors.Wrapf(errdefs.ErrNotFound, "snapshot %v does not exist", key) + } + sbkt := bkt.Bucket([]byte(key)) + if sbkt == nil { + return errors.Wrapf(errdefs.ErrNotFound, "snapshot %v does not exist", key) + } + local.Labels, err = boltutil.ReadLabels(sbkt) + if err != nil { + return errors.Wrap(err, "failed to read labels") + } + if err := boltutil.ReadTimestamps(sbkt, &local.Created, &local.Updated); err != nil { + return errors.Wrap(err, "failed to read timestamps") + } + bkey = string(sbkt.Get(bucketKeyName)) + local.Parent = string(sbkt.Get(bucketKeyParent)) + + return nil + }); err != nil { + return snapshots.Info{}, err + } + + info, err := s.Snapshotter.Stat(ctx, bkey) + if err != nil { + return snapshots.Info{}, err + } + + return overlayInfo(info, local), nil +} + +func (s *snapshotter) Update(ctx context.Context, info snapshots.Info, fieldpaths ...string) (snapshots.Info, error) { + s.l.RLock() + defer s.l.RUnlock() + + ns, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return snapshots.Info{}, err + } + + if info.Name == "" { + return snapshots.Info{}, errors.Wrap(errdefs.ErrInvalidArgument, "") + } + + var ( + bkey string + local = snapshots.Info{ + Name: info.Name, + } + updated bool + ) + if err := update(ctx, s.db, func(tx *bolt.Tx) error { + bkt := getSnapshotterBucket(tx, ns, s.name) + if bkt == nil { + return errors.Wrapf(errdefs.ErrNotFound, "snapshot %v does not exist", info.Name) + } + sbkt := bkt.Bucket([]byte(info.Name)) + if sbkt == nil { + return errors.Wrapf(errdefs.ErrNotFound, "snapshot %v does not exist", info.Name) + } + + local.Labels, err = boltutil.ReadLabels(sbkt) + if err != nil { + return errors.Wrap(err, "failed to read labels") + } + if err := boltutil.ReadTimestamps(sbkt, &local.Created, &local.Updated); err != nil { + return errors.Wrap(err, "failed to read timestamps") + } + + // Handle field updates + if len(fieldpaths) > 0 { + for _, path := range fieldpaths { + if strings.HasPrefix(path, "labels.") { + if local.Labels == nil { + local.Labels = map[string]string{} + } + + key := strings.TrimPrefix(path, "labels.") + local.Labels[key] = info.Labels[key] + 
continue
+				}
+
+				switch path {
+				case "labels":
+					local.Labels = info.Labels
+				default:
+					return errors.Wrapf(errdefs.ErrInvalidArgument, "cannot update %q field on snapshot %q", path, info.Name)
+				}
+			}
+		} else {
+			local.Labels = info.Labels
+		}
+		if err := validateSnapshot(&local); err != nil {
+			return err
+		}
+		local.Updated = time.Now().UTC()
+
+		if err := boltutil.WriteTimestamps(sbkt, local.Created, local.Updated); err != nil {
+			return errors.Wrap(err, "failed to write timestamps")
+		}
+		if err := boltutil.WriteLabels(sbkt, local.Labels); err != nil {
+			return errors.Wrap(err, "failed to write labels")
+		}
+		bkey = string(sbkt.Get(bucketKeyName))
+		local.Parent = string(sbkt.Get(bucketKeyParent))
+
+		inner := snapshots.Info{
+			Name:   bkey,
+			Labels: snapshots.FilterInheritedLabels(local.Labels),
+		}
+
+		// NOTE: Perform this inside the transaction to reduce the
+		// chances of out of sync data. The backend snapshotters
+		// should perform the Update as fast as possible.
+		if info, err = s.Snapshotter.Update(ctx, inner, fieldpaths...); err != nil {
+			return err
+		}
+		updated = true
+
+		return nil
+	}); err != nil {
+		if updated {
+			log.G(ctx).WithField("snapshotter", s.name).WithField("key", local.Name).WithError(err).Error("transaction failed after updating snapshot backend")
+		}
+		return snapshots.Info{}, err
+	}
+
+	return overlayInfo(info, local), nil
+}
+
+func overlayInfo(info, overlay snapshots.Info) snapshots.Info {
+	// Merge info
+	info.Name = overlay.Name
+	info.Created = overlay.Created
+	info.Updated = overlay.Updated
+	info.Parent = overlay.Parent
+	if info.Labels == nil {
+		info.Labels = overlay.Labels
+	} else {
+		for k, v := range overlay.Labels {
+			info.Labels[k] = v
+		}
+	}
+	return info
+}
+
+func (s *snapshotter) Usage(ctx context.Context, key string) (snapshots.Usage, error) {
+	bkey, err := s.resolveKey(ctx, key)
+	if err != nil {
+		return snapshots.Usage{}, err
+	}
+	return s.Snapshotter.Usage(ctx, bkey)
+}
+
+func (s *snapshotter) Mounts(ctx context.Context, key string) ([]mount.Mount, error) {
+	bkey, err := s.resolveKey(ctx, key)
+	if err != nil {
+		return nil, err
+	}
+	return s.Snapshotter.Mounts(ctx, bkey)
+}
+
+func (s *snapshotter) Prepare(ctx context.Context, key, parent string, opts ...snapshots.Opt) ([]mount.Mount, error) {
+	return s.createSnapshot(ctx, key, parent, false, opts)
+}
+
+func (s *snapshotter) View(ctx context.Context, key, parent string, opts ...snapshots.Opt) ([]mount.Mount, error) {
+	return s.createSnapshot(ctx, key, parent, true, opts)
+}
+
+func (s *snapshotter) createSnapshot(ctx context.Context, key, parent string, readonly bool, opts []snapshots.Opt) ([]mount.Mount, error) {
+	s.l.RLock()
+	defer s.l.RUnlock()
+
+	ns, err := namespaces.NamespaceRequired(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	var base snapshots.Info
+	for _, opt := range opts {
+		if err := opt(&base); err != nil {
+			return nil, err
+		}
+	}
+
+	if err := validateSnapshot(&base); err != nil {
+		return nil, err
+	}
+
+	var (
+		target  = base.Labels[labelSnapshotRef]
+		bparent string
+		bkey    string
+		bopts   = []snapshots.Opt{
+			snapshots.WithLabels(snapshots.FilterInheritedLabels(base.Labels)),
+		}
+	)
+
+	if err := update(ctx, s.db, func(tx *bolt.Tx) error {
+		bkt, err := createSnapshotterBucket(tx, ns, s.name)
+		if err != nil {
+			return err
+		}
+
+		// Check if target exists; if so, return already exists
+		if target != "" {
+			if tbkt := bkt.Bucket([]byte(target)); tbkt != nil {
+				return errors.Wrapf(errdefs.ErrAlreadyExists, "target snapshot %q", target)
+			}
+ } + + if bbkt := bkt.Bucket([]byte(key)); bbkt != nil { + return errors.Wrapf(errdefs.ErrAlreadyExists, "snapshot %q", key) + } + + if parent != "" { + pbkt := bkt.Bucket([]byte(parent)) + if pbkt == nil { + return errors.Wrapf(errdefs.ErrNotFound, "parent snapshot %v does not exist", parent) + } + bparent = string(pbkt.Get(bucketKeyName)) + } + + sid, err := bkt.NextSequence() + if err != nil { + return err + } + bkey = createKey(sid, ns, key) + + return err + }); err != nil { + return nil, err + } + + var ( + m []mount.Mount + created string + rerr error + ) + if readonly { + m, err = s.Snapshotter.View(ctx, bkey, bparent, bopts...) + } else { + m, err = s.Snapshotter.Prepare(ctx, bkey, bparent, bopts...) + } + + // An already exists error should indicate the backend found a snapshot + // matching a provided target reference. + if errdefs.IsAlreadyExists(err) { + if target != "" { + var tinfo *snapshots.Info + filter := fmt.Sprintf(`labels."containerd.io/snapshot.ref"==%s,parent==%q`, target, bparent) + if err := s.Snapshotter.Walk(ctx, func(ctx context.Context, i snapshots.Info) error { + if tinfo == nil && i.Kind == snapshots.KindCommitted { + if i.Labels["containerd.io/snapshot.ref"] != target { + // Walk did not respect filter + return nil + } + if i.Parent != bparent { + // Walk did not respect filter + return nil + } + tinfo = &i + } + return nil + + }, filter); err != nil { + return nil, errors.Wrap(err, "failed walking backend snapshots") + } + + if tinfo == nil { + return nil, errors.Wrapf(errdefs.ErrNotFound, "target snapshot %q in backend", target) + } + + key = target + bkey = tinfo.Name + bparent = tinfo.Parent + base.Created = tinfo.Created + base.Updated = tinfo.Updated + if base.Labels == nil { + base.Labels = tinfo.Labels + } else { + for k, v := range tinfo.Labels { + if _, ok := base.Labels[k]; !ok { + base.Labels[k] = v + } + } + } + + // Propagate this error after the final update + rerr = errors.Wrapf(errdefs.ErrAlreadyExists, "target snapshot %q from snapshotter", target) + } else { + // This condition is unexpected as the key provided is expected + // to be new and unique, return as unknown response from backend + // to avoid confusing callers handling already exists. + return nil, errors.Wrapf(errdefs.ErrUnknown, "unexpected error from snapshotter: %v", err) + } + } else if err != nil { + return nil, err + } else { + ts := time.Now().UTC() + base.Created = ts + base.Updated = ts + created = bkey + } + + if txerr := update(ctx, s.db, func(tx *bolt.Tx) error { + bkt := getSnapshotterBucket(tx, ns, s.name) + if bkt == nil { + return errors.Wrapf(errdefs.ErrNotFound, "can not find snapshotter %q", s.name) + } + + if err := addSnapshotLease(ctx, tx, s.name, key); err != nil { + return err + } + + bbkt, err := bkt.CreateBucket([]byte(key)) + if err != nil { + if err != bolt.ErrBucketExists { + return err + } + if rerr == nil { + rerr = errors.Wrapf(errdefs.ErrAlreadyExists, "snapshot %q", key) + } + return nil + } + + if parent != "" { + pbkt := bkt.Bucket([]byte(parent)) + if pbkt == nil { + return errors.Wrapf(errdefs.ErrNotFound, "parent snapshot %v does not exist", parent) + } + + // Ensure the backend's parent matches the metadata store's parent + // If it is mismatched, then a target was provided for a snapshotter + // which has a different parent then requested. + // NOTE: The backend snapshotter is responsible for enforcing the + // uniqueness of the reference relationships, the metadata store + // can only error out to prevent inconsistent data. 
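+			//
+			// A hypothetical remote-snapshotter flow that reaches this check:
+			// the client prepares with a target label,
+			//
+			//	sn.Prepare(ctx, key, parent, snapshots.WithLabels(map[string]string{
+			//		"containerd.io/snapshot.ref": chainID,
+			//	}))
+			//
+			// and the backend answers ErrAlreadyExists because it already
+			// holds a committed snapshot for that reference.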
+ if bparent != string(pbkt.Get(bucketKeyName)) { + return errors.Wrapf(errdefs.ErrInvalidArgument, "mismatched parent %s from target %s", parent, target) + } + + cbkt, err := pbkt.CreateBucketIfNotExists(bucketKeyChildren) + if err != nil { + return err + } + if err := cbkt.Put([]byte(key), nil); err != nil { + return err + } + + if err := bbkt.Put(bucketKeyParent, []byte(parent)); err != nil { + return err + } + } + + if err := boltutil.WriteTimestamps(bbkt, base.Created, base.Updated); err != nil { + return err + } + if err := boltutil.WriteLabels(bbkt, base.Labels); err != nil { + return err + } + + return bbkt.Put(bucketKeyName, []byte(bkey)) + }); txerr != nil { + rerr = txerr + } + + if rerr != nil { + // If the created reference is not stored, attempt clean up + if created != "" { + if err := s.Snapshotter.Remove(ctx, created); err != nil { + log.G(ctx).WithField("snapshotter", s.name).WithField("key", created).WithError(err).Error("failed to cleanup unreferenced snapshot") + } + } + return nil, rerr + } + + return m, nil +} + +func (s *snapshotter) Commit(ctx context.Context, name, key string, opts ...snapshots.Opt) error { + s.l.RLock() + defer s.l.RUnlock() + + ns, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return err + } + + var base snapshots.Info + for _, opt := range opts { + if err := opt(&base); err != nil { + return err + } + } + + if err := validateSnapshot(&base); err != nil { + return err + } + + var bname string + if err := update(ctx, s.db, func(tx *bolt.Tx) error { + bkt := getSnapshotterBucket(tx, ns, s.name) + if bkt == nil { + return errors.Wrapf(errdefs.ErrNotFound, + "can not find snapshotter %q", s.name) + } + + bbkt, err := bkt.CreateBucket([]byte(name)) + if err != nil { + if err == bolt.ErrBucketExists { + err = errors.Wrapf(errdefs.ErrAlreadyExists, "snapshot %q", name) + } + return err + } + if err := addSnapshotLease(ctx, tx, s.name, name); err != nil { + return err + } + + obkt := bkt.Bucket([]byte(key)) + if obkt == nil { + return errors.Wrapf(errdefs.ErrNotFound, "snapshot %v does not exist", key) + } + + bkey := string(obkt.Get(bucketKeyName)) + + sid, err := bkt.NextSequence() + if err != nil { + return err + } + + nameKey := createKey(sid, ns, name) + + if err := bbkt.Put(bucketKeyName, []byte(nameKey)); err != nil { + return err + } + + parent := obkt.Get(bucketKeyParent) + if len(parent) > 0 { + pbkt := bkt.Bucket(parent) + if pbkt == nil { + return errors.Wrapf(errdefs.ErrNotFound, "parent snapshot %v does not exist", string(parent)) + } + + cbkt, err := pbkt.CreateBucketIfNotExists(bucketKeyChildren) + if err != nil { + return err + } + if err := cbkt.Delete([]byte(key)); err != nil { + return err + } + if err := cbkt.Put([]byte(name), nil); err != nil { + return err + } + + if err := bbkt.Put(bucketKeyParent, parent); err != nil { + return err + } + } + ts := time.Now().UTC() + if err := boltutil.WriteTimestamps(bbkt, ts, ts); err != nil { + return err + } + if err := boltutil.WriteLabels(bbkt, base.Labels); err != nil { + return err + } + + if err := bkt.DeleteBucket([]byte(key)); err != nil { + return err + } + if err := removeSnapshotLease(ctx, tx, s.name, key); err != nil { + return err + } + + inheritedOpt := snapshots.WithLabels(snapshots.FilterInheritedLabels(base.Labels)) + + // NOTE: Backend snapshotters should commit fast and reliably to + // prevent metadata store locking and minimizing rollbacks. 
+ // This operation should be done in the transaction to minimize the + // risk of the committed keys becoming out of sync. If this operation + // succeed and the overall transaction fails then the risk of out of + // sync data is higher and may require manual cleanup. + if err := s.Snapshotter.Commit(ctx, nameKey, bkey, inheritedOpt); err != nil { + if errdefs.IsNotFound(err) { + log.G(ctx).WithField("snapshotter", s.name).WithField("key", key).WithError(err).Error("uncommittable snapshot: missing in backend, snapshot should be removed") + } + // NOTE: Consider handling already exists here from the backend. Currently + // already exists from the backend may be confusing to the client since it + // may require the client to re-attempt from prepare. However, if handling + // here it is not clear what happened with the existing backend key and + // whether the already prepared snapshot would still be used or must be + // discarded. It is best that all implementations of the snapshotter + // interface behave the same, in which case the backend should handle the + // mapping of duplicates and not error. + return err + } + bname = nameKey + + return nil + }); err != nil { + if bname != "" { + log.G(ctx).WithField("snapshotter", s.name).WithField("key", key).WithField("bname", bname).WithError(err).Error("uncommittable snapshot: transaction failed after commit, snapshot should be removed") + + } + return err + } + + return nil + +} + +func (s *snapshotter) Remove(ctx context.Context, key string) error { + s.l.RLock() + defer s.l.RUnlock() + + ns, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return err + } + + return update(ctx, s.db, func(tx *bolt.Tx) error { + var sbkt *bolt.Bucket + bkt := getSnapshotterBucket(tx, ns, s.name) + if bkt != nil { + sbkt = bkt.Bucket([]byte(key)) + } + if sbkt == nil { + return errors.Wrapf(errdefs.ErrNotFound, "snapshot %v does not exist", key) + } + + cbkt := sbkt.Bucket(bucketKeyChildren) + if cbkt != nil { + if child, _ := cbkt.Cursor().First(); child != nil { + return errors.Wrap(errdefs.ErrFailedPrecondition, "cannot remove snapshot with child") + } + } + + parent := sbkt.Get(bucketKeyParent) + if len(parent) > 0 { + pbkt := bkt.Bucket(parent) + if pbkt == nil { + return errors.Wrapf(errdefs.ErrNotFound, "parent snapshot %v does not exist", string(parent)) + } + cbkt := pbkt.Bucket(bucketKeyChildren) + if cbkt != nil { + if err := cbkt.Delete([]byte(key)); err != nil { + return errors.Wrap(err, "failed to remove child link") + } + } + } + + if err := bkt.DeleteBucket([]byte(key)); err != nil { + return err + } + if err := removeSnapshotLease(ctx, tx, s.name, key); err != nil { + return err + } + + // Mark snapshotter as dirty for triggering garbage collection + atomic.AddUint32(&s.db.dirty, 1) + s.db.dirtySS[s.name] = struct{}{} + + return nil + }) +} + +type infoPair struct { + bkey string + info snapshots.Info +} + +func (s *snapshotter) Walk(ctx context.Context, fn snapshots.WalkFunc, fs ...string) error { + ns, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return err + } + + var ( + batchSize = 100 + pairs = []infoPair{} + lastKey string + ) + + filter, err := filters.ParseAll(fs...) 
+ if err != nil { + return err + } + + for { + if err := view(ctx, s.db, func(tx *bolt.Tx) error { + bkt := getSnapshotterBucket(tx, ns, s.name) + if bkt == nil { + return nil + } + + c := bkt.Cursor() + + var k, v []byte + if lastKey == "" { + k, v = c.First() + } else { + k, v = c.Seek([]byte(lastKey)) + } + + for k != nil { + if v == nil { + if len(pairs) >= batchSize { + break + } + sbkt := bkt.Bucket(k) + + pair := infoPair{ + bkey: string(sbkt.Get(bucketKeyName)), + info: snapshots.Info{ + Name: string(k), + Parent: string(sbkt.Get(bucketKeyParent)), + }, + } + + err := boltutil.ReadTimestamps(sbkt, &pair.info.Created, &pair.info.Updated) + if err != nil { + return err + } + pair.info.Labels, err = boltutil.ReadLabels(sbkt) + if err != nil { + return err + } + + pairs = append(pairs, pair) + } + + k, v = c.Next() + } + + lastKey = string(k) + + return nil + }); err != nil { + return err + } + + for _, pair := range pairs { + info, err := s.Snapshotter.Stat(ctx, pair.bkey) + if err != nil { + if errdefs.IsNotFound(err) { + continue + } + return err + } + + info = overlayInfo(info, pair.info) + if filter.Match(adaptSnapshot(info)) { + if err := fn(ctx, info); err != nil { + return err + } + } + } + + if lastKey == "" { + break + } + + pairs = pairs[:0] + + } + + return nil +} + +func validateSnapshot(info *snapshots.Info) error { + for k, v := range info.Labels { + if err := labels.Validate(k, v); err != nil { + return errors.Wrapf(err, "info.Labels") + } + } + + return nil +} + +func (s *snapshotter) garbageCollect(ctx context.Context) (d time.Duration, err error) { + s.l.Lock() + t1 := time.Now() + defer func() { + s.l.Unlock() + if err == nil { + if c, ok := s.Snapshotter.(snapshots.Cleaner); ok { + err = c.Cleanup(ctx) + if errdefs.IsNotImplemented(err) { + err = nil + } + } + } + if err == nil { + d = time.Since(t1) + } + }() + + seen := map[string]struct{}{} + if err := s.db.View(func(tx *bolt.Tx) error { + v1bkt := tx.Bucket(bucketKeyVersion) + if v1bkt == nil { + return nil + } + + // iterate through each namespace + v1c := v1bkt.Cursor() + + for k, v := v1c.First(); k != nil; k, v = v1c.Next() { + if v != nil { + continue + } + + sbkt := v1bkt.Bucket(k).Bucket(bucketKeyObjectSnapshots) + if sbkt == nil { + continue + } + + // Load specific snapshotter + ssbkt := sbkt.Bucket([]byte(s.name)) + if ssbkt == nil { + continue + } + + if err := ssbkt.ForEach(func(sk, sv []byte) error { + if sv == nil { + bkey := ssbkt.Bucket(sk).Get(bucketKeyName) + if len(bkey) > 0 { + seen[string(bkey)] = struct{}{} + } + } + return nil + }); err != nil { + return err + } + } + + return nil + }); err != nil { + return 0, err + } + + roots, err := s.walkTree(ctx, seen) + if err != nil { + return 0, err + } + + // TODO: Unlock before removal (once nodes are fully unavailable). + // This could be achieved through doing prune inside the lock + // and having a cleanup method which actually performs the + // deletions on the snapshotters which support it. 
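+ // The prune below is depth-first: pruneBranch removes a node's children
+ // before the node itself, so a parent is never deleted ahead of a child.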
+ + for _, node := range roots { + if err := s.pruneBranch(ctx, node); err != nil { + return 0, err + } + } + + return +} + +type treeNode struct { + info snapshots.Info + remove bool + children []*treeNode +} + +func (s *snapshotter) walkTree(ctx context.Context, seen map[string]struct{}) ([]*treeNode, error) { + roots := []*treeNode{} + nodes := map[string]*treeNode{} + + if err := s.Snapshotter.Walk(ctx, func(ctx context.Context, info snapshots.Info) error { + _, isSeen := seen[info.Name] + node, ok := nodes[info.Name] + if !ok { + node = &treeNode{} + nodes[info.Name] = node + } + + node.remove = !isSeen + node.info = info + + if info.Parent == "" { + roots = append(roots, node) + } else { + parent, ok := nodes[info.Parent] + if !ok { + parent = &treeNode{} + nodes[info.Parent] = parent + } + parent.children = append(parent.children, node) + } + + return nil + }); err != nil { + return nil, err + } + + return roots, nil +} + +func (s *snapshotter) pruneBranch(ctx context.Context, node *treeNode) error { + for _, child := range node.children { + if err := s.pruneBranch(ctx, child); err != nil { + return err + } + } + + if node.remove { + logger := log.G(ctx).WithField("snapshotter", s.name) + if err := s.Snapshotter.Remove(ctx, node.info.Name); err != nil { + if !errdefs.IsFailedPrecondition(err) { + return err + } + logger.WithError(err).WithField("key", node.info.Name).Warnf("failed to remove snapshot") + } else { + logger.WithField("key", node.info.Name).Debug("removed snapshot") + } + } + + return nil +} + +// Close closes s.Snapshotter but not db +func (s *snapshotter) Close() error { + return s.Snapshotter.Close() +} diff --git a/vendor/github.com/containerd/containerd/mount/lookup_unix.go b/vendor/github.com/containerd/containerd/mount/lookup_unix.go new file mode 100644 index 00000000..e8b0a0b4 --- /dev/null +++ b/vendor/github.com/containerd/containerd/mount/lookup_unix.go @@ -0,0 +1,53 @@ +// +build !windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package mount + +import ( + "path/filepath" + "sort" + "strings" + + "github.com/pkg/errors" +) + +// Lookup returns the mount info corresponds to the path. +func Lookup(dir string) (Info, error) { + dir = filepath.Clean(dir) + + mounts, err := Self() + if err != nil { + return Info{}, err + } + + // Sort descending order by Info.Mountpoint + sort.SliceStable(mounts, func(i, j int) bool { + return mounts[j].Mountpoint < mounts[i].Mountpoint + }) + for _, m := range mounts { + // Note that m.{Major, Minor} are generally unreliable for our purpose here + // https://www.spinics.net/lists/linux-btrfs/msg58908.html + // Note that device number is not checked here, because for overlayfs files + // may have different device number with the mountpoint. 
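+ // Since mounts are sorted by mountpoint in descending order, the first
+ // prefix match is the longest, i.e. the innermost mount containing dir.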
+ if strings.HasPrefix(dir, m.Mountpoint) {
+ return m, nil
+ }
+ }
+
+ return Info{}, errors.Errorf("failed to find the mount info for %q", dir)
+}
diff --git a/vendor/github.com/containerd/containerd/mount/lookup_unsupported.go b/vendor/github.com/containerd/containerd/mount/lookup_unsupported.go
new file mode 100644
index 00000000..46ec66a9
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/mount/lookup_unsupported.go
@@ -0,0 +1,29 @@
+// +build windows
+
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package mount
+
+import (
+ "fmt"
+ "runtime"
+)
+
+// Lookup returns the mount info corresponding to the path.
+func Lookup(dir string) (Info, error) {
+ return Info{}, fmt.Errorf("mount.Lookup is not implemented on %s/%s", runtime.GOOS, runtime.GOARCH)
+}
diff --git a/vendor/github.com/containerd/containerd/mount/mount.go b/vendor/github.com/containerd/containerd/mount/mount.go
new file mode 100644
index 00000000..b25556b2
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/mount/mount.go
@@ -0,0 +1,40 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package mount
+
+// Mount is the lingua franca of containerd. A mount represents a
+// serialized mount syscall. Components either emit or consume mounts.
+type Mount struct {
+ // Type specifies the host-specific mount type.
+ Type string
+ // Source specifies where to mount from. Depending on the host system, this
+ // can be a source path or device.
+ Source string
+ // Options contains zero or more fstab-style mount options. Typically,
+ // these are platform specific.
+ Options []string
+}
+
+// All mounts all the provided mounts to the provided target
+func All(mounts []Mount, target string) error {
+ for _, m := range mounts {
+ if err := m.Mount(target); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/containerd/containerd/mount/mount_linux.go b/vendor/github.com/containerd/containerd/mount/mount_linux.go
new file mode 100644
index 00000000..a7edd455
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/mount/mount_linux.go
@@ -0,0 +1,372 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package mount + +import ( + "fmt" + "os" + "os/exec" + "path" + "strings" + "time" + + "github.com/containerd/containerd/sys" + "github.com/pkg/errors" + "golang.org/x/sys/unix" +) + +var ( + pagesize = 4096 + allowedHelperBinaries = []string{"mount.fuse", "mount.fuse3"} +) + +func init() { + pagesize = os.Getpagesize() +} + +// Mount to the provided target path. +// +// If m.Type starts with "fuse." or "fuse3.", "mount.fuse" or "mount.fuse3" +// helper binary is called. +func (m *Mount) Mount(target string) error { + for _, helperBinary := range allowedHelperBinaries { + // helperBinary = "mount.fuse", typePrefix = "fuse." + typePrefix := strings.TrimPrefix(helperBinary, "mount.") + "." + if strings.HasPrefix(m.Type, typePrefix) { + return m.mountWithHelper(helperBinary, typePrefix, target) + } + } + var ( + chdir string + options = m.Options + ) + + // avoid hitting one page limit of mount argument buffer + // + // NOTE: 512 is a buffer during pagesize check. + if m.Type == "overlay" && optionsSize(options) >= pagesize-512 { + chdir, options = compactLowerdirOption(options) + } + + flags, data := parseMountOptions(options) + if len(data) > pagesize { + return errors.Errorf("mount options is too long") + } + + // propagation types. + const ptypes = unix.MS_SHARED | unix.MS_PRIVATE | unix.MS_SLAVE | unix.MS_UNBINDABLE + + // Ensure propagation type change flags aren't included in other calls. + oflags := flags &^ ptypes + + // In the case of remounting with changed data (data != ""), need to call mount (moby/moby#34077). + if flags&unix.MS_REMOUNT == 0 || data != "" { + // Initial call applying all non-propagation flags for mount + // or remount with changed data + if err := mountAt(chdir, m.Source, target, m.Type, uintptr(oflags), data); err != nil { + return err + } + } + + if flags&ptypes != 0 { + // Change the propagation type. + const pflags = ptypes | unix.MS_REC | unix.MS_SILENT + if err := unix.Mount("", target, "", uintptr(flags&pflags), ""); err != nil { + return err + } + } + + const broflags = unix.MS_BIND | unix.MS_RDONLY + if oflags&broflags == broflags { + // Remount the bind to apply read only. 
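+ // The kernel ignores MS_RDONLY on the initial bind mount, so the bind
+ // must be remounted with MS_REMOUNT|MS_BIND|MS_RDONLY to become read-only.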
+ return unix.Mount("", target, "", uintptr(oflags|unix.MS_REMOUNT), "") + } + return nil +} + +// Unmount the provided mount path with the flags +func Unmount(target string, flags int) error { + if err := unmount(target, flags); err != nil && err != unix.EINVAL { + return err + } + return nil +} + +func isFUSE(dir string) (bool, error) { + // fuseSuperMagic is defined in statfs(2) + const fuseSuperMagic = 0x65735546 + var st unix.Statfs_t + if err := unix.Statfs(dir, &st); err != nil { + return false, err + } + return st.Type == fuseSuperMagic, nil +} + +func unmount(target string, flags int) error { + // For FUSE mounts, attempting to execute fusermount helper binary is preferred + // https://github.com/containerd/containerd/pull/3765#discussion_r342083514 + if ok, err := isFUSE(target); err == nil && ok { + for _, helperBinary := range []string{"fusermount3", "fusermount"} { + cmd := exec.Command(helperBinary, "-u", target) + if err := cmd.Run(); err == nil { + return nil + } + // ignore error and try unix.Unmount + } + } + for i := 0; i < 50; i++ { + if err := unix.Unmount(target, flags); err != nil { + switch err { + case unix.EBUSY: + time.Sleep(50 * time.Millisecond) + continue + default: + return err + } + } + return nil + } + return errors.Wrapf(unix.EBUSY, "failed to unmount target %s", target) +} + +// UnmountAll repeatedly unmounts the given mount point until there +// are no mounts remaining (EINVAL is returned by mount), which is +// useful for undoing a stack of mounts on the same mount point. +// UnmountAll all is noop when the first argument is an empty string. +// This is done when the containerd client did not specify any rootfs +// mounts (e.g. because the rootfs is managed outside containerd) +// UnmountAll is noop when the mount path does not exist. +func UnmountAll(mount string, flags int) error { + if mount == "" { + return nil + } + if _, err := os.Stat(mount); os.IsNotExist(err) { + return nil + } + + for { + if err := unmount(mount, flags); err != nil { + // EINVAL is returned if the target is not a + // mount point, indicating that we are + // done. It can also indicate a few other + // things (such as invalid flags) which we + // unfortunately end up squelching here too. 
+ if err == unix.EINVAL { + return nil + } + return err + } + } +} + +// parseMountOptions takes fstab style mount options and parses them for +// use with a standard mount() syscall +func parseMountOptions(options []string) (int, string) { + var ( + flag int + data []string + ) + flags := map[string]struct { + clear bool + flag int + }{ + "async": {true, unix.MS_SYNCHRONOUS}, + "atime": {true, unix.MS_NOATIME}, + "bind": {false, unix.MS_BIND}, + "defaults": {false, 0}, + "dev": {true, unix.MS_NODEV}, + "diratime": {true, unix.MS_NODIRATIME}, + "dirsync": {false, unix.MS_DIRSYNC}, + "exec": {true, unix.MS_NOEXEC}, + "mand": {false, unix.MS_MANDLOCK}, + "noatime": {false, unix.MS_NOATIME}, + "nodev": {false, unix.MS_NODEV}, + "nodiratime": {false, unix.MS_NODIRATIME}, + "noexec": {false, unix.MS_NOEXEC}, + "nomand": {true, unix.MS_MANDLOCK}, + "norelatime": {true, unix.MS_RELATIME}, + "nostrictatime": {true, unix.MS_STRICTATIME}, + "nosuid": {false, unix.MS_NOSUID}, + "rbind": {false, unix.MS_BIND | unix.MS_REC}, + "relatime": {false, unix.MS_RELATIME}, + "remount": {false, unix.MS_REMOUNT}, + "ro": {false, unix.MS_RDONLY}, + "rw": {true, unix.MS_RDONLY}, + "strictatime": {false, unix.MS_STRICTATIME}, + "suid": {true, unix.MS_NOSUID}, + "sync": {false, unix.MS_SYNCHRONOUS}, + } + for _, o := range options { + // If the option does not exist in the flags table or the flag + // is not supported on the platform, + // then it is a data value for a specific fs type + if f, exists := flags[o]; exists && f.flag != 0 { + if f.clear { + flag &^= f.flag + } else { + flag |= f.flag + } + } else { + data = append(data, o) + } + } + return flag, strings.Join(data, ",") +} + +// compactLowerdirOption updates overlay lowdir option and returns the common +// dir among all the lowdirs. +func compactLowerdirOption(opts []string) (string, []string) { + idx, dirs := findOverlayLowerdirs(opts) + if idx == -1 || len(dirs) == 1 { + // no need to compact if there is only one lowerdir + return "", opts + } + + // find out common dir + commondir := longestCommonPrefix(dirs) + if commondir == "" { + return "", opts + } + + // NOTE: the snapshot id is based on digits. + // in order to avoid to get snapshots/x, should be back to parent dir. + // however, there is assumption that the common dir is ${root}/io.containerd.v1.overlayfs/snapshots. + commondir = path.Dir(commondir) + if commondir == "/" { + return "", opts + } + commondir = commondir + "/" + + newdirs := make([]string, 0, len(dirs)) + for _, dir := range dirs { + newdirs = append(newdirs, dir[len(commondir):]) + } + + newopts := copyOptions(opts) + newopts = append(newopts[:idx], newopts[idx+1:]...) + newopts = append(newopts, fmt.Sprintf("lowerdir=%s", strings.Join(newdirs, ":"))) + return commondir, newopts +} + +// findOverlayLowerdirs returns the index of lowerdir in mount's options and +// all the lowerdir target. +func findOverlayLowerdirs(opts []string) (int, []string) { + var ( + idx = -1 + prefix = "lowerdir=" + ) + + for i, opt := range opts { + if strings.HasPrefix(opt, prefix) { + idx = i + break + } + } + + if idx == -1 { + return -1, nil + } + return idx, strings.Split(opts[idx][len(prefix):], ":") +} + +// longestCommonPrefix finds the longest common prefix in the string slice. 
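+// Only the lexicographically smallest and largest entries need to be
+// compared: any prefix those two share is shared by every string in between.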
+func longestCommonPrefix(strs []string) string { + if len(strs) == 0 { + return "" + } else if len(strs) == 1 { + return strs[0] + } + + // find out the min/max value by alphabetical order + min, max := strs[0], strs[0] + for _, str := range strs[1:] { + if min > str { + min = str + } + if max < str { + max = str + } + } + + // find out the common part between min and max + for i := 0; i < len(min) && i < len(max); i++ { + if min[i] != max[i] { + return min[:i] + } + } + return min +} + +// copyOptions copies the options. +func copyOptions(opts []string) []string { + if len(opts) == 0 { + return nil + } + + acopy := make([]string, len(opts)) + copy(acopy, opts) + return acopy +} + +// optionsSize returns the byte size of options of mount. +func optionsSize(opts []string) int { + size := 0 + for _, opt := range opts { + size += len(opt) + } + return size +} + +func mountAt(chdir string, source, target, fstype string, flags uintptr, data string) error { + if chdir == "" { + return unix.Mount(source, target, fstype, flags, data) + } + + f, err := os.Open(chdir) + if err != nil { + return errors.Wrap(err, "failed to mountat") + } + defer f.Close() + + fs, err := f.Stat() + if err != nil { + return errors.Wrap(err, "failed to mountat") + } + + if !fs.IsDir() { + return errors.Wrap(errors.Errorf("%s is not dir", chdir), "failed to mountat") + } + return errors.Wrap(sys.FMountat(f.Fd(), source, target, fstype, flags, data), "failed to mountat") +} + +func (m *Mount) mountWithHelper(helperBinary, typePrefix, target string) error { + // helperBinary: "mount.fuse3" + // target: "/foo/merged" + // m.Type: "fuse3.fuse-overlayfs" + // command: "mount.fuse3 overlay /foo/merged -o lowerdir=/foo/lower2:/foo/lower1,upperdir=/foo/upper,workdir=/foo/work -t fuse-overlayfs" + args := []string{m.Source, target} + for _, o := range m.Options { + args = append(args, "-o", o) + } + args = append(args, "-t", strings.TrimPrefix(m.Type, typePrefix)) + cmd := exec.Command(helperBinary, args...) + out, err := cmd.CombinedOutput() + if err != nil { + return errors.Wrapf(err, "mount helper [%s %v] failed: %q", helperBinary, args, string(out)) + } + return nil +} diff --git a/vendor/github.com/containerd/containerd/mount/mount_unix.go b/vendor/github.com/containerd/containerd/mount/mount_unix.go new file mode 100644 index 00000000..95da9428 --- /dev/null +++ b/vendor/github.com/containerd/containerd/mount/mount_unix.go @@ -0,0 +1,41 @@ +// +build darwin freebsd openbsd + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package mount + +import "github.com/pkg/errors" + +var ( + // ErrNotImplementOnUnix is returned for methods that are not implemented + ErrNotImplementOnUnix = errors.New("not implemented under unix") +) + +// Mount is not implemented on this platform +func (m *Mount) Mount(target string) error { + return ErrNotImplementOnUnix +} + +// Unmount is not implemented on this platform +func Unmount(mount string, flags int) error { + return ErrNotImplementOnUnix +} + +// UnmountAll is not implemented on this platform +func UnmountAll(mount string, flags int) error { + return ErrNotImplementOnUnix +} diff --git a/vendor/github.com/containerd/containerd/mount/mount_windows.go b/vendor/github.com/containerd/containerd/mount/mount_windows.go new file mode 100644 index 00000000..5de25c4e --- /dev/null +++ b/vendor/github.com/containerd/containerd/mount/mount_windows.go @@ -0,0 +1,105 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package mount + +import ( + "encoding/json" + "path/filepath" + "strings" + + "github.com/Microsoft/hcsshim" + "github.com/pkg/errors" +) + +var ( + // ErrNotImplementOnWindows is returned when an action is not implemented for windows + ErrNotImplementOnWindows = errors.New("not implemented under windows") +) + +// Mount to the provided target +func (m *Mount) Mount(target string) error { + if m.Type != "windows-layer" { + return errors.Errorf("invalid windows mount type: '%s'", m.Type) + } + + home, layerID := filepath.Split(m.Source) + + parentLayerPaths, err := m.GetParentPaths() + if err != nil { + return err + } + + var di = hcsshim.DriverInfo{ + HomeDir: home, + } + + if err = hcsshim.ActivateLayer(di, layerID); err != nil { + return errors.Wrapf(err, "failed to activate layer %s", m.Source) + } + defer func() { + if err != nil { + hcsshim.DeactivateLayer(di, layerID) + } + }() + + if err = hcsshim.PrepareLayer(di, layerID, parentLayerPaths); err != nil { + return errors.Wrapf(err, "failed to prepare layer %s", m.Source) + } + return nil +} + +// ParentLayerPathsFlag is the options flag used to represent the JSON encoded +// list of parent layers required to use the layer +const ParentLayerPathsFlag = "parentLayerPaths=" + +// GetParentPaths of the mount +func (m *Mount) GetParentPaths() ([]string, error) { + var parentLayerPaths []string + for _, option := range m.Options { + if strings.HasPrefix(option, ParentLayerPathsFlag) { + err := json.Unmarshal([]byte(option[len(ParentLayerPathsFlag):]), &parentLayerPaths) + if err != nil { + return nil, errors.Wrap(err, "failed to unmarshal parent layer paths from mount") + } + } + } + return parentLayerPaths, nil +} + +// Unmount the mount at the provided path +func Unmount(mount string, flags int) error { + var ( + home, layerID = filepath.Split(mount) + di = hcsshim.DriverInfo{ + HomeDir: home, + } + ) + + if err := hcsshim.UnprepareLayer(di, layerID); err != nil { + return errors.Wrapf(err, "failed to unprepare layer %s", mount) + } + if err := hcsshim.DeactivateLayer(di, layerID); 
err != nil {
+ return errors.Wrapf(err, "failed to deactivate layer %s", mount)
+ }
+
+ return nil
+}
+
+// UnmountAll unmounts from the provided path
+func UnmountAll(mount string, flags int) error {
+ return Unmount(mount, flags)
+}
diff --git a/vendor/github.com/containerd/containerd/mount/mountinfo.go b/vendor/github.com/containerd/containerd/mount/mountinfo.go
new file mode 100644
index 00000000..e7a68402
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/mount/mountinfo.go
@@ -0,0 +1,56 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package mount
+
+// Info reveals information about a particular mounted filesystem. This
+// struct is populated from the content in the /proc/<pid>/mountinfo file.
+type Info struct {
+ // ID is a unique identifier of the mount (may be reused after umount).
+ ID int
+
+ // Parent indicates the ID of the mount parent (or of self for the top of the
+ // mount tree).
+ Parent int
+
+ // Major indicates one half of the device ID which identifies the device class.
+ Major int
+
+ // Minor indicates one half of the device ID which identifies a specific
+ // instance of device.
+ Minor int
+
+ // Root of the mount within the filesystem.
+ Root string
+
+ // Mountpoint indicates the mount point relative to the process's root.
+ Mountpoint string
+
+ // Options represents mount-specific options.
+ Options string
+
+ // Optional represents optional fields.
+ Optional string
+
+ // FSType indicates the type of filesystem, such as EXT3.
+ FSType string
+
+ // Source indicates filesystem specific information or "none".
+ Source string
+
+ // VFSOptions represents per super block options.
+ VFSOptions string
+}
diff --git a/vendor/github.com/containerd/containerd/mount/mountinfo_bsd.go b/vendor/github.com/containerd/containerd/mount/mountinfo_bsd.go
new file mode 100644
index 00000000..8f8dbf95
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/mount/mountinfo_bsd.go
@@ -0,0 +1,63 @@
+// +build freebsd openbsd
+
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package mount
+
+/*
+#include <sys/param.h>
+#include <sys/ucred.h>
+#include <sys/mount.h>
+*/
+import "C"
+
+import (
+ "fmt"
+ "reflect"
+ "unsafe"
+)
+
+// Self retrieves a list of mounts for the current running process.
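+// It is implemented with getmntinfo(3) via cgo; each statfs entry yields the
+// Mountpoint, Source, and FSType fields of the returned Info.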
+func Self() ([]Info, error) { + var rawEntries *C.struct_statfs + + count := int(C.getmntinfo(&rawEntries, C.MNT_WAIT)) + if count == 0 { + return nil, fmt.Errorf("Failed to call getmntinfo") + } + + var entries []C.struct_statfs + header := (*reflect.SliceHeader)(unsafe.Pointer(&entries)) + header.Cap = count + header.Len = count + header.Data = uintptr(unsafe.Pointer(rawEntries)) + + var out []Info + for _, entry := range entries { + var mountinfo Info + mountinfo.Mountpoint = C.GoString(&entry.f_mntonname[0]) + mountinfo.Source = C.GoString(&entry.f_mntfromname[0]) + mountinfo.FSType = C.GoString(&entry.f_fstypename[0]) + out = append(out, mountinfo) + } + return out, nil +} + +// PID collects the mounts for a specific process ID. +func PID(pid int) ([]Info, error) { + return nil, fmt.Errorf("mountinfo.PID is not implemented on freebsd") +} diff --git a/vendor/github.com/containerd/containerd/mount/mountinfo_linux.go b/vendor/github.com/containerd/containerd/mount/mountinfo_linux.go new file mode 100644 index 00000000..a7407c50 --- /dev/null +++ b/vendor/github.com/containerd/containerd/mount/mountinfo_linux.go @@ -0,0 +1,145 @@ +// +build linux + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package mount + +import ( + "bufio" + "fmt" + "io" + "os" + "strconv" + "strings" + + "github.com/pkg/errors" +) + +// Self retrieves a list of mounts for the current running process. 
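+// The entries are parsed from /proc/self/mountinfo; see proc(5) for the
+// format of each record.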
+func Self() ([]Info, error) { + f, err := os.Open("/proc/self/mountinfo") + if err != nil { + return nil, err + } + defer f.Close() + + return parseInfoFile(f) +} + +func parseInfoFile(r io.Reader) ([]Info, error) { + s := bufio.NewScanner(r) + out := []Info{} + var err error + for s.Scan() { + if err = s.Err(); err != nil { + return nil, err + } + + /* + See http://man7.org/linux/man-pages/man5/proc.5.html + + 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue + (1)(2)(3) (4) (5) (6) (7) (8) (9) (10) (11) + (1) mount ID: unique identifier of the mount (may be reused after umount) + (2) parent ID: ID of parent (or of self for the top of the mount tree) + (3) major:minor: value of st_dev for files on filesystem + (4) root: root of the mount within the filesystem + (5) mount point: mount point relative to the process's root + (6) mount options: per mount options + (7) optional fields: zero or more fields of the form "tag[:value]" + (8) separator: marks the end of the optional fields + (9) filesystem type: name of filesystem of the form "type[.subtype]" + (10) mount source: filesystem specific information or "none" + (11) super options: per super block options + */ + + text := s.Text() + fields := strings.Split(text, " ") + numFields := len(fields) + if numFields < 10 { + // should be at least 10 fields + return nil, errors.Errorf("parsing '%s' failed: not enough fields (%d)", text, numFields) + } + p := Info{} + // ignore any numbers parsing errors, as there should not be any + p.ID, _ = strconv.Atoi(fields[0]) + p.Parent, _ = strconv.Atoi(fields[1]) + mm := strings.Split(fields[2], ":") + if len(mm) != 2 { + return nil, errors.Errorf("parsing '%s' failed: unexpected minor:major pair %s", text, mm) + } + p.Major, _ = strconv.Atoi(mm[0]) + p.Minor, _ = strconv.Atoi(mm[1]) + + p.Root, err = strconv.Unquote(`"` + fields[3] + `"`) + if err != nil { + return nil, errors.Wrapf(err, "parsing '%s' failed: unable to unquote root field", fields[3]) + } + p.Mountpoint, err = strconv.Unquote(`"` + fields[4] + `"`) + if err != nil { + return nil, errors.Wrapf(err, "parsing '%s' failed: unable to unquote mount point field", fields[4]) + } + p.Options = fields[5] + + // one or more optional fields, when a separator (-) + i := 6 + for ; i < numFields && fields[i] != "-"; i++ { + switch i { + case 6: + p.Optional = fields[6] + default: + /* NOTE there might be more optional fields before the separator + such as fields[7]...fields[N] (where N < separatorIndex), + although as of Linux kernel 4.15 the only known ones are + mount propagation flags in fields[6]. The correct + behavior is to ignore any unknown optional fields. + */ + } + } + if i == numFields { + return nil, errors.Errorf("parsing '%s' failed: missing separator ('-')", text) + } + // There should be 3 fields after the separator... + if i+4 > numFields { + return nil, errors.Errorf("parsing '%s' failed: not enough fields after a separator", text) + } + // ... but in Linux <= 3.9 mounting a cifs with spaces in a share name + // (like "//serv/My Documents") _may_ end up having a space in the last field + // of mountinfo (like "unc=//serv/My Documents"). Since kernel 3.10-rc1, cifs + // option unc= is ignored, so a space should not appear. In here we ignore + // those "extra" fields caused by extra spaces. + p.FSType = fields[i+1] + p.Source = fields[i+2] + p.VFSOptions = fields[i+3] + + out = append(out, p) + } + return out, nil +} + +// PID collects the mounts for a specific process ID. 
If the process +// ID is unknown, it is better to use `Self` which will inspect +// "/proc/self/mountinfo" instead. +func PID(pid int) ([]Info, error) { + f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid)) + if err != nil { + return nil, err + } + defer f.Close() + + return parseInfoFile(f) +} diff --git a/vendor/github.com/containerd/containerd/mount/mountinfo_unsupported.go b/vendor/github.com/containerd/containerd/mount/mountinfo_unsupported.go new file mode 100644 index 00000000..ae998db6 --- /dev/null +++ b/vendor/github.com/containerd/containerd/mount/mountinfo_unsupported.go @@ -0,0 +1,34 @@ +// +build !linux,!freebsd,!solaris,!openbsd freebsd,!cgo solaris,!cgo openbsd,!cgo + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package mount + +import ( + "fmt" + "runtime" +) + +// Self retrieves a list of mounts for the current running process. +func Self() ([]Info, error) { + return nil, fmt.Errorf("mountinfo.Self is not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) +} + +// PID collects the mounts for a specific process ID. +func PID(pid int) ([]Info, error) { + return nil, fmt.Errorf("mountinfo.PID is not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) +} diff --git a/vendor/github.com/containerd/containerd/mount/temp.go b/vendor/github.com/containerd/containerd/mount/temp.go new file mode 100644 index 00000000..9dc4010f --- /dev/null +++ b/vendor/github.com/containerd/containerd/mount/temp.go @@ -0,0 +1,73 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package mount + +import ( + "context" + "io/ioutil" + "os" + + "github.com/containerd/containerd/log" + "github.com/pkg/errors" +) + +var tempMountLocation = getTempDir() + +// WithTempMount mounts the provided mounts to a temp dir, and pass the temp dir to f. +// The mounts are valid during the call to the f. +// Finally we will unmount and remove the temp dir regardless of the result of f. +func WithTempMount(ctx context.Context, mounts []Mount, f func(root string) error) (err error) { + root, uerr := ioutil.TempDir(tempMountLocation, "containerd-mount") + if uerr != nil { + return errors.Wrapf(uerr, "failed to create temp dir") + } + // We use Remove here instead of RemoveAll. + // The RemoveAll will delete the temp dir and all children it contains. + // When the Unmount fails, RemoveAll will incorrectly delete data from + // the mounted dir. 
However, if we use Remove, even though we won't
+ // successfully delete the temp dir and it may leak, we won't lose data
+ // from the mounted dir.
+ // For details, please refer to #1868 #1785.
+ defer func() {
+ if uerr = os.Remove(root); uerr != nil {
+ log.G(ctx).WithError(uerr).WithField("dir", root).Errorf("failed to remove mount temp dir")
+ }
+ }()
+
+ // Defer the unmount first; otherwise Unmount would be skipped when only
+ // part of the Mounts fail to be applied.
+ defer func() {
+ if uerr = UnmountAll(root, 0); uerr != nil {
+ uerr = errors.Wrapf(uerr, "failed to unmount %s", root)
+ if err == nil {
+ err = uerr
+ } else {
+ err = errors.Wrap(err, uerr.Error())
+ }
+ }
+ }()
+ if uerr = All(mounts, root); uerr != nil {
+ return errors.Wrapf(uerr, "failed to mount %s", root)
+ }
+ return errors.Wrapf(f(root), "mount callback failed on %s", root)
+}
+
+func getTempDir() string {
+ if xdg := os.Getenv("XDG_RUNTIME_DIR"); xdg != "" {
+ return xdg
+ }
+ return os.TempDir()
+}
diff --git a/vendor/github.com/containerd/containerd/mount/temp_unix.go b/vendor/github.com/containerd/containerd/mount/temp_unix.go
new file mode 100644
index 00000000..3d490e8a
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/mount/temp_unix.go
@@ -0,0 +1,64 @@
+// +build !windows
+
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package mount
+
+import (
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+)
+
+// SetTempMountLocation sets the temporary mount location
+func SetTempMountLocation(root string) error {
+ root, err := filepath.Abs(root)
+ if err != nil {
+ return err
+ }
+ if err := os.MkdirAll(root, 0700); err != nil {
+ return err
+ }
+ tempMountLocation = root
+ return nil
+}
+
+// CleanupTempMounts unmounts all temp mounts and removes their directories
+func CleanupTempMounts(flags int) (warnings []error, err error) {
+ mounts, err := Self()
+ if err != nil {
+ return nil, err
+ }
+ var toUnmount []string
+ for _, m := range mounts {
+ if strings.HasPrefix(m.Mountpoint, tempMountLocation) {
+ toUnmount = append(toUnmount, m.Mountpoint)
+ }
+ }
+ sort.Sort(sort.Reverse(sort.StringSlice(toUnmount)))
+ for _, path := range toUnmount {
+ if err := UnmountAll(path, flags); err != nil {
+ warnings = append(warnings, err)
+ continue
+ }
+ if err := os.Remove(path); err != nil {
+ warnings = append(warnings, err)
+ }
+ }
+ return warnings, nil
+}
diff --git a/vendor/github.com/containerd/containerd/mount/temp_unsupported.go b/vendor/github.com/containerd/containerd/mount/temp_unsupported.go
new file mode 100644
index 00000000..942be412
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/mount/temp_unsupported.go
@@ -0,0 +1,29 @@
+// +build windows
+
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package mount + +// SetTempMountLocation sets the temporary mount location +func SetTempMountLocation(root string) error { + return nil +} + +// CleanupTempMounts all temp mounts and remove the directories +func CleanupTempMounts(flags int) ([]error, error) { + return nil, nil +} diff --git a/vendor/github.com/containerd/containerd/namespaces/context.go b/vendor/github.com/containerd/containerd/namespaces/context.go new file mode 100644 index 00000000..b53c9012 --- /dev/null +++ b/vendor/github.com/containerd/containerd/namespaces/context.go @@ -0,0 +1,78 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package namespaces + +import ( + "context" + "os" + + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/identifiers" + "github.com/pkg/errors" +) + +const ( + // NamespaceEnvVar is the environment variable key name + NamespaceEnvVar = "CONTAINERD_NAMESPACE" + // Default is the name of the default namespace + Default = "default" +) + +type namespaceKey struct{} + +// WithNamespace sets a given namespace on the context +func WithNamespace(ctx context.Context, namespace string) context.Context { + ctx = context.WithValue(ctx, namespaceKey{}, namespace) // set our key for namespace + // also store on the grpc and ttrpc headers so it gets picked up by any clients that + // are using this. + return withTTRPCNamespaceHeader(withGRPCNamespaceHeader(ctx, namespace), namespace) +} + +// NamespaceFromEnv uses the namespace defined in CONTAINERD_NAMESPACE or +// default +func NamespaceFromEnv(ctx context.Context) context.Context { + namespace := os.Getenv(NamespaceEnvVar) + if namespace == "" { + namespace = Default + } + return WithNamespace(ctx, namespace) +} + +// Namespace returns the namespace from the context. +// +// The namespace is not guaranteed to be valid. +func Namespace(ctx context.Context) (string, bool) { + namespace, ok := ctx.Value(namespaceKey{}).(string) + if !ok { + if namespace, ok = fromGRPCHeader(ctx); !ok { + return fromTTRPCHeader(ctx) + } + } + return namespace, ok +} + +// NamespaceRequired returns the valid namespace from the context or an error. 
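+//
+// A typical caller establishes the namespace on the context first, e.g.:
+//
+//  ctx = namespaces.WithNamespace(ctx, "default")
+//  ns, err := namespaces.NamespaceRequired(ctx) // ns == "default"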
+func NamespaceRequired(ctx context.Context) (string, error) { + namespace, ok := Namespace(ctx) + if !ok || namespace == "" { + return "", errors.Wrapf(errdefs.ErrFailedPrecondition, "namespace is required") + } + if err := identifiers.Validate(namespace); err != nil { + return "", errors.Wrap(err, "namespace validation") + } + return namespace, nil +} diff --git a/vendor/github.com/containerd/containerd/namespaces/grpc.go b/vendor/github.com/containerd/containerd/namespaces/grpc.go new file mode 100644 index 00000000..6991460d --- /dev/null +++ b/vendor/github.com/containerd/containerd/namespaces/grpc.go @@ -0,0 +1,61 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package namespaces + +import ( + "context" + + "google.golang.org/grpc/metadata" +) + +const ( + // GRPCHeader defines the header name for specifying a containerd namespace. + GRPCHeader = "containerd-namespace" +) + +// NOTE(stevvooe): We can stub this file out if we don't want a grpc dependency here. + +func withGRPCNamespaceHeader(ctx context.Context, namespace string) context.Context { + // also store on the grpc headers so it gets picked up by any clients that + // are using this. + nsheader := metadata.Pairs(GRPCHeader, namespace) + md, ok := metadata.FromOutgoingContext(ctx) // merge with outgoing context. + if !ok { + md = nsheader + } else { + // order ensures the latest is first in this list. + md = metadata.Join(nsheader, md) + } + + return metadata.NewOutgoingContext(ctx, md) +} + +func fromGRPCHeader(ctx context.Context) (string, bool) { + // try to extract for use in grpc servers. + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + // TODO(stevvooe): Check outgoing context? + return "", false + } + + values := md[GRPCHeader] + if len(values) == 0 { + return "", false + } + + return values[0], true +} diff --git a/vendor/github.com/containerd/containerd/namespaces/store.go b/vendor/github.com/containerd/containerd/namespaces/store.go new file mode 100644 index 00000000..5936772c --- /dev/null +++ b/vendor/github.com/containerd/containerd/namespaces/store.go @@ -0,0 +1,46 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package namespaces + +import "context" + +// Store provides introspection about namespaces. +// +// Note that these are slightly different than other objects, which are record +// oriented. A namespace is really just a name and a set of labels. 
Objects +// that belong to a namespace are returned when the namespace is assigned to a +// given context. +// +// +type Store interface { + Create(ctx context.Context, namespace string, labels map[string]string) error + Labels(ctx context.Context, namespace string) (map[string]string, error) + SetLabel(ctx context.Context, namespace, key, value string) error + List(ctx context.Context) ([]string, error) + + // Delete removes the namespace. The namespace must be empty to be deleted. + Delete(ctx context.Context, namespace string, opts ...DeleteOpts) error +} + +// DeleteInfo specifies information for the deletion of a namespace +type DeleteInfo struct { + // Name of the namespace + Name string +} + +// DeleteOpts allows the caller to set options for namespace deletion +type DeleteOpts func(context.Context, *DeleteInfo) error diff --git a/vendor/github.com/containerd/containerd/namespaces/ttrpc.go b/vendor/github.com/containerd/containerd/namespaces/ttrpc.go new file mode 100644 index 00000000..bcd2643c --- /dev/null +++ b/vendor/github.com/containerd/containerd/namespaces/ttrpc.go @@ -0,0 +1,51 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package namespaces + +import ( + "context" + + "github.com/containerd/ttrpc" +) + +const ( + // TTRPCHeader defines the header name for specifying a containerd namespace + TTRPCHeader = "containerd-namespace-ttrpc" +) + +func copyMetadata(src ttrpc.MD) ttrpc.MD { + md := ttrpc.MD{} + for k, v := range src { + md[k] = append(md[k], v...) + } + return md +} + +func withTTRPCNamespaceHeader(ctx context.Context, namespace string) context.Context { + md, ok := ttrpc.GetMetadata(ctx) + if !ok { + md = ttrpc.MD{} + } else { + md = copyMetadata(md) + } + md.Set(TTRPCHeader, namespace) + return ttrpc.WithMetadata(ctx, md) +} + +func fromTTRPCHeader(ctx context.Context) (string, bool) { + return ttrpc.GetMetadataValue(ctx, TTRPCHeader) +} diff --git a/vendor/github.com/containerd/containerd/oci/client.go b/vendor/github.com/containerd/containerd/oci/client.go new file mode 100644 index 00000000..9923101b --- /dev/null +++ b/vendor/github.com/containerd/containerd/oci/client.go @@ -0,0 +1,38 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package oci + +import ( + "context" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/snapshots" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// Client interface used by SpecOpt +type Client interface { + SnapshotService(snapshotterName string) snapshots.Snapshotter +} + +// Image interface used by some SpecOpt to query image configuration +type Image interface { + // Config descriptor for the image. + Config(ctx context.Context) (ocispec.Descriptor, error) + // ContentStore provides a content store which contains image blob data + ContentStore() content.Store +} diff --git a/vendor/github.com/containerd/containerd/oci/spec.go b/vendor/github.com/containerd/containerd/oci/spec.go new file mode 100644 index 00000000..035bb7e7 --- /dev/null +++ b/vendor/github.com/containerd/containerd/oci/spec.go @@ -0,0 +1,253 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package oci + +import ( + "context" + "path/filepath" + "runtime" + + "github.com/containerd/containerd/namespaces" + "github.com/containerd/containerd/platforms" + + "github.com/containerd/containerd/containers" + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +const ( + rwm = "rwm" + defaultRootfsPath = "rootfs" +) + +var ( + defaultUnixEnv = []string{ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", + } +) + +// Spec is a type alias to the OCI runtime spec to allow third part SpecOpts +// to be created without the "issues" with go vendoring and package imports +type Spec = specs.Spec + +// GenerateSpec will generate a default spec from the provided image +// for use as a containerd container +func GenerateSpec(ctx context.Context, client Client, c *containers.Container, opts ...SpecOpts) (*Spec, error) { + return GenerateSpecWithPlatform(ctx, client, platforms.DefaultString(), c, opts...) +} + +// GenerateSpecWithPlatform will generate a default spec from the provided image +// for use as a containerd container in the platform requested. +func GenerateSpecWithPlatform(ctx context.Context, client Client, platform string, c *containers.Container, opts ...SpecOpts) (*Spec, error) { + var s Spec + if err := generateDefaultSpecWithPlatform(ctx, platform, c.ID, &s); err != nil { + return nil, err + } + + return &s, ApplyOpts(ctx, client, c, &s, opts...) +} + +func generateDefaultSpecWithPlatform(ctx context.Context, platform, id string, s *Spec) error { + plat, err := platforms.Parse(platform) + if err != nil { + return err + } + + if plat.OS == "windows" { + err = populateDefaultWindowsSpec(ctx, s, id) + } else { + err = populateDefaultUnixSpec(ctx, s, id) + if err == nil && runtime.GOOS == "windows" { + // To run LCOW we have a Linux and Windows section. Add an empty one now. + s.Windows = &specs.Windows{} + } + } + return err +} + +// ApplyOpts applies the options to the given spec, injecting data from the +// context, client and container instance. 
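+// Opts are applied in order, so a later opt may overwrite fields set by an
+// earlier one.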
+func ApplyOpts(ctx context.Context, client Client, c *containers.Container, s *Spec, opts ...SpecOpts) error { + for _, o := range opts { + if err := o(ctx, client, c, s); err != nil { + return err + } + } + + return nil +} + +func defaultUnixCaps() []string { + return []string{ + "CAP_CHOWN", + "CAP_DAC_OVERRIDE", + "CAP_FSETID", + "CAP_FOWNER", + "CAP_MKNOD", + "CAP_NET_RAW", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETFCAP", + "CAP_SETPCAP", + "CAP_NET_BIND_SERVICE", + "CAP_SYS_CHROOT", + "CAP_KILL", + "CAP_AUDIT_WRITE", + } +} + +func defaultUnixNamespaces() []specs.LinuxNamespace { + return []specs.LinuxNamespace{ + { + Type: specs.PIDNamespace, + }, + { + Type: specs.IPCNamespace, + }, + { + Type: specs.UTSNamespace, + }, + { + Type: specs.MountNamespace, + }, + { + Type: specs.NetworkNamespace, + }, + } +} + +func populateDefaultUnixSpec(ctx context.Context, s *Spec, id string) error { + ns, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return err + } + + *s = Spec{ + Version: specs.Version, + Root: &specs.Root{ + Path: defaultRootfsPath, + }, + Process: &specs.Process{ + Cwd: "/", + NoNewPrivileges: true, + User: specs.User{ + UID: 0, + GID: 0, + }, + Capabilities: &specs.LinuxCapabilities{ + Bounding: defaultUnixCaps(), + Permitted: defaultUnixCaps(), + Inheritable: defaultUnixCaps(), + Effective: defaultUnixCaps(), + }, + Rlimits: []specs.POSIXRlimit{ + { + Type: "RLIMIT_NOFILE", + Hard: uint64(1024), + Soft: uint64(1024), + }, + }, + }, + Mounts: []specs.Mount{ + { + Destination: "/proc", + Type: "proc", + Source: "proc", + Options: []string{"nosuid", "noexec", "nodev"}, + }, + { + Destination: "/dev", + Type: "tmpfs", + Source: "tmpfs", + Options: []string{"nosuid", "strictatime", "mode=755", "size=65536k"}, + }, + { + Destination: "/dev/pts", + Type: "devpts", + Source: "devpts", + Options: []string{"nosuid", "noexec", "newinstance", "ptmxmode=0666", "mode=0620", "gid=5"}, + }, + { + Destination: "/dev/shm", + Type: "tmpfs", + Source: "shm", + Options: []string{"nosuid", "noexec", "nodev", "mode=1777", "size=65536k"}, + }, + { + Destination: "/dev/mqueue", + Type: "mqueue", + Source: "mqueue", + Options: []string{"nosuid", "noexec", "nodev"}, + }, + { + Destination: "/sys", + Type: "sysfs", + Source: "sysfs", + Options: []string{"nosuid", "noexec", "nodev", "ro"}, + }, + { + Destination: "/run", + Type: "tmpfs", + Source: "tmpfs", + Options: []string{"nosuid", "strictatime", "mode=755", "size=65536k"}, + }, + }, + Linux: &specs.Linux{ + MaskedPaths: []string{ + "/proc/acpi", + "/proc/asound", + "/proc/kcore", + "/proc/keys", + "/proc/latency_stats", + "/proc/timer_list", + "/proc/timer_stats", + "/proc/sched_debug", + "/sys/firmware", + "/proc/scsi", + }, + ReadonlyPaths: []string{ + "/proc/bus", + "/proc/fs", + "/proc/irq", + "/proc/sys", + "/proc/sysrq-trigger", + }, + CgroupsPath: filepath.Join("/", ns, id), + Resources: &specs.LinuxResources{ + Devices: []specs.LinuxDeviceCgroup{ + { + Allow: false, + Access: rwm, + }, + }, + }, + Namespaces: defaultUnixNamespaces(), + }, + } + return nil +} + +func populateDefaultWindowsSpec(ctx context.Context, s *Spec, id string) error { + *s = Spec{ + Version: specs.Version, + Root: &specs.Root{}, + Process: &specs.Process{ + Cwd: `C:\`, + }, + Windows: &specs.Windows{}, + } + return nil +} diff --git a/vendor/github.com/containerd/containerd/oci/spec_opts.go b/vendor/github.com/containerd/containerd/oci/spec_opts.go new file mode 100644 index 00000000..4c14c557 --- /dev/null +++ 
b/vendor/github.com/containerd/containerd/oci/spec_opts.go @@ -0,0 +1,1258 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package oci + +import ( + "bufio" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/containerd/containerd/containers" + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/mount" + "github.com/containerd/containerd/namespaces" + "github.com/containerd/containerd/platforms" + "github.com/containerd/continuity/fs" + v1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/opencontainers/runc/libcontainer/user" + specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" + "github.com/syndtr/gocapability/capability" +) + +// SpecOpts sets spec specific information to a newly generated OCI spec +type SpecOpts func(context.Context, Client, *containers.Container, *Spec) error + +// Compose converts a sequence of spec operations into a single operation +func Compose(opts ...SpecOpts) SpecOpts { + return func(ctx context.Context, client Client, c *containers.Container, s *Spec) error { + for _, o := range opts { + if err := o(ctx, client, c, s); err != nil { + return err + } + } + return nil + } +} + +// setProcess sets Process to empty if unset +func setProcess(s *Spec) { + if s.Process == nil { + s.Process = &specs.Process{} + } +} + +// setRoot sets Root to empty if unset +func setRoot(s *Spec) { + if s.Root == nil { + s.Root = &specs.Root{} + } +} + +// setLinux sets Linux to empty if unset +func setLinux(s *Spec) { + if s.Linux == nil { + s.Linux = &specs.Linux{} + } +} + +// nolint +func setResources(s *Spec) { + if s.Linux != nil { + if s.Linux.Resources == nil { + s.Linux.Resources = &specs.LinuxResources{} + } + } + if s.Windows != nil { + if s.Windows.Resources == nil { + s.Windows.Resources = &specs.WindowsResources{} + } + } +} + +// setCapabilities sets Linux Capabilities to empty if unset +func setCapabilities(s *Spec) { + setProcess(s) + if s.Process.Capabilities == nil { + s.Process.Capabilities = &specs.LinuxCapabilities{} + } +} + +// WithDefaultSpec returns a SpecOpts that will populate the spec with default +// values. +// +// Use as the first option to clear the spec, then apply options afterwards. +func WithDefaultSpec() SpecOpts { + return func(ctx context.Context, _ Client, c *containers.Container, s *Spec) error { + return generateDefaultSpecWithPlatform(ctx, platforms.DefaultString(), c.ID, s) + } +} + +// WithDefaultSpecForPlatform returns a SpecOpts that will populate the spec +// with default values for a given platform. +// +// Use as the first option to clear the spec, then apply options afterwards. 
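+//
+// A hedged sketch of the intended ordering (assumes ctx carries a containerd
+// namespace, which the unix defaults require):
+//
+//	err := oci.ApplyOpts(ctx, nil, &c, &s,
+//		oci.WithDefaultSpecForPlatform("linux/amd64"), // reset to defaults first
+//		oci.WithProcessArgs("/bin/sh"),                // then customize
+//	)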
+func WithDefaultSpecForPlatform(platform string) SpecOpts { + return func(ctx context.Context, _ Client, c *containers.Container, s *Spec) error { + return generateDefaultSpecWithPlatform(ctx, platform, c.ID, s) + } +} + +// WithSpecFromBytes loads the spec from the provided byte slice. +func WithSpecFromBytes(p []byte) SpecOpts { + return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + *s = Spec{} // make sure spec is cleared. + if err := json.Unmarshal(p, s); err != nil { + return errors.Wrapf(err, "decoding spec config file failed, current supported OCI runtime-spec : v%s", specs.Version) + } + return nil + } +} + +// WithSpecFromFile loads the specification from the provided filename. +func WithSpecFromFile(filename string) SpecOpts { + return func(ctx context.Context, c Client, container *containers.Container, s *Spec) error { + p, err := ioutil.ReadFile(filename) + if err != nil { + return errors.Wrap(err, "cannot load spec config file") + } + return WithSpecFromBytes(p)(ctx, c, container, s) + } +} + +// WithEnv appends environment variables +func WithEnv(environmentVariables []string) SpecOpts { + return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + if len(environmentVariables) > 0 { + setProcess(s) + s.Process.Env = replaceOrAppendEnvValues(s.Process.Env, environmentVariables) + } + return nil + } +} + +// WithDefaultPathEnv sets the $PATH environment variable to the +// default PATH defined in this package. +func WithDefaultPathEnv(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + s.Process.Env = replaceOrAppendEnvValues(s.Process.Env, defaultUnixEnv) + return nil +} + +// replaceOrAppendEnvValues returns the defaults with the overrides either +// replaced by env key or appended to the list +func replaceOrAppendEnvValues(defaults, overrides []string) []string { + cache := make(map[string]int, len(defaults)) + results := make([]string, 0, len(defaults)) + for i, e := range defaults { + parts := strings.SplitN(e, "=", 2) + results = append(results, e) + cache[parts[0]] = i + } + + for _, value := range overrides { + // Values w/o = means they want this env to be removed/unset. + if !strings.Contains(value, "=") { + if i, exists := cache[value]; exists { + results[i] = "" // Used to indicate it should be removed + } + continue + } + + // Just do a normal set/update + parts := strings.SplitN(value, "=", 2) + if i, exists := cache[parts[0]]; exists { + results[i] = value + } else { + results = append(results, value) + } + } + + // Now remove all entries that we want to "unset" + for i := 0; i < len(results); i++ { + if results[i] == "" { + results = append(results[:i], results[i+1:]...) 
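+			// the slice shrank by one, so step back to revisit index i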
+ i-- + } + } + + return results +} + +// WithProcessArgs replaces the args on the generated spec +func WithProcessArgs(args ...string) SpecOpts { + return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + setProcess(s) + s.Process.Args = args + return nil + } +} + +// WithProcessCwd replaces the current working directory on the generated spec +func WithProcessCwd(cwd string) SpecOpts { + return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + setProcess(s) + s.Process.Cwd = cwd + return nil + } +} + +// WithTTY sets the information on the spec as well as the environment variables for +// using a TTY +func WithTTY(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + setProcess(s) + s.Process.Terminal = true + if s.Linux != nil { + s.Process.Env = append(s.Process.Env, "TERM=xterm") + } + + return nil +} + +// WithTTYSize sets the information on the spec as well as the environment variables for +// using a TTY +func WithTTYSize(width, height int) SpecOpts { + return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + setProcess(s) + if s.Process.ConsoleSize == nil { + s.Process.ConsoleSize = &specs.Box{} + } + s.Process.ConsoleSize.Width = uint(width) + s.Process.ConsoleSize.Height = uint(height) + return nil + } +} + +// WithHostname sets the container's hostname +func WithHostname(name string) SpecOpts { + return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + s.Hostname = name + return nil + } +} + +// WithMounts appends mounts +func WithMounts(mounts []specs.Mount) SpecOpts { + return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + s.Mounts = append(s.Mounts, mounts...) + return nil + } +} + +// WithHostNamespace allows a task to run inside the host's linux namespace +func WithHostNamespace(ns specs.LinuxNamespaceType) SpecOpts { + return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + setLinux(s) + for i, n := range s.Linux.Namespaces { + if n.Type == ns { + s.Linux.Namespaces = append(s.Linux.Namespaces[:i], s.Linux.Namespaces[i+1:]...) + return nil + } + } + return nil + } +} + +// WithLinuxNamespace uses the passed in namespace for the spec. If a namespace of the same type already exists in the +// spec, the existing namespace is replaced by the one provided. +func WithLinuxNamespace(ns specs.LinuxNamespace) SpecOpts { + return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + setLinux(s) + for i, n := range s.Linux.Namespaces { + if n.Type == ns.Type { + before := s.Linux.Namespaces[:i] + after := s.Linux.Namespaces[i+1:] + s.Linux.Namespaces = append(before, ns) + s.Linux.Namespaces = append(s.Linux.Namespaces, after...) 
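+				// rebuilt as before + ns + after, replacing the namespace in place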
+				return nil
+			}
+		}
+		s.Linux.Namespaces = append(s.Linux.Namespaces, ns)
+		return nil
+	}
+}
+
+// WithNewPrivileges turns off the NoNewPrivileges feature flag in the spec
+func WithNewPrivileges(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
+	setProcess(s)
+	s.Process.NoNewPrivileges = false
+
+	return nil
+}
+
+// WithImageConfig configures the spec from the configuration of an Image
+func WithImageConfig(image Image) SpecOpts {
+	return WithImageConfigArgs(image, nil)
+}
+
+// WithImageConfigArgs configures the spec from the configuration of an Image, with additional args that
+// replace the CMD of the image
+func WithImageConfigArgs(image Image, args []string) SpecOpts {
+	return func(ctx context.Context, client Client, c *containers.Container, s *Spec) error {
+		ic, err := image.Config(ctx)
+		if err != nil {
+			return err
+		}
+		var (
+			ociimage v1.Image
+			config   v1.ImageConfig
+		)
+		switch ic.MediaType {
+		case v1.MediaTypeImageConfig, images.MediaTypeDockerSchema2Config:
+			p, err := content.ReadBlob(ctx, image.ContentStore(), ic)
+			if err != nil {
+				return err
+			}
+
+			if err := json.Unmarshal(p, &ociimage); err != nil {
+				return err
+			}
+			config = ociimage.Config
+		default:
+			return fmt.Errorf("unknown image config media type %s", ic.MediaType)
+		}
+
+		setProcess(s)
+		if s.Linux != nil {
+			defaults := config.Env
+			if len(defaults) == 0 {
+				defaults = defaultUnixEnv
+			}
+			s.Process.Env = replaceOrAppendEnvValues(defaults, s.Process.Env)
+			cmd := config.Cmd
+			if len(args) > 0 {
+				cmd = args
+			}
+			s.Process.Args = append(config.Entrypoint, cmd...)
+
+			cwd := config.WorkingDir
+			if cwd == "" {
+				cwd = "/"
+			}
+			s.Process.Cwd = cwd
+			if config.User != "" {
+				if err := WithUser(config.User)(ctx, client, c, s); err != nil {
+					return err
+				}
+				return WithAdditionalGIDs(fmt.Sprintf("%d", s.Process.User.UID))(ctx, client, c, s)
+			}
+			// we should query the image's /etc/group for additional GIDs
+			// even if there is no specified user in the image config
+			return WithAdditionalGIDs("root")(ctx, client, c, s)
+		} else if s.Windows != nil {
+			s.Process.Env = replaceOrAppendEnvValues(config.Env, s.Process.Env)
+			cmd := config.Cmd
+			if len(args) > 0 {
+				cmd = args
+			}
+			s.Process.Args = append(config.Entrypoint, cmd...)
+
+			s.Process.Cwd = config.WorkingDir
+			s.Process.User = specs.User{
+				Username: config.User,
+			}
+		} else {
+			return errors.New("spec does not contain Linux or Windows section")
+		}
+		return nil
+	}
+}
+
+// WithRootFSPath specifies unmanaged rootfs path.
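+// For example (hypothetical path): oci.WithRootFSPath("/run/demo/rootfs").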
+func WithRootFSPath(path string) SpecOpts { + return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + setRoot(s) + s.Root.Path = path + // Entrypoint is not set here (it's up to caller) + return nil + } +} + +// WithRootFSReadonly sets specs.Root.Readonly to true +func WithRootFSReadonly() SpecOpts { + return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + setRoot(s) + s.Root.Readonly = true + return nil + } +} + +// WithNoNewPrivileges sets no_new_privileges on the process for the container +func WithNoNewPrivileges(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + setProcess(s) + s.Process.NoNewPrivileges = true + return nil +} + +// WithHostHostsFile bind-mounts the host's /etc/hosts into the container as readonly +func WithHostHostsFile(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + s.Mounts = append(s.Mounts, specs.Mount{ + Destination: "/etc/hosts", + Type: "bind", + Source: "/etc/hosts", + Options: []string{"rbind", "ro"}, + }) + return nil +} + +// WithHostResolvconf bind-mounts the host's /etc/resolv.conf into the container as readonly +func WithHostResolvconf(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + s.Mounts = append(s.Mounts, specs.Mount{ + Destination: "/etc/resolv.conf", + Type: "bind", + Source: "/etc/resolv.conf", + Options: []string{"rbind", "ro"}, + }) + return nil +} + +// WithHostLocaltime bind-mounts the host's /etc/localtime into the container as readonly +func WithHostLocaltime(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + s.Mounts = append(s.Mounts, specs.Mount{ + Destination: "/etc/localtime", + Type: "bind", + Source: "/etc/localtime", + Options: []string{"rbind", "ro"}, + }) + return nil +} + +// WithUserNamespace sets the uid and gid mappings for the task +// this can be called multiple times to add more mappings to the generated spec +func WithUserNamespace(uidMap, gidMap []specs.LinuxIDMapping) SpecOpts { + return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + var hasUserns bool + setLinux(s) + for _, ns := range s.Linux.Namespaces { + if ns.Type == specs.UserNamespace { + hasUserns = true + break + } + } + if !hasUserns { + s.Linux.Namespaces = append(s.Linux.Namespaces, specs.LinuxNamespace{ + Type: specs.UserNamespace, + }) + } + s.Linux.UIDMappings = append(s.Linux.UIDMappings, uidMap...) + s.Linux.GIDMappings = append(s.Linux.GIDMappings, gidMap...) + return nil + } +} + +// WithCgroup sets the container's cgroup path +func WithCgroup(path string) SpecOpts { + return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + setLinux(s) + s.Linux.CgroupsPath = path + return nil + } +} + +// WithNamespacedCgroup uses the namespace set on the context to create a +// root directory for containers in the cgroup with the id as the subcgroup +func WithNamespacedCgroup() SpecOpts { + return func(ctx context.Context, _ Client, c *containers.Container, s *Spec) error { + namespace, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return err + } + setLinux(s) + s.Linux.CgroupsPath = filepath.Join("/", namespace, c.ID) + return nil + } +} + +// WithUser sets the user to be used within the container. 
+// It accepts a valid user string in OCI Image Spec v1.0.0: +// user, uid, user:group, uid:gid, uid:group, user:gid +func WithUser(userstr string) SpecOpts { + return func(ctx context.Context, client Client, c *containers.Container, s *Spec) error { + setProcess(s) + parts := strings.Split(userstr, ":") + switch len(parts) { + case 1: + v, err := strconv.Atoi(parts[0]) + if err != nil { + // if we cannot parse as a uint they try to see if it is a username + return WithUsername(userstr)(ctx, client, c, s) + } + return WithUserID(uint32(v))(ctx, client, c, s) + case 2: + var ( + username string + groupname string + ) + var uid, gid uint32 + v, err := strconv.Atoi(parts[0]) + if err != nil { + username = parts[0] + } else { + uid = uint32(v) + } + if v, err = strconv.Atoi(parts[1]); err != nil { + groupname = parts[1] + } else { + gid = uint32(v) + } + if username == "" && groupname == "" { + s.Process.User.UID, s.Process.User.GID = uid, gid + return nil + } + f := func(root string) error { + if username != "" { + user, err := getUserFromPath(root, func(u user.User) bool { + return u.Name == username + }) + if err != nil { + return err + } + uid = uint32(user.Uid) + } + if groupname != "" { + gid, err = getGIDFromPath(root, func(g user.Group) bool { + return g.Name == groupname + }) + if err != nil { + return err + } + } + s.Process.User.UID, s.Process.User.GID = uid, gid + return nil + } + if c.Snapshotter == "" && c.SnapshotKey == "" { + if !isRootfsAbs(s.Root.Path) { + return errors.New("rootfs absolute path is required") + } + return f(s.Root.Path) + } + if c.Snapshotter == "" { + return errors.New("no snapshotter set for container") + } + if c.SnapshotKey == "" { + return errors.New("rootfs snapshot not created for container") + } + snapshotter := client.SnapshotService(c.Snapshotter) + mounts, err := snapshotter.Mounts(ctx, c.SnapshotKey) + if err != nil { + return err + } + return mount.WithTempMount(ctx, mounts, f) + default: + return fmt.Errorf("invalid USER value %s", userstr) + } + } +} + +// WithUIDGID allows the UID and GID for the Process to be set +func WithUIDGID(uid, gid uint32) SpecOpts { + return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + setProcess(s) + s.Process.User.UID = uid + s.Process.User.GID = gid + return nil + } +} + +// WithUserID sets the correct UID and GID for the container based +// on the image's /etc/passwd contents. If /etc/passwd does not exist, +// or uid is not found in /etc/passwd, it sets the requested uid, +// additionally sets the gid to 0, and does not return an error. 
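+//
+// For example, oci.WithUserID(1000) resolves UID 1000 against the image's
+// /etc/passwd to pick up the matching GID, and falls back to 1000:0 when
+// no entry exists.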
+func WithUserID(uid uint32) SpecOpts { + return func(ctx context.Context, client Client, c *containers.Container, s *Spec) (err error) { + setProcess(s) + if c.Snapshotter == "" && c.SnapshotKey == "" { + if !isRootfsAbs(s.Root.Path) { + return errors.Errorf("rootfs absolute path is required") + } + user, err := getUserFromPath(s.Root.Path, func(u user.User) bool { + return u.Uid == int(uid) + }) + if err != nil { + if os.IsNotExist(err) || err == errNoUsersFound { + s.Process.User.UID, s.Process.User.GID = uid, 0 + return nil + } + return err + } + s.Process.User.UID, s.Process.User.GID = uint32(user.Uid), uint32(user.Gid) + return nil + + } + if c.Snapshotter == "" { + return errors.Errorf("no snapshotter set for container") + } + if c.SnapshotKey == "" { + return errors.Errorf("rootfs snapshot not created for container") + } + snapshotter := client.SnapshotService(c.Snapshotter) + mounts, err := snapshotter.Mounts(ctx, c.SnapshotKey) + if err != nil { + return err + } + return mount.WithTempMount(ctx, mounts, func(root string) error { + user, err := getUserFromPath(root, func(u user.User) bool { + return u.Uid == int(uid) + }) + if err != nil { + if os.IsNotExist(err) || err == errNoUsersFound { + s.Process.User.UID, s.Process.User.GID = uid, 0 + return nil + } + return err + } + s.Process.User.UID, s.Process.User.GID = uint32(user.Uid), uint32(user.Gid) + return nil + }) + } +} + +// WithUsername sets the correct UID and GID for the container +// based on the image's /etc/passwd contents. If /etc/passwd +// does not exist, or the username is not found in /etc/passwd, +// it returns error. +func WithUsername(username string) SpecOpts { + return func(ctx context.Context, client Client, c *containers.Container, s *Spec) (err error) { + setProcess(s) + if s.Linux != nil { + if c.Snapshotter == "" && c.SnapshotKey == "" { + if !isRootfsAbs(s.Root.Path) { + return errors.Errorf("rootfs absolute path is required") + } + user, err := getUserFromPath(s.Root.Path, func(u user.User) bool { + return u.Name == username + }) + if err != nil { + return err + } + s.Process.User.UID, s.Process.User.GID = uint32(user.Uid), uint32(user.Gid) + return nil + } + if c.Snapshotter == "" { + return errors.Errorf("no snapshotter set for container") + } + if c.SnapshotKey == "" { + return errors.Errorf("rootfs snapshot not created for container") + } + snapshotter := client.SnapshotService(c.Snapshotter) + mounts, err := snapshotter.Mounts(ctx, c.SnapshotKey) + if err != nil { + return err + } + return mount.WithTempMount(ctx, mounts, func(root string) error { + user, err := getUserFromPath(root, func(u user.User) bool { + return u.Name == username + }) + if err != nil { + return err + } + s.Process.User.UID, s.Process.User.GID = uint32(user.Uid), uint32(user.Gid) + return nil + }) + } else if s.Windows != nil { + s.Process.User.Username = username + } else { + return errors.New("spec does not contain Linux or Windows section") + } + return nil + } +} + +// WithAdditionalGIDs sets the OCI spec's additionalGids array to any additional groups listed +// for a particular user in the /etc/groups file of the image's root filesystem +// The passed in user can be either a uid or a username. 
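+//
+// For example, oci.WithAdditionalGIDs("root") fills Process.User.AdditionalGids
+// with every group in the image's /etc/group that lists "root" as a member.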
+func WithAdditionalGIDs(userstr string) SpecOpts { + return func(ctx context.Context, client Client, c *containers.Container, s *Spec) (err error) { + // For LCOW additional GID's not supported + if s.Windows != nil { + return nil + } + setProcess(s) + setAdditionalGids := func(root string) error { + var username string + uid, err := strconv.Atoi(userstr) + if err == nil { + user, err := getUserFromPath(root, func(u user.User) bool { + return u.Uid == uid + }) + if err != nil { + if os.IsNotExist(err) || err == errNoUsersFound { + return nil + } + return err + } + username = user.Name + } else { + username = userstr + } + gids, err := getSupplementalGroupsFromPath(root, func(g user.Group) bool { + // we only want supplemental groups + if g.Name == username { + return false + } + for _, entry := range g.List { + if entry == username { + return true + } + } + return false + }) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + s.Process.User.AdditionalGids = gids + return nil + } + if c.Snapshotter == "" && c.SnapshotKey == "" { + if !isRootfsAbs(s.Root.Path) { + return errors.Errorf("rootfs absolute path is required") + } + return setAdditionalGids(s.Root.Path) + } + if c.Snapshotter == "" { + return errors.Errorf("no snapshotter set for container") + } + if c.SnapshotKey == "" { + return errors.Errorf("rootfs snapshot not created for container") + } + snapshotter := client.SnapshotService(c.Snapshotter) + mounts, err := snapshotter.Mounts(ctx, c.SnapshotKey) + if err != nil { + return err + } + return mount.WithTempMount(ctx, mounts, setAdditionalGids) + } +} + +// WithCapabilities sets Linux capabilities on the process +func WithCapabilities(caps []string) SpecOpts { + return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + setCapabilities(s) + + s.Process.Capabilities.Bounding = caps + s.Process.Capabilities.Effective = caps + s.Process.Capabilities.Permitted = caps + s.Process.Capabilities.Inheritable = caps + + return nil + } +} + +// WithAllCapabilities sets all linux capabilities for the process +var WithAllCapabilities = func(ctx context.Context, client Client, c *containers.Container, s *Spec) error { + return WithCapabilities(GetAllCapabilities())(ctx, client, c, s) +} + +// GetAllCapabilities returns all caps up to CAP_LAST_CAP +// or CAP_BLOCK_SUSPEND on RHEL6 +func GetAllCapabilities() []string { + last := capability.CAP_LAST_CAP + // hack for RHEL6 which has no /proc/sys/kernel/cap_last_cap + if last == capability.Cap(63) { + last = capability.CAP_BLOCK_SUSPEND + } + var caps []string + for _, cap := range capability.List() { + if cap > last { + continue + } + caps = append(caps, "CAP_"+strings.ToUpper(cap.String())) + } + return caps +} + +func capsContain(caps []string, s string) bool { + for _, c := range caps { + if c == s { + return true + } + } + return false +} + +func removeCap(caps *[]string, s string) { + var newcaps []string + for _, c := range *caps { + if c == s { + continue + } + newcaps = append(newcaps, c) + } + *caps = newcaps +} + +// WithAddedCapabilities adds the provided capabilities +func WithAddedCapabilities(caps []string) SpecOpts { + return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + setCapabilities(s) + for _, c := range caps { + for _, cl := range []*[]string{ + &s.Process.Capabilities.Bounding, + &s.Process.Capabilities.Effective, + &s.Process.Capabilities.Permitted, + &s.Process.Capabilities.Inheritable, + } { + if !capsContain(*cl, c) { + *cl = 
append(*cl, c) + } + } + } + return nil + } +} + +// WithDroppedCapabilities removes the provided capabilities +func WithDroppedCapabilities(caps []string) SpecOpts { + return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + setCapabilities(s) + for _, c := range caps { + for _, cl := range []*[]string{ + &s.Process.Capabilities.Bounding, + &s.Process.Capabilities.Effective, + &s.Process.Capabilities.Permitted, + &s.Process.Capabilities.Inheritable, + } { + removeCap(cl, c) + } + } + return nil + } +} + +// WithAmbientCapabilities set the Linux ambient capabilities for the process +// Ambient capabilities should only be set for non-root users or the caller should +// understand how these capabilities are used and set +func WithAmbientCapabilities(caps []string) SpecOpts { + return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + setCapabilities(s) + + s.Process.Capabilities.Ambient = caps + return nil + } +} + +var errNoUsersFound = errors.New("no users found") + +func getUserFromPath(root string, filter func(user.User) bool) (user.User, error) { + ppath, err := fs.RootPath(root, "/etc/passwd") + if err != nil { + return user.User{}, err + } + users, err := user.ParsePasswdFileFilter(ppath, filter) + if err != nil { + return user.User{}, err + } + if len(users) == 0 { + return user.User{}, errNoUsersFound + } + return users[0], nil +} + +var errNoGroupsFound = errors.New("no groups found") + +func getGIDFromPath(root string, filter func(user.Group) bool) (gid uint32, err error) { + gpath, err := fs.RootPath(root, "/etc/group") + if err != nil { + return 0, err + } + groups, err := user.ParseGroupFileFilter(gpath, filter) + if err != nil { + return 0, err + } + if len(groups) == 0 { + return 0, errNoGroupsFound + } + g := groups[0] + return uint32(g.Gid), nil +} + +func getSupplementalGroupsFromPath(root string, filter func(user.Group) bool) ([]uint32, error) { + gpath, err := fs.RootPath(root, "/etc/group") + if err != nil { + return []uint32{}, err + } + groups, err := user.ParseGroupFileFilter(gpath, filter) + if err != nil { + return []uint32{}, err + } + if len(groups) == 0 { + // if there are no additional groups; just return an empty set + return []uint32{}, nil + } + addlGids := []uint32{} + for _, grp := range groups { + addlGids = append(addlGids, uint32(grp.Gid)) + } + return addlGids, nil +} + +func isRootfsAbs(root string) bool { + return filepath.IsAbs(root) +} + +// WithMaskedPaths sets the masked paths option +func WithMaskedPaths(paths []string) SpecOpts { + return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + setLinux(s) + s.Linux.MaskedPaths = paths + return nil + } +} + +// WithReadonlyPaths sets the read only paths option +func WithReadonlyPaths(paths []string) SpecOpts { + return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + setLinux(s) + s.Linux.ReadonlyPaths = paths + return nil + } +} + +// WithWriteableSysfs makes any sysfs mounts writeable +func WithWriteableSysfs(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + for i, m := range s.Mounts { + if m.Type == "sysfs" { + var options []string + for _, o := range m.Options { + if o == "ro" { + o = "rw" + } + options = append(options, o) + } + s.Mounts[i].Options = options + } + } + return nil +} + +// WithWriteableCgroupfs makes any cgroup mounts writeable +func WithWriteableCgroupfs(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + for i, m := range 
s.Mounts { + if m.Type == "cgroup" { + var options []string + for _, o := range m.Options { + if o == "ro" { + o = "rw" + } + options = append(options, o) + } + s.Mounts[i].Options = options + } + } + return nil +} + +// WithSelinuxLabel sets the process SELinux label +func WithSelinuxLabel(label string) SpecOpts { + return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + setProcess(s) + s.Process.SelinuxLabel = label + return nil + } +} + +// WithApparmorProfile sets the Apparmor profile for the process +func WithApparmorProfile(profile string) SpecOpts { + return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + setProcess(s) + s.Process.ApparmorProfile = profile + return nil + } +} + +// WithSeccompUnconfined clears the seccomp profile +func WithSeccompUnconfined(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + setLinux(s) + s.Linux.Seccomp = nil + return nil +} + +// WithParentCgroupDevices uses the default cgroup setup to inherit the container's parent cgroup's +// allowed and denied devices +func WithParentCgroupDevices(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + setLinux(s) + if s.Linux.Resources == nil { + s.Linux.Resources = &specs.LinuxResources{} + } + s.Linux.Resources.Devices = nil + return nil +} + +// WithAllDevicesAllowed permits READ WRITE MKNOD on all devices nodes for the container +func WithAllDevicesAllowed(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + setLinux(s) + if s.Linux.Resources == nil { + s.Linux.Resources = &specs.LinuxResources{} + } + s.Linux.Resources.Devices = []specs.LinuxDeviceCgroup{ + { + Allow: true, + Access: rwm, + }, + } + return nil +} + +// WithDefaultUnixDevices adds the default devices for unix such as /dev/null, /dev/random to +// the container's resource cgroup spec +func WithDefaultUnixDevices(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + setLinux(s) + if s.Linux.Resources == nil { + s.Linux.Resources = &specs.LinuxResources{} + } + intptr := func(i int64) *int64 { + return &i + } + s.Linux.Resources.Devices = append(s.Linux.Resources.Devices, []specs.LinuxDeviceCgroup{ + { + // "/dev/null", + Type: "c", + Major: intptr(1), + Minor: intptr(3), + Access: rwm, + Allow: true, + }, + { + // "/dev/random", + Type: "c", + Major: intptr(1), + Minor: intptr(8), + Access: rwm, + Allow: true, + }, + { + // "/dev/full", + Type: "c", + Major: intptr(1), + Minor: intptr(7), + Access: rwm, + Allow: true, + }, + { + // "/dev/tty", + Type: "c", + Major: intptr(5), + Minor: intptr(0), + Access: rwm, + Allow: true, + }, + { + // "/dev/zero", + Type: "c", + Major: intptr(1), + Minor: intptr(5), + Access: rwm, + Allow: true, + }, + { + // "/dev/urandom", + Type: "c", + Major: intptr(1), + Minor: intptr(9), + Access: rwm, + Allow: true, + }, + { + // "/dev/console", + Type: "c", + Major: intptr(5), + Minor: intptr(1), + Access: rwm, + Allow: true, + }, + // /dev/pts/ - pts namespaces are "coming soon" + { + Type: "c", + Major: intptr(136), + Access: rwm, + Allow: true, + }, + { + Type: "c", + Major: intptr(5), + Minor: intptr(2), + Access: rwm, + Allow: true, + }, + { + // tuntap + Type: "c", + Major: intptr(10), + Minor: intptr(200), + Access: rwm, + Allow: true, + }, + }...) 
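+	// The major/minor pairs above follow the kernel's devices.txt registry:
+	// 1,3 null; 1,5 zero; 1,7 full; 1,8 random; 1,9 urandom; 5,0 tty;
+	// 5,1 console; 5,2 ptmx; 136,* pts; 10,200 tun.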
+ return nil +} + +// WithPrivileged sets up options for a privileged container +var WithPrivileged = Compose( + WithAllCapabilities, + WithMaskedPaths(nil), + WithReadonlyPaths(nil), + WithWriteableSysfs, + WithWriteableCgroupfs, + WithSelinuxLabel(""), + WithApparmorProfile(""), + WithSeccompUnconfined, +) + +// WithWindowsHyperV sets the Windows.HyperV section for HyperV isolation of containers. +func WithWindowsHyperV(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + if s.Windows == nil { + s.Windows = &specs.Windows{} + } + if s.Windows.HyperV == nil { + s.Windows.HyperV = &specs.WindowsHyperV{} + } + return nil +} + +// WithMemoryLimit sets the `Linux.LinuxResources.Memory.Limit` section to the +// `limit` specified if the `Linux` section is not `nil`. Additionally sets the +// `Windows.WindowsResources.Memory.Limit` section if the `Windows` section is +// not `nil`. +func WithMemoryLimit(limit uint64) SpecOpts { + return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + if s.Linux != nil { + if s.Linux.Resources == nil { + s.Linux.Resources = &specs.LinuxResources{} + } + if s.Linux.Resources.Memory == nil { + s.Linux.Resources.Memory = &specs.LinuxMemory{} + } + l := int64(limit) + s.Linux.Resources.Memory.Limit = &l + } + if s.Windows != nil { + if s.Windows.Resources == nil { + s.Windows.Resources = &specs.WindowsResources{} + } + if s.Windows.Resources.Memory == nil { + s.Windows.Resources.Memory = &specs.WindowsMemoryResources{} + } + s.Windows.Resources.Memory.Limit = &limit + } + return nil + } +} + +// WithAnnotations appends or replaces the annotations on the spec with the +// provided annotations +func WithAnnotations(annotations map[string]string) SpecOpts { + return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + if s.Annotations == nil { + s.Annotations = make(map[string]string) + } + for k, v := range annotations { + s.Annotations[k] = v + } + return nil + } +} + +// WithLinuxDevices adds the provided linux devices to the spec +func WithLinuxDevices(devices []specs.LinuxDevice) SpecOpts { + return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + setLinux(s) + s.Linux.Devices = append(s.Linux.Devices, devices...) 
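+		// devices are appended verbatim; unlike WithLinuxDevice below, no
+		// matching cgroup rule is added, so access control is the caller's job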
+ return nil + } +} + +var ErrNotADevice = errors.New("not a device node") + +// WithLinuxDevice adds the device specified by path to the spec +func WithLinuxDevice(path, permissions string) SpecOpts { + return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + setLinux(s) + setResources(s) + + dev, err := deviceFromPath(path, permissions) + if err != nil { + return err + } + + s.Linux.Devices = append(s.Linux.Devices, *dev) + + s.Linux.Resources.Devices = append(s.Linux.Resources.Devices, specs.LinuxDeviceCgroup{ + Type: dev.Type, + Allow: true, + Major: &dev.Major, + Minor: &dev.Minor, + Access: permissions, + }) + + return nil + } +} + +// WithEnvFile adds environment variables from a file to the container's spec +func WithEnvFile(path string) SpecOpts { + return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + var vars []string + f, err := os.Open(path) + if err != nil { + return err + } + defer f.Close() + + sc := bufio.NewScanner(f) + for sc.Scan() { + if sc.Err() != nil { + return sc.Err() + } + vars = append(vars, sc.Text()) + } + return WithEnv(vars)(nil, nil, nil, s) + } +} + +// ErrNoShmMount is returned when there is no /dev/shm mount specified in the config +// and an Opts was trying to set a configuration value on the mount. +var ErrNoShmMount = errors.New("no /dev/shm mount specified") + +// WithDevShmSize sets the size of the /dev/shm mount for the container. +// +// The size value is specified in kb, kilobytes. +func WithDevShmSize(kb int64) SpecOpts { + return func(ctx context.Context, _ Client, c *containers.Container, s *Spec) error { + for _, m := range s.Mounts { + if m.Source == "shm" && m.Type == "tmpfs" { + for i, o := range m.Options { + if strings.HasPrefix(o, "size=") { + m.Options[i] = fmt.Sprintf("size=%dk", kb) + return nil + } + } + m.Options = append(m.Options, fmt.Sprintf("size=%dk", kb)) + return nil + } + } + return ErrNoShmMount + } +} diff --git a/vendor/github.com/containerd/containerd/oci/spec_opts_linux.go b/vendor/github.com/containerd/containerd/oci/spec_opts_linux.go new file mode 100644 index 00000000..1448ee78 --- /dev/null +++ b/vendor/github.com/containerd/containerd/oci/spec_opts_linux.go @@ -0,0 +1,121 @@ +// +build linux + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package oci + +import ( + "context" + "io/ioutil" + "os" + "path/filepath" + + "github.com/containerd/containerd/containers" + specs "github.com/opencontainers/runtime-spec/specs-go" + "golang.org/x/sys/unix" +) + +// WithHostDevices adds all the hosts device nodes to the container's spec +func WithHostDevices(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + setLinux(s) + + devs, err := getDevices("/dev") + if err != nil { + return err + } + s.Linux.Devices = append(s.Linux.Devices, devs...) 
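+	// devs is gathered by walking /dev recursively (see getDevices below)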
+ return nil +} + +func getDevices(path string) ([]specs.LinuxDevice, error) { + files, err := ioutil.ReadDir(path) + if err != nil { + return nil, err + } + var out []specs.LinuxDevice + for _, f := range files { + switch { + case f.IsDir(): + switch f.Name() { + // ".lxc" & ".lxd-mounts" added to address https://github.com/lxc/lxd/issues/2825 + // ".udev" added to address https://github.com/opencontainers/runc/issues/2093 + case "pts", "shm", "fd", "mqueue", ".lxc", ".lxd-mounts", ".udev": + continue + default: + sub, err := getDevices(filepath.Join(path, f.Name())) + if err != nil { + return nil, err + } + + out = append(out, sub...) + continue + } + case f.Name() == "console": + continue + } + device, err := deviceFromPath(filepath.Join(path, f.Name()), "rwm") + if err != nil { + if err == ErrNotADevice { + continue + } + if os.IsNotExist(err) { + continue + } + return nil, err + } + out = append(out, *device) + } + return out, nil +} + +func deviceFromPath(path, permissions string) (*specs.LinuxDevice, error) { + var stat unix.Stat_t + if err := unix.Lstat(path, &stat); err != nil { + return nil, err + } + + var ( + // The type is 32bit on mips. + devNumber = uint64(stat.Rdev) // nolint: unconvert + major = unix.Major(devNumber) + minor = unix.Minor(devNumber) + ) + if major == 0 { + return nil, ErrNotADevice + } + + var ( + devType string + mode = stat.Mode + ) + switch { + case mode&unix.S_IFBLK == unix.S_IFBLK: + devType = "b" + case mode&unix.S_IFCHR == unix.S_IFCHR: + devType = "c" + } + fm := os.FileMode(mode) + return &specs.LinuxDevice{ + Type: devType, + Path: path, + Major: int64(major), + Minor: int64(minor), + FileMode: &fm, + UID: &stat.Uid, + GID: &stat.Gid, + }, nil +} diff --git a/vendor/github.com/containerd/containerd/oci/spec_opts_unix.go b/vendor/github.com/containerd/containerd/oci/spec_opts_unix.go new file mode 100644 index 00000000..bcabf0ef --- /dev/null +++ b/vendor/github.com/containerd/containerd/oci/spec_opts_unix.go @@ -0,0 +1,120 @@ +// +build !linux,!windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package oci + +import ( + "context" + "io/ioutil" + "os" + "path/filepath" + + "github.com/containerd/containerd/containers" + specs "github.com/opencontainers/runtime-spec/specs-go" + "golang.org/x/sys/unix" +) + +// WithHostDevices adds all the hosts device nodes to the container's spec +func WithHostDevices(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + setLinux(s) + + devs, err := getDevices("/dev") + if err != nil { + return err + } + s.Linux.Devices = append(s.Linux.Devices, devs...) 
+ return nil +} + +func getDevices(path string) ([]specs.LinuxDevice, error) { + files, err := ioutil.ReadDir(path) + if err != nil { + return nil, err + } + var out []specs.LinuxDevice + for _, f := range files { + switch { + case f.IsDir(): + switch f.Name() { + // ".lxc" & ".lxd-mounts" added to address https://github.com/lxc/lxd/issues/2825 + // ".udev" added to address https://github.com/opencontainers/runc/issues/2093 + case "pts", "shm", "fd", "mqueue", ".lxc", ".lxd-mounts", ".udev": + continue + default: + sub, err := getDevices(filepath.Join(path, f.Name())) + if err != nil { + return nil, err + } + + out = append(out, sub...) + continue + } + case f.Name() == "console": + continue + } + device, err := deviceFromPath(filepath.Join(path, f.Name()), "rwm") + if err != nil { + if err == ErrNotADevice { + continue + } + if os.IsNotExist(err) { + continue + } + return nil, err + } + out = append(out, *device) + } + return out, nil +} + +func deviceFromPath(path, permissions string) (*specs.LinuxDevice, error) { + var stat unix.Stat_t + if err := unix.Lstat(path, &stat); err != nil { + return nil, err + } + + var ( + devNumber = uint64(stat.Rdev) + major = unix.Major(devNumber) + minor = unix.Minor(devNumber) + ) + if major == 0 { + return nil, ErrNotADevice + } + + var ( + devType string + mode = stat.Mode + ) + switch { + case mode&unix.S_IFBLK == unix.S_IFBLK: + devType = "b" + case mode&unix.S_IFCHR == unix.S_IFCHR: + devType = "c" + } + fm := os.FileMode(mode) + return &specs.LinuxDevice{ + Type: devType, + Path: path, + Major: int64(major), + Minor: int64(minor), + FileMode: &fm, + UID: &stat.Uid, + GID: &stat.Gid, + }, nil +} diff --git a/vendor/github.com/containerd/containerd/oci/spec_opts_windows.go b/vendor/github.com/containerd/containerd/oci/spec_opts_windows.go new file mode 100644 index 00000000..47caf192 --- /dev/null +++ b/vendor/github.com/containerd/containerd/oci/spec_opts_windows.go @@ -0,0 +1,79 @@ +// +build windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package oci + +import ( + "context" + + "github.com/containerd/containerd/containers" + specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" +) + +// WithWindowsCPUCount sets the `Windows.Resources.CPU.Count` section to the +// `count` specified. +func WithWindowsCPUCount(count uint64) SpecOpts { + return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + if s.Windows.Resources == nil { + s.Windows.Resources = &specs.WindowsResources{} + } + if s.Windows.Resources.CPU == nil { + s.Windows.Resources.CPU = &specs.WindowsCPUResources{} + } + s.Windows.Resources.CPU.Count = &count + return nil + } +} + +// WithWindowsIgnoreFlushesDuringBoot sets `Windows.IgnoreFlushesDuringBoot`. 
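+// Skipping disk flushes while the guest boots can speed up Windows container
+// start-up; the trade-off is durability during boot, so enable it deliberately.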
+func WithWindowsIgnoreFlushesDuringBoot() SpecOpts {
+	return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
+		if s.Windows == nil {
+			s.Windows = &specs.Windows{}
+		}
+		s.Windows.IgnoreFlushesDuringBoot = true
+		return nil
+	}
+}
+
+// WithWindowNetworksAllowUnqualifiedDNSQuery sets `Windows.Network.AllowUnqualifiedDNSQuery`.
+func WithWindowNetworksAllowUnqualifiedDNSQuery() SpecOpts {
+	return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
+		if s.Windows == nil {
+			s.Windows = &specs.Windows{}
+		}
+		if s.Windows.Network == nil {
+			s.Windows.Network = &specs.WindowsNetwork{}
+		}
+
+		s.Windows.Network.AllowUnqualifiedDNSQuery = true
+		return nil
+	}
+}
+
+// WithHostDevices adds all the host's device nodes to the container's spec
+//
+// Not supported on windows
+func WithHostDevices(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
+	return nil
+}
+
+func deviceFromPath(path, permissions string) (*specs.LinuxDevice, error) {
+	return nil, errors.New("device from path not supported on Windows")
+}
diff --git a/vendor/github.com/containerd/containerd/platforms/compare.go b/vendor/github.com/containerd/containerd/platforms/compare.go
new file mode 100644
index 00000000..3ad22a10
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/platforms/compare.go
@@ -0,0 +1,229 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package platforms
+
+import specs "github.com/opencontainers/image-spec/specs-go/v1"
+
+// MatchComparer is able to match and compare platforms to
+// filter and sort platforms.
+type MatchComparer interface {
+	Matcher
+
+	Less(specs.Platform, specs.Platform) bool
+}
+
+// Only returns a match comparer for a single platform
+// using default resolution logic for the platform.
+// +// For ARMv8, will also match ARMv7, ARMv6 and ARMv5 (for 32bit runtimes) +// For ARMv7, will also match ARMv6 and ARMv5 +// For ARMv6, will also match ARMv5 +func Only(platform specs.Platform) MatchComparer { + platform = Normalize(platform) + if platform.Architecture == "arm" { + if platform.Variant == "v8" { + return orderedPlatformComparer{ + matchers: []Matcher{ + &matcher{ + Platform: platform, + }, + &matcher{ + Platform: specs.Platform{ + Architecture: platform.Architecture, + OS: platform.OS, + OSVersion: platform.OSVersion, + OSFeatures: platform.OSFeatures, + Variant: "v7", + }, + }, + &matcher{ + Platform: specs.Platform{ + Architecture: platform.Architecture, + OS: platform.OS, + OSVersion: platform.OSVersion, + OSFeatures: platform.OSFeatures, + Variant: "v6", + }, + }, + &matcher{ + Platform: specs.Platform{ + Architecture: platform.Architecture, + OS: platform.OS, + OSVersion: platform.OSVersion, + OSFeatures: platform.OSFeatures, + Variant: "v5", + }, + }, + }, + } + } + if platform.Variant == "v7" { + return orderedPlatformComparer{ + matchers: []Matcher{ + &matcher{ + Platform: platform, + }, + &matcher{ + Platform: specs.Platform{ + Architecture: platform.Architecture, + OS: platform.OS, + OSVersion: platform.OSVersion, + OSFeatures: platform.OSFeatures, + Variant: "v6", + }, + }, + &matcher{ + Platform: specs.Platform{ + Architecture: platform.Architecture, + OS: platform.OS, + OSVersion: platform.OSVersion, + OSFeatures: platform.OSFeatures, + Variant: "v5", + }, + }, + }, + } + } + if platform.Variant == "v6" { + return orderedPlatformComparer{ + matchers: []Matcher{ + &matcher{ + Platform: platform, + }, + &matcher{ + Platform: specs.Platform{ + Architecture: platform.Architecture, + OS: platform.OS, + OSVersion: platform.OSVersion, + OSFeatures: platform.OSFeatures, + Variant: "v5", + }, + }, + }, + } + } + } + + return singlePlatformComparer{ + Matcher: &matcher{ + Platform: platform, + }, + } +} + +// Ordered returns a platform MatchComparer which matches any of the platforms +// but orders them in order they are provided. +func Ordered(platforms ...specs.Platform) MatchComparer { + matchers := make([]Matcher, len(platforms)) + for i := range platforms { + matchers[i] = NewMatcher(platforms[i]) + } + return orderedPlatformComparer{ + matchers: matchers, + } +} + +// Any returns a platform MatchComparer which matches any of the platforms +// with no preference for ordering. +func Any(platforms ...specs.Platform) MatchComparer { + matchers := make([]Matcher, len(platforms)) + for i := range platforms { + matchers[i] = NewMatcher(platforms[i]) + } + return anyPlatformComparer{ + matchers: matchers, + } +} + +// All is a platform MatchComparer which matches all platforms +// with preference for ordering. 
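+//
+// e.g. All.Match(p) reports true for any platform p, and All.Less always
+// reports false, leaving the input order untouched.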
+var All MatchComparer = allPlatformComparer{}
+
+type singlePlatformComparer struct {
+	Matcher
+}
+
+func (c singlePlatformComparer) Less(p1, p2 specs.Platform) bool {
+	return c.Match(p1) && !c.Match(p2)
+}
+
+type orderedPlatformComparer struct {
+	matchers []Matcher
+}
+
+func (c orderedPlatformComparer) Match(platform specs.Platform) bool {
+	for _, m := range c.matchers {
+		if m.Match(platform) {
+			return true
+		}
+	}
+	return false
+}
+
+func (c orderedPlatformComparer) Less(p1 specs.Platform, p2 specs.Platform) bool {
+	for _, m := range c.matchers {
+		p1m := m.Match(p1)
+		p2m := m.Match(p2)
+		if p1m && !p2m {
+			return true
+		}
+		if p1m || p2m {
+			return false
+		}
+	}
+	return false
+}
+
+type anyPlatformComparer struct {
+	matchers []Matcher
+}
+
+func (c anyPlatformComparer) Match(platform specs.Platform) bool {
+	for _, m := range c.matchers {
+		if m.Match(platform) {
+			return true
+		}
+	}
+	return false
+}
+
+func (c anyPlatformComparer) Less(p1, p2 specs.Platform) bool {
+	var p1m, p2m bool
+	for _, m := range c.matchers {
+		if !p1m && m.Match(p1) {
+			p1m = true
+		}
+		if !p2m && m.Match(p2) {
+			p2m = true
+		}
+		if p1m && p2m {
+			return false
+		}
+	}
+	// If one matches and the other does not, sort the match first
+	return p1m && !p2m
+}
+
+type allPlatformComparer struct{}
+
+func (allPlatformComparer) Match(specs.Platform) bool {
+	return true
+}
+
+func (allPlatformComparer) Less(specs.Platform, specs.Platform) bool {
+	return false
+}
diff --git a/vendor/github.com/containerd/containerd/platforms/cpuinfo.go b/vendor/github.com/containerd/containerd/platforms/cpuinfo.go
new file mode 100644
index 00000000..ffa8970a
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/platforms/cpuinfo.go
@@ -0,0 +1,122 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package platforms
+
+import (
+	"bufio"
+	"os"
+	"runtime"
+	"strings"
+
+	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/containerd/log"
+	"github.com/pkg/errors"
+)
+
+// cpuVariant holds the ARM instruction set architecture variant, e.g. v7, v8
+var cpuVariant string
+
+func init() {
+	if isArmArch(runtime.GOARCH) {
+		cpuVariant = getCPUVariant()
+	} else {
+		cpuVariant = ""
+	}
+}
+
+// For Linux, the kernel has already detected the ABI, ISA and Features.
+// So we don't need to access the ARM registers to detect platform information
+// by ourselves. We can just parse this information from /proc/cpuinfo
+func getCPUInfo(pattern string) (info string, err error) {
+	if !isLinuxOS(runtime.GOOS) {
+		return "", errors.Wrapf(errdefs.ErrNotImplemented, "getCPUInfo for OS %s", runtime.GOOS)
+	}
+
+	cpuinfo, err := os.Open("/proc/cpuinfo")
+	if err != nil {
+		return "", err
+	}
+	defer cpuinfo.Close()
+
+	// Parse /proc/cpuinfo line by line; for an SMP SoC, parsing the
+	// first core is enough.
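+	// Each cpuinfo line has the form "key\t: value", e.g. "CPU architecture: 8";
+	// keys are compared case-insensitively below.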
+ scanner := bufio.NewScanner(cpuinfo) + for scanner.Scan() { + newline := scanner.Text() + list := strings.Split(newline, ":") + + if len(list) > 1 && strings.EqualFold(strings.TrimSpace(list[0]), pattern) { + return strings.TrimSpace(list[1]), nil + } + } + + // Check whether the scanner encountered errors + err = scanner.Err() + if err != nil { + return "", err + } + + return "", errors.Wrapf(errdefs.ErrNotFound, "getCPUInfo for pattern: %s", pattern) +} + +func getCPUVariant() string { + if runtime.GOOS == "windows" { + // Windows only supports v7 for ARM32 and v8 for ARM64 and so we can use + // runtime.GOARCH to determine the variants + var variant string + switch runtime.GOARCH { + case "arm64": + variant = "v8" + case "arm": + variant = "v7" + default: + variant = "unknown" + } + + return variant + } + + variant, err := getCPUInfo("Cpu architecture") + if err != nil { + log.L.WithError(err).Error("failure getting variant") + return "" + } + + switch strings.ToLower(variant) { + case "8", "aarch64": + // special case: if running a 32-bit userspace on aarch64, the variant should be "v7" + if runtime.GOARCH == "arm" { + variant = "v7" + } else { + variant = "v8" + } + case "7", "7m", "?(12)", "?(13)", "?(14)", "?(15)", "?(16)", "?(17)": + variant = "v7" + case "6", "6tej": + variant = "v6" + case "5", "5t", "5te", "5tej": + variant = "v5" + case "4", "4t": + variant = "v4" + case "3": + variant = "v3" + default: + variant = "unknown" + } + + return variant +} diff --git a/vendor/github.com/containerd/containerd/platforms/database.go b/vendor/github.com/containerd/containerd/platforms/database.go new file mode 100644 index 00000000..6ede9406 --- /dev/null +++ b/vendor/github.com/containerd/containerd/platforms/database.go @@ -0,0 +1,114 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package platforms + +import ( + "runtime" + "strings" +) + +// isLinuxOS returns true if the operating system is Linux. +// +// The OS value should be normalized before calling this function. +func isLinuxOS(os string) bool { + return os == "linux" +} + +// These function are generated from https://golang.org/src/go/build/syslist.go. +// +// We use switch statements because they are slightly faster than map lookups +// and use a little less memory. + +// isKnownOS returns true if we know about the operating system. +// +// The OS value should be normalized before calling this function. +func isKnownOS(os string) bool { + switch os { + case "aix", "android", "darwin", "dragonfly", "freebsd", "hurd", "illumos", "js", "linux", "nacl", "netbsd", "openbsd", "plan9", "solaris", "windows", "zos": + return true + } + return false +} + +// isArmArch returns true if the architecture is ARM. +// +// The arch value should be normalized before being passed to this function. +func isArmArch(arch string) bool { + switch arch { + case "arm", "arm64": + return true + } + return false +} + +// isKnownArch returns true if we know about the architecture. 
+// +// The arch value should be normalized before being passed to this function. +func isKnownArch(arch string) bool { + switch arch { + case "386", "amd64", "amd64p32", "arm", "armbe", "arm64", "arm64be", "ppc64", "ppc64le", "mips", "mipsle", "mips64", "mips64le", "mips64p32", "mips64p32le", "ppc", "riscv", "riscv64", "s390", "s390x", "sparc", "sparc64", "wasm": + return true + } + return false +} + +func normalizeOS(os string) string { + if os == "" { + return runtime.GOOS + } + os = strings.ToLower(os) + + switch os { + case "macos": + os = "darwin" + } + return os +} + +// normalizeArch normalizes the architecture. +func normalizeArch(arch, variant string) (string, string) { + arch, variant = strings.ToLower(arch), strings.ToLower(variant) + switch arch { + case "i386": + arch = "386" + variant = "" + case "x86_64", "x86-64": + arch = "amd64" + variant = "" + case "aarch64", "arm64": + arch = "arm64" + switch variant { + case "8", "v8": + variant = "" + } + case "armhf": + arch = "arm" + variant = "v7" + case "armel": + arch = "arm" + variant = "v6" + case "arm": + switch variant { + case "", "7": + variant = "v7" + case "5", "6", "8": + variant = "v" + variant + } + } + + return arch, variant +} diff --git a/vendor/github.com/containerd/containerd/platforms/defaults.go b/vendor/github.com/containerd/containerd/platforms/defaults.go new file mode 100644 index 00000000..a14d80e5 --- /dev/null +++ b/vendor/github.com/containerd/containerd/platforms/defaults.go @@ -0,0 +1,38 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package platforms + +import ( + "runtime" + + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +// DefaultString returns the default string specifier for the platform. +func DefaultString() string { + return Format(DefaultSpec()) +} + +// DefaultSpec returns the current platform's default platform specification. +func DefaultSpec() specs.Platform { + return specs.Platform{ + OS: runtime.GOOS, + Architecture: runtime.GOARCH, + // The Variant field will be empty if arch != ARM. + Variant: cpuVariant, + } +} diff --git a/vendor/github.com/containerd/containerd/platforms/defaults_unix.go b/vendor/github.com/containerd/containerd/platforms/defaults_unix.go new file mode 100644 index 00000000..e8a7d5ff --- /dev/null +++ b/vendor/github.com/containerd/containerd/platforms/defaults_unix.go @@ -0,0 +1,24 @@ +// +build !windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package platforms + +// Default returns the default matcher for the platform. +func Default() MatchComparer { + return Only(DefaultSpec()) +} diff --git a/vendor/github.com/containerd/containerd/platforms/defaults_windows.go b/vendor/github.com/containerd/containerd/platforms/defaults_windows.go new file mode 100644 index 00000000..0defbd36 --- /dev/null +++ b/vendor/github.com/containerd/containerd/platforms/defaults_windows.go @@ -0,0 +1,31 @@ +// +build windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package platforms + +import ( + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +// Default returns the default matcher for the platform. +func Default() MatchComparer { + return Ordered(DefaultSpec(), specs.Platform{ + OS: "linux", + Architecture: "amd64", + }) +} diff --git a/vendor/github.com/containerd/containerd/platforms/platforms.go b/vendor/github.com/containerd/containerd/platforms/platforms.go new file mode 100644 index 00000000..77d3f184 --- /dev/null +++ b/vendor/github.com/containerd/containerd/platforms/platforms.go @@ -0,0 +1,278 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// Package platforms provides a toolkit for normalizing, matching and +// specifying container platforms. +// +// Centered around OCI platform specifications, we define a string-based +// specifier syntax that can be used for user input. With a specifier, users +// only need to specify the parts of the platform that are relevant to their +// context, providing an operating system or architecture or both. +// +// How do I use this package? +// +// The vast majority of use cases should simply use the match function with +// user input. The first step is to parse a specifier into a matcher: +// +// m, err := Parse("linux") +// if err != nil { ... } +// +// Once you have a matcher, use it to match against the platform declared by a +// component, typically from an image or runtime. Since extracting an images +// platform is a little more involved, we'll use an example against the +// platform default: +// +// if ok := m.Match(Default()); !ok { /* doesn't match */ } +// +// This can be composed in loops for resolving runtimes or used as a filter for +// fetch and select images. +// +// More details of the specifier syntax and platform spec follow. +// +// Declaring Platform Support +// +// Components that have strict platform requirements should use the OCI +// platform specification to declare their support. 
Typically, this will be
+// images and runtimes, which should declare specifically which platforms they
+// support. This looks roughly as follows:
+//
+//	type Platform struct {
+//		Architecture string
+//		OS           string
+//		Variant      string
+//	}
+//
+// Most images and runtimes should at least set Architecture and OS, according
+// to their GOARCH and GOOS values, respectively (follow the OCI image
+// specification when in doubt). ARM should set the variant under certain
+// conditions, which are outlined below.
+//
+// Platform Specifiers
+//
+// While the OCI platform specifications provide a tool for components to
+// specify structured information, user input typically doesn't need the full
+// context and much can be inferred. To solve this problem, we introduced
+// "specifiers". A specifier has the format
+// `<os>|<arch>|<os>/<arch>[/<variant>]`. The user can provide either the
+// operating system or the architecture or both.
+//
+// An example of a common specifier is `linux/amd64`. If the host has a default
+// runtime that matches this, the user can simply provide the component that
+// matters. For example, if an image provides amd64 and arm64 support, the
+// operating system, `linux`, can be inferred, so they only have to provide
+// `arm64` or `amd64`. Similar behavior is implemented for operating systems,
+// where the architecture may be known but a runtime may support images from
+// different operating systems.
+//
+// Normalization
+//
+// Because not all users are familiar with the way the Go runtime represents
+// platforms, several normalizations have been provided to make this package
+// easier to use.
+//
+// The following are performed for architectures:
+//
+//	Value    Normalized
+//	aarch64  arm64
+//	armhf    arm
+//	armel    arm/v6
+//	i386     386
+//	x86_64   amd64
+//	x86-64   amd64
+//
+// We also normalize the operating system `macos` to `darwin`.
+//
+// ARM Support
+//
+// To qualify ARM architecture, the Variant field is used to qualify the arm
+// version. The most common arm version, v7, is represented without the variant
+// unless it is explicitly provided. This is treated as equivalent to armhf. A
+// previous architecture, armel, will be normalized to arm/v6.
+//
+// While these normalizations are provided, their support on arm platforms has
+// not yet been fully implemented and tested.
+package platforms
+
+import (
+	"regexp"
+	"runtime"
+	"strconv"
+	"strings"
+
+	"github.com/containerd/containerd/errdefs"
+	specs "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+)
+
+var (
+	specifierRe = regexp.MustCompile(`^[A-Za-z0-9_-]+$`)
+)
+
+// Matcher matches platform specifications, provided by an image or runtime.
+type Matcher interface {
+	Match(platform specs.Platform) bool
+}
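+
+// A hedged usage sketch (editor's addition, not part of the vendored file):
+// parse a user-supplied specifier, then match it against the host platform
+// using only functions defined in this package; the fmt call is illustrative.
+//
+//	p, err := Parse("linux/arm64")
+//	if err != nil {
+//		return err
+//	}
+//	if NewMatcher(p).Match(DefaultSpec()) {
+//		fmt.Println("host can run", Format(p))
+//	}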
+
+// NewMatcher returns a simple matcher based on the provided platform
+// specification. The returned matcher only looks for equality based on os,
+// architecture and variant.
+//
+// One may implement their own matcher if this doesn't provide the required
+// functionality.
+//
+// Applications should opt to use `Match` over directly parsing specifiers.
+func NewMatcher(platform specs.Platform) Matcher {
+	return &matcher{
+		Platform: Normalize(platform),
+	}
+}
+
+type matcher struct {
+	specs.Platform
+}
+
+func (m *matcher) Match(platform specs.Platform) bool {
+	normalized := Normalize(platform)
+	return m.OS == normalized.OS &&
+		m.Architecture == normalized.Architecture &&
+		m.Variant == normalized.Variant
+}
+
+func (m *matcher) String() string {
+	return Format(m.Platform)
+}
+
+// Parse parses the platform specifier syntax into a platform declaration.
+//
+// Platform specifiers are in the format `<os>|<arch>|<os>/<arch>[/<variant>]`.
+// The minimum required information for a platform specifier is the operating
+// system or architecture. If there is only a single string (no slashes), the
+// value will be matched against the known set of operating systems, then fall
+// back to the known set of architectures. The missing component will be
+// inferred based on the local environment.
+func Parse(specifier string) (specs.Platform, error) {
+	if strings.Contains(specifier, "*") {
+		// TODO(stevvooe): need to work out exact wildcard handling
+		return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: wildcards not yet supported", specifier)
+	}
+
+	parts := strings.Split(specifier, "/")
+
+	for _, part := range parts {
+		if !specifierRe.MatchString(part) {
+			return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q is an invalid component of %q: platform specifier component must match %q", part, specifier, specifierRe.String())
+		}
+	}
+
+	var p specs.Platform
+	switch len(parts) {
+	case 1:
+		// in this case, we will test that the value might be an OS, then look
+		// it up. If it is not known, we'll treat it as an architecture. Since
+		// we have very little information about the platform here, we are
+		// going to be a little more strict if we don't know about the argument
+		// value.
+		p.OS = normalizeOS(parts[0])
+		if isKnownOS(p.OS) {
+			// picks a default architecture
+			p.Architecture = runtime.GOARCH
+			if p.Architecture == "arm" && cpuVariant != "v7" {
+				p.Variant = cpuVariant
+			}
+
+			return p, nil
+		}
+
+		p.Architecture, p.Variant = normalizeArch(parts[0], "")
+		if p.Architecture == "arm" && p.Variant == "v7" {
+			p.Variant = ""
+		}
+		if isKnownArch(p.Architecture) {
+			p.OS = runtime.GOOS
+			return p, nil
+		}
+
+		return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: unknown operating system or architecture", specifier)
+	case 2:
+		// In this case, we treat as a regular os/arch pair. We don't care
+		// about whether or not we know of the platform.
+		p.OS = normalizeOS(parts[0])
+		p.Architecture, p.Variant = normalizeArch(parts[1], "")
+		if p.Architecture == "arm" && p.Variant == "v7" {
+			p.Variant = ""
+		}
+
+		return p, nil
+	case 3:
+		// we have a fully specified variant, this is rare
+		p.OS = normalizeOS(parts[0])
+		p.Architecture, p.Variant = normalizeArch(parts[1], parts[2])
+		if p.Architecture == "arm64" && p.Variant == "" {
+			p.Variant = "v8"
+		}
+
+		return p, nil
+	}
+
+	return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: cannot parse platform specifier", specifier)
+}
+
+// MustParse is like Parse but panics if the specifier cannot be parsed.
+// Simplifies initialization of global variables.
+func MustParse(specifier string) specs.Platform {
+	p, err := Parse(specifier)
+	if err != nil {
+		panic("platform: Parse(" + strconv.Quote(specifier) + "): " + err.Error())
+	}
+	return p
+}
+
+// Format returns a string specifier from the provided platform specification.
+func Format(platform specs.Platform) string {
+	if platform.OS == "" {
+		return "unknown"
+	}
+
+	return joinNotEmpty(platform.OS, platform.Architecture, platform.Variant)
+}
+
+func joinNotEmpty(s ...string) string {
+	var ss []string
+	for _, s := range s {
+		if s == "" {
+			continue
+		}
+
+		ss = append(ss, s)
+	}
+
+	return strings.Join(ss, "/")
+}
+
+// Normalize validates and translates the platform to the canonical value.
+//
+// For example, if "Aarch64" is encountered, we change it to "arm64" or if
+// "x86_64" is encountered, it becomes "amd64".
+func Normalize(platform specs.Platform) specs.Platform {
+	platform.OS = normalizeOS(platform.OS)
+	platform.Architecture, platform.Variant = normalizeArch(platform.Architecture, platform.Variant)
+
+	// these fields are deprecated, remove them
+	platform.OSFeatures = nil
+	platform.OSVersion = ""
+
+	return platform
+}
diff --git a/vendor/github.com/containerd/containerd/plugin/context.go b/vendor/github.com/containerd/containerd/plugin/context.go
new file mode 100644
index 00000000..75b7366f
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/plugin/context.go
@@ -0,0 +1,146 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package plugin
+
+import (
+	"context"
+	"path/filepath"
+
+	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/containerd/events/exchange"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+)
+
+// InitContext is used for plugin initialization
+type InitContext struct {
+	Context      context.Context
+	Root         string
+	State        string
+	Config       interface{}
+	Address      string
+	TTRPCAddress string
+	Events       *exchange.Exchange
+
+	Meta *Meta // plugins can fill in metadata at init.
+
+	plugins *Set
+}
+
+// NewContext returns a new plugin InitContext
+func NewContext(ctx context.Context, r *Registration, plugins *Set, root, state string) *InitContext {
+	return &InitContext{
+		Context: ctx,
+		Root:    filepath.Join(root, r.URI()),
+		State:   filepath.Join(state, r.URI()),
+		Meta: &Meta{
+			Exports: map[string]string{},
+		},
+		plugins: plugins,
+	}
+}
+
+// Get returns the first plugin by its type
+func (i *InitContext) Get(t Type) (interface{}, error) {
+	return i.plugins.Get(t)
+}
+
+// Meta contains information gathered from the registration and initialization
+// process.
+type Meta struct {
+	Platforms    []ocispec.Platform // platforms supported by plugin
+	Exports      map[string]string  // values exported by plugin
+	Capabilities []string           // feature switches for plugin
+}
+
+// Plugin represents an initialized plugin, used with an init context.
+type Plugin struct {
+	Registration *Registration // registration, as initialized
+	Config       interface{}   // config, as initialized
+	Meta         *Meta
+
+	instance interface{}
+	err      error // will be set if there was an error initializing the plugin
+}
+
+// Err returns the error from initialization.
+// It returns nil if no error was encountered.
+func (p *Plugin) Err() error {
+	return p.err
+}
+
+// Instance returns the instance and any initialization error of the plugin
+func (p *Plugin) Instance() (interface{}, error) {
+	return p.instance, p.err
+}
+
+// Set defines a plugin collection, used with InitContext.
+//
+// This maintains ordering and unique indexing over the set.
+//
+// After iteratively instantiating plugins, this set should represent the
+// ordered initialization set of plugins for a containerd instance.
+type Set struct {
+	ordered     []*Plugin // order of initialization
+	byTypeAndID map[Type]map[string]*Plugin
+}
+
+// NewPluginSet returns an initialized plugin set
+func NewPluginSet() *Set {
+	return &Set{
+		byTypeAndID: make(map[Type]map[string]*Plugin),
+	}
+}
+
+// Add a plugin to the set
+func (ps *Set) Add(p *Plugin) error {
+	if byID, typeok := ps.byTypeAndID[p.Registration.Type]; !typeok {
+		ps.byTypeAndID[p.Registration.Type] = map[string]*Plugin{
+			p.Registration.ID: p,
+		}
+	} else if _, idok := byID[p.Registration.ID]; !idok {
+		byID[p.Registration.ID] = p
+	} else {
+		return errors.Wrapf(errdefs.ErrAlreadyExists, "plugin %v already initialized", p.Registration.URI())
+	}
+
+	ps.ordered = append(ps.ordered, p)
+	return nil
+}
+
+// Get returns the first plugin by its type
+func (ps *Set) Get(t Type) (interface{}, error) {
+	for _, v := range ps.byTypeAndID[t] {
+		return v.Instance()
+	}
+	return nil, errors.Wrapf(errdefs.ErrNotFound, "no plugins registered for %s", t)
+}
+
+// GetAll plugins in the set
+func (i *InitContext) GetAll() []*Plugin {
+	return i.plugins.ordered
}
+
+// GetByType returns all plugins with the specific type.
+func (i *InitContext) GetByType(t Type) (map[string]*Plugin, error) {
+	p, ok := i.plugins.byTypeAndID[t]
+	if !ok {
+		return nil, errors.Wrapf(errdefs.ErrNotFound, "no plugins registered for %s", t)
+	}
+
+	return p, nil
+}
diff --git a/vendor/github.com/containerd/containerd/plugin/plugin.go b/vendor/github.com/containerd/containerd/plugin/plugin.go
new file mode 100644
index 00000000..c7d27241
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/plugin/plugin.go
@@ -0,0 +1,239 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package plugin
+
+import (
+	"fmt"
+	"sync"
+
+	"github.com/containerd/ttrpc"
+	"github.com/pkg/errors"
+	"google.golang.org/grpc"
+)
+
+var (
+	// ErrNoType is returned when no type is specified
+	ErrNoType = errors.New("plugin: no type")
+	// ErrNoPluginID is returned when no id is specified
+	ErrNoPluginID = errors.New("plugin: no id")
+	// ErrIDRegistered is returned when a duplicate id is already registered
+	ErrIDRegistered = errors.New("plugin: id already registered")
+	// ErrSkipPlugin is used when a plugin is not initialized and should not be loaded;
+	// this allows the plugin loader to differentiate between a plugin which is configured
+	// not to load and one that fails to load.
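+	// For example (editor's sketch, not upstream code), an InitFn may opt out
+	// on unsupported hosts with:
+	//
+	//	if runtime.GOOS != "linux" {
+	//		return nil, errors.Wrap(ErrSkipPlugin, "linux only")
+	//	}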
+ ErrSkipPlugin = errors.New("skip plugin") + + // ErrInvalidRequires will be thrown if the requirements for a plugin are + // defined in an invalid manner. + ErrInvalidRequires = errors.New("invalid requires") +) + +// IsSkipPlugin returns true if the error is skipping the plugin +func IsSkipPlugin(err error) bool { + return errors.Cause(err) == ErrSkipPlugin +} + +// Type is the type of the plugin +type Type string + +func (t Type) String() string { return string(t) } + +const ( + // InternalPlugin implements an internal plugin to containerd + InternalPlugin Type = "io.containerd.internal.v1" + // RuntimePlugin implements a runtime + RuntimePlugin Type = "io.containerd.runtime.v1" + // RuntimePluginV2 implements a runtime v2 + RuntimePluginV2 Type = "io.containerd.runtime.v2" + // ServicePlugin implements a internal service + ServicePlugin Type = "io.containerd.service.v1" + // GRPCPlugin implements a grpc service + GRPCPlugin Type = "io.containerd.grpc.v1" + // SnapshotPlugin implements a snapshotter + SnapshotPlugin Type = "io.containerd.snapshotter.v1" + // TaskMonitorPlugin implements a task monitor + TaskMonitorPlugin Type = "io.containerd.monitor.v1" + // DiffPlugin implements a differ + DiffPlugin Type = "io.containerd.differ.v1" + // MetadataPlugin implements a metadata store + MetadataPlugin Type = "io.containerd.metadata.v1" + // ContentPlugin implements a content store + ContentPlugin Type = "io.containerd.content.v1" + // GCPlugin implements garbage collection policy + GCPlugin Type = "io.containerd.gc.v1" +) + +const ( + // RuntimeLinuxV1 is the legacy linux runtime + RuntimeLinuxV1 = "io.containerd.runtime.v1.linux" + // RuntimeRuncV1 is the runc runtime that supports a single container + RuntimeRuncV1 = "io.containerd.runc.v1" + // RuntimeRuncV2 is the runc runtime that supports multiple containers per shim + RuntimeRuncV2 = "io.containerd.runc.v2" +) + +// Registration contains information for registering a plugin +type Registration struct { + // Type of the plugin + Type Type + // ID of the plugin + ID string + // Config specific to the plugin + Config interface{} + // Requires is a list of plugins that the registered plugin requires to be available + Requires []Type + + // InitFn is called when initializing a plugin. The registration and + // context are passed in. The init function may modify the registration to + // add exports, capabilities and platform support declarations. 
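+	//
+	// A hedged sketch (editor's addition) of a typical registration, where
+	// newExampleService is hypothetical:
+	//
+	//	plugin.Register(&plugin.Registration{
+	//		Type: plugin.GRPCPlugin,
+	//		ID:   "example",
+	//		InitFn: func(ic *plugin.InitContext) (interface{}, error) {
+	//			return newExampleService(ic.Context), nil
+	//		},
+	//	})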
+ InitFn func(*InitContext) (interface{}, error) + // Disable the plugin from loading + Disable bool +} + +// Init the registered plugin +func (r *Registration) Init(ic *InitContext) *Plugin { + p, err := r.InitFn(ic) + return &Plugin{ + Registration: r, + Config: ic.Config, + Meta: ic.Meta, + instance: p, + err: err, + } +} + +// URI returns the full plugin URI +func (r *Registration) URI() string { + return fmt.Sprintf("%s.%s", r.Type, r.ID) +} + +// Service allows GRPC services to be registered with the underlying server +type Service interface { + Register(*grpc.Server) error +} + +// TTRPCService allows TTRPC services to be registered with the underlying server +type TTRPCService interface { + RegisterTTRPC(*ttrpc.Server) error +} + +// TCPService allows GRPC services to be registered with the underlying tcp server +type TCPService interface { + RegisterTCP(*grpc.Server) error +} + +var register = struct { + sync.RWMutex + r []*Registration +}{} + +// Load loads all plugins at the provided path into containerd +func Load(path string) (err error) { + defer func() { + if v := recover(); v != nil { + rerr, ok := v.(error) + if !ok { + rerr = fmt.Errorf("%s", v) + } + err = rerr + } + }() + return loadPlugins(path) +} + +// Register allows plugins to register +func Register(r *Registration) { + register.Lock() + defer register.Unlock() + + if r.Type == "" { + panic(ErrNoType) + } + if r.ID == "" { + panic(ErrNoPluginID) + } + if err := checkUnique(r); err != nil { + panic(err) + } + + var last bool + for _, requires := range r.Requires { + if requires == "*" { + last = true + } + } + if last && len(r.Requires) != 1 { + panic(ErrInvalidRequires) + } + + register.r = append(register.r, r) +} + +func checkUnique(r *Registration) error { + for _, registered := range register.r { + if r.URI() == registered.URI() { + return errors.Wrap(ErrIDRegistered, r.URI()) + } + } + return nil +} + +// DisableFilter filters out disabled plugins +type DisableFilter func(r *Registration) bool + +// Graph returns an ordered list of registered plugins for initialization. +// Plugins in disableList specified by id will be disabled. +func Graph(filter DisableFilter) (ordered []*Registration) { + register.RLock() + defer register.RUnlock() + + for _, r := range register.r { + if filter(r) { + r.Disable = true + } + } + + added := map[*Registration]bool{} + for _, r := range register.r { + if r.Disable { + continue + } + children(r, added, &ordered) + if !added[r] { + ordered = append(ordered, r) + added[r] = true + } + } + return ordered +} + +func children(reg *Registration, added map[*Registration]bool, ordered *[]*Registration) { + for _, t := range reg.Requires { + for _, r := range register.r { + if !r.Disable && + r.URI() != reg.URI() && + (t == "*" || r.Type == t) { + children(r, added, ordered) + if !added[r] { + *ordered = append(*ordered, r) + added[r] = true + } + } + } + } +} diff --git a/vendor/github.com/containerd/containerd/plugin/plugin_go18.go b/vendor/github.com/containerd/containerd/plugin/plugin_go18.go new file mode 100644 index 00000000..927fe619 --- /dev/null +++ b/vendor/github.com/containerd/containerd/plugin/plugin_go18.go @@ -0,0 +1,62 @@ +// +build go1.8,!windows,amd64,!static_build,!gccgo + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package plugin + +import ( + "fmt" + "path/filepath" + "plugin" + "runtime" +) + +// loadPlugins loads all plugins for the OS and Arch +// that containerd is built for inside the provided path +func loadPlugins(path string) error { + abs, err := filepath.Abs(path) + if err != nil { + return err + } + pattern := filepath.Join(abs, fmt.Sprintf( + "*-%s-%s.%s", + runtime.GOOS, + runtime.GOARCH, + getLibExt(), + )) + libs, err := filepath.Glob(pattern) + if err != nil { + return err + } + for _, lib := range libs { + if _, err := plugin.Open(lib); err != nil { + return err + } + } + return nil +} + +// getLibExt returns a platform specific lib extension for +// the platform that containerd is running on +func getLibExt() string { + switch runtime.GOOS { + case "windows": + return "dll" + default: + return "so" + } +} diff --git a/vendor/github.com/containerd/containerd/plugin/plugin_other.go b/vendor/github.com/containerd/containerd/plugin/plugin_other.go new file mode 100644 index 00000000..0c5e1416 --- /dev/null +++ b/vendor/github.com/containerd/containerd/plugin/plugin_other.go @@ -0,0 +1,24 @@ +// +build !go1.8 windows !amd64 static_build gccgo + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package plugin + +func loadPlugins(path string) error { + // plugins not supported until 1.8 + return nil +} diff --git a/vendor/github.com/containerd/containerd/reference/docker/reference.go b/vendor/github.com/containerd/containerd/reference/docker/reference.go new file mode 100644 index 00000000..0998639b --- /dev/null +++ b/vendor/github.com/containerd/containerd/reference/docker/reference.go @@ -0,0 +1,797 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// Package docker provides a general type to represent any way of referencing images within the registry. +// Its main purpose is to abstract tags and digests (content-addressable hash). +// +// Grammar +// +// reference := name [ ":" tag ] [ "@" digest ] +// name := [domain '/'] path-component ['/' path-component]* +// domain := domain-component ['.' 
domain-component]* [':' port-number] +// domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/ +// port-number := /[0-9]+/ +// path-component := alpha-numeric [separator alpha-numeric]* +// alpha-numeric := /[a-z0-9]+/ +// separator := /[_.]|__|[-]*/ +// +// tag := /[\w][\w.-]{0,127}/ +// +// digest := digest-algorithm ":" digest-hex +// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]* +// digest-algorithm-separator := /[+.-_]/ +// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/ +// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value +// +// identifier := /[a-f0-9]{64}/ +// short-identifier := /[a-f0-9]{6,64}/ +package docker + +import ( + "errors" + "fmt" + "path" + "regexp" + "strings" + + "github.com/opencontainers/go-digest" +) + +const ( + // NameTotalLengthMax is the maximum total number of characters in a repository name. + NameTotalLengthMax = 255 +) + +var ( + // ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference. + ErrReferenceInvalidFormat = errors.New("invalid reference format") + + // ErrTagInvalidFormat represents an error while trying to parse a string as a tag. + ErrTagInvalidFormat = errors.New("invalid tag format") + + // ErrDigestInvalidFormat represents an error while trying to parse a string as a tag. + ErrDigestInvalidFormat = errors.New("invalid digest format") + + // ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters. + ErrNameContainsUppercase = errors.New("repository name must be lowercase") + + // ErrNameEmpty is returned for empty, invalid repository names. + ErrNameEmpty = errors.New("repository name must have at least one component") + + // ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax. + ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax) + + // ErrNameNotCanonical is returned when a name is not canonical. + ErrNameNotCanonical = errors.New("repository name must be canonical") +) + +// Reference is an opaque object reference identifier that may include +// modifiers such as a hostname, name, tag, and digest. +type Reference interface { + // String returns the full reference + String() string +} + +// Field provides a wrapper type for resolving correct reference types when +// working with encoding. +type Field struct { + reference Reference +} + +// AsField wraps a reference in a Field for encoding. +func AsField(reference Reference) Field { + return Field{reference} +} + +// Reference unwraps the reference type from the field to +// return the Reference object. This object should be +// of the appropriate type to further check for different +// reference types. +func (f Field) Reference() Reference { + return f.reference +} + +// MarshalText serializes the field to byte text which +// is the string of the reference. +func (f Field) MarshalText() (p []byte, err error) { + return []byte(f.reference.String()), nil +} + +// UnmarshalText parses text bytes by invoking the +// reference parser to ensure the appropriately +// typed reference object is wrapped by field. 
+func (f *Field) UnmarshalText(p []byte) error { + r, err := Parse(string(p)) + if err != nil { + return err + } + + f.reference = r + return nil +} + +// Named is an object with a full name +type Named interface { + Reference + Name() string +} + +// Tagged is an object which has a tag +type Tagged interface { + Reference + Tag() string +} + +// NamedTagged is an object including a name and tag. +type NamedTagged interface { + Named + Tag() string +} + +// Digested is an object which has a digest +// in which it can be referenced by +type Digested interface { + Reference + Digest() digest.Digest +} + +// Canonical reference is an object with a fully unique +// name including a name with domain and digest +type Canonical interface { + Named + Digest() digest.Digest +} + +// namedRepository is a reference to a repository with a name. +// A namedRepository has both domain and path components. +type namedRepository interface { + Named + Domain() string + Path() string +} + +// Domain returns the domain part of the Named reference +func Domain(named Named) string { + if r, ok := named.(namedRepository); ok { + return r.Domain() + } + domain, _ := splitDomain(named.Name()) + return domain +} + +// Path returns the name without the domain part of the Named reference +func Path(named Named) (name string) { + if r, ok := named.(namedRepository); ok { + return r.Path() + } + _, path := splitDomain(named.Name()) + return path +} + +func splitDomain(name string) (string, string) { + match := anchoredNameRegexp.FindStringSubmatch(name) + if len(match) != 3 { + return "", name + } + return match[1], match[2] +} + +// SplitHostname splits a named reference into a +// hostname and name string. If no valid hostname is +// found, the hostname is empty and the full value +// is returned as name +// DEPRECATED: Use Domain or Path +func SplitHostname(named Named) (string, string) { + if r, ok := named.(namedRepository); ok { + return r.Domain(), r.Path() + } + return splitDomain(named.Name()) +} + +// Parse parses s and returns a syntactically valid Reference. +// If an error was encountered it is returned, along with a nil Reference. +// NOTE: Parse will not handle short digests. +func Parse(s string) (Reference, error) { + matches := ReferenceRegexp.FindStringSubmatch(s) + if matches == nil { + if s == "" { + return nil, ErrNameEmpty + } + if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil { + return nil, ErrNameContainsUppercase + } + return nil, ErrReferenceInvalidFormat + } + + if len(matches[1]) > NameTotalLengthMax { + return nil, ErrNameTooLong + } + + var repo repository + + nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1]) + if len(nameMatch) == 3 { + repo.domain = nameMatch[1] + repo.path = nameMatch[2] + } else { + repo.domain = "" + repo.path = matches[1] + } + + ref := reference{ + namedRepository: repo, + tag: matches[2], + } + if matches[3] != "" { + var err error + ref.digest, err = digest.Parse(matches[3]) + if err != nil { + return nil, err + } + } + + r := getBestReferenceType(ref) + if r == nil { + return nil, ErrNameEmpty + } + + return r, nil +} + +// ParseNamed parses s and returns a syntactically valid reference implementing +// the Named interface. The reference must have a name and be in the canonical +// form, otherwise an error is returned. +// If an error was encountered it is returned, along with a nil Reference. +// NOTE: ParseNamed will not handle short digests. 
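+// For example (editor's note): ParseNamed("docker.io/library/busybox:latest")
+// succeeds, while ParseNamed("busybox:latest") returns ErrNameNotCanonical,
+// because the normalized form "docker.io/library/busybox:latest" differs from
+// the input.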
+func ParseNamed(s string) (Named, error) { + named, err := ParseNormalizedNamed(s) + if err != nil { + return nil, err + } + if named.String() != s { + return nil, ErrNameNotCanonical + } + return named, nil +} + +// WithName returns a named object representing the given string. If the input +// is invalid ErrReferenceInvalidFormat will be returned. +func WithName(name string) (Named, error) { + if len(name) > NameTotalLengthMax { + return nil, ErrNameTooLong + } + + match := anchoredNameRegexp.FindStringSubmatch(name) + if match == nil || len(match) != 3 { + return nil, ErrReferenceInvalidFormat + } + return repository{ + domain: match[1], + path: match[2], + }, nil +} + +// WithTag combines the name from "name" and the tag from "tag" to form a +// reference incorporating both the name and the tag. +func WithTag(name Named, tag string) (NamedTagged, error) { + if !anchoredTagRegexp.MatchString(tag) { + return nil, ErrTagInvalidFormat + } + var repo repository + if r, ok := name.(namedRepository); ok { + repo.domain = r.Domain() + repo.path = r.Path() + } else { + repo.path = name.Name() + } + if canonical, ok := name.(Canonical); ok { + return reference{ + namedRepository: repo, + tag: tag, + digest: canonical.Digest(), + }, nil + } + return taggedReference{ + namedRepository: repo, + tag: tag, + }, nil +} + +// WithDigest combines the name from "name" and the digest from "digest" to form +// a reference incorporating both the name and the digest. +func WithDigest(name Named, digest digest.Digest) (Canonical, error) { + if !anchoredDigestRegexp.MatchString(digest.String()) { + return nil, ErrDigestInvalidFormat + } + var repo repository + if r, ok := name.(namedRepository); ok { + repo.domain = r.Domain() + repo.path = r.Path() + } else { + repo.path = name.Name() + } + if tagged, ok := name.(Tagged); ok { + return reference{ + namedRepository: repo, + tag: tagged.Tag(), + digest: digest, + }, nil + } + return canonicalReference{ + namedRepository: repo, + digest: digest, + }, nil +} + +// TrimNamed removes any tag or digest from the named reference. 
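+// For example (editor's note), "docker.io/library/busybox:latest" becomes
+// "docker.io/library/busybox".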
+func TrimNamed(ref Named) Named { + domain, path := SplitHostname(ref) + return repository{ + domain: domain, + path: path, + } +} + +func getBestReferenceType(ref reference) Reference { + if ref.Name() == "" { + // Allow digest only references + if ref.digest != "" { + return digestReference(ref.digest) + } + return nil + } + if ref.tag == "" { + if ref.digest != "" { + return canonicalReference{ + namedRepository: ref.namedRepository, + digest: ref.digest, + } + } + return ref.namedRepository + } + if ref.digest == "" { + return taggedReference{ + namedRepository: ref.namedRepository, + tag: ref.tag, + } + } + + return ref +} + +type reference struct { + namedRepository + tag string + digest digest.Digest +} + +func (r reference) String() string { + return r.Name() + ":" + r.tag + "@" + r.digest.String() +} + +func (r reference) Tag() string { + return r.tag +} + +func (r reference) Digest() digest.Digest { + return r.digest +} + +type repository struct { + domain string + path string +} + +func (r repository) String() string { + return r.Name() +} + +func (r repository) Name() string { + if r.domain == "" { + return r.path + } + return r.domain + "/" + r.path +} + +func (r repository) Domain() string { + return r.domain +} + +func (r repository) Path() string { + return r.path +} + +type digestReference digest.Digest + +func (d digestReference) String() string { + return digest.Digest(d).String() +} + +func (d digestReference) Digest() digest.Digest { + return digest.Digest(d) +} + +type taggedReference struct { + namedRepository + tag string +} + +func (t taggedReference) String() string { + return t.Name() + ":" + t.tag +} + +func (t taggedReference) Tag() string { + return t.tag +} + +type canonicalReference struct { + namedRepository + digest digest.Digest +} + +func (c canonicalReference) String() string { + return c.Name() + "@" + c.digest.String() +} + +func (c canonicalReference) Digest() digest.Digest { + return c.digest +} + +var ( + // alphaNumericRegexp defines the alpha numeric atom, typically a + // component of names. This only allows lower case characters and digits. + alphaNumericRegexp = match(`[a-z0-9]+`) + + // separatorRegexp defines the separators allowed to be embedded in name + // components. This allow one period, one or two underscore and multiple + // dashes. + separatorRegexp = match(`(?:[._]|__|[-]*)`) + + // nameComponentRegexp restricts registry path component names to start + // with at least one letter or number, with following parts able to be + // separated by one period, one or two underscore and multiple dashes. + nameComponentRegexp = expression( + alphaNumericRegexp, + optional(repeated(separatorRegexp, alphaNumericRegexp))) + + // domainComponentRegexp restricts the registry domain component of a + // repository name to start with a component as defined by DomainRegexp + // and followed by an optional port. + domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`) + + // DomainRegexp defines the structure of potential domain components + // that may be part of image names. This is purposely a subset of what is + // allowed by DNS to ensure backwards compatibility with Docker image + // names. + DomainRegexp = expression( + domainComponentRegexp, + optional(repeated(literal(`.`), domainComponentRegexp)), + optional(literal(`:`), match(`[0-9]+`))) + + // TagRegexp matches valid tag names. From docker/docker:graph/tags.go. 
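+	// A tag starts with a word character and may contain up to 127 further
+	// word characters, dots and dashes, e.g. "latest" or "v1.2.3-rc.1"
+	// (editor's note).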
+ TagRegexp = match(`[\w][\w.-]{0,127}`) + + // anchoredTagRegexp matches valid tag names, anchored at the start and + // end of the matched string. + anchoredTagRegexp = anchored(TagRegexp) + + // DigestRegexp matches valid digests. + DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`) + + // anchoredDigestRegexp matches valid digests, anchored at the start and + // end of the matched string. + anchoredDigestRegexp = anchored(DigestRegexp) + + // NameRegexp is the format for the name component of references. The + // regexp has capturing groups for the domain and name part omitting + // the separating forward slash from either. + NameRegexp = expression( + optional(DomainRegexp, literal(`/`)), + nameComponentRegexp, + optional(repeated(literal(`/`), nameComponentRegexp))) + + // anchoredNameRegexp is used to parse a name value, capturing the + // domain and trailing components. + anchoredNameRegexp = anchored( + optional(capture(DomainRegexp), literal(`/`)), + capture(nameComponentRegexp, + optional(repeated(literal(`/`), nameComponentRegexp)))) + + // ReferenceRegexp is the full supported format of a reference. The regexp + // is anchored and has capturing groups for name, tag, and digest + // components. + ReferenceRegexp = anchored(capture(NameRegexp), + optional(literal(":"), capture(TagRegexp)), + optional(literal("@"), capture(DigestRegexp))) + + // IdentifierRegexp is the format for string identifier used as a + // content addressable identifier using sha256. These identifiers + // are like digests without the algorithm, since sha256 is used. + IdentifierRegexp = match(`([a-f0-9]{64})`) + + // ShortIdentifierRegexp is the format used to represent a prefix + // of an identifier. A prefix may be used to match a sha256 identifier + // within a list of trusted identifiers. + ShortIdentifierRegexp = match(`([a-f0-9]{6,64})`) + + // anchoredIdentifierRegexp is used to check or match an + // identifier value, anchored at start and end of string. + anchoredIdentifierRegexp = anchored(IdentifierRegexp) +) + +// match compiles the string to a regular expression. +var match = regexp.MustCompile + +// literal compiles s into a literal regular expression, escaping any regexp +// reserved characters. +func literal(s string) *regexp.Regexp { + re := match(regexp.QuoteMeta(s)) + + if _, complete := re.LiteralPrefix(); !complete { + panic("must be a literal") + } + + return re +} + +// expression defines a full expression, where each regular expression must +// follow the previous. +func expression(res ...*regexp.Regexp) *regexp.Regexp { + var s string + for _, re := range res { + s += re.String() + } + + return match(s) +} + +// optional wraps the expression in a non-capturing group and makes the +// production optional. +func optional(res ...*regexp.Regexp) *regexp.Regexp { + return match(group(expression(res...)).String() + `?`) +} + +// repeated wraps the regexp in a non-capturing group to get one or more +// matches. +func repeated(res ...*regexp.Regexp) *regexp.Regexp { + return match(group(expression(res...)).String() + `+`) +} + +// group wraps the regexp in a non-capturing group. +func group(res ...*regexp.Regexp) *regexp.Regexp { + return match(`(?:` + expression(res...).String() + `)`) +} + +// capture wraps the expression in a capturing group. +func capture(res ...*regexp.Regexp) *regexp.Regexp { + return match(`(` + expression(res...).String() + `)`) +} + +// anchored anchors the regular expression by adding start and end delimiters. 
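+// For example (editor's note), anchored(TagRegexp) compiles to
+// `^[\w][\w.-]{0,127}$`.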
+func anchored(res ...*regexp.Regexp) *regexp.Regexp { + return match(`^` + expression(res...).String() + `$`) +} + +var ( + legacyDefaultDomain = "index.docker.io" + defaultDomain = "docker.io" + officialRepoName = "library" + defaultTag = "latest" +) + +// normalizedNamed represents a name which has been +// normalized and has a familiar form. A familiar name +// is what is used in Docker UI. An example normalized +// name is "docker.io/library/ubuntu" and corresponding +// familiar name of "ubuntu". +type normalizedNamed interface { + Named + Familiar() Named +} + +// ParseNormalizedNamed parses a string into a named reference +// transforming a familiar name from Docker UI to a fully +// qualified reference. If the value may be an identifier +// use ParseAnyReference. +func ParseNormalizedNamed(s string) (Named, error) { + if ok := anchoredIdentifierRegexp.MatchString(s); ok { + return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s) + } + domain, remainder := splitDockerDomain(s) + var remoteName string + if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 { + remoteName = remainder[:tagSep] + } else { + remoteName = remainder + } + if strings.ToLower(remoteName) != remoteName { + return nil, errors.New("invalid reference format: repository name must be lowercase") + } + + ref, err := Parse(domain + "/" + remainder) + if err != nil { + return nil, err + } + named, isNamed := ref.(Named) + if !isNamed { + return nil, fmt.Errorf("reference %s has no name", ref.String()) + } + return named, nil +} + +// ParseDockerRef normalizes the image reference following the docker convention. This is added +// mainly for backward compatibility. +// The reference returned can only be either tagged or digested. For reference contains both tag +// and digest, the function returns digested reference, e.g. docker.io/library/busybox:latest@ +// sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa will be returned as +// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa. +func ParseDockerRef(ref string) (Named, error) { + named, err := ParseNormalizedNamed(ref) + if err != nil { + return nil, err + } + if _, ok := named.(NamedTagged); ok { + if canonical, ok := named.(Canonical); ok { + // The reference is both tagged and digested, only + // return digested. + newNamed, err := WithName(canonical.Name()) + if err != nil { + return nil, err + } + newCanonical, err := WithDigest(newNamed, canonical.Digest()) + if err != nil { + return nil, err + } + return newCanonical, nil + } + } + return TagNameOnly(named), nil +} + +// splitDockerDomain splits a repository name to domain and remotename string. +// If no valid domain is found, the default domain is used. Repository name +// needs to be already validated before. +func splitDockerDomain(name string) (domain, remainder string) { + i := strings.IndexRune(name, '/') + if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") { + domain, remainder = defaultDomain, name + } else { + domain, remainder = name[:i], name[i+1:] + } + if domain == legacyDefaultDomain { + domain = defaultDomain + } + if domain == defaultDomain && !strings.ContainsRune(remainder, '/') { + remainder = officialRepoName + "/" + remainder + } + return +} + +// familiarizeName returns a shortened version of the name familiar +// to to the Docker UI. Familiar names have the default domain +// "docker.io" and "library/" repository prefix removed. 
+// For example, "docker.io/library/redis" will have the familiar +// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp". +// Returns a familiarized named only reference. +func familiarizeName(named namedRepository) repository { + repo := repository{ + domain: named.Domain(), + path: named.Path(), + } + + if repo.domain == defaultDomain { + repo.domain = "" + // Handle official repositories which have the pattern "library/" + if split := strings.Split(repo.path, "/"); len(split) == 2 && split[0] == officialRepoName { + repo.path = split[1] + } + } + return repo +} + +func (r reference) Familiar() Named { + return reference{ + namedRepository: familiarizeName(r.namedRepository), + tag: r.tag, + digest: r.digest, + } +} + +func (r repository) Familiar() Named { + return familiarizeName(r) +} + +func (t taggedReference) Familiar() Named { + return taggedReference{ + namedRepository: familiarizeName(t.namedRepository), + tag: t.tag, + } +} + +func (c canonicalReference) Familiar() Named { + return canonicalReference{ + namedRepository: familiarizeName(c.namedRepository), + digest: c.digest, + } +} + +// TagNameOnly adds the default tag "latest" to a reference if it only has +// a repo name. +func TagNameOnly(ref Named) Named { + if IsNameOnly(ref) { + namedTagged, err := WithTag(ref, defaultTag) + if err != nil { + // Default tag must be valid, to create a NamedTagged + // type with non-validated input the WithTag function + // should be used instead + panic(err) + } + return namedTagged + } + return ref +} + +// ParseAnyReference parses a reference string as a possible identifier, +// full digest, or familiar name. +func ParseAnyReference(ref string) (Reference, error) { + if ok := anchoredIdentifierRegexp.MatchString(ref); ok { + return digestReference("sha256:" + ref), nil + } + if dgst, err := digest.Parse(ref); err == nil { + return digestReference(dgst), nil + } + + return ParseNormalizedNamed(ref) +} + +// IsNameOnly returns true if reference only contains a repo name. +func IsNameOnly(ref Named) bool { + if _, ok := ref.(NamedTagged); ok { + return false + } + if _, ok := ref.(Canonical); ok { + return false + } + return true +} + +// FamiliarName returns the familiar name string +// for the given named, familiarizing if needed. +func FamiliarName(ref Named) string { + if nn, ok := ref.(normalizedNamed); ok { + return nn.Familiar().Name() + } + return ref.Name() +} + +// FamiliarString returns the familiar string representation +// for the given reference, familiarizing if needed. +func FamiliarString(ref Reference) string { + if nn, ok := ref.(normalizedNamed); ok { + return nn.Familiar().String() + } + return ref.String() +} + +// FamiliarMatch reports whether ref matches the specified pattern. +// See https://godoc.org/path#Match for supported patterns. +func FamiliarMatch(pattern string, ref Reference) (bool, error) { + matched, err := path.Match(pattern, FamiliarString(ref)) + if namedRef, isNamed := ref.(Named); isNamed && !matched { + matched, _ = path.Match(pattern, FamiliarName(namedRef)) + } + return matched, err +} diff --git a/vendor/github.com/containerd/containerd/reference/reference.go b/vendor/github.com/containerd/containerd/reference/reference.go new file mode 100644 index 00000000..562ab0d4 --- /dev/null +++ b/vendor/github.com/containerd/containerd/reference/reference.go @@ -0,0 +1,162 @@ +/* + Copyright The containerd Authors. 
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package reference
+
+import (
+	"errors"
+	"fmt"
+	"net/url"
+	"path"
+	"regexp"
+	"strings"
+
+	digest "github.com/opencontainers/go-digest"
+)
+
+var (
+	// ErrInvalid is returned when there is an invalid reference
+	ErrInvalid = errors.New("invalid reference")
+	// ErrObjectRequired is returned when the object is required
+	ErrObjectRequired = errors.New("object required")
+	// ErrHostnameRequired is returned when the hostname is required
+	ErrHostnameRequired = errors.New("hostname required")
+)
+
+// Spec defines the main components of a reference specification.
+//
+// A reference specification is a schema-less URI parsed into common
+// components. The two main components, locator and object, are required to be
+// supported by remotes. It represents a superset of the naming defined in
+// docker's reference schema. It aims to be compatible but not prescriptive.
+//
+// While the interpretation of the components, locator and object, are up to
+// the remote, we define a few common parts, accessible via helper methods.
+//
+// The first is the hostname, which is part of the locator. This doesn't need
+// to map to a physical resource, but it must parse as a hostname. We refer to
+// this as the namespace.
+//
+// The other component made accessible by helper method is the digest. This is
+// part of the object identifier, always prefixed with an '@'. If present, the
+// remote may use the digest portion directly or resolve it against a prefix.
+// If the object does not include the `@` symbol, the return value for `Digest`
+// will be empty.
+type Spec struct {
+	// Locator is the host and path portion of the specification. The host
+	// portion may refer to an actual host or just a namespace of related
+	// images.
+	//
+	// Typically, the locator may be used to resolve the remote to fetch
+	// specific resources.
+	Locator string
+
+	// Object contains the identifier for the remote resource. Classically,
+	// this is a tag but can refer to anything in a remote. By convention, any
+	// portion that may be a partial or whole digest will be preceded by an
+	// `@`. Anything preceding the `@` will be referred to as the "tag".
+	//
+	// In practice, we will see this broken down into the following formats:
+	//
+	//	1. <tag>
+	//	2. <tag>@<digest spec>
+	//	3. @<digest spec>
+	//
+	// We define the tag to be anything except '@' and ':'. <digest spec> may
+	// be a full valid digest or shortened version, possibly with elided
+	// algorithm.
+	Object string
+}
+
+var splitRe = regexp.MustCompile(`[:@]`)
+
+// Parse parses the string into a structured ref.
+func Parse(s string) (Spec, error) {
+	u, err := url.Parse("dummy://" + s)
+	if err != nil {
+		return Spec{}, err
+	}
+
+	if u.Scheme != "dummy" {
+		return Spec{}, ErrInvalid
+	}
+
+	if u.Host == "" {
+		return Spec{}, ErrHostnameRequired
+	}
+
+	var object string
+
+	if idx := splitRe.FindStringIndex(u.Path); idx != nil {
+		// This allows us to retain the @ to signify digests or shortened digests in
+		// the object.
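+		// For example (editor's note), parsing "docker.io/library/redis:latest"
+		// yields Locator "docker.io/library/redis" and Object "latest", while
+		// "host/repo@sha256:…" keeps the "@" in the Object.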
+		object = u.Path[idx[0]:]
+		if object[:1] == ":" {
+			object = object[1:]
+		}
+		u.Path = u.Path[:idx[0]]
+	}
+
+	return Spec{
+		Locator: path.Join(u.Host, u.Path),
+		Object:  object,
+	}, nil
+}
+
+// Hostname returns the hostname portion of the locator.
+//
+// Remotes are not required to directly access the resources at this host. This
+// method is provided for convenience.
+func (r Spec) Hostname() string {
+	i := strings.Index(r.Locator, "/")
+
+	if i < 0 {
+		return r.Locator
+	}
+	return r.Locator[:i]
+}
+
+// Digest returns the digest portion of the reference spec. This may be a
+// partial or invalid digest, which may be used to lookup a complete digest.
+func (r Spec) Digest() digest.Digest {
+	_, dgst := SplitObject(r.Object)
+	return dgst
+}
+
+// String returns the normalized string for the ref.
+func (r Spec) String() string {
+	if r.Object == "" {
+		return r.Locator
+	}
+	if r.Object[:1] == "@" {
+		return fmt.Sprintf("%v%v", r.Locator, r.Object)
+	}
+
+	return fmt.Sprintf("%v:%v", r.Locator, r.Object)
+}
+
+// SplitObject provides two parts of the object spec, delimited by an `@`
+// symbol.
+//
+// Either may be empty and it is the caller's job to validate them
+// appropriately.
+func SplitObject(obj string) (tag string, dgst digest.Digest) {
+	parts := strings.SplitAfterN(obj, "@", 2)
+	if len(parts) < 2 {
+		return parts[0], ""
+	}
+	return parts[0], digest.Digest(parts[1])
+}
diff --git a/vendor/github.com/containerd/containerd/remotes/docker/auth.go b/vendor/github.com/containerd/containerd/remotes/docker/auth.go
new file mode 100644
index 00000000..70cfdea4
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/remotes/docker/auth.go
@@ -0,0 +1,198 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package docker
+
+import (
+	"net/http"
+	"sort"
+	"strings"
+)
+
+type authenticationScheme byte
+
+const (
+	basicAuth  authenticationScheme = 1 << iota // Defined in RFC 7617
+	digestAuth                                  // Defined in RFC 7616
+	bearerAuth                                  // Defined in RFC 6750
+)
+
+// challenge carries information from a WWW-Authenticate response header.
+// See RFC 2617.
+type challenge struct {
+	// scheme is the auth-scheme according to RFC 2617
+	scheme authenticationScheme
+
+	// parameters are the auth-params according to RFC 2617
+	parameters map[string]string
+}
+
+type byScheme []challenge
+
+func (bs byScheme) Len() int      { return len(bs) }
+func (bs byScheme) Swap(i, j int) { bs[i], bs[j] = bs[j], bs[i] }
+
+// Sort in priority order: token > digest > basic
+func (bs byScheme) Less(i, j int) bool { return bs[i].scheme > bs[j].scheme }
+
+// Octet types from RFC 2616.
+type octetType byte
+
+var octetTypes [256]octetType
+
+const (
+	isToken octetType = 1 << iota
+	isSpace
+)
+
+func init() {
+	// OCTET      = <any 8-bit sequence of data>
+	// CHAR       = <any US-ASCII character (octets 0 - 127)>
+	// CTL        = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
+	// CR         = <US-ASCII CR, carriage return (13)>
+	// LF         = <US-ASCII LF, linefeed (10)>
+	// SP         = <US-ASCII SP, space (32)>
+	// HT         = <US-ASCII HT, horizontal-tab (9)>
+	// <">        = <US-ASCII double-quote mark (34)>
+	// CRLF       = CR LF
+	// LWS        = [CRLF] 1*( SP | HT )
+	// TEXT       = <any OCTET except CTLs, but including LWS>
+	// separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
+	//              | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
+	// token      = 1*<any CHAR except CTLs or separators>
+	// qdtext     = <any TEXT except <">>
+
+	for c := 0; c < 256; c++ {
+		var t octetType
+		isCtl := c <= 31 || c == 127
+		isChar := 0 <= c && c <= 127
+		isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c))
+		if strings.ContainsRune(" \t\r\n", rune(c)) {
+			t |= isSpace
+		}
+		if isChar && !isCtl && !isSeparator {
+			t |= isToken
+		}
+		octetTypes[c] = t
+	}
+}
+
+func parseAuthHeader(header http.Header) []challenge {
+	challenges := []challenge{}
+	for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] {
+		v, p := parseValueAndParams(h)
+		var s authenticationScheme
+		switch v {
+		case "basic":
+			s = basicAuth
+		case "digest":
+			s = digestAuth
+		case "bearer":
+			s = bearerAuth
+		default:
+			continue
+		}
+		challenges = append(challenges, challenge{scheme: s, parameters: p})
+	}
+	sort.Stable(byScheme(challenges))
+	return challenges
+}
+
+func parseValueAndParams(header string) (value string, params map[string]string) {
+	params = make(map[string]string)
+	value, s := expectToken(header)
+	if value == "" {
+		return
+	}
+	value = strings.ToLower(value)
+	for {
+		var pkey string
+		pkey, s = expectToken(skipSpace(s))
+		if pkey == "" {
+			return
+		}
+		if !strings.HasPrefix(s, "=") {
+			return
+		}
+		var pvalue string
+		pvalue, s = expectTokenOrQuoted(s[1:])
+		if pvalue == "" {
+			return
+		}
+		pkey = strings.ToLower(pkey)
+		params[pkey] = pvalue
+		s = skipSpace(s)
+		if !strings.HasPrefix(s, ",") {
+			return
+		}
+		s = s[1:]
+	}
+}
+
+func skipSpace(s string) (rest string) {
+	i := 0
+	for ; i < len(s); i++ {
+		if octetTypes[s[i]]&isSpace == 0 {
+			break
+		}
+	}
+	return s[i:]
+}
+
+func expectToken(s string) (token, rest string) {
+	i := 0
+	for ; i < len(s); i++ {
+		if octetTypes[s[i]]&isToken == 0 {
+			break
+		}
+	}
+	return s[:i], s[i:]
+}
+
+func expectTokenOrQuoted(s string) (value string, rest string) {
+	if !strings.HasPrefix(s, "\"") {
+		return expectToken(s)
+	}
+	s = s[1:]
+	for i := 0; i < len(s); i++ {
+		switch s[i] {
+		case '"':
+			return s[:i], s[i+1:]
+		case '\\':
+			p := make([]byte, len(s)-1)
+			j := copy(p, s[:i])
+			escape := true
+			for i = i + 1; i < len(s); i++ {
+				b := s[i]
+				switch {
+				case escape:
+					escape = false
+					p[j] = b
+					j++
+				case b == '\\':
+					escape = true
+				case b == '"':
+					return string(p[:j]), s[i+1:]
+				default:
+					p[j] = b
+					j++
+				}
+			}
+			return "", ""
+		}
+	}
+	return "", ""
+}
diff --git a/vendor/github.com/containerd/containerd/remotes/docker/authorizer.go b/vendor/github.com/containerd/containerd/remotes/docker/authorizer.go
new file mode 100644
index 00000000..59d989ef
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/remotes/docker/authorizer.go
@@ -0,0 +1,482 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/ + +package docker + +import ( + "context" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" + "sync" + "time" + + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/log" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/net/context/ctxhttp" +) + +type dockerAuthorizer struct { + credentials func(string) (string, string, error) + + client *http.Client + header http.Header + mu sync.Mutex + + // indexed by host name + handlers map[string]*authHandler +} + +// NewAuthorizer creates a Docker authorizer using the provided function to +// get credentials for the token server or basic auth. +// Deprecated: Use NewDockerAuthorizer +func NewAuthorizer(client *http.Client, f func(string) (string, string, error)) Authorizer { + return NewDockerAuthorizer(WithAuthClient(client), WithAuthCreds(f)) +} + +type authorizerConfig struct { + credentials func(string) (string, string, error) + client *http.Client + header http.Header +} + +// AuthorizerOpt configures an authorizer +type AuthorizerOpt func(*authorizerConfig) + +// WithAuthClient provides the HTTP client for the authorizer +func WithAuthClient(client *http.Client) AuthorizerOpt { + return func(opt *authorizerConfig) { + opt.client = client + } +} + +// WithAuthCreds provides a credential function to the authorizer +func WithAuthCreds(creds func(string) (string, string, error)) AuthorizerOpt { + return func(opt *authorizerConfig) { + opt.credentials = creds + } +} + +// WithAuthHeader provides HTTP headers for authorization +func WithAuthHeader(hdr http.Header) AuthorizerOpt { + return func(opt *authorizerConfig) { + opt.header = hdr + } +} + +// NewDockerAuthorizer creates an authorizer using Docker's registry +// authentication spec. +// See https://docs.docker.com/registry/spec/auth/ +func NewDockerAuthorizer(opts ...AuthorizerOpt) Authorizer { + var ao authorizerConfig + for _, opt := range opts { + opt(&ao) + } + + if ao.client == nil { + ao.client = http.DefaultClient + } + + return &dockerAuthorizer{ + credentials: ao.credentials, + client: ao.client, + header: ao.header, + handlers: make(map[string]*authHandler), + } +} + +// Authorize handles auth request. +func (a *dockerAuthorizer) Authorize(ctx context.Context, req *http.Request) error { + // skip if there is no auth handler + ah := a.getAuthHandler(req.URL.Host) + if ah == nil { + return nil + } + + auth, err := ah.authorize(ctx) + if err != nil { + return err + } + + req.Header.Set("Authorization", auth) + return nil +} + +func (a *dockerAuthorizer) getAuthHandler(host string) *authHandler { + a.mu.Lock() + defer a.mu.Unlock() + + return a.handlers[host] +} + +func (a *dockerAuthorizer) AddResponses(ctx context.Context, responses []*http.Response) error { + last := responses[len(responses)-1] + host := last.Request.URL.Host + + a.mu.Lock() + defer a.mu.Unlock() + for _, c := range parseAuthHeader(last.Header) { + if c.scheme == bearerAuth { + if err := invalidAuthorization(c, responses); err != nil { + delete(a.handlers, host) + return err + } + + // reuse existing handler + // + // assume that one registry will return the common + // challenge information, including realm and service. + // and the resource scope is only different part + // which can be provided by each request. 
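+			//
+			// For illustration, a typical bearer challenge looks like
+			// (hostnames and scope are example values only):
+			//
+			//	Www-Authenticate: Bearer realm="https://auth.example.com/token",
+			//	    service="registry.example.com",scope="repository:foo/bar:pull"
+			//
+			// parseAuthHeader yields a challenge with scheme bearerAuth and
+			// parameters realm/service/scope; realm and service stay fixed
+			// per host while scope varies per request, which is why one
+			// handler per host is cached below.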
+ if _, ok := a.handlers[host]; ok { + return nil + } + + common, err := a.generateTokenOptions(ctx, host, c) + if err != nil { + return err + } + + a.handlers[host] = newAuthHandler(a.client, a.header, c.scheme, common) + return nil + } else if c.scheme == basicAuth && a.credentials != nil { + username, secret, err := a.credentials(host) + if err != nil { + return err + } + + if username != "" && secret != "" { + common := tokenOptions{ + username: username, + secret: secret, + } + + a.handlers[host] = newAuthHandler(a.client, a.header, c.scheme, common) + return nil + } + } + } + return errors.Wrap(errdefs.ErrNotImplemented, "failed to find supported auth scheme") +} + +func (a *dockerAuthorizer) generateTokenOptions(ctx context.Context, host string, c challenge) (tokenOptions, error) { + realm, ok := c.parameters["realm"] + if !ok { + return tokenOptions{}, errors.New("no realm specified for token auth challenge") + } + + realmURL, err := url.Parse(realm) + if err != nil { + return tokenOptions{}, errors.Wrap(err, "invalid token auth challenge realm") + } + + to := tokenOptions{ + realm: realmURL.String(), + service: c.parameters["service"], + } + + scope, ok := c.parameters["scope"] + if ok { + to.scopes = append(to.scopes, scope) + } else { + log.G(ctx).WithField("host", host).Debug("no scope specified for token auth challenge") + } + + if a.credentials != nil { + to.username, to.secret, err = a.credentials(host) + if err != nil { + return tokenOptions{}, err + } + } + return to, nil +} + +// authResult is used to control limit rate. +type authResult struct { + sync.WaitGroup + token string + err error +} + +// authHandler is used to handle auth request per registry server. +type authHandler struct { + sync.Mutex + + header http.Header + + client *http.Client + + // only support basic and bearer schemes + scheme authenticationScheme + + // common contains common challenge answer + common tokenOptions + + // scopedTokens caches token indexed by scopes, which used in + // bearer auth case + scopedTokens map[string]*authResult +} + +func newAuthHandler(client *http.Client, hdr http.Header, scheme authenticationScheme, opts tokenOptions) *authHandler { + return &authHandler{ + header: hdr, + client: client, + scheme: scheme, + common: opts, + scopedTokens: map[string]*authResult{}, + } +} + +func (ah *authHandler) authorize(ctx context.Context) (string, error) { + switch ah.scheme { + case basicAuth: + return ah.doBasicAuth(ctx) + case bearerAuth: + return ah.doBearerAuth(ctx) + default: + return "", errors.Wrap(errdefs.ErrNotImplemented, "failed to find supported auth scheme") + } +} + +func (ah *authHandler) doBasicAuth(ctx context.Context) (string, error) { + username, secret := ah.common.username, ah.common.secret + + if username == "" || secret == "" { + return "", fmt.Errorf("failed to handle basic auth because missing username or secret") + } + + auth := base64.StdEncoding.EncodeToString([]byte(username + ":" + secret)) + return fmt.Sprintf("Basic %s", auth), nil +} + +func (ah *authHandler) doBearerAuth(ctx context.Context) (string, error) { + // copy common tokenOptions + to := ah.common + + to.scopes = getTokenScopes(ctx, to.scopes) + + // Docs: https://docs.docker.com/registry/spec/auth/scope + scoped := strings.Join(to.scopes, " ") + + ah.Lock() + if r, exist := ah.scopedTokens[scoped]; exist { + ah.Unlock() + r.Wait() + return r.token, r.err + } + + // only one fetch token job + r := new(authResult) + r.Add(1) + ah.scopedTokens[scoped] = r + ah.Unlock() + + // fetch token for 
the resource scope + var ( + token string + err error + ) + if to.secret != "" { + // credential information is provided, use oauth POST endpoint + token, err = ah.fetchTokenWithOAuth(ctx, to) + err = errors.Wrap(err, "failed to fetch oauth token") + } else { + // do request anonymously + token, err = ah.fetchToken(ctx, to) + err = errors.Wrap(err, "failed to fetch anonymous token") + } + token = fmt.Sprintf("Bearer %s", token) + + r.token, r.err = token, err + r.Done() + return r.token, r.err +} + +type tokenOptions struct { + realm string + service string + scopes []string + username string + secret string +} + +type postTokenResponse struct { + AccessToken string `json:"access_token"` + RefreshToken string `json:"refresh_token"` + ExpiresIn int `json:"expires_in"` + IssuedAt time.Time `json:"issued_at"` + Scope string `json:"scope"` +} + +func (ah *authHandler) fetchTokenWithOAuth(ctx context.Context, to tokenOptions) (string, error) { + form := url.Values{} + if len(to.scopes) > 0 { + form.Set("scope", strings.Join(to.scopes, " ")) + } + form.Set("service", to.service) + // TODO: Allow setting client_id + form.Set("client_id", "containerd-client") + + if to.username == "" { + form.Set("grant_type", "refresh_token") + form.Set("refresh_token", to.secret) + } else { + form.Set("grant_type", "password") + form.Set("username", to.username) + form.Set("password", to.secret) + } + + req, err := http.NewRequest("POST", to.realm, strings.NewReader(form.Encode())) + if err != nil { + return "", err + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") + if ah.header != nil { + for k, v := range ah.header { + req.Header[k] = append(req.Header[k], v...) + } + } + + resp, err := ctxhttp.Do(ctx, ah.client, req) + if err != nil { + return "", err + } + defer resp.Body.Close() + + // Registries without support for POST may return 404 for POST /v2/token. + // As of September 2017, GCR is known to return 404. + // As of February 2018, JFrog Artifactory is known to return 401. + if (resp.StatusCode == 405 && to.username != "") || resp.StatusCode == 404 || resp.StatusCode == 401 { + return ah.fetchToken(ctx, to) + } else if resp.StatusCode < 200 || resp.StatusCode >= 400 { + b, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 64000)) // 64KB + log.G(ctx).WithFields(logrus.Fields{ + "status": resp.Status, + "body": string(b), + }).Debugf("token request failed") + // TODO: handle error body and write debug output + return "", errors.Errorf("unexpected status: %s", resp.Status) + } + + decoder := json.NewDecoder(resp.Body) + + var tr postTokenResponse + if err = decoder.Decode(&tr); err != nil { + return "", fmt.Errorf("unable to decode token response: %s", err) + } + + return tr.AccessToken, nil +} + +type getTokenResponse struct { + Token string `json:"token"` + AccessToken string `json:"access_token"` + ExpiresIn int `json:"expires_in"` + IssuedAt time.Time `json:"issued_at"` + RefreshToken string `json:"refresh_token"` +} + +// fetchToken fetches a token using a GET request +func (ah *authHandler) fetchToken(ctx context.Context, to tokenOptions) (string, error) { + req, err := http.NewRequest("GET", to.realm, nil) + if err != nil { + return "", err + } + + if ah.header != nil { + for k, v := range ah.header { + req.Header[k] = append(req.Header[k], v...) 
+		}
+	}
+
+	reqParams := req.URL.Query()
+
+	if to.service != "" {
+		reqParams.Add("service", to.service)
+	}
+
+	for _, scope := range to.scopes {
+		reqParams.Add("scope", scope)
+	}
+
+	if to.secret != "" {
+		req.SetBasicAuth(to.username, to.secret)
+	}
+
+	req.URL.RawQuery = reqParams.Encode()
+
+	resp, err := ctxhttp.Do(ctx, ah.client, req)
+	if err != nil {
+		return "", err
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
+		// TODO: handle error body and write debug output
+		return "", errors.Errorf("unexpected status: %s", resp.Status)
+	}
+
+	decoder := json.NewDecoder(resp.Body)
+
+	var tr getTokenResponse
+	if err = decoder.Decode(&tr); err != nil {
+		return "", fmt.Errorf("unable to decode token response: %s", err)
+	}
+
+	// `access_token` is equivalent to `token` and if both are specified
+	// the choice is undefined. Canonicalize `access_token` by sticking
+	// things in `token`.
+	if tr.AccessToken != "" {
+		tr.Token = tr.AccessToken
+	}
+
+	if tr.Token == "" {
+		return "", ErrNoToken
+	}
+
+	return tr.Token, nil
+}
+
+func invalidAuthorization(c challenge, responses []*http.Response) error {
+	errStr := c.parameters["error"]
+	if errStr == "" {
+		return nil
+	}
+
+	n := len(responses)
+	if n == 1 || (n > 1 && !sameRequest(responses[n-2].Request, responses[n-1].Request)) {
+		return nil
+	}
+
+	return errors.Wrapf(ErrInvalidAuthorization, "server message: %s", errStr)
+}
+
+func sameRequest(r1, r2 *http.Request) bool {
+	if r1.Method != r2.Method {
+		return false
+	}
+	if *r1.URL != *r2.URL {
+		return false
+	}
+	return true
+}
diff --git a/vendor/github.com/containerd/containerd/remotes/docker/converter.go b/vendor/github.com/containerd/containerd/remotes/docker/converter.go
new file mode 100644
index 00000000..43e6b372
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/remotes/docker/converter.go
@@ -0,0 +1,88 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package docker
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"fmt"
+
+	"github.com/containerd/containerd/content"
+	"github.com/containerd/containerd/images"
+	"github.com/containerd/containerd/log"
+	"github.com/containerd/containerd/remotes"
+	digest "github.com/opencontainers/go-digest"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+)
+
+// LegacyConfigMediaType should be replaced by OCI image spec.
+//
+// More detail: docker/distribution#1622
+const LegacyConfigMediaType = "application/octet-stream"
+
+// ConvertManifest changes application/octet-stream to the schema2 config
+// media type if needed.
+//
+// NOTE:
+// 1. the original manifest will be deleted by the next gc round.
+// 2. manifest lists are not covered.
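+//
+// For illustration (media type strings follow the Docker schema2 and OCI
+// image specs), a config descriptor written by an old client may carry
+//
+//	"config": { "mediaType": "application/octet-stream", ... }
+//
+// and is rewritten to
+//
+//	"config": { "mediaType": "application/vnd.docker.container.image.v1+json", ... }
+//
+// The manifest is then re-marshalled, so the returned descriptor carries a
+// recomputed digest and size.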
+func ConvertManifest(ctx context.Context, store content.Store, desc ocispec.Descriptor) (ocispec.Descriptor, error) { + if !(desc.MediaType == images.MediaTypeDockerSchema2Manifest || + desc.MediaType == ocispec.MediaTypeImageManifest) { + + log.G(ctx).Warnf("do nothing for media type: %s", desc.MediaType) + return desc, nil + } + + // read manifest data + mb, err := content.ReadBlob(ctx, store, desc) + if err != nil { + return ocispec.Descriptor{}, errors.Wrap(err, "failed to read index data") + } + + var manifest ocispec.Manifest + if err := json.Unmarshal(mb, &manifest); err != nil { + return ocispec.Descriptor{}, errors.Wrap(err, "failed to unmarshal data into manifest") + } + + // check config media type + if manifest.Config.MediaType != LegacyConfigMediaType { + return desc, nil + } + + manifest.Config.MediaType = images.MediaTypeDockerSchema2Config + data, err := json.MarshalIndent(manifest, "", " ") + if err != nil { + return ocispec.Descriptor{}, errors.Wrap(err, "failed to marshal manifest") + } + + // update manifest with gc labels + desc.Digest = digest.Canonical.FromBytes(data) + desc.Size = int64(len(data)) + + labels := map[string]string{} + for i, c := range append([]ocispec.Descriptor{manifest.Config}, manifest.Layers...) { + labels[fmt.Sprintf("containerd.io/gc.ref.content.%d", i)] = c.Digest.String() + } + + ref := remotes.MakeRefKey(ctx, desc) + if err := content.WriteBlob(ctx, store, ref, bytes.NewReader(data), desc, content.WithLabels(labels)); err != nil { + return ocispec.Descriptor{}, errors.Wrap(err, "failed to update content") + } + return desc, nil +} diff --git a/vendor/github.com/containerd/containerd/remotes/docker/errcode.go b/vendor/github.com/containerd/containerd/remotes/docker/errcode.go new file mode 100644 index 00000000..7d1e54f2 --- /dev/null +++ b/vendor/github.com/containerd/containerd/remotes/docker/errcode.go @@ -0,0 +1,283 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package docker + +import ( + "encoding/json" + "fmt" + "strings" +) + +// ErrorCoder is the base interface for ErrorCode and Error allowing +// users of each to just call ErrorCode to get the real ID of each +type ErrorCoder interface { + ErrorCode() ErrorCode +} + +// ErrorCode represents the error type. The errors are serialized via strings +// and the integer format may change and should *never* be exported. +type ErrorCode int + +var _ error = ErrorCode(0) + +// ErrorCode just returns itself +func (ec ErrorCode) ErrorCode() ErrorCode { + return ec +} + +// Error returns the ID/Value +func (ec ErrorCode) Error() string { + // NOTE(stevvooe): Cannot use message here since it may have unpopulated args. + return strings.ToLower(strings.Replace(ec.String(), "_", " ", -1)) +} + +// Descriptor returns the descriptor for the error code. 
+func (ec ErrorCode) Descriptor() ErrorDescriptor {
+	d, ok := errorCodeToDescriptors[ec]
+
+	if !ok {
+		return ErrorCodeUnknown.Descriptor()
+	}
+
+	return d
+}
+
+// String returns the canonical identifier for this error code.
+func (ec ErrorCode) String() string {
+	return ec.Descriptor().Value
+}
+
+// Message returns the human-readable error message for this error code.
+func (ec ErrorCode) Message() string {
+	return ec.Descriptor().Message
+}
+
+// MarshalText encodes the receiver into UTF-8-encoded text and returns the
+// result.
+func (ec ErrorCode) MarshalText() (text []byte, err error) {
+	return []byte(ec.String()), nil
+}
+
+// UnmarshalText decodes the form generated by MarshalText.
+func (ec *ErrorCode) UnmarshalText(text []byte) error {
+	desc, ok := idToDescriptors[string(text)]
+
+	if !ok {
+		desc = ErrorCodeUnknown.Descriptor()
+	}
+
+	*ec = desc.Code
+
+	return nil
+}
+
+// WithMessage creates a new Error struct based on the passed-in info and
+// overrides the Message property.
+func (ec ErrorCode) WithMessage(message string) Error {
+	return Error{
+		Code:    ec,
+		Message: message,
+	}
+}
+
+// WithDetail creates a new Error struct based on the passed-in info and
+// sets the Detail property appropriately.
+func (ec ErrorCode) WithDetail(detail interface{}) Error {
+	return Error{
+		Code:    ec,
+		Message: ec.Message(),
+	}.WithDetail(detail)
+}
+
+// WithArgs creates a new Error struct and sets the Args slice.
+func (ec ErrorCode) WithArgs(args ...interface{}) Error {
+	return Error{
+		Code:    ec,
+		Message: ec.Message(),
+	}.WithArgs(args...)
+}
+
+// Error provides a wrapper around ErrorCode with extra Details provided.
+type Error struct {
+	Code    ErrorCode   `json:"code"`
+	Message string      `json:"message"`
+	Detail  interface{} `json:"detail,omitempty"`
+
+	// TODO(duglin): See if we need an "args" property so we can do the
+	// variable substitution right before showing the message to the user
+}
+
+var _ error = Error{}
+
+// ErrorCode returns the ID/Value of this Error
+func (e Error) ErrorCode() ErrorCode {
+	return e.Code
+}
+
+// Error returns a human-readable representation of the error.
+func (e Error) Error() string {
+	return fmt.Sprintf("%s: %s", e.Code.Error(), e.Message)
+}
+
+// WithDetail will return a new Error, based on the current one, but with
+// some Detail info added.
+func (e Error) WithDetail(detail interface{}) Error {
+	return Error{
+		Code:    e.Code,
+		Message: e.Message,
+		Detail:  detail,
+	}
+}
+
+// WithArgs uses the passed-in list of interface{} as the substitution
+// variables in the Error's Message string, but returns a new Error.
+func (e Error) WithArgs(args ...interface{}) Error {
+	return Error{
+		Code:    e.Code,
+		Message: fmt.Sprintf(e.Code.Message(), args...),
+		Detail:  e.Detail,
+	}
+}
+
+// ErrorDescriptor provides relevant information about a given error code.
+type ErrorDescriptor struct {
+	// Code is the error code that this descriptor describes.
+	Code ErrorCode
+
+	// Value provides a unique, string key, often capitalized with
+	// underscores, to identify the error code. This value is used as the
+	// keyed value when serializing api errors.
+	Value string
+
+	// Message is a short, human-readable description of the error condition
+	// included in API responses.
+	Message string
+
+	// Description provides a complete account of the error's purpose,
+	// suitable for use in documentation.
+	Description string
+
+	// HTTPStatusCode provides the http status code that is associated with
+	// this error condition.
+	HTTPStatusCode int
+}
+
+// ParseErrorCode returns the error code registered for the given string
+// value. `ErrorCodeUnknown` will be returned if the error is not known.
+func ParseErrorCode(value string) ErrorCode {
+	ed, ok := idToDescriptors[value]
+	if ok {
+		return ed.Code
+	}
+
+	return ErrorCodeUnknown
+}
+
+// Errors provides the envelope for multiple errors and a few sugar methods
+// for use within the application.
+type Errors []error
+
+var _ error = Errors{}
+
+func (errs Errors) Error() string {
+	switch len(errs) {
+	case 0:
+		return ""
+	case 1:
+		return errs[0].Error()
+	default:
+		msg := "errors:\n"
+		for _, err := range errs {
+			msg += err.Error() + "\n"
+		}
+		return msg
+	}
+}
+
+// Len returns the current number of errors.
+func (errs Errors) Len() int {
+	return len(errs)
+}
+
+// MarshalJSON converts a slice of error, ErrorCode or Error into a
+// slice of Error, then serializes it.
+func (errs Errors) MarshalJSON() ([]byte, error) {
+	var tmpErrs struct {
+		Errors []Error `json:"errors,omitempty"`
+	}
+
+	for _, daErr := range errs {
+		var err Error
+
+		switch daErr := daErr.(type) {
+		case ErrorCode:
+			err = daErr.WithDetail(nil)
+		case Error:
+			err = daErr
+		default:
+			err = ErrorCodeUnknown.WithDetail(daErr)
+		}
+
+		// If the Error struct was set up and the Message field was left
+		// unset (meaning it's "") then grab it from the ErrCode.
+		msg := err.Message
+		if msg == "" {
+			msg = err.Code.Message()
+		}
+
+		tmpErrs.Errors = append(tmpErrs.Errors, Error{
+			Code:    err.Code,
+			Message: msg,
+			Detail:  err.Detail,
+		})
+	}
+
+	return json.Marshal(tmpErrs)
+}
+
+// UnmarshalJSON deserializes []Error and then converts it into a slice of
+// Error or ErrorCode.
+func (errs *Errors) UnmarshalJSON(data []byte) error {
+	var tmpErrs struct {
+		Errors []Error
+	}
+
+	if err := json.Unmarshal(data, &tmpErrs); err != nil {
+		return err
+	}
+
+	var newErrs Errors
+	for _, daErr := range tmpErrs.Errors {
+		// If Message is empty or exactly matches the Code's message string
+		// then just use the Code, no need for a full Error struct
+		if daErr.Detail == nil && (daErr.Message == "" || daErr.Message == daErr.Code.Message()) {
+			// Errors without details get converted to ErrorCode
+			newErrs = append(newErrs, daErr.Code)
+		} else {
+			// Errors with details are untouched
+			newErrs = append(newErrs, Error{
+				Code:    daErr.Code,
+				Message: daErr.Message,
+				Detail:  daErr.Detail,
+			})
+		}
+	}
+
+	*errs = newErrs
+	return nil
+}
diff --git a/vendor/github.com/containerd/containerd/remotes/docker/errdesc.go b/vendor/github.com/containerd/containerd/remotes/docker/errdesc.go
new file mode 100644
index 00000000..b2bd4d82
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/remotes/docker/errdesc.go
@@ -0,0 +1,154 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/ + +package docker + +import ( + "fmt" + "net/http" + "sort" + "sync" +) + +var ( + errorCodeToDescriptors = map[ErrorCode]ErrorDescriptor{} + idToDescriptors = map[string]ErrorDescriptor{} + groupToDescriptors = map[string][]ErrorDescriptor{} +) + +var ( + // ErrorCodeUnknown is a generic error that can be used as a last + // resort if there is no situation-specific error message that can be used + ErrorCodeUnknown = Register("errcode", ErrorDescriptor{ + Value: "UNKNOWN", + Message: "unknown error", + Description: `Generic error returned when the error does not have an + API classification.`, + HTTPStatusCode: http.StatusInternalServerError, + }) + + // ErrorCodeUnsupported is returned when an operation is not supported. + ErrorCodeUnsupported = Register("errcode", ErrorDescriptor{ + Value: "UNSUPPORTED", + Message: "The operation is unsupported.", + Description: `The operation was unsupported due to a missing + implementation or invalid set of parameters.`, + HTTPStatusCode: http.StatusMethodNotAllowed, + }) + + // ErrorCodeUnauthorized is returned if a request requires + // authentication. + ErrorCodeUnauthorized = Register("errcode", ErrorDescriptor{ + Value: "UNAUTHORIZED", + Message: "authentication required", + Description: `The access controller was unable to authenticate + the client. Often this will be accompanied by a + Www-Authenticate HTTP response header indicating how to + authenticate.`, + HTTPStatusCode: http.StatusUnauthorized, + }) + + // ErrorCodeDenied is returned if a client does not have sufficient + // permission to perform an action. + ErrorCodeDenied = Register("errcode", ErrorDescriptor{ + Value: "DENIED", + Message: "requested access to the resource is denied", + Description: `The access controller denied access for the + operation on a resource.`, + HTTPStatusCode: http.StatusForbidden, + }) + + // ErrorCodeUnavailable provides a common error to report unavailability + // of a service or endpoint. + ErrorCodeUnavailable = Register("errcode", ErrorDescriptor{ + Value: "UNAVAILABLE", + Message: "service unavailable", + Description: "Returned when a service is not available", + HTTPStatusCode: http.StatusServiceUnavailable, + }) + + // ErrorCodeTooManyRequests is returned if a client attempts too many + // times to contact a service endpoint. 
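+	//
+	// A registry typically surfaces this as an HTTP 429 whose body is an
+	// error envelope such as (illustrative):
+	//
+	//	{"errors":[{"code":"TOOMANYREQUESTS","message":"too many requests"}]}
+	//
+	// which Errors.UnmarshalJSON in errcode.go collapses back to this bare
+	// ErrorCode, since the message matches the registered one and there is
+	// no detail.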
+ ErrorCodeTooManyRequests = Register("errcode", ErrorDescriptor{ + Value: "TOOMANYREQUESTS", + Message: "too many requests", + Description: `Returned when a client attempts to contact a + service too many times`, + HTTPStatusCode: http.StatusTooManyRequests, + }) +) + +var nextCode = 1000 +var registerLock sync.Mutex + +// Register will make the passed-in error known to the environment and +// return a new ErrorCode +func Register(group string, descriptor ErrorDescriptor) ErrorCode { + registerLock.Lock() + defer registerLock.Unlock() + + descriptor.Code = ErrorCode(nextCode) + + if _, ok := idToDescriptors[descriptor.Value]; ok { + panic(fmt.Sprintf("ErrorValue %q is already registered", descriptor.Value)) + } + if _, ok := errorCodeToDescriptors[descriptor.Code]; ok { + panic(fmt.Sprintf("ErrorCode %v is already registered", descriptor.Code)) + } + + groupToDescriptors[group] = append(groupToDescriptors[group], descriptor) + errorCodeToDescriptors[descriptor.Code] = descriptor + idToDescriptors[descriptor.Value] = descriptor + + nextCode++ + return descriptor.Code +} + +type byValue []ErrorDescriptor + +func (a byValue) Len() int { return len(a) } +func (a byValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byValue) Less(i, j int) bool { return a[i].Value < a[j].Value } + +// GetGroupNames returns the list of Error group names that are registered +func GetGroupNames() []string { + keys := []string{} + + for k := range groupToDescriptors { + keys = append(keys, k) + } + sort.Strings(keys) + return keys +} + +// GetErrorCodeGroup returns the named group of error descriptors +func GetErrorCodeGroup(name string) []ErrorDescriptor { + desc := groupToDescriptors[name] + sort.Sort(byValue(desc)) + return desc +} + +// GetErrorAllDescriptors returns a slice of all ErrorDescriptors that are +// registered, irrespective of what group they're in +func GetErrorAllDescriptors() []ErrorDescriptor { + result := []ErrorDescriptor{} + + for _, group := range GetGroupNames() { + result = append(result, GetErrorCodeGroup(group)...) + } + sort.Sort(byValue(result)) + return result +} diff --git a/vendor/github.com/containerd/containerd/remotes/docker/fetcher.go b/vendor/github.com/containerd/containerd/remotes/docker/fetcher.go new file mode 100644 index 00000000..55c01bea --- /dev/null +++ b/vendor/github.com/containerd/containerd/remotes/docker/fetcher.go @@ -0,0 +1,202 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package docker + +import ( + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" + + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/log" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +type dockerFetcher struct { + *dockerBase +} + +func (r dockerFetcher) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) { + ctx = log.WithLogger(ctx, log.G(ctx).WithField("digest", desc.Digest)) + + hosts := r.filterHosts(HostCapabilityPull) + if len(hosts) == 0 { + return nil, errors.Wrap(errdefs.ErrNotFound, "no pull hosts") + } + + ctx, err := contextWithRepositoryScope(ctx, r.refspec, false) + if err != nil { + return nil, err + } + + return newHTTPReadSeeker(desc.Size, func(offset int64) (io.ReadCloser, error) { + // firstly try fetch via external urls + for _, us := range desc.URLs { + ctx = log.WithLogger(ctx, log.G(ctx).WithField("url", us)) + + u, err := url.Parse(us) + if err != nil { + log.G(ctx).WithError(err).Debug("failed to parse") + continue + } + log.G(ctx).Debug("trying alternative url") + + // Try this first, parse it + host := RegistryHost{ + Client: http.DefaultClient, + Host: u.Host, + Scheme: u.Scheme, + Path: u.Path, + Capabilities: HostCapabilityPull, + } + req := r.request(host, http.MethodGet) + // Strip namespace from base + req.path = u.Path + if u.RawQuery != "" { + req.path = req.path + "?" + u.RawQuery + } + + rc, err := r.open(ctx, req, desc.MediaType, offset) + if err != nil { + if errdefs.IsNotFound(err) { + continue // try one of the other urls. + } + + return nil, err + } + + return rc, nil + } + + // Try manifests endpoints for manifests types + switch desc.MediaType { + case images.MediaTypeDockerSchema2Manifest, images.MediaTypeDockerSchema2ManifestList, + images.MediaTypeDockerSchema1Manifest, + ocispec.MediaTypeImageManifest, ocispec.MediaTypeImageIndex: + + var firstErr error + for _, host := range r.hosts { + req := r.request(host, http.MethodGet, "manifests", desc.Digest.String()) + + rc, err := r.open(ctx, req, desc.MediaType, offset) + if err != nil { + // Store the error for referencing later + if firstErr == nil { + firstErr = err + } + continue // try another host + } + + return rc, nil + } + + return nil, firstErr + } + + // Finally use blobs endpoints + var firstErr error + for _, host := range r.hosts { + req := r.request(host, http.MethodGet, "blobs", desc.Digest.String()) + + rc, err := r.open(ctx, req, desc.MediaType, offset) + if err != nil { + // Store the error for referencing later + if firstErr == nil { + firstErr = err + } + continue // try another host + } + + return rc, nil + } + + if errdefs.IsNotFound(firstErr) { + firstErr = errors.Wrapf(errdefs.ErrNotFound, + "could not fetch content descriptor %v (%v) from remote", + desc.Digest, desc.MediaType) + } + + return nil, firstErr + + }) +} + +func (r dockerFetcher) open(ctx context.Context, req *request, mediatype string, offset int64) (io.ReadCloser, error) { + req.header.Set("Accept", strings.Join([]string{mediatype, `*/*`}, ", ")) + + if offset > 0 { + // Note: "Accept-Ranges: bytes" cannot be trusted as some endpoints + // will return the header without supporting the range. The content + // range must always be checked. 
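+		//
+		// For illustration, resuming at offset 1024 of a 2048-byte blob:
+		//
+		//	request:  Range: bytes=1024-
+		//	response: HTTP/1.1 206 Partial Content
+		//	          Content-Range: bytes 1024-2047/2048
+		//
+		// A 200 response carrying a wrong Content-Range is rejected below,
+		// while a 200 with no Content-Range at all is handled by discarding
+		// the first offset bytes of the body.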
+ req.header.Set("Range", fmt.Sprintf("bytes=%d-", offset)) + } + + resp, err := req.doWithRetries(ctx, nil) + if err != nil { + return nil, err + } + + if resp.StatusCode > 299 { + // TODO(stevvooe): When doing a offset specific request, we should + // really distinguish between a 206 and a 200. In the case of 200, we + // can discard the bytes, hiding the seek behavior from the + // implementation. + defer resp.Body.Close() + + if resp.StatusCode == http.StatusNotFound { + return nil, errors.Wrapf(errdefs.ErrNotFound, "content at %v not found", req.String()) + } + var registryErr Errors + if err := json.NewDecoder(resp.Body).Decode(®istryErr); err != nil || registryErr.Len() < 1 { + return nil, errors.Errorf("unexpected status code %v: %v", req.String(), resp.Status) + } + return nil, errors.Errorf("unexpected status code %v: %s - Server message: %s", req.String(), resp.Status, registryErr.Error()) + } + if offset > 0 { + cr := resp.Header.Get("content-range") + if cr != "" { + if !strings.HasPrefix(cr, fmt.Sprintf("bytes %d-", offset)) { + return nil, errors.Errorf("unhandled content range in response: %v", cr) + + } + } else { + // TODO: Should any cases where use of content range + // without the proper header be considered? + // 206 responses? + + // Discard up to offset + // Could use buffer pool here but this case should be rare + n, err := io.Copy(ioutil.Discard, io.LimitReader(resp.Body, offset)) + if err != nil { + return nil, errors.Wrap(err, "failed to discard to offset") + } + if n != offset { + return nil, errors.Errorf("unable to discard to offset") + } + + } + } + + return resp.Body, nil +} diff --git a/vendor/github.com/containerd/containerd/remotes/docker/handler.go b/vendor/github.com/containerd/containerd/remotes/docker/handler.go new file mode 100644 index 00000000..529cfbc2 --- /dev/null +++ b/vendor/github.com/containerd/containerd/remotes/docker/handler.go @@ -0,0 +1,154 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package docker + +import ( + "context" + "fmt" + "net/url" + "strings" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/labels" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/reference" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +var ( + // labelDistributionSource describes the source blob comes from. + labelDistributionSource = "containerd.io/distribution.source" +) + +// AppendDistributionSourceLabel updates the label of blob with distribution source. 
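+//
+// For example (illustrative reference), pulling docker.io/library/ubuntu
+// leaves blobs labelled
+//
+//	containerd.io/distribution.source.docker.io = "library/ubuntu"
+//
+// Pulling the same blob through another repository appends to the sorted,
+// comma-separated list, e.g. "library/other,library/ubuntu".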
+func AppendDistributionSourceLabel(manager content.Manager, ref string) (images.HandlerFunc, error) { + refspec, err := reference.Parse(ref) + if err != nil { + return nil, err + } + + u, err := url.Parse("dummy://" + refspec.Locator) + if err != nil { + return nil, err + } + + source, repo := u.Hostname(), strings.TrimPrefix(u.Path, "/") + return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + info, err := manager.Info(ctx, desc.Digest) + if err != nil { + return nil, err + } + + key := distributionSourceLabelKey(source) + + originLabel := "" + if info.Labels != nil { + originLabel = info.Labels[key] + } + value := appendDistributionSourceLabel(originLabel, repo) + + // The repo name has been limited under 256 and the distribution + // label might hit the limitation of label size, when blob data + // is used as the very, very common layer. + if err := labels.Validate(key, value); err != nil { + log.G(ctx).Warnf("skip to append distribution label: %s", err) + return nil, nil + } + + info = content.Info{ + Digest: desc.Digest, + Labels: map[string]string{ + key: value, + }, + } + _, err = manager.Update(ctx, info, fmt.Sprintf("labels.%s", key)) + return nil, err + }, nil +} + +func appendDistributionSourceLabel(originLabel, repo string) string { + repos := []string{} + if originLabel != "" { + repos = strings.Split(originLabel, ",") + } + repos = append(repos, repo) + + // use empty string to present duplicate items + for i := 1; i < len(repos); i++ { + tmp, j := repos[i], i-1 + for ; j >= 0 && repos[j] >= tmp; j-- { + if repos[j] == tmp { + tmp = "" + } + repos[j+1] = repos[j] + } + repos[j+1] = tmp + } + + i := 0 + for ; i < len(repos) && repos[i] == ""; i++ { + } + + return strings.Join(repos[i:], ",") +} + +func distributionSourceLabelKey(source string) string { + return fmt.Sprintf("%s.%s", labelDistributionSource, source) +} + +// selectRepositoryMountCandidate will select the repo which has longest +// common prefix components as the candidate. +func selectRepositoryMountCandidate(refspec reference.Spec, sources map[string]string) string { + u, err := url.Parse("dummy://" + refspec.Locator) + if err != nil { + // NOTE: basically, it won't be error here + return "" + } + + source, target := u.Hostname(), strings.TrimPrefix(u.Path, "/") + repoLabel, ok := sources[distributionSourceLabelKey(source)] + if !ok || repoLabel == "" { + return "" + } + + n, match := 0, "" + components := strings.Split(target, "/") + for _, repo := range strings.Split(repoLabel, ",") { + // the target repo is not a candidate + if repo == target { + continue + } + + if l := commonPrefixComponents(components, repo); l >= n { + n, match = l, repo + } + } + return match +} + +func commonPrefixComponents(components []string, target string) int { + targetComponents := strings.Split(target, "/") + + i := 0 + for ; i < len(components) && i < len(targetComponents); i++ { + if components[i] != targetComponents[i] { + break + } + } + return i +} diff --git a/vendor/github.com/containerd/containerd/remotes/docker/httpreadseeker.go b/vendor/github.com/containerd/containerd/remotes/docker/httpreadseeker.go new file mode 100644 index 00000000..9175b6a7 --- /dev/null +++ b/vendor/github.com/containerd/containerd/remotes/docker/httpreadseeker.go @@ -0,0 +1,144 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package docker + +import ( + "bytes" + "io" + "io/ioutil" + + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/log" + "github.com/pkg/errors" +) + +type httpReadSeeker struct { + size int64 + offset int64 + rc io.ReadCloser + open func(offset int64) (io.ReadCloser, error) + closed bool +} + +func newHTTPReadSeeker(size int64, open func(offset int64) (io.ReadCloser, error)) (io.ReadCloser, error) { + return &httpReadSeeker{ + size: size, + open: open, + }, nil +} + +func (hrs *httpReadSeeker) Read(p []byte) (n int, err error) { + if hrs.closed { + return 0, io.EOF + } + + rd, err := hrs.reader() + if err != nil { + return 0, err + } + + n, err = rd.Read(p) + hrs.offset += int64(n) + return +} + +func (hrs *httpReadSeeker) Close() error { + if hrs.closed { + return nil + } + hrs.closed = true + if hrs.rc != nil { + return hrs.rc.Close() + } + + return nil +} + +func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) { + if hrs.closed { + return 0, errors.Wrap(errdefs.ErrUnavailable, "Fetcher.Seek: closed") + } + + abs := hrs.offset + switch whence { + case io.SeekStart: + abs = offset + case io.SeekCurrent: + abs += offset + case io.SeekEnd: + if hrs.size == -1 { + return 0, errors.Wrap(errdefs.ErrUnavailable, "Fetcher.Seek: unknown size, cannot seek from end") + } + abs = hrs.size + offset + default: + return 0, errors.Wrap(errdefs.ErrInvalidArgument, "Fetcher.Seek: invalid whence") + } + + if abs < 0 { + return 0, errors.Wrapf(errdefs.ErrInvalidArgument, "Fetcher.Seek: negative offset") + } + + if abs != hrs.offset { + if hrs.rc != nil { + if err := hrs.rc.Close(); err != nil { + log.L.WithError(err).Errorf("Fetcher.Seek: failed to close ReadCloser") + } + + hrs.rc = nil + } + + hrs.offset = abs + } + + return hrs.offset, nil +} + +func (hrs *httpReadSeeker) reader() (io.Reader, error) { + if hrs.rc != nil { + return hrs.rc, nil + } + + if hrs.size == -1 || hrs.offset < hrs.size { + // only try to reopen the body request if we are seeking to a value + // less than the actual size. + if hrs.open == nil { + return nil, errors.Wrapf(errdefs.ErrNotImplemented, "cannot open") + } + + rc, err := hrs.open(hrs.offset) + if err != nil { + return nil, errors.Wrapf(err, "httpReaderSeeker: failed open") + } + + if hrs.rc != nil { + if err := hrs.rc.Close(); err != nil { + log.L.WithError(err).Errorf("httpReadSeeker: failed to close ReadCloser") + } + } + hrs.rc = rc + } else { + // There is an edge case here where offset == size of the content. If + // we seek, we will probably get an error for content that cannot be + // sought (?). In that case, we should err on committing the content, + // as the length is already satisfied but we just return the empty + // reader instead. 
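+		//
+		// Concretely: this branch is taken when the size is known and
+		// hrs.offset >= hrs.size, where re-opening would send
+		// "Range: bytes=<size>-", which many servers reject; handing back
+		// an empty reader lets the caller read straight to EOF instead.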
+ + hrs.rc = ioutil.NopCloser(bytes.NewReader([]byte{})) + } + + return hrs.rc, nil +} diff --git a/vendor/github.com/containerd/containerd/remotes/docker/pusher.go b/vendor/github.com/containerd/containerd/remotes/docker/pusher.go new file mode 100644 index 00000000..96a45d39 --- /dev/null +++ b/vendor/github.com/containerd/containerd/remotes/docker/pusher.go @@ -0,0 +1,392 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package docker + +import ( + "context" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" + "time" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/remotes" + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +type dockerPusher struct { + *dockerBase + object string + + // TODO: namespace tracker + tracker StatusTracker +} + +func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (content.Writer, error) { + ctx, err := contextWithRepositoryScope(ctx, p.refspec, true) + if err != nil { + return nil, err + } + ref := remotes.MakeRefKey(ctx, desc) + status, err := p.tracker.GetStatus(ref) + if err == nil { + if status.Offset == status.Total { + return nil, errors.Wrapf(errdefs.ErrAlreadyExists, "ref %v", ref) + } + // TODO: Handle incomplete status + } else if !errdefs.IsNotFound(err) { + return nil, errors.Wrap(err, "failed to get status") + } + + hosts := p.filterHosts(HostCapabilityPush) + if len(hosts) == 0 { + return nil, errors.Wrap(errdefs.ErrNotFound, "no push hosts") + } + + var ( + isManifest bool + existCheck []string + host = hosts[0] + ) + + switch desc.MediaType { + case images.MediaTypeDockerSchema2Manifest, images.MediaTypeDockerSchema2ManifestList, + ocispec.MediaTypeImageManifest, ocispec.MediaTypeImageIndex: + isManifest = true + existCheck = getManifestPath(p.object, desc.Digest) + default: + existCheck = []string{"blobs", desc.Digest.String()} + } + + req := p.request(host, http.MethodHead, existCheck...) + req.header.Set("Accept", strings.Join([]string{desc.MediaType, `*/*`}, ", ")) + + log.G(ctx).WithField("url", req.String()).Debugf("checking and pushing to") + + resp, err := req.doWithRetries(ctx, nil) + if err != nil { + if errors.Cause(err) != ErrInvalidAuthorization { + return nil, err + } + log.G(ctx).WithError(err).Debugf("Unable to check existence, continuing with push") + } else { + if resp.StatusCode == http.StatusOK { + var exists bool + if isManifest && existCheck[1] != desc.Digest.String() { + dgstHeader := digest.Digest(resp.Header.Get("Docker-Content-Digest")) + if dgstHeader == desc.Digest { + exists = true + } + } else { + exists = true + } + + if exists { + p.tracker.SetStatus(ref, Status{ + Status: content.Status{ + Ref: ref, + // TODO: Set updated time? 
+ }, + }) + return nil, errors.Wrapf(errdefs.ErrAlreadyExists, "content %v on remote", desc.Digest) + } + } else if resp.StatusCode != http.StatusNotFound { + // TODO: log error + return nil, errors.Errorf("unexpected response: %s", resp.Status) + } + } + + if isManifest { + putPath := getManifestPath(p.object, desc.Digest) + req = p.request(host, http.MethodPut, putPath...) + req.header.Add("Content-Type", desc.MediaType) + } else { + // Start upload request + req = p.request(host, http.MethodPost, "blobs", "uploads/") + + var resp *http.Response + if fromRepo := selectRepositoryMountCandidate(p.refspec, desc.Annotations); fromRepo != "" { + preq := requestWithMountFrom(req, desc.Digest.String(), fromRepo) + pctx := contextWithAppendPullRepositoryScope(ctx, fromRepo) + + // NOTE: the fromRepo might be private repo and + // auth service still can grant token without error. + // but the post request will fail because of 401. + // + // for the private repo, we should remove mount-from + // query and send the request again. + resp, err = preq.do(pctx) + if err != nil { + return nil, err + } + + if resp.StatusCode == http.StatusUnauthorized { + log.G(ctx).Debugf("failed to mount from repository %s", fromRepo) + + resp.Body.Close() + resp = nil + } + } + + if resp == nil { + resp, err = req.doWithRetries(ctx, nil) + if err != nil { + return nil, err + } + } + + switch resp.StatusCode { + case http.StatusOK, http.StatusAccepted, http.StatusNoContent: + case http.StatusCreated: + p.tracker.SetStatus(ref, Status{ + Status: content.Status{ + Ref: ref, + }, + }) + return nil, errors.Wrapf(errdefs.ErrAlreadyExists, "content %v on remote", desc.Digest) + default: + // TODO: log error + return nil, errors.Errorf("unexpected response: %s", resp.Status) + } + + var ( + location = resp.Header.Get("Location") + lurl *url.URL + lhost = host + ) + // Support paths without host in location + if strings.HasPrefix(location, "/") { + lurl, err = url.Parse(lhost.Scheme + "://" + lhost.Host + location) + if err != nil { + return nil, errors.Wrapf(err, "unable to parse location %v", location) + } + } else { + if !strings.Contains(location, "://") { + location = lhost.Scheme + "://" + location + } + lurl, err = url.Parse(location) + if err != nil { + return nil, errors.Wrapf(err, "unable to parse location %v", location) + } + + if lurl.Host != lhost.Host || lhost.Scheme != lurl.Scheme { + + lhost.Scheme = lurl.Scheme + lhost.Host = lurl.Host + log.G(ctx).WithField("host", lhost.Host).WithField("scheme", lhost.Scheme).Debug("upload changed destination") + + // Strip authorizer if change to host or scheme + lhost.Authorizer = nil + } + } + q := lurl.Query() + q.Add("digest", desc.Digest.String()) + + req = p.request(lhost, http.MethodPut) + req.header.Set("Content-Type", "application/octet-stream") + req.path = lurl.Path + "?" 
+ q.Encode() + } + p.tracker.SetStatus(ref, Status{ + Status: content.Status{ + Ref: ref, + Total: desc.Size, + Expected: desc.Digest, + StartedAt: time.Now(), + }, + }) + + // TODO: Support chunked upload + + pr, pw := io.Pipe() + respC := make(chan *http.Response, 1) + body := ioutil.NopCloser(pr) + + req.body = func() (io.ReadCloser, error) { + if body == nil { + return nil, errors.New("cannot reuse body, request must be retried") + } + // Only use the body once since pipe cannot be seeked + ob := body + body = nil + return ob, nil + } + req.size = desc.Size + + go func() { + defer close(respC) + resp, err = req.do(ctx) + if err != nil { + pr.CloseWithError(err) + return + } + + switch resp.StatusCode { + case http.StatusOK, http.StatusCreated, http.StatusNoContent: + default: + // TODO: log error + pr.CloseWithError(errors.Errorf("unexpected response: %s", resp.Status)) + } + respC <- resp + }() + + return &pushWriter{ + base: p.dockerBase, + ref: ref, + pipe: pw, + responseC: respC, + isManifest: isManifest, + expected: desc.Digest, + tracker: p.tracker, + }, nil +} + +func getManifestPath(object string, dgst digest.Digest) []string { + if i := strings.IndexByte(object, '@'); i >= 0 { + if object[i+1:] != dgst.String() { + // use digest, not tag + object = "" + } else { + // strip @ for registry path to make tag + object = object[:i] + } + + } + + if object == "" { + return []string{"manifests", dgst.String()} + } + + return []string{"manifests", object} +} + +type pushWriter struct { + base *dockerBase + ref string + + pipe *io.PipeWriter + responseC <-chan *http.Response + isManifest bool + + expected digest.Digest + tracker StatusTracker +} + +func (pw *pushWriter) Write(p []byte) (n int, err error) { + status, err := pw.tracker.GetStatus(pw.ref) + if err != nil { + return n, err + } + n, err = pw.pipe.Write(p) + status.Offset += int64(n) + status.UpdatedAt = time.Now() + pw.tracker.SetStatus(pw.ref, status) + return +} + +func (pw *pushWriter) Close() error { + return pw.pipe.Close() +} + +func (pw *pushWriter) Status() (content.Status, error) { + status, err := pw.tracker.GetStatus(pw.ref) + if err != nil { + return content.Status{}, err + } + return status.Status, nil + +} + +func (pw *pushWriter) Digest() digest.Digest { + // TODO: Get rid of this function? + return pw.expected +} + +func (pw *pushWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error { + // Check whether read has already thrown an error + if _, err := pw.pipe.Write([]byte{}); err != nil && err != io.ErrClosedPipe { + return errors.Wrap(err, "pipe error before commit") + } + + if err := pw.pipe.Close(); err != nil { + return err + } + // TODO: Update status to determine committing + + // TODO: timeout waiting for response + resp := <-pw.responseC + if resp == nil { + return errors.New("no response") + } + + // 201 is specified return status, some registries return + // 200, 202 or 204. 
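+	//
+	// For illustration, a spec-compliant completion of a blob upload is
+	// (path and digest are example values):
+	//
+	//	PUT /v2/<name>/blobs/uploads/<uuid>?digest=sha256:...  → 201 Created
+	//	    Docker-Content-Digest: sha256:...
+	//
+	// The Docker-Content-Digest header is parsed below and compared against
+	// the expected digest before the push is treated as committed.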
+ switch resp.StatusCode { + case http.StatusOK, http.StatusCreated, http.StatusNoContent, http.StatusAccepted: + default: + return errors.Errorf("unexpected status: %s", resp.Status) + } + + status, err := pw.tracker.GetStatus(pw.ref) + if err != nil { + return errors.Wrap(err, "failed to get status") + } + + if size > 0 && size != status.Offset { + return errors.Errorf("unexpected size %d, expected %d", status.Offset, size) + } + + if expected == "" { + expected = status.Expected + } + + actual, err := digest.Parse(resp.Header.Get("Docker-Content-Digest")) + if err != nil { + return errors.Wrap(err, "invalid content digest in response") + } + + if actual != expected { + return errors.Errorf("got digest %s, expected %s", actual, expected) + } + + return nil +} + +func (pw *pushWriter) Truncate(size int64) error { + // TODO: if blob close request and start new request at offset + // TODO: always error on manifest + return errors.New("cannot truncate remote upload") +} + +func requestWithMountFrom(req *request, mount, from string) *request { + creq := *req + + sep := "?" + if strings.Contains(creq.path, sep) { + sep = "&" + } + + creq.path = creq.path + sep + "mount=" + mount + "&from=" + from + + return &creq +} diff --git a/vendor/github.com/containerd/containerd/remotes/docker/registry.go b/vendor/github.com/containerd/containerd/remotes/docker/registry.go new file mode 100644 index 00000000..ae24f41e --- /dev/null +++ b/vendor/github.com/containerd/containerd/remotes/docker/registry.go @@ -0,0 +1,202 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package docker + +import ( + "net/http" +) + +// HostCapabilities represent the capabilities of the registry +// host. This also represents the set of operations for which +// the registry host may be trusted to perform. +// +// For example pushing is a capability which should only be +// performed on an upstream source, not a mirror. +// Resolving (the process of converting a name into a digest) +// must be considered a trusted operation and only done by +// a host which is trusted (or more preferably by secure process +// which can prove the provenance of the mapping). A public +// mirror should never be trusted to do a resolve action. +// +// | Registry Type | Pull | Resolve | Push | +// |------------------|------|---------|------| +// | Public Registry | yes | yes | yes | +// | Private Registry | yes | yes | yes | +// | Public Mirror | yes | no | no | +// | Private Mirror | yes | yes | no | +type HostCapabilities uint8 + +const ( + // HostCapabilityPull represents the capability to fetch manifests + // and blobs by digest + HostCapabilityPull HostCapabilities = 1 << iota + + // HostCapabilityResolve represents the capability to fetch manifests + // by name + HostCapabilityResolve + + // HostCapabilityPush represents the capability to push blobs and + // manifests + HostCapabilityPush + + // Reserved for future capabilities (i.e. 
search, catalog, remove) +) + +func (c HostCapabilities) Has(t HostCapabilities) bool { + return c&t == t +} + +// RegistryHost represents a complete configuration for a registry +// host, representing the capabilities, authorizations, connection +// configuration, and location. +type RegistryHost struct { + Client *http.Client + Authorizer Authorizer + Host string + Scheme string + Path string + Capabilities HostCapabilities +} + +// RegistryHosts fetches the registry hosts for a given namespace, +// provided by the host component of an distribution image reference. +type RegistryHosts func(string) ([]RegistryHost, error) + +// Registries joins multiple registry configuration functions, using the same +// order as provided within the arguments. When an empty registry configuration +// is returned with a nil error, the next function will be called. +// NOTE: This function will not join configurations, as soon as a non-empty +// configuration is returned from a configuration function, it will be returned +// to the caller. +func Registries(registries ...RegistryHosts) RegistryHosts { + return func(host string) ([]RegistryHost, error) { + for _, registry := range registries { + config, err := registry(host) + if err != nil { + return config, err + } + if len(config) > 0 { + return config, nil + } + } + return nil, nil + } +} + +type registryOpts struct { + authorizer Authorizer + plainHTTP func(string) (bool, error) + host func(string) (string, error) + client *http.Client +} + +// RegistryOpt defines a registry default option +type RegistryOpt func(*registryOpts) + +// WithPlainHTTP configures registries to use plaintext http scheme +// for the provided host match function. +func WithPlainHTTP(f func(string) (bool, error)) RegistryOpt { + return func(opts *registryOpts) { + opts.plainHTTP = f + } +} + +// WithAuthorizer configures the default authorizer for a registry +func WithAuthorizer(a Authorizer) RegistryOpt { + return func(opts *registryOpts) { + opts.authorizer = a + } +} + +// WithHostTranslator defines the default translator to use for registry hosts +func WithHostTranslator(h func(string) (string, error)) RegistryOpt { + return func(opts *registryOpts) { + opts.host = h + } +} + +// WithClient configures the default http client for a registry +func WithClient(c *http.Client) RegistryOpt { + return func(opts *registryOpts) { + opts.client = c + } +} + +// ConfigureDefaultRegistries is used to create a default configuration for +// registries. For more advanced configurations or per-domain setups, +// the RegistryHosts interface should be used directly. 
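+//
+// A minimal usage sketch (assuming the RegistryHosts value is handed to the
+// resolver in this package, as resolver.go accepts):
+//
+//	hosts := ConfigureDefaultRegistries(
+//		WithPlainHTTP(MatchLocalhost),
+//	)
+//	// hosts("docker.io")      resolves to registry-1.docker.io over https
+//	// hosts("localhost:5000") resolves to localhost:5000 over http
+//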
+// NOTE: This function will always return a non-empty value or error +func ConfigureDefaultRegistries(ropts ...RegistryOpt) RegistryHosts { + var opts registryOpts + for _, opt := range ropts { + opt(&opts) + } + + return func(host string) ([]RegistryHost, error) { + config := RegistryHost{ + Client: opts.client, + Authorizer: opts.authorizer, + Host: host, + Scheme: "https", + Path: "/v2", + Capabilities: HostCapabilityPull | HostCapabilityResolve | HostCapabilityPush, + } + + if config.Client == nil { + config.Client = http.DefaultClient + } + + if opts.plainHTTP != nil { + match, err := opts.plainHTTP(host) + if err != nil { + return nil, err + } + if match { + config.Scheme = "http" + } + } + + if opts.host != nil { + var err error + config.Host, err = opts.host(config.Host) + if err != nil { + return nil, err + } + } else if host == "docker.io" { + config.Host = "registry-1.docker.io" + } + + return []RegistryHost{config}, nil + } +} + +// MatchAllHosts is a host match function which is always true. +func MatchAllHosts(string) (bool, error) { + return true, nil +} + +// MatchLocalhost is a host match function which returns true for +// localhost. +func MatchLocalhost(host string) (bool, error) { + for _, s := range []string{"localhost", "127.0.0.1", "[::1]"} { + if len(host) >= len(s) && host[0:len(s)] == s && (len(host) == len(s) || host[len(s)] == ':') { + return true, nil + } + } + return host == "::1", nil + +} diff --git a/vendor/github.com/containerd/containerd/remotes/docker/resolver.go b/vendor/github.com/containerd/containerd/remotes/docker/resolver.go new file mode 100644 index 00000000..90a0e34d --- /dev/null +++ b/vendor/github.com/containerd/containerd/remotes/docker/resolver.go @@ -0,0 +1,611 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package docker + +import ( + "context" + "fmt" + "io" + "io/ioutil" + "net/http" + "path" + "strings" + + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/reference" + "github.com/containerd/containerd/remotes" + "github.com/containerd/containerd/remotes/docker/schema1" + "github.com/containerd/containerd/version" + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/net/context/ctxhttp" +) + +var ( + // ErrNoToken is returned if a request is successful but the body does not + // contain an authorization token. + ErrNoToken = errors.New("authorization server did not include a token in the response") + + // ErrInvalidAuthorization is used when credentials are passed to a server but + // those credentials are rejected. + ErrInvalidAuthorization = errors.New("authorization failed") + + // MaxManifestSize represents the largest size accepted from a registry + // during resolution. Larger manifests may be accepted using a + // resolution method other than the registry. 
+	//
+	// NOTE: The max supported layers by some runtimes is 128 and individual
+	// layers will not contribute more than 256 bytes, making 32K bytes a
+	// reasonable limit for a large image manifest.
+	// 4M bytes represents a much larger upper bound for images which may
+	// contain large annotations or be non-images. A proper manifest
+	// design puts large metadata in subobjects, as is consistent with the
+	// intent of the manifest design.
+	MaxManifestSize int64 = 4 * 1048 * 1048
+)
+
+// Authorizer is used to authorize HTTP requests based on 401 HTTP responses.
+// An Authorizer is responsible for caching tokens or credentials used by
+// requests.
+type Authorizer interface {
+	// Authorize sets the appropriate `Authorization` header on the given
+	// request.
+	//
+	// If no authorization is found for the request, the request remains
+	// unmodified. It may also add an `Authorization` header as
+	//  "bearer <some bearer token>"
+	//  "basic <base64 encoded credentials>"
+	Authorize(context.Context, *http.Request) error
+
+	// AddResponses adds a 401 response for the authorizer to consider when
+	// authorizing requests. The last response should be unauthorized and
+	// the previous requests are used to consider redirects and retries
+	// that may have led to the 401.
+	//
+	// If the response is not handled, returns `ErrNotImplemented`
+	AddResponses(context.Context, []*http.Response) error
+}
+
+// ResolverOptions are used to configure a new Docker registry resolver
+type ResolverOptions struct {
+	// Hosts returns registry host configurations for a namespace.
+	Hosts RegistryHosts
+
+	// Headers are the HTTP request header fields sent by the resolver
+	Headers http.Header
+
+	// Tracker is used to track uploads to the registry. This is used
+	// since the registry does not have upload tracking and the existing
+	// mechanism for getting blob upload status is expensive.
+	Tracker StatusTracker
+
+	// Authorizer is used to authorize registry requests
+	// Deprecated: use Hosts
+	Authorizer Authorizer
+
+	// Credentials provides username and secret given a host.
+	// If username is empty but a secret is given, that secret
+	// is interpreted as a long lived token.
+	// Deprecated: use Hosts
+	Credentials func(string) (string, string, error)
+
+	// Host provides the hostname given a namespace.
+	// Deprecated: use Hosts
+	Host func(string) (string, error)
+
+	// PlainHTTP specifies to use plain http and not https
+	// Deprecated: use Hosts
+	PlainHTTP bool
+
+	// Client is the http client to be used when making registry requests
+	// Deprecated: use Hosts
+	Client *http.Client
+}
+
+// DefaultHost is the default host function.
+func DefaultHost(ns string) (string, error) {
+	if ns == "docker.io" {
+		return "registry-1.docker.io", nil
+	}
+	return ns, nil
+}
+
+type dockerResolver struct {
+	hosts         RegistryHosts
+	header        http.Header
+	resolveHeader http.Header
+	tracker       StatusTracker
+}
+
+// NewResolver returns a new resolver to a Docker registry
+func NewResolver(options ResolverOptions) remotes.Resolver {
+	if options.Tracker == nil {
+		options.Tracker = NewInMemoryTracker()
+	}
+
+	if options.Headers == nil {
+		options.Headers = make(http.Header)
+	}
+	if _, ok := options.Headers["User-Agent"]; !ok {
+		options.Headers.Set("User-Agent", "containerd/"+version.Version)
+	}
+
+	resolveHeader := http.Header{}
+	if _, ok := options.Headers["Accept"]; !ok {
+		// set headers for all the types we support for resolution.
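+		// The Accept header below advertises every manifest and index media
+		// type the resolver understands, with "*/*" as a fallback for
+		// registries that predate those types.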
+ resolveHeader.Set("Accept", strings.Join([]string{ + images.MediaTypeDockerSchema2Manifest, + images.MediaTypeDockerSchema2ManifestList, + ocispec.MediaTypeImageManifest, + ocispec.MediaTypeImageIndex, "*/*"}, ", ")) + } else { + resolveHeader["Accept"] = options.Headers["Accept"] + delete(options.Headers, "Accept") + } + + if options.Hosts == nil { + opts := []RegistryOpt{} + if options.Host != nil { + opts = append(opts, WithHostTranslator(options.Host)) + } + + if options.Authorizer == nil { + options.Authorizer = NewDockerAuthorizer( + WithAuthClient(options.Client), + WithAuthHeader(options.Headers), + WithAuthCreds(options.Credentials)) + } + opts = append(opts, WithAuthorizer(options.Authorizer)) + + if options.Client != nil { + opts = append(opts, WithClient(options.Client)) + } + if options.PlainHTTP { + opts = append(opts, WithPlainHTTP(MatchAllHosts)) + } else { + opts = append(opts, WithPlainHTTP(MatchLocalhost)) + } + options.Hosts = ConfigureDefaultRegistries(opts...) + } + return &dockerResolver{ + hosts: options.Hosts, + header: options.Headers, + resolveHeader: resolveHeader, + tracker: options.Tracker, + } +} + +func getManifestMediaType(resp *http.Response) string { + // Strip encoding data (manifests should always be ascii JSON) + contentType := resp.Header.Get("Content-Type") + if sp := strings.IndexByte(contentType, ';'); sp != -1 { + contentType = contentType[0:sp] + } + + // As of Apr 30 2019 the registry.access.redhat.com registry does not specify + // the content type of any data but uses schema1 manifests. + if contentType == "text/plain" { + contentType = images.MediaTypeDockerSchema1Manifest + } + return contentType +} + +type countingReader struct { + reader io.Reader + bytesRead int64 +} + +func (r *countingReader) Read(p []byte) (int, error) { + n, err := r.reader.Read(p) + r.bytesRead += int64(n) + return n, err +} + +var _ remotes.Resolver = &dockerResolver{} + +func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocispec.Descriptor, error) { + refspec, err := reference.Parse(ref) + if err != nil { + return "", ocispec.Descriptor{}, err + } + + if refspec.Object == "" { + return "", ocispec.Descriptor{}, reference.ErrObjectRequired + } + + base, err := r.base(refspec) + if err != nil { + return "", ocispec.Descriptor{}, err + } + + var ( + lastErr error + paths [][]string + dgst = refspec.Digest() + caps = HostCapabilityPull + ) + + if dgst != "" { + if err := dgst.Validate(); err != nil { + // need to fail here, since we can't actually resolve the invalid + // digest. + return "", ocispec.Descriptor{}, err + } + + // turns out, we have a valid digest, make a url. + paths = append(paths, []string{"manifests", dgst.String()}) + + // fallback to blobs on not found. + paths = append(paths, []string{"blobs", dgst.String()}) + } else { + // Add + paths = append(paths, []string{"manifests", refspec.Object}) + caps |= HostCapabilityResolve + } + + hosts := base.filterHosts(caps) + if len(hosts) == 0 { + return "", ocispec.Descriptor{}, errors.Wrap(errdefs.ErrNotFound, "no resolve hosts") + } + + ctx, err = contextWithRepositoryScope(ctx, refspec, false) + if err != nil { + return "", ocispec.Descriptor{}, err + } + + for _, u := range paths { + for _, host := range hosts { + ctx := log.WithLogger(ctx, log.G(ctx).WithField("host", host.Host)) + + req := base.request(host, http.MethodHead, u...) + for key, value := range r.resolveHeader { + req.header[key] = append(req.header[key], value...) 
+ } + + log.G(ctx).Debug("resolving") + resp, err := req.doWithRetries(ctx, nil) + if err != nil { + if errors.Cause(err) == ErrInvalidAuthorization { + err = errors.Wrapf(err, "pull access denied, repository does not exist or may require authorization") + } + // Store the error for referencing later + if lastErr == nil { + lastErr = err + } + continue // try another host + } + resp.Body.Close() // don't care about body contents. + + if resp.StatusCode > 299 { + if resp.StatusCode == http.StatusNotFound { + continue + } + return "", ocispec.Descriptor{}, errors.Errorf("unexpected status code %v: %v", u, resp.Status) + } + size := resp.ContentLength + contentType := getManifestMediaType(resp) + + // if no digest was provided, then only a resolve + // trusted registry was contacted, in this case use + // the digest header (or content from GET) + if dgst == "" { + // this is the only point at which we trust the registry. we use the + // content headers to assemble a descriptor for the name. when this becomes + // more robust, we mostly get this information from a secure trust store. + dgstHeader := digest.Digest(resp.Header.Get("Docker-Content-Digest")) + + if dgstHeader != "" && size != -1 { + if err := dgstHeader.Validate(); err != nil { + return "", ocispec.Descriptor{}, errors.Wrapf(err, "%q in header not a valid digest", dgstHeader) + } + dgst = dgstHeader + } + } + if dgst == "" || size == -1 { + log.G(ctx).Debug("no Docker-Content-Digest header, fetching manifest instead") + + req = base.request(host, http.MethodGet, u...) + for key, value := range r.resolveHeader { + req.header[key] = append(req.header[key], value...) + } + + resp, err := req.doWithRetries(ctx, nil) + if err != nil { + return "", ocispec.Descriptor{}, err + } + defer resp.Body.Close() + + bodyReader := countingReader{reader: resp.Body} + + contentType = getManifestMediaType(resp) + if dgst == "" { + if contentType == images.MediaTypeDockerSchema1Manifest { + b, err := schema1.ReadStripSignature(&bodyReader) + if err != nil { + return "", ocispec.Descriptor{}, err + } + + dgst = digest.FromBytes(b) + } else { + dgst, err = digest.FromReader(&bodyReader) + if err != nil { + return "", ocispec.Descriptor{}, err + } + } + } else if _, err := io.Copy(ioutil.Discard, &bodyReader); err != nil { + return "", ocispec.Descriptor{}, err + } + size = bodyReader.bytesRead + } + // Prevent resolving to excessively large manifests + if size > MaxManifestSize { + if lastErr == nil { + lastErr = errors.Wrapf(errdefs.ErrNotFound, "rejecting %d byte manifest for %s", size, ref) + } + continue + } + + desc := ocispec.Descriptor{ + Digest: dgst, + MediaType: contentType, + Size: size, + } + + log.G(ctx).WithField("desc.digest", desc.Digest).Debug("resolved") + return ref, desc, nil + } + } + + if lastErr == nil { + lastErr = errors.Wrap(errdefs.ErrNotFound, ref) + } + + return "", ocispec.Descriptor{}, lastErr +} + +func (r *dockerResolver) Fetcher(ctx context.Context, ref string) (remotes.Fetcher, error) { + refspec, err := reference.Parse(ref) + if err != nil { + return nil, err + } + + base, err := r.base(refspec) + if err != nil { + return nil, err + } + + return dockerFetcher{ + dockerBase: base, + }, nil +} + +func (r *dockerResolver) Pusher(ctx context.Context, ref string) (remotes.Pusher, error) { + refspec, err := reference.Parse(ref) + if err != nil { + return nil, err + } + + base, err := r.base(refspec) + if err != nil { + return nil, err + } + + return dockerPusher{ + dockerBase: base, + object: refspec.Object, + tracker: 
r.tracker, + }, nil +} + +type dockerBase struct { + refspec reference.Spec + namespace string + hosts []RegistryHost + header http.Header +} + +func (r *dockerResolver) base(refspec reference.Spec) (*dockerBase, error) { + host := refspec.Hostname() + hosts, err := r.hosts(host) + if err != nil { + return nil, err + } + return &dockerBase{ + refspec: refspec, + namespace: strings.TrimPrefix(refspec.Locator, host+"/"), + hosts: hosts, + header: r.header, + }, nil +} + +func (r *dockerBase) filterHosts(caps HostCapabilities) (hosts []RegistryHost) { + for _, host := range r.hosts { + if host.Capabilities.Has(caps) { + hosts = append(hosts, host) + } + } + return +} + +func (r *dockerBase) request(host RegistryHost, method string, ps ...string) *request { + header := http.Header{} + for key, value := range r.header { + header[key] = append(header[key], value...) + } + parts := append([]string{"/", host.Path, r.namespace}, ps...) + p := path.Join(parts...) + // Join strips trailing slash, re-add ending "/" if included + if len(parts) > 0 && strings.HasSuffix(parts[len(parts)-1], "/") { + p = p + "/" + } + return &request{ + method: method, + path: p, + header: header, + host: host, + } +} + +func (r *request) authorize(ctx context.Context, req *http.Request) error { + // Check if has header for host + if r.host.Authorizer != nil { + if err := r.host.Authorizer.Authorize(ctx, req); err != nil { + return err + } + } + + return nil +} + +type request struct { + method string + path string + header http.Header + host RegistryHost + body func() (io.ReadCloser, error) + size int64 +} + +func (r *request) do(ctx context.Context) (*http.Response, error) { + u := r.host.Scheme + "://" + r.host.Host + r.path + req, err := http.NewRequest(r.method, u, nil) + if err != nil { + return nil, err + } + req.Header = r.header + if r.body != nil { + body, err := r.body() + if err != nil { + return nil, err + } + req.Body = body + req.GetBody = r.body + if r.size > 0 { + req.ContentLength = r.size + } + } + + ctx = log.WithLogger(ctx, log.G(ctx).WithField("url", u)) + log.G(ctx).WithFields(requestFields(req)).Debug("do request") + if err := r.authorize(ctx, req); err != nil { + return nil, errors.Wrap(err, "failed to authorize") + } + resp, err := ctxhttp.Do(ctx, r.host.Client, req) + if err != nil { + return nil, errors.Wrap(err, "failed to do request") + } + log.G(ctx).WithFields(responseFields(resp)).Debug("fetch response received") + return resp, nil +} + +func (r *request) doWithRetries(ctx context.Context, responses []*http.Response) (*http.Response, error) { + resp, err := r.do(ctx) + if err != nil { + return nil, err + } + + responses = append(responses, resp) + retry, err := r.retryRequest(ctx, responses) + if err != nil { + resp.Body.Close() + return nil, err + } + if retry { + resp.Body.Close() + return r.doWithRetries(ctx, responses) + } + return resp, err +} + +func (r *request) retryRequest(ctx context.Context, responses []*http.Response) (bool, error) { + if len(responses) > 5 { + return false, nil + } + last := responses[len(responses)-1] + switch last.StatusCode { + case http.StatusUnauthorized: + log.G(ctx).WithField("header", last.Header.Get("WWW-Authenticate")).Debug("Unauthorized") + if r.host.Authorizer != nil { + if err := r.host.Authorizer.AddResponses(ctx, responses); err == nil { + return true, nil + } else if !errdefs.IsNotImplemented(err) { + return false, err + } + } + + return false, nil + case http.StatusMethodNotAllowed: + // Support registries which have not properly implemented 
the HEAD method for + // manifests endpoint + if r.method == http.MethodHead && strings.Contains(r.path, "/manifests/") { + r.method = http.MethodGet + return true, nil + } + case http.StatusRequestTimeout, http.StatusTooManyRequests: + return true, nil + } + + // TODO: Handle 50x errors accounting for attempt history + return false, nil +} + +func (r *request) String() string { + return r.host.Scheme + "://" + r.host.Host + r.path +} + +func requestFields(req *http.Request) logrus.Fields { + fields := map[string]interface{}{ + "request.method": req.Method, + } + for k, vals := range req.Header { + k = strings.ToLower(k) + if k == "authorization" { + continue + } + for i, v := range vals { + field := "request.header." + k + if i > 0 { + field = fmt.Sprintf("%s.%d", field, i) + } + fields[field] = v + } + } + + return logrus.Fields(fields) +} + +func responseFields(resp *http.Response) logrus.Fields { + fields := map[string]interface{}{ + "response.status": resp.Status, + } + for k, vals := range resp.Header { + k = strings.ToLower(k) + for i, v := range vals { + field := "response.header." + k + if i > 0 { + field = fmt.Sprintf("%s.%d", field, i) + } + fields[field] = v + } + } + + return logrus.Fields(fields) +} diff --git a/vendor/github.com/containerd/containerd/remotes/docker/schema1/converter.go b/vendor/github.com/containerd/containerd/remotes/docker/schema1/converter.go new file mode 100644 index 00000000..8314c01d --- /dev/null +++ b/vendor/github.com/containerd/containerd/remotes/docker/schema1/converter.go @@ -0,0 +1,601 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/
+
+package schema1
+
+import (
+	"bytes"
+	"context"
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"golang.org/x/sync/errgroup"
+
+	"github.com/containerd/containerd/archive/compression"
+	"github.com/containerd/containerd/content"
+	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/containerd/images"
+	"github.com/containerd/containerd/log"
+	"github.com/containerd/containerd/remotes"
+	digest "github.com/opencontainers/go-digest"
+	specs "github.com/opencontainers/image-spec/specs-go"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+)
+
+const (
+	manifestSizeLimit            = 8e6 // 8MB
+	labelDockerSchema1EmptyLayer = "containerd.io/docker.schema1.empty-layer"
+)
+
+type blobState struct {
+	diffID digest.Digest
+	empty  bool
+}
+
+// Converter converts schema1 manifests to schema2 on fetch
+type Converter struct {
+	contentStore content.Store
+	fetcher      remotes.Fetcher
+
+	pulledManifest *manifest
+
+	mu         sync.Mutex
+	blobMap    map[digest.Digest]blobState
+	layerBlobs map[digest.Digest]ocispec.Descriptor
+}
+
+// NewConverter returns a new converter
+func NewConverter(contentStore content.Store, fetcher remotes.Fetcher) *Converter {
+	return &Converter{
+		contentStore: contentStore,
+		fetcher:      fetcher,
+		blobMap:      map[digest.Digest]blobState{},
+		layerBlobs:   map[digest.Digest]ocispec.Descriptor{},
+	}
+}
+
+// Handle fetches descriptors for a docker media type
+func (c *Converter) Handle(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
+	switch desc.MediaType {
+	case images.MediaTypeDockerSchema1Manifest:
+		if err := c.fetchManifest(ctx, desc); err != nil {
+			return nil, err
+		}
+
+		m := c.pulledManifest
+		if len(m.FSLayers) != len(m.History) {
+			return nil, errors.New("invalid schema 1 manifest, history and layer mismatch")
+		}
+		descs := make([]ocispec.Descriptor, 0, len(c.pulledManifest.FSLayers))
+
+		for i := range m.FSLayers {
+			if _, ok := c.blobMap[c.pulledManifest.FSLayers[i].BlobSum]; !ok {
+				empty, err := isEmptyLayer([]byte(m.History[i].V1Compatibility))
+				if err != nil {
+					return nil, err
+				}
+
+				// Do not attempt to download a known empty blob
+				if !empty {
+					descs = append([]ocispec.Descriptor{
+						{
+							MediaType: images.MediaTypeDockerSchema2LayerGzip,
+							Digest:    c.pulledManifest.FSLayers[i].BlobSum,
+							Size:      -1,
+						},
+					}, descs...)
+				}
+				c.blobMap[c.pulledManifest.FSLayers[i].BlobSum] = blobState{
+					empty: empty,
+				}
+			}
+		}
+		return descs, nil
+	case images.MediaTypeDockerSchema2LayerGzip:
+		if c.pulledManifest == nil {
+			return nil, errors.New("manifest required for schema 1 blob pull")
+		}
+		return nil, c.fetchBlob(ctx, desc)
+	default:
+		return nil, fmt.Errorf("%v not supported for schema 1 manifests", desc.MediaType)
+	}
+}
+
+// ConvertOptions provides options on converting a docker schema1 manifest.
+type ConvertOptions struct {
+	// ManifestMediaType specifies the media type of the manifest OCI descriptor.
+	ManifestMediaType string
+
+	// ConfigMediaType specifies the media type of the manifest config OCI
+	// descriptor.
+	ConfigMediaType string
+}
+
+// ConvertOpt allows configuring a convert operation.
+type ConvertOpt func(context.Context, *ConvertOptions) error
+
+// UseDockerSchema2 is used to indicate that a schema1 manifest should be
+// converted into the media types for a docker schema2 manifest.
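+//
+// A brief example (sketch; c is a *Converter obtained from NewConverter):
+//
+//	desc, err := c.Convert(ctx, UseDockerSchema2())
+//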
+func UseDockerSchema2() ConvertOpt { + return func(ctx context.Context, o *ConvertOptions) error { + o.ManifestMediaType = images.MediaTypeDockerSchema2Manifest + o.ConfigMediaType = images.MediaTypeDockerSchema2Config + return nil + } +} + +// Convert a docker manifest to an OCI descriptor +func (c *Converter) Convert(ctx context.Context, opts ...ConvertOpt) (ocispec.Descriptor, error) { + co := ConvertOptions{ + ManifestMediaType: ocispec.MediaTypeImageManifest, + ConfigMediaType: ocispec.MediaTypeImageConfig, + } + for _, opt := range opts { + if err := opt(ctx, &co); err != nil { + return ocispec.Descriptor{}, err + } + } + + history, diffIDs, err := c.schema1ManifestHistory() + if err != nil { + return ocispec.Descriptor{}, errors.Wrap(err, "schema 1 conversion failed") + } + + var img ocispec.Image + if err := json.Unmarshal([]byte(c.pulledManifest.History[0].V1Compatibility), &img); err != nil { + return ocispec.Descriptor{}, errors.Wrap(err, "failed to unmarshal image from schema 1 history") + } + + img.History = history + img.RootFS = ocispec.RootFS{ + Type: "layers", + DiffIDs: diffIDs, + } + + b, err := json.MarshalIndent(img, "", " ") + if err != nil { + return ocispec.Descriptor{}, errors.Wrap(err, "failed to marshal image") + } + + config := ocispec.Descriptor{ + MediaType: co.ConfigMediaType, + Digest: digest.Canonical.FromBytes(b), + Size: int64(len(b)), + } + + layers := make([]ocispec.Descriptor, len(diffIDs)) + for i, diffID := range diffIDs { + layers[i] = c.layerBlobs[diffID] + } + + manifest := ocispec.Manifest{ + Versioned: specs.Versioned{ + SchemaVersion: 2, + }, + Config: config, + Layers: layers, + } + + mb, err := json.MarshalIndent(manifest, "", " ") + if err != nil { + return ocispec.Descriptor{}, errors.Wrap(err, "failed to marshal image") + } + + desc := ocispec.Descriptor{ + MediaType: co.ManifestMediaType, + Digest: digest.Canonical.FromBytes(mb), + Size: int64(len(mb)), + } + + labels := map[string]string{} + labels["containerd.io/gc.ref.content.0"] = manifest.Config.Digest.String() + for i, ch := range manifest.Layers { + labels[fmt.Sprintf("containerd.io/gc.ref.content.%d", i+1)] = ch.Digest.String() + } + + ref := remotes.MakeRefKey(ctx, desc) + if err := content.WriteBlob(ctx, c.contentStore, ref, bytes.NewReader(mb), desc, content.WithLabels(labels)); err != nil { + return ocispec.Descriptor{}, errors.Wrap(err, "failed to write image manifest") + } + + ref = remotes.MakeRefKey(ctx, config) + if err := content.WriteBlob(ctx, c.contentStore, ref, bytes.NewReader(b), config); err != nil { + return ocispec.Descriptor{}, errors.Wrap(err, "failed to write image config") + } + + return desc, nil +} + +// ReadStripSignature reads in a schema1 manifest and returns a byte array +// with the "signatures" field stripped +func ReadStripSignature(schema1Blob io.Reader) ([]byte, error) { + b, err := ioutil.ReadAll(io.LimitReader(schema1Blob, manifestSizeLimit)) // limit to 8MB + if err != nil { + return nil, err + } + + return stripSignature(b) +} + +func (c *Converter) fetchManifest(ctx context.Context, desc ocispec.Descriptor) error { + log.G(ctx).Debug("fetch schema 1") + + rc, err := c.fetcher.Fetch(ctx, desc) + if err != nil { + return err + } + + b, err := ReadStripSignature(rc) + rc.Close() + if err != nil { + return err + } + + var m manifest + if err := json.Unmarshal(b, &m); err != nil { + return err + } + c.pulledManifest = &m + + return nil +} + +func (c *Converter) fetchBlob(ctx context.Context, desc ocispec.Descriptor) error { + 
log.G(ctx).Debug("fetch blob") + + var ( + ref = remotes.MakeRefKey(ctx, desc) + calc = newBlobStateCalculator() + compressMethod = compression.Gzip + ) + + // size may be unknown, set to zero for content ingest + ingestDesc := desc + if ingestDesc.Size == -1 { + ingestDesc.Size = 0 + } + + cw, err := content.OpenWriter(ctx, c.contentStore, content.WithRef(ref), content.WithDescriptor(ingestDesc)) + if err != nil { + if !errdefs.IsAlreadyExists(err) { + return err + } + + reuse, err := c.reuseLabelBlobState(ctx, desc) + if err != nil { + return err + } + + if reuse { + return nil + } + + ra, err := c.contentStore.ReaderAt(ctx, desc) + if err != nil { + return err + } + defer ra.Close() + + r, err := compression.DecompressStream(content.NewReader(ra)) + if err != nil { + return err + } + + compressMethod = r.GetCompression() + _, err = io.Copy(calc, r) + r.Close() + if err != nil { + return err + } + } else { + defer cw.Close() + + rc, err := c.fetcher.Fetch(ctx, desc) + if err != nil { + return err + } + defer rc.Close() + + eg, _ := errgroup.WithContext(ctx) + pr, pw := io.Pipe() + + eg.Go(func() error { + r, err := compression.DecompressStream(pr) + if err != nil { + return err + } + + compressMethod = r.GetCompression() + _, err = io.Copy(calc, r) + r.Close() + pr.CloseWithError(err) + return err + }) + + eg.Go(func() error { + defer pw.Close() + + return content.Copy(ctx, cw, io.TeeReader(rc, pw), ingestDesc.Size, ingestDesc.Digest) + }) + + if err := eg.Wait(); err != nil { + return err + } + } + + if desc.Size == -1 { + info, err := c.contentStore.Info(ctx, desc.Digest) + if err != nil { + return errors.Wrap(err, "failed to get blob info") + } + desc.Size = info.Size + } + + if compressMethod == compression.Uncompressed { + log.G(ctx).WithField("id", desc.Digest).Debugf("changed media type for uncompressed schema1 layer blob") + desc.MediaType = images.MediaTypeDockerSchema2Layer + } + + state := calc.State() + + cinfo := content.Info{ + Digest: desc.Digest, + Labels: map[string]string{ + "containerd.io/uncompressed": state.diffID.String(), + labelDockerSchema1EmptyLayer: strconv.FormatBool(state.empty), + }, + } + + if _, err := c.contentStore.Update(ctx, cinfo, "labels.containerd.io/uncompressed", fmt.Sprintf("labels.%s", labelDockerSchema1EmptyLayer)); err != nil { + return errors.Wrap(err, "failed to update uncompressed label") + } + + c.mu.Lock() + c.blobMap[desc.Digest] = state + c.layerBlobs[state.diffID] = desc + c.mu.Unlock() + + return nil +} + +func (c *Converter) reuseLabelBlobState(ctx context.Context, desc ocispec.Descriptor) (bool, error) { + cinfo, err := c.contentStore.Info(ctx, desc.Digest) + if err != nil { + return false, errors.Wrap(err, "failed to get blob info") + } + desc.Size = cinfo.Size + + diffID, ok := cinfo.Labels["containerd.io/uncompressed"] + if !ok { + return false, nil + } + + emptyVal, ok := cinfo.Labels[labelDockerSchema1EmptyLayer] + if !ok { + return false, nil + } + + isEmpty, err := strconv.ParseBool(emptyVal) + if err != nil { + log.G(ctx).WithField("id", desc.Digest).Warnf("failed to parse bool from label %s: %v", labelDockerSchema1EmptyLayer, isEmpty) + return false, nil + } + + bState := blobState{empty: isEmpty} + + if bState.diffID, err = digest.Parse(diffID); err != nil { + log.G(ctx).WithField("id", desc.Digest).Warnf("failed to parse digest from label containerd.io/uncompressed: %v", diffID) + return false, nil + } + + // NOTE: there is no need to read header to get compression method + // because there are only two kinds of methods. 
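+	// A diffID equal to the blob digest means the stored blob is already
+	// uncompressed; otherwise the blob must be gzip-compressed.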
+ if bState.diffID == desc.Digest { + desc.MediaType = images.MediaTypeDockerSchema2Layer + } else { + desc.MediaType = images.MediaTypeDockerSchema2LayerGzip + } + + c.mu.Lock() + c.blobMap[desc.Digest] = bState + c.layerBlobs[bState.diffID] = desc + c.mu.Unlock() + return true, nil +} + +func (c *Converter) schema1ManifestHistory() ([]ocispec.History, []digest.Digest, error) { + if c.pulledManifest == nil { + return nil, nil, errors.New("missing schema 1 manifest for conversion") + } + m := *c.pulledManifest + + if len(m.History) == 0 { + return nil, nil, errors.New("no history") + } + + history := make([]ocispec.History, len(m.History)) + diffIDs := []digest.Digest{} + for i := range m.History { + var h v1History + if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), &h); err != nil { + return nil, nil, errors.Wrap(err, "failed to unmarshal history") + } + + blobSum := m.FSLayers[i].BlobSum + + state := c.blobMap[blobSum] + + history[len(history)-i-1] = ocispec.History{ + Author: h.Author, + Comment: h.Comment, + Created: &h.Created, + CreatedBy: strings.Join(h.ContainerConfig.Cmd, " "), + EmptyLayer: state.empty, + } + + if !state.empty { + diffIDs = append([]digest.Digest{state.diffID}, diffIDs...) + + } + } + + return history, diffIDs, nil +} + +type fsLayer struct { + BlobSum digest.Digest `json:"blobSum"` +} + +type history struct { + V1Compatibility string `json:"v1Compatibility"` +} + +type manifest struct { + FSLayers []fsLayer `json:"fsLayers"` + History []history `json:"history"` +} + +type v1History struct { + Author string `json:"author,omitempty"` + Created time.Time `json:"created"` + Comment string `json:"comment,omitempty"` + ThrowAway *bool `json:"throwaway,omitempty"` + Size *int `json:"Size,omitempty"` // used before ThrowAway field + ContainerConfig struct { + Cmd []string `json:"Cmd,omitempty"` + } `json:"container_config,omitempty"` +} + +// isEmptyLayer returns whether the v1 compatibility history describes an +// empty layer. A return value of true indicates the layer is empty, +// however false does not indicate non-empty. +func isEmptyLayer(compatHistory []byte) (bool, error) { + var h v1History + if err := json.Unmarshal(compatHistory, &h); err != nil { + return false, err + } + + if h.ThrowAway != nil { + return *h.ThrowAway, nil + } + if h.Size != nil { + return *h.Size == 0, nil + } + + // If no `Size` or `throwaway` field is given, then + // it cannot be determined whether the layer is empty + // from the history, return false + return false, nil +} + +type signature struct { + Signatures []jsParsedSignature `json:"signatures"` +} + +type jsParsedSignature struct { + Protected string `json:"protected"` +} + +type protectedBlock struct { + Length int `json:"formatLength"` + Tail string `json:"formatTail"` +} + +// joseBase64UrlDecode decodes the given string using the standard base64 url +// decoder but first adds the appropriate number of trailing '=' characters in +// accordance with the jose specification. 
+// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 +func joseBase64UrlDecode(s string) ([]byte, error) { + switch len(s) % 4 { + case 0: + case 2: + s += "==" + case 3: + s += "=" + default: + return nil, errors.New("illegal base64url string") + } + return base64.URLEncoding.DecodeString(s) +} + +func stripSignature(b []byte) ([]byte, error) { + var sig signature + if err := json.Unmarshal(b, &sig); err != nil { + return nil, err + } + if len(sig.Signatures) == 0 { + return nil, errors.New("no signatures") + } + pb, err := joseBase64UrlDecode(sig.Signatures[0].Protected) + if err != nil { + return nil, errors.Wrapf(err, "could not decode %s", sig.Signatures[0].Protected) + } + + var protected protectedBlock + if err := json.Unmarshal(pb, &protected); err != nil { + return nil, err + } + + if protected.Length > len(b) { + return nil, errors.New("invalid protected length block") + } + + tail, err := joseBase64UrlDecode(protected.Tail) + if err != nil { + return nil, errors.Wrap(err, "invalid tail base 64 value") + } + + return append(b[:protected.Length], tail...), nil +} + +type blobStateCalculator struct { + empty bool + digester digest.Digester +} + +func newBlobStateCalculator() *blobStateCalculator { + return &blobStateCalculator{ + empty: true, + digester: digest.Canonical.Digester(), + } +} + +func (c *blobStateCalculator) Write(p []byte) (int, error) { + if c.empty { + for _, b := range p { + if b != 0x00 { + c.empty = false + break + } + } + } + return c.digester.Hash().Write(p) +} + +func (c *blobStateCalculator) State() blobState { + return blobState{ + empty: c.empty, + diffID: c.digester.Digest(), + } +} diff --git a/vendor/github.com/containerd/containerd/remotes/docker/scope.go b/vendor/github.com/containerd/containerd/remotes/docker/scope.go new file mode 100644 index 00000000..fa840143 --- /dev/null +++ b/vendor/github.com/containerd/containerd/remotes/docker/scope.go @@ -0,0 +1,97 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package docker + +import ( + "context" + "fmt" + "net/url" + "sort" + "strings" + + "github.com/containerd/containerd/reference" +) + +// repositoryScope returns a repository scope string such as "repository:foo/bar:pull" +// for "host/foo/bar:baz". +// When push is true, both pull and push are added to the scope. +func repositoryScope(refspec reference.Spec, push bool) (string, error) { + u, err := url.Parse("dummy://" + refspec.Locator) + if err != nil { + return "", err + } + s := "repository:" + strings.TrimPrefix(u.Path, "/") + ":pull" + if push { + s += ",push" + } + return s, nil +} + +// tokenScopesKey is used for the key for context.WithValue(). +// value: []string (e.g. {"registry:foo/bar:pull"}) +type tokenScopesKey struct{} + +// contextWithRepositoryScope returns a context with tokenScopesKey{} and the repository scope value. 
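+// Scopes added this way accumulate on the context and are deduplicated and
+// sorted later by getTokenScopes.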
+func contextWithRepositoryScope(ctx context.Context, refspec reference.Spec, push bool) (context.Context, error) { + s, err := repositoryScope(refspec, push) + if err != nil { + return nil, err + } + return WithScope(ctx, s), nil +} + +// WithScope appends a custom registry auth scope to the context. +func WithScope(ctx context.Context, scope string) context.Context { + var scopes []string + if v := ctx.Value(tokenScopesKey{}); v != nil { + scopes = v.([]string) + scopes = append(scopes, scope) + } else { + scopes = []string{scope} + } + return context.WithValue(ctx, tokenScopesKey{}, scopes) +} + +// contextWithAppendPullRepositoryScope is used to append repository pull +// scope into existing scopes indexed by the tokenScopesKey{}. +func contextWithAppendPullRepositoryScope(ctx context.Context, repo string) context.Context { + return WithScope(ctx, fmt.Sprintf("repository:%s:pull", repo)) +} + +// getTokenScopes returns deduplicated and sorted scopes from ctx.Value(tokenScopesKey{}) and common scopes. +func getTokenScopes(ctx context.Context, common []string) []string { + var scopes []string + if x := ctx.Value(tokenScopesKey{}); x != nil { + scopes = append(scopes, x.([]string)...) + } + + scopes = append(scopes, common...) + sort.Strings(scopes) + + l := 0 + for idx := 1; idx < len(scopes); idx++ { + // Note: this comparison is unaware of the scope grammar (https://docs.docker.com/registry/spec/auth/scope/) + // So, "repository:foo/bar:pull,push" != "repository:foo/bar:push,pull", although semantically they are equal. + if scopes[l] == scopes[idx] { + continue + } + + l++ + scopes[l] = scopes[idx] + } + return scopes[:l+1] +} diff --git a/vendor/github.com/containerd/containerd/remotes/docker/status.go b/vendor/github.com/containerd/containerd/remotes/docker/status.go new file mode 100644 index 00000000..8069d676 --- /dev/null +++ b/vendor/github.com/containerd/containerd/remotes/docker/status.go @@ -0,0 +1,67 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package docker + +import ( + "sync" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" + "github.com/pkg/errors" +) + +// Status of a content operation +type Status struct { + content.Status + + // UploadUUID is used by the Docker registry to reference blob uploads + UploadUUID string +} + +// StatusTracker to track status of operations +type StatusTracker interface { + GetStatus(string) (Status, error) + SetStatus(string, Status) +} + +type memoryStatusTracker struct { + statuses map[string]Status + m sync.Mutex +} + +// NewInMemoryTracker returns a StatusTracker that tracks content status in-memory +func NewInMemoryTracker() StatusTracker { + return &memoryStatusTracker{ + statuses: map[string]Status{}, + } +} + +func (t *memoryStatusTracker) GetStatus(ref string) (Status, error) { + t.m.Lock() + defer t.m.Unlock() + status, ok := t.statuses[ref] + if !ok { + return Status{}, errors.Wrapf(errdefs.ErrNotFound, "status for ref %v", ref) + } + return status, nil +} + +func (t *memoryStatusTracker) SetStatus(ref string, status Status) { + t.m.Lock() + t.statuses[ref] = status + t.m.Unlock() +} diff --git a/vendor/github.com/containerd/containerd/remotes/handlers.go b/vendor/github.com/containerd/containerd/remotes/handlers.go new file mode 100644 index 00000000..671fea10 --- /dev/null +++ b/vendor/github.com/containerd/containerd/remotes/handlers.go @@ -0,0 +1,308 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package remotes + +import ( + "context" + "fmt" + "io" + "strings" + "sync" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/platforms" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +type refKeyPrefix struct{} + +// WithMediaTypeKeyPrefix adds a custom key prefix for a media type which is used when storing +// data in the content store from the FetchHandler. +// +// Used in `MakeRefKey` to determine what the key prefix should be. +func WithMediaTypeKeyPrefix(ctx context.Context, mediaType, prefix string) context.Context { + var values map[string]string + if v := ctx.Value(refKeyPrefix{}); v != nil { + values = v.(map[string]string) + } else { + values = make(map[string]string) + } + + values[mediaType] = prefix + return context.WithValue(ctx, refKeyPrefix{}, values) +} + +// MakeRefKey returns a unique reference for the descriptor. This reference can be +// used to lookup ongoing processes related to the descriptor. This function +// may look to the context to namespace the reference appropriately. 
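+//
+// For example, a schema2 or OCI image manifest descriptor yields a key of
+// the form "manifest-<digest>", unless a prefix was registered for its media
+// type via WithMediaTypeKeyPrefix.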
+func MakeRefKey(ctx context.Context, desc ocispec.Descriptor) string { + if v := ctx.Value(refKeyPrefix{}); v != nil { + values := v.(map[string]string) + if prefix := values[desc.MediaType]; prefix != "" { + return prefix + "-" + desc.Digest.String() + } + } + + switch mt := desc.MediaType; { + case mt == images.MediaTypeDockerSchema2Manifest || mt == ocispec.MediaTypeImageManifest: + return "manifest-" + desc.Digest.String() + case mt == images.MediaTypeDockerSchema2ManifestList || mt == ocispec.MediaTypeImageIndex: + return "index-" + desc.Digest.String() + case images.IsLayerType(mt): + return "layer-" + desc.Digest.String() + case images.IsKnownConfig(mt): + return "config-" + desc.Digest.String() + default: + log.G(ctx).Warnf("reference for unknown type: %s", mt) + return "unknown-" + desc.Digest.String() + } +} + +// FetchHandler returns a handler that will fetch all content into the ingester +// discovered in a call to Dispatch. Use with ChildrenHandler to do a full +// recursive fetch. +func FetchHandler(ingester content.Ingester, fetcher Fetcher) images.HandlerFunc { + return func(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error) { + ctx = log.WithLogger(ctx, log.G(ctx).WithFields(logrus.Fields{ + "digest": desc.Digest, + "mediatype": desc.MediaType, + "size": desc.Size, + })) + + switch desc.MediaType { + case images.MediaTypeDockerSchema1Manifest: + return nil, fmt.Errorf("%v not supported", desc.MediaType) + default: + err := fetch(ctx, ingester, fetcher, desc) + return nil, err + } + } +} + +func fetch(ctx context.Context, ingester content.Ingester, fetcher Fetcher, desc ocispec.Descriptor) error { + log.G(ctx).Debug("fetch") + + cw, err := content.OpenWriter(ctx, ingester, content.WithRef(MakeRefKey(ctx, desc)), content.WithDescriptor(desc)) + if err != nil { + if errdefs.IsAlreadyExists(err) { + return nil + } + return err + } + defer cw.Close() + + ws, err := cw.Status() + if err != nil { + return err + } + + if ws.Offset == desc.Size { + // If writer is already complete, commit and return + err := cw.Commit(ctx, desc.Size, desc.Digest) + if err != nil && !errdefs.IsAlreadyExists(err) { + return errors.Wrapf(err, "failed commit on ref %q", ws.Ref) + } + return nil + } + + rc, err := fetcher.Fetch(ctx, desc) + if err != nil { + return err + } + defer rc.Close() + + return content.Copy(ctx, cw, rc, desc.Size, desc.Digest) +} + +// PushHandler returns a handler that will push all content from the provider +// using a writer from the pusher. +func PushHandler(pusher Pusher, provider content.Provider) images.HandlerFunc { + return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + ctx = log.WithLogger(ctx, log.G(ctx).WithFields(logrus.Fields{ + "digest": desc.Digest, + "mediatype": desc.MediaType, + "size": desc.Size, + })) + + err := push(ctx, provider, pusher, desc) + return nil, err + } +} + +func push(ctx context.Context, provider content.Provider, pusher Pusher, desc ocispec.Descriptor) error { + log.G(ctx).Debug("push") + + cw, err := pusher.Push(ctx, desc) + if err != nil { + if !errdefs.IsAlreadyExists(err) { + return err + } + + return nil + } + defer cw.Close() + + ra, err := provider.ReaderAt(ctx, desc) + if err != nil { + return err + } + defer ra.Close() + + rd := io.NewSectionReader(ra, 0, desc.Size) + return content.Copy(ctx, cw, rd, desc.Size, desc.Digest) +} + +// PushContent pushes content specified by the descriptor from the provider. 
+// +// Base handlers can be provided which will be called before any push specific +// handlers. +func PushContent(ctx context.Context, pusher Pusher, desc ocispec.Descriptor, store content.Store, platform platforms.MatchComparer, wrapper func(h images.Handler) images.Handler) error { + var m sync.Mutex + manifestStack := []ocispec.Descriptor{} + + filterHandler := images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + switch desc.MediaType { + case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest, + images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex: + m.Lock() + manifestStack = append(manifestStack, desc) + m.Unlock() + return nil, images.ErrStopHandler + default: + return nil, nil + } + }) + + pushHandler := PushHandler(pusher, store) + + platformFilterhandler := images.FilterPlatforms(images.ChildrenHandler(store), platform) + + annotateHandler := annotateDistributionSourceHandler(platformFilterhandler, store) + + var handler images.Handler = images.Handlers( + annotateHandler, + filterHandler, + pushHandler, + ) + if wrapper != nil { + handler = wrapper(handler) + } + + if err := images.Dispatch(ctx, handler, nil, desc); err != nil { + return err + } + + // Iterate in reverse order as seen, parent always uploaded after child + for i := len(manifestStack) - 1; i >= 0; i-- { + _, err := pushHandler(ctx, manifestStack[i]) + if err != nil { + // TODO(estesp): until we have a more complete method for index push, we need to report + // missing dependencies in an index/manifest list by sensing the "400 Bad Request" + // as a marker for this problem + if (manifestStack[i].MediaType == ocispec.MediaTypeImageIndex || + manifestStack[i].MediaType == images.MediaTypeDockerSchema2ManifestList) && + errors.Cause(err) != nil && strings.Contains(errors.Cause(err).Error(), "400 Bad Request") { + return errors.Wrap(err, "manifest list/index references to blobs and/or manifests are missing in your target registry") + } + return err + } + } + + return nil +} + +// FilterManifestByPlatformHandler allows Handler to handle non-target +// platform's manifest and configuration data. +func FilterManifestByPlatformHandler(f images.HandlerFunc, m platforms.Matcher) images.HandlerFunc { + return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + children, err := f(ctx, desc) + if err != nil { + return nil, err + } + + // no platform information + if desc.Platform == nil || m == nil { + return children, nil + } + + var descs []ocispec.Descriptor + switch desc.MediaType { + case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest: + if m.Match(*desc.Platform) { + descs = children + } else { + for _, child := range children { + if child.MediaType == images.MediaTypeDockerSchema2Config || + child.MediaType == ocispec.MediaTypeImageConfig { + + descs = append(descs, child) + } + } + } + default: + descs = children + } + return descs, nil + } +} + +// annotateDistributionSourceHandler add distribution source label into +// annotation of config or blob descriptor. 
+func annotateDistributionSourceHandler(f images.HandlerFunc, manager content.Manager) images.HandlerFunc {
+	return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
+		children, err := f(ctx, desc)
+		if err != nil {
+			return nil, err
+		}
+
+		// only add distribution source for the config or blob data descriptor
+		switch desc.MediaType {
+		case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest,
+			images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex:
+		default:
+			return children, nil
+		}
+
+		for i := range children {
+			child := children[i]
+
+			info, err := manager.Info(ctx, child.Digest)
+			if err != nil {
+				return nil, err
+			}
+
+			for k, v := range info.Labels {
+				if !strings.HasPrefix(k, "containerd.io/distribution.source.") {
+					continue
+				}
+
+				if child.Annotations == nil {
+					child.Annotations = map[string]string{}
+				}
+				child.Annotations[k] = v
+			}
+
+			children[i] = child
+		}
+		return children, nil
+	}
+}
diff --git a/vendor/github.com/containerd/containerd/remotes/resolver.go b/vendor/github.com/containerd/containerd/remotes/resolver.go
new file mode 100644
index 00000000..914d351f
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/remotes/resolver.go
@@ -0,0 +1,80 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package remotes
+
+import (
+	"context"
+	"io"
+
+	"github.com/containerd/containerd/content"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// Resolver provides remotes based on a locator.
+type Resolver interface {
+	// Resolve attempts to resolve the reference into a name and descriptor.
+	//
+	// The argument `ref` should be a scheme-less URI representing the remote.
+	// Structurally, it has a host and path. The "host" can be used to directly
+	// reference a specific host or be matched against a specific handler.
+	//
+	// The returned name should be used to identify the referenced entity.
+	// Depending on the remote namespace, this may be immutable or mutable.
+	// While the name may differ from ref, it should itself be a valid ref.
+	//
+	// If the resolution fails, an error will be returned.
+	Resolve(ctx context.Context, ref string) (name string, desc ocispec.Descriptor, err error)
+
+	// Fetcher returns a new fetcher for the provided reference.
+	// All content fetched from the returned fetcher will be
+	// from the namespace referred to by ref.
+	Fetcher(ctx context.Context, ref string) (Fetcher, error)
+
+	// Pusher returns a new pusher for the provided reference
+	Pusher(ctx context.Context, ref string) (Pusher, error)
+}
+
+// Fetcher fetches content
+type Fetcher interface {
+	// Fetch the resource identified by the descriptor.
+	Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error)
+}
+
+// Pusher pushes content
+type Pusher interface {
+	// Push returns a content writer for the given resource identified
+	// by the descriptor.
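+	//
+	// Pushing content that already exists upstream is expected to surface
+	// an "already exists" error, which callers such as the push handler
+	// treat as success.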
+ Push(ctx context.Context, d ocispec.Descriptor) (content.Writer, error) +} + +// FetcherFunc allows package users to implement a Fetcher with just a +// function. +type FetcherFunc func(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) + +// Fetch content +func (fn FetcherFunc) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) { + return fn(ctx, desc) +} + +// PusherFunc allows package users to implement a Pusher with just a +// function. +type PusherFunc func(ctx context.Context, desc ocispec.Descriptor) (content.Writer, error) + +// Push content +func (fn PusherFunc) Push(ctx context.Context, desc ocispec.Descriptor) (content.Writer, error) { + return fn(ctx, desc) +} diff --git a/vendor/github.com/containerd/containerd/rootfs/apply.go b/vendor/github.com/containerd/containerd/rootfs/apply.go new file mode 100644 index 00000000..73d4ccca --- /dev/null +++ b/vendor/github.com/containerd/containerd/rootfs/apply.go @@ -0,0 +1,187 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package rootfs + +import ( + "context" + "encoding/base64" + "fmt" + "math/rand" + "time" + + "github.com/containerd/containerd/diff" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/mount" + "github.com/containerd/containerd/snapshots" + "github.com/opencontainers/go-digest" + "github.com/opencontainers/image-spec/identity" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +// Layer represents the descriptors for a layer diff. These descriptions +// include the descriptor for the uncompressed tar diff as well as a blob +// used to transport that tar. The blob descriptor may or may not describe +// a compressed object. +type Layer struct { + Diff ocispec.Descriptor + Blob ocispec.Descriptor +} + +// ApplyLayers applies all the layers using the given snapshotter and applier. +// The returned result is a chain id digest representing all the applied layers. +// Layers are applied in order they are given, making the first layer the +// bottom-most layer in the layer chain. +func ApplyLayers(ctx context.Context, layers []Layer, sn snapshots.Snapshotter, a diff.Applier) (digest.Digest, error) { + return ApplyLayersWithOpts(ctx, layers, sn, a, nil) +} + +// ApplyLayersWithOpts applies all the layers using the given snapshotter, applier, and apply opts. +// The returned result is a chain id digest representing all the applied layers. +// Layers are applied in order they are given, making the first layer the +// bottom-most layer in the layer chain. 
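+//
+// A usage sketch (sn and a are assumed to be provided by the caller):
+//
+//	chainID, err := rootfs.ApplyLayersWithOpts(ctx, layers, sn, a, nil)
+//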
+func ApplyLayersWithOpts(ctx context.Context, layers []Layer, sn snapshots.Snapshotter, a diff.Applier, applyOpts []diff.ApplyOpt) (digest.Digest, error) { + chain := make([]digest.Digest, len(layers)) + for i, layer := range layers { + chain[i] = layer.Diff.Digest + } + chainID := identity.ChainID(chain) + + // Just stat top layer, remaining layers will have their existence checked + // on prepare. Calling prepare on upper layers first guarantees that upper + // layers are not removed while calling stat on lower layers + _, err := sn.Stat(ctx, chainID.String()) + if err != nil { + if !errdefs.IsNotFound(err) { + return "", errors.Wrapf(err, "failed to stat snapshot %s", chainID) + } + + if err := applyLayers(ctx, layers, chain, sn, a, nil, applyOpts); err != nil && !errdefs.IsAlreadyExists(err) { + return "", err + } + } + + return chainID, nil +} + +// ApplyLayer applies a single layer on top of the given provided layer chain, +// using the provided snapshotter and applier. If the layer was unpacked true +// is returned, if the layer already exists false is returned. +func ApplyLayer(ctx context.Context, layer Layer, chain []digest.Digest, sn snapshots.Snapshotter, a diff.Applier, opts ...snapshots.Opt) (bool, error) { + return ApplyLayerWithOpts(ctx, layer, chain, sn, a, opts, nil) +} + +// ApplyLayerWithOpts applies a single layer on top of the given provided layer chain, +// using the provided snapshotter, applier, and apply opts. If the layer was unpacked true +// is returned, if the layer already exists false is returned. +func ApplyLayerWithOpts(ctx context.Context, layer Layer, chain []digest.Digest, sn snapshots.Snapshotter, a diff.Applier, opts []snapshots.Opt, applyOpts []diff.ApplyOpt) (bool, error) { + var ( + chainID = identity.ChainID(append(chain, layer.Diff.Digest)).String() + applied bool + ) + if _, err := sn.Stat(ctx, chainID); err != nil { + if !errdefs.IsNotFound(err) { + return false, errors.Wrapf(err, "failed to stat snapshot %s", chainID) + } + + if err := applyLayers(ctx, []Layer{layer}, append(chain, layer.Diff.Digest), sn, a, opts, applyOpts); err != nil { + if !errdefs.IsAlreadyExists(err) { + return false, err + } + } else { + applied = true + } + } + return applied, nil + +} + +func applyLayers(ctx context.Context, layers []Layer, chain []digest.Digest, sn snapshots.Snapshotter, a diff.Applier, opts []snapshots.Opt, applyOpts []diff.ApplyOpt) error { + var ( + parent = identity.ChainID(chain[:len(chain)-1]) + chainID = identity.ChainID(chain) + layer = layers[len(layers)-1] + diff ocispec.Descriptor + key string + mounts []mount.Mount + err error + ) + + for { + key = fmt.Sprintf("extract-%s %s", uniquePart(), chainID) + + // Prepare snapshot with from parent, label as root + mounts, err = sn.Prepare(ctx, key, parent.String(), opts...) 
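+		// Prepare may fail with NotFound when parent layers have not been
+		// applied yet (handled by recursing below) or with AlreadyExists for
+		// a colliding key (retried with a fresh key).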
+ if err != nil { + if errdefs.IsNotFound(err) && len(layers) > 1 { + if err := applyLayers(ctx, layers[:len(layers)-1], chain[:len(chain)-1], sn, a, nil, applyOpts); err != nil { + if !errdefs.IsAlreadyExists(err) { + return err + } + } + // Do no try applying layers again + layers = nil + continue + } else if errdefs.IsAlreadyExists(err) { + // Try a different key + continue + } + + // Already exists should have the caller retry + return errors.Wrapf(err, "failed to prepare extraction snapshot %q", key) + + } + break + } + defer func() { + if err != nil { + if !errdefs.IsAlreadyExists(err) { + log.G(ctx).WithError(err).WithField("key", key).Infof("apply failure, attempting cleanup") + } + + if rerr := sn.Remove(ctx, key); rerr != nil { + log.G(ctx).WithError(rerr).WithField("key", key).Warnf("extraction snapshot removal failed") + } + } + }() + + diff, err = a.Apply(ctx, layer.Blob, mounts, applyOpts...) + if err != nil { + err = errors.Wrapf(err, "failed to extract layer %s", layer.Diff.Digest) + return err + } + if diff.Digest != layer.Diff.Digest { + err = errors.Errorf("wrong diff id calculated on extraction %q", diff.Digest) + return err + } + + if err = sn.Commit(ctx, chainID.String(), key, opts...); err != nil { + err = errors.Wrapf(err, "failed to commit snapshot %s", key) + return err + } + + return nil +} + +func uniquePart() string { + t := time.Now() + var b [3]byte + // Ignore read failures, just decreases uniqueness + rand.Read(b[:]) + return fmt.Sprintf("%d-%s", t.Nanosecond(), base64.URLEncoding.EncodeToString(b[:])) +} diff --git a/vendor/github.com/containerd/containerd/rootfs/diff.go b/vendor/github.com/containerd/containerd/rootfs/diff.go new file mode 100644 index 00000000..f396c73a --- /dev/null +++ b/vendor/github.com/containerd/containerd/rootfs/diff.go @@ -0,0 +1,70 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package rootfs + +import ( + "context" + "fmt" + + "github.com/containerd/containerd/diff" + "github.com/containerd/containerd/mount" + "github.com/containerd/containerd/namespaces" + "github.com/containerd/containerd/snapshots" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// CreateDiff creates a layer diff for the given snapshot identifier from the +// parent of the snapshot. A content ref is provided to track the progress of +// the content creation and the provided snapshotter and mount differ are used +// for calculating the diff. The descriptor for the layer diff is returned. +func CreateDiff(ctx context.Context, snapshotID string, sn snapshots.Snapshotter, d diff.Comparer, opts ...diff.Opt) (ocispec.Descriptor, error) { + // dctx is used to handle cleanup things just in case the param ctx + // has been canceled, which causes that the defer cleanup fails. 
+ dctx := context.Background() + if ns, ok := namespaces.Namespace(ctx); ok { + dctx = namespaces.WithNamespace(dctx, ns) + } + + info, err := sn.Stat(ctx, snapshotID) + if err != nil { + return ocispec.Descriptor{}, err + } + + lowerKey := fmt.Sprintf("%s-parent-view", info.Parent) + lower, err := sn.View(ctx, lowerKey, info.Parent) + if err != nil { + return ocispec.Descriptor{}, err + } + defer sn.Remove(dctx, lowerKey) + + var upper []mount.Mount + if info.Kind == snapshots.KindActive { + upper, err = sn.Mounts(ctx, snapshotID) + if err != nil { + return ocispec.Descriptor{}, err + } + } else { + upperKey := fmt.Sprintf("%s-view", snapshotID) + upper, err = sn.View(ctx, upperKey, snapshotID) + if err != nil { + return ocispec.Descriptor{}, err + } + defer sn.Remove(dctx, upperKey) + } + + return d.Compare(ctx, lower, upper, opts...) +} diff --git a/vendor/github.com/containerd/containerd/rootfs/init.go b/vendor/github.com/containerd/containerd/rootfs/init.go new file mode 100644 index 00000000..325e5531 --- /dev/null +++ b/vendor/github.com/containerd/containerd/rootfs/init.go @@ -0,0 +1,117 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package rootfs + +import ( + "context" + "fmt" + "io/ioutil" + "os" + + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/mount" + "github.com/containerd/containerd/snapshots" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +var ( + initializers = map[string]initializerFunc{} +) + +type initializerFunc func(string) error + +// Mounter handles mount and unmount +type Mounter interface { + Mount(target string, mounts ...mount.Mount) error + Unmount(target string) error +} + +// InitRootFS initializes the snapshot for use as a rootfs +func InitRootFS(ctx context.Context, name string, parent digest.Digest, readonly bool, snapshotter snapshots.Snapshotter, mounter Mounter) ([]mount.Mount, error) { + _, err := snapshotter.Stat(ctx, name) + if err == nil { + return nil, errors.Errorf("rootfs already exists") + } + // TODO: ensure not exist error once added to snapshot package + + parentS := parent.String() + + initName := defaultInitializer + initFn := initializers[initName] + if initFn != nil { + parentS, err = createInitLayer(ctx, parentS, initName, initFn, snapshotter, mounter) + if err != nil { + return nil, err + } + } + + if readonly { + return snapshotter.View(ctx, name, parentS) + } + + return snapshotter.Prepare(ctx, name, parentS) +} + +func createInitLayer(ctx context.Context, parent, initName string, initFn func(string) error, snapshotter snapshots.Snapshotter, mounter Mounter) (string, error) { + initS := fmt.Sprintf("%s %s", parent, initName) + if _, err := snapshotter.Stat(ctx, initS); err == nil { + return initS, nil + } + // TODO: ensure not exist error once added to snapshot package + + // Create tempdir + td, err := ioutil.TempDir(os.Getenv("XDG_RUNTIME_DIR"), "create-init-") + if err != nil { + return "", err + } + defer os.RemoveAll(td) + + mounts, 
err := snapshotter.Prepare(ctx, td, parent) + if err != nil { + return "", err + } + + defer func() { + if err != nil { + if rerr := snapshotter.Remove(ctx, td); rerr != nil { + log.G(ctx).Errorf("Failed to remove snapshot %s: %v", td, rerr) + } + } + }() + + if err = mounter.Mount(td, mounts...); err != nil { + return "", err + } + + if err = initFn(td); err != nil { + if merr := mounter.Unmount(td); merr != nil { + log.G(ctx).Errorf("Failed to unmount %s: %v", td, merr) + } + return "", err + } + + if err = mounter.Unmount(td); err != nil { + return "", err + } + + if err := snapshotter.Commit(ctx, initS, td); err != nil { + return "", err + } + + return initS, nil +} diff --git a/vendor/github.com/containerd/containerd/rootfs/init_linux.go b/vendor/github.com/containerd/containerd/rootfs/init_linux.go new file mode 100644 index 00000000..84dc5652 --- /dev/null +++ b/vendor/github.com/containerd/containerd/rootfs/init_linux.go @@ -0,0 +1,130 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package rootfs + +import ( + "os" + "path/filepath" + "syscall" +) + +const ( + defaultInitializer = "linux-init" +) + +func init() { + initializers[defaultInitializer] = initFS +} + +func createDirectory(name string, uid, gid int) initializerFunc { + return func(root string) error { + dname := filepath.Join(root, name) + st, err := os.Stat(dname) + if err != nil && !os.IsNotExist(err) { + return err + } else if err == nil { + if st.IsDir() { + stat := st.Sys().(*syscall.Stat_t) + if int(stat.Gid) == gid && int(stat.Uid) == uid { + return nil + } + } else { + if err := os.Remove(dname); err != nil { + return err + } + if err := os.Mkdir(dname, 0755); err != nil { + return err + } + } + } else { + if err := os.Mkdir(dname, 0755); err != nil { + return err + } + } + + return os.Chown(dname, uid, gid) + } +} + +func touchFile(name string, uid, gid int) initializerFunc { + return func(root string) error { + fname := filepath.Join(root, name) + + st, err := os.Stat(fname) + if err != nil && !os.IsNotExist(err) { + return err + } else if err == nil { + stat := st.Sys().(*syscall.Stat_t) + if int(stat.Gid) == gid && int(stat.Uid) == uid { + return nil + } + return os.Chown(fname, uid, gid) + } + + f, err := os.OpenFile(fname, os.O_CREATE, 0644) + if err != nil { + return err + } + defer f.Close() + + return f.Chown(uid, gid) + } +} + +func symlink(oldname, newname string) initializerFunc { + return func(root string) error { + linkName := filepath.Join(root, newname) + if _, err := os.Stat(linkName); err != nil && !os.IsNotExist(err) { + return err + } else if err == nil { + return nil + } + return os.Symlink(oldname, linkName) + } +} + +func initFS(root string) error { + st, err := os.Stat(root) + if err != nil { + return err + } + stat := st.Sys().(*syscall.Stat_t) + uid := int(stat.Uid) + gid := int(stat.Gid) + + initFuncs := []initializerFunc{ + createDirectory("/dev", uid, gid), + createDirectory("/dev/pts", uid, gid), + createDirectory("/dev/shm", uid, gid), + 
touchFile("/dev/console", uid, gid), + createDirectory("/proc", uid, gid), + createDirectory("/sys", uid, gid), + createDirectory("/etc", uid, gid), + touchFile("/etc/resolv.conf", uid, gid), + touchFile("/etc/hosts", uid, gid), + touchFile("/etc/hostname", uid, gid), + symlink("/proc/mounts", "/etc/mtab"), + } + + for _, fn := range initFuncs { + if err := fn(root); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/containerd/containerd/rootfs/init_other.go b/vendor/github.com/containerd/containerd/rootfs/init_other.go new file mode 100644 index 00000000..26112108 --- /dev/null +++ b/vendor/github.com/containerd/containerd/rootfs/init_other.go @@ -0,0 +1,23 @@ +// +build !linux + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package rootfs + +const ( + defaultInitializer = "" +) diff --git a/vendor/github.com/containerd/containerd/services/content/contentserver/contentserver.go b/vendor/github.com/containerd/containerd/services/content/contentserver/contentserver.go new file mode 100644 index 00000000..7b6efdb3 --- /dev/null +++ b/vendor/github.com/containerd/containerd/services/content/contentserver/contentserver.go @@ -0,0 +1,463 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package contentserver + +import ( + "context" + "io" + "sync" + + api "github.com/containerd/containerd/api/services/content/v1" + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/log" + ptypes "github.com/gogo/protobuf/types" + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +type service struct { + store content.Store +} + +var bufPool = sync.Pool{ + New: func() interface{} { + buffer := make([]byte, 1<<20) + return &buffer + }, +} + +// New returns the content GRPC server +func New(cs content.Store) api.ContentServer { + return &service{store: cs} +} + +func (s *service) Register(server *grpc.Server) error { + api.RegisterContentServer(server, s) + return nil +} + +func (s *service) Info(ctx context.Context, req *api.InfoRequest) (*api.InfoResponse, error) { + if err := req.Digest.Validate(); err != nil { + return nil, status.Errorf(codes.InvalidArgument, "%q failed validation", req.Digest) + } + + bi, err := s.store.Info(ctx, req.Digest) + if err != nil { + return nil, errdefs.ToGRPC(err) + } + + return &api.InfoResponse{ + Info: infoToGRPC(bi), + }, nil +} + +func (s *service) Update(ctx context.Context, req *api.UpdateRequest) (*api.UpdateResponse, error) { + if err := req.Info.Digest.Validate(); err != nil { + return nil, status.Errorf(codes.InvalidArgument, "%q failed validation", req.Info.Digest) + } + + info, err := s.store.Update(ctx, infoFromGRPC(req.Info), req.UpdateMask.GetPaths()...) + if err != nil { + return nil, errdefs.ToGRPC(err) + } + + return &api.UpdateResponse{ + Info: infoToGRPC(info), + }, nil +} + +func (s *service) List(req *api.ListContentRequest, session api.Content_ListServer) error { + var ( + buffer []api.Info + sendBlock = func(block []api.Info) error { + // send last block + return session.Send(&api.ListContentResponse{ + Info: block, + }) + } + ) + + if err := s.store.Walk(session.Context(), func(info content.Info) error { + buffer = append(buffer, api.Info{ + Digest: info.Digest, + Size_: info.Size, + CreatedAt: info.CreatedAt, + Labels: info.Labels, + }) + + if len(buffer) >= 100 { + if err := sendBlock(buffer); err != nil { + return err + } + + buffer = buffer[:0] + } + + return nil + }, req.Filters...); err != nil { + return errdefs.ToGRPC(err) + } + + if len(buffer) > 0 { + // send last block + if err := sendBlock(buffer); err != nil { + return err + } + } + + return nil +} + +func (s *service) Delete(ctx context.Context, req *api.DeleteContentRequest) (*ptypes.Empty, error) { + log.G(ctx).WithField("digest", req.Digest).Debugf("delete content") + if err := req.Digest.Validate(); err != nil { + return nil, status.Errorf(codes.InvalidArgument, err.Error()) + } + + if err := s.store.Delete(ctx, req.Digest); err != nil { + return nil, errdefs.ToGRPC(err) + } + + return &ptypes.Empty{}, nil +} + +func (s *service) Read(req *api.ReadContentRequest, session api.Content_ReadServer) error { + if err := req.Digest.Validate(); err != nil { + return status.Errorf(codes.InvalidArgument, "%v: %v", req.Digest, err) + } + + oi, err := s.store.Info(session.Context(), req.Digest) + if err != nil { + return errdefs.ToGRPC(err) + } + + ra, err := s.store.ReaderAt(session.Context(), ocispec.Descriptor{Digest: req.Digest}) + if err != nil { + return errdefs.ToGRPC(err) + } + 
defer ra.Close()
+
+	var (
+		offset = req.Offset
+		// size is read size, not the expected size of the blob (oi.Size), which the caller might not be aware of.
+		// offset+size can be larger than oi.Size.
+		size = req.Size_
+
+		// TODO(stevvooe): Using the global buffer pool; the 1MB buffer size may
+		// still be worth tuning for work over a fast network.
+		p = bufPool.Get().(*[]byte)
+	)
+	defer bufPool.Put(p)
+
+	if offset < 0 {
+		offset = 0
+	}
+
+	if offset > oi.Size {
+		return status.Errorf(codes.OutOfRange, "read past object length %v bytes", oi.Size)
+	}
+
+	if size <= 0 || offset+size > oi.Size {
+		size = oi.Size - offset
+	}
+
+	_, err = io.CopyBuffer(
+		&readResponseWriter{session: session},
+		io.NewSectionReader(ra, offset, size), *p)
+	return errdefs.ToGRPC(err)
+}
+
+// readResponseWriter is a writer that places the output into ReadContentResponse messages.
+//
+// This allows io.CopyBuffer to do the heavy lifting of chunking the responses
+// into the buffer size.
+type readResponseWriter struct {
+	offset  int64
+	session api.Content_ReadServer
+}
+
+func (rw *readResponseWriter) Write(p []byte) (n int, err error) {
+	if err := rw.session.Send(&api.ReadContentResponse{
+		Offset: rw.offset,
+		Data:   p,
+	}); err != nil {
+		return 0, err
+	}
+
+	rw.offset += int64(len(p))
+	return len(p), nil
+}
+
+func (s *service) Status(ctx context.Context, req *api.StatusRequest) (*api.StatusResponse, error) {
+	status, err := s.store.Status(ctx, req.Ref)
+	if err != nil {
+		return nil, errdefs.ToGRPCf(err, "could not get status for ref %q", req.Ref)
+	}
+
+	var resp api.StatusResponse
+	resp.Status = &api.Status{
+		StartedAt: status.StartedAt,
+		UpdatedAt: status.UpdatedAt,
+		Ref:       status.Ref,
+		Offset:    status.Offset,
+		Total:     status.Total,
+		Expected:  status.Expected,
+	}
+
+	return &resp, nil
+}
+
+func (s *service) ListStatuses(ctx context.Context, req *api.ListStatusesRequest) (*api.ListStatusesResponse, error) {
+	statuses, err := s.store.ListStatuses(ctx, req.Filters...)
+	if err != nil {
+		return nil, errdefs.ToGRPC(err)
+	}
+
+	var resp api.ListStatusesResponse
+	for _, status := range statuses {
+		resp.Statuses = append(resp.Statuses, api.Status{
+			StartedAt: status.StartedAt,
+			UpdatedAt: status.UpdatedAt,
+			Ref:       status.Ref,
+			Offset:    status.Offset,
+			Total:     status.Total,
+			Expected:  status.Expected,
+		})
+	}
+
+	return &resp, nil
+}
+
+func (s *service) Write(session api.Content_WriteServer) (err error) {
+	var (
+		ctx      = session.Context()
+		msg      api.WriteContentResponse
+		req      *api.WriteContentRequest
+		ref      string
+		total    int64
+		expected digest.Digest
+	)
+
+	defer func(msg *api.WriteContentResponse) {
+		// pump through the last message if no error was encountered
+		if err != nil {
+			if s, ok := status.FromError(err); ok && s.Code() != codes.AlreadyExists {
+				// TODO(stevvooe): Really need a log line here to track which
+				// errors are actually causing failure on the server side. May want
+				// to configure the service with an interceptor to make this work
+				// identically across all GRPC methods.
+				//
+				// This is pretty noisy, so we can remove it but leave it for now.
+				log.G(ctx).WithError(err).Error("(*service).Write failed")
+			}
+
+			return
+		}
+
+		err = session.Send(msg)
+	}(&msg)
+
+	// handle the very first request!
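+	// The first message establishes the ref (and optionally the expected
+	// total size and digest); each subsequent message stats the writer,
+	// writes data at the current offset, or commits.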
+	req, err = session.Recv()
+	if err != nil {
+		return err
+	}
+
+	ref = req.Ref
+
+	if ref == "" {
+		return status.Errorf(codes.InvalidArgument, "first message must have a reference")
+	}
+
+	fields := logrus.Fields{
+		"ref": ref,
+	}
+	total = req.Total
+	expected = req.Expected
+	if total > 0 {
+		fields["total"] = total
+	}
+
+	if expected != "" {
+		fields["expected"] = expected
+	}
+
+	ctx = log.WithLogger(ctx, log.G(ctx).WithFields(fields))
+
+	log.G(ctx).Debug("(*service).Write started")
+	// this action locks the writer for the session.
+	wr, err := s.store.Writer(ctx,
+		content.WithRef(ref),
+		content.WithDescriptor(ocispec.Descriptor{Size: total, Digest: expected}))
+	if err != nil {
+		return errdefs.ToGRPC(err)
+	}
+	defer wr.Close()
+
+	for {
+		msg.Action = req.Action
+		ws, err := wr.Status()
+		if err != nil {
+			return errdefs.ToGRPC(err)
+		}
+
+		msg.Offset = ws.Offset // always set the offset.
+
+		// NOTE(stevvooe): In general, there are two cases under which a remote
+		// writer is used.
+		//
+		// For pull, we almost always have this before fetching large content,
+		// through descriptors. We allow predeclaration of the expected size
+		// and digest.
+		//
+		// For push, it is more complex. If we want to cut through content into
+		// storage, we may have no expectation until we are done processing the
+		// content. The case here is the following:
+		//
+		// 	1. Start writing content.
+		// 	2. Compress inline.
+		// 	3. Validate digest and size (maybe).
+		//
+		// Supporting these two paths is quite awkward but it lets both API
+		// users use the same writer style for each with a minimum of overhead.
+		if req.Expected != "" {
+			if expected != "" && expected != req.Expected {
+				log.G(ctx).Debugf("commit digest differs from writer digest: %v != %v", req.Expected, expected)
+			}
+			expected = req.Expected
+
+			if _, err := s.store.Info(session.Context(), req.Expected); err == nil {
+				if err := wr.Close(); err != nil {
+					log.G(ctx).WithError(err).Error("failed to close writer")
+				}
+				if err := s.store.Abort(session.Context(), ref); err != nil {
+					log.G(ctx).WithError(err).Error("failed to abort write")
+				}
+
+				return status.Errorf(codes.AlreadyExists, "blob with expected digest %v exists", req.Expected)
+			}
+		}
+
+		if req.Total > 0 {
+			// Update the expected total. Typically, this could be seen at
+			// negotiation time or on a commit message.
+			if total > 0 && req.Total != total {
+				log.G(ctx).Debugf("commit size differs from writer size: %v != %v", req.Total, total)
+			}
+			total = req.Total
+		}
+
+		switch req.Action {
+		case api.WriteActionStat:
+			msg.Digest = wr.Digest()
+			msg.StartedAt = ws.StartedAt
+			msg.UpdatedAt = ws.UpdatedAt
+			msg.Total = total
+		case api.WriteActionWrite, api.WriteActionCommit:
+			if req.Offset > 0 {
+				// validate the offset if provided
+				if req.Offset != ws.Offset {
+					return status.Errorf(codes.OutOfRange, "write @%v must occur at current offset %v", req.Offset, ws.Offset)
+				}
+			}
+
+			if req.Offset == 0 && ws.Offset > 0 {
+				if err := wr.Truncate(req.Offset); err != nil {
+					return errors.Wrapf(err, "truncate failed")
+				}
+				msg.Offset = req.Offset
+			}
+
+			// issue the write if we actually have data.
+			if len(req.Data) > 0 {
+				// While this looks like we could use io.WriterAt here, because we
+				// maintain the offset as append only, we just issue the write.
+				n, err := wr.Write(req.Data)
+				if err != nil {
+					return errdefs.ToGRPC(err)
+				}
+
+				if n != len(req.Data) {
+					// TODO(stevvooe): Perhaps we can recover this by including it
+					// in the offset on the write return.
+ return status.Errorf(codes.DataLoss, "wrote %v of %v bytes", n, len(req.Data)) + } + + msg.Offset += int64(n) + } + + if req.Action == api.WriteActionCommit { + var opts []content.Opt + if req.Labels != nil { + opts = append(opts, content.WithLabels(req.Labels)) + } + if err := wr.Commit(ctx, total, expected, opts...); err != nil { + return errdefs.ToGRPC(err) + } + } + + msg.Digest = wr.Digest() + } + + if err := session.Send(&msg); err != nil { + return err + } + + req, err = session.Recv() + if err != nil { + if err == io.EOF { + return nil + } + + return err + } + } +} + +func (s *service) Abort(ctx context.Context, req *api.AbortRequest) (*ptypes.Empty, error) { + if err := s.store.Abort(ctx, req.Ref); err != nil { + return nil, errdefs.ToGRPC(err) + } + + return &ptypes.Empty{}, nil +} + +func infoToGRPC(info content.Info) api.Info { + return api.Info{ + Digest: info.Digest, + Size_: info.Size, + CreatedAt: info.CreatedAt, + UpdatedAt: info.UpdatedAt, + Labels: info.Labels, + } +} + +func infoFromGRPC(info api.Info) content.Info { + return content.Info{ + Digest: info.Digest, + Size: info.Size_, + CreatedAt: info.CreatedAt, + UpdatedAt: info.UpdatedAt, + Labels: info.Labels, + } +} diff --git a/vendor/github.com/containerd/containerd/snapshots/native/native.go b/vendor/github.com/containerd/containerd/snapshots/native/native.go new file mode 100644 index 00000000..50d87577 --- /dev/null +++ b/vendor/github.com/containerd/containerd/snapshots/native/native.go @@ -0,0 +1,356 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package native + +import ( + "context" + "io/ioutil" + "os" + "path/filepath" + + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/mount" + "github.com/containerd/containerd/platforms" + "github.com/containerd/containerd/plugin" + "github.com/containerd/containerd/snapshots" + "github.com/containerd/containerd/snapshots/storage" + + "github.com/containerd/continuity/fs" + "github.com/pkg/errors" +) + +func init() { + plugin.Register(&plugin.Registration{ + Type: plugin.SnapshotPlugin, + ID: "native", + InitFn: func(ic *plugin.InitContext) (interface{}, error) { + ic.Meta.Platforms = append(ic.Meta.Platforms, platforms.DefaultSpec()) + return NewSnapshotter(ic.Root) + }, + }) +} + +type snapshotter struct { + root string + ms *storage.MetaStore +} + +// NewSnapshotter returns a Snapshotter which copies layers on the underlying +// file system. A metadata file is stored under the root. +func NewSnapshotter(root string) (snapshots.Snapshotter, error) { + if err := os.MkdirAll(root, 0700); err != nil { + return nil, err + } + ms, err := storage.NewMetaStore(filepath.Join(root, "metadata.db")) + if err != nil { + return nil, err + } + + if err := os.Mkdir(filepath.Join(root, "snapshots"), 0700); err != nil && !os.IsExist(err) { + return nil, err + } + + return &snapshotter{ + root: root, + ms: ms, + }, nil +} + +// Stat returns the info for an active or committed snapshot by name or +// key. 
+//
+// Should be used for parent resolution, existence checks and to discern
+// the kind of snapshot.
+func (o *snapshotter) Stat(ctx context.Context, key string) (snapshots.Info, error) {
+	ctx, t, err := o.ms.TransactionContext(ctx, false)
+	if err != nil {
+		return snapshots.Info{}, err
+	}
+	defer t.Rollback()
+	_, info, _, err := storage.GetInfo(ctx, key)
+	if err != nil {
+		return snapshots.Info{}, err
+	}
+
+	return info, nil
+}
+
+func (o *snapshotter) Update(ctx context.Context, info snapshots.Info, fieldpaths ...string) (snapshots.Info, error) {
+	ctx, t, err := o.ms.TransactionContext(ctx, true)
+	if err != nil {
+		return snapshots.Info{}, err
+	}
+
+	info, err = storage.UpdateInfo(ctx, info, fieldpaths...)
+	if err != nil {
+		t.Rollback()
+		return snapshots.Info{}, err
+	}
+
+	if err := t.Commit(); err != nil {
+		return snapshots.Info{}, err
+	}
+
+	return info, nil
+}
+
+func (o *snapshotter) Usage(ctx context.Context, key string) (snapshots.Usage, error) {
+	ctx, t, err := o.ms.TransactionContext(ctx, false)
+	if err != nil {
+		return snapshots.Usage{}, err
+	}
+	defer t.Rollback()
+
+	id, info, usage, err := storage.GetInfo(ctx, key)
+	if err != nil {
+		return snapshots.Usage{}, err
+	}
+
+	if info.Kind == snapshots.KindActive {
+		du, err := fs.DiskUsage(ctx, o.getSnapshotDir(id))
+		if err != nil {
+			return snapshots.Usage{}, err
+		}
+		usage = snapshots.Usage(du)
+	}
+
+	return usage, nil
+}
+
+func (o *snapshotter) Prepare(ctx context.Context, key, parent string, opts ...snapshots.Opt) ([]mount.Mount, error) {
+	return o.createSnapshot(ctx, snapshots.KindActive, key, parent, opts)
+}
+
+func (o *snapshotter) View(ctx context.Context, key, parent string, opts ...snapshots.Opt) ([]mount.Mount, error) {
+	return o.createSnapshot(ctx, snapshots.KindView, key, parent, opts)
+}
+
+// Mounts returns the mounts for the transaction identified by key. Can be
+// called on a read-write or readonly transaction.
+//
+// This can be used to recover mounts after calling View or Prepare.
+func (o *snapshotter) Mounts(ctx context.Context, key string) ([]mount.Mount, error) {
+	ctx, t, err := o.ms.TransactionContext(ctx, false)
+	if err != nil {
+		return nil, err
+	}
+	s, err := storage.GetSnapshot(ctx, key)
+	t.Rollback()
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to get snapshot mount")
+	}
+	return o.mounts(s), nil
+}
+
+func (o *snapshotter) Commit(ctx context.Context, name, key string, opts ...snapshots.Opt) error {
+	ctx, t, err := o.ms.TransactionContext(ctx, true)
+	if err != nil {
+		return err
+	}
+
+	id, _, _, err := storage.GetInfo(ctx, key)
+	if err != nil {
+		return err
+	}
+
+	usage, err := fs.DiskUsage(ctx, o.getSnapshotDir(id))
+	if err != nil {
+		return err
+	}
+
+	if _, err := storage.CommitActive(ctx, key, name, snapshots.Usage(usage), opts...); err != nil {
+		if rerr := t.Rollback(); rerr != nil {
+			log.G(ctx).WithError(rerr).Warn("failed to rollback transaction")
+		}
+		return errors.Wrap(err, "failed to commit snapshot")
+	}
+	return t.Commit()
+}
+
+// Remove abandons the transaction identified by key. All resources
+// associated with the key will be removed.
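+//
+// Removal is two-phase: the snapshot directory is first renamed to an
+// "rm-" prefixed path and only deleted from disk once the metadata
+// transaction commits, so a failed commit can attempt to restore it.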
+func (o *snapshotter) Remove(ctx context.Context, key string) (err error) { + ctx, t, err := o.ms.TransactionContext(ctx, true) + if err != nil { + return err + } + defer func() { + if err != nil && t != nil { + if rerr := t.Rollback(); rerr != nil { + log.G(ctx).WithError(rerr).Warn("failed to rollback transaction") + } + } + }() + + id, _, err := storage.Remove(ctx, key) + if err != nil { + return errors.Wrap(err, "failed to remove") + } + + path := o.getSnapshotDir(id) + renamed := filepath.Join(o.root, "snapshots", "rm-"+id) + if err := os.Rename(path, renamed); err != nil { + if !os.IsNotExist(err) { + return errors.Wrap(err, "failed to rename") + } + renamed = "" + } + + err = t.Commit() + t = nil + if err != nil { + if renamed != "" { + if err1 := os.Rename(renamed, path); err1 != nil { + // May cause inconsistent data on disk + log.G(ctx).WithError(err1).WithField("path", renamed).Errorf("failed to rename after failed commit") + } + } + return errors.Wrap(err, "failed to commit") + } + if renamed != "" { + if err := os.RemoveAll(renamed); err != nil { + // Must be cleaned up, any "rm-*" could be removed if no active transactions + log.G(ctx).WithError(err).WithField("path", renamed).Warnf("failed to remove root filesystem") + } + } + + return nil +} + +// Walk the committed snapshots. +func (o *snapshotter) Walk(ctx context.Context, fn snapshots.WalkFunc, fs ...string) error { + ctx, t, err := o.ms.TransactionContext(ctx, false) + if err != nil { + return err + } + defer t.Rollback() + return storage.WalkInfo(ctx, fn, fs...) +} + +func (o *snapshotter) createSnapshot(ctx context.Context, kind snapshots.Kind, key, parent string, opts []snapshots.Opt) (_ []mount.Mount, err error) { + var ( + path, td string + ) + + if kind == snapshots.KindActive || parent == "" { + td, err = ioutil.TempDir(filepath.Join(o.root, "snapshots"), "new-") + if err != nil { + return nil, errors.Wrap(err, "failed to create temp dir") + } + if err := os.Chmod(td, 0755); err != nil { + return nil, errors.Wrapf(err, "failed to chmod %s to 0755", td) + } + defer func() { + if err != nil { + if td != "" { + if err1 := os.RemoveAll(td); err1 != nil { + err = errors.Wrapf(err, "remove failed: %v", err1) + } + } + if path != "" { + if err1 := os.RemoveAll(path); err1 != nil { + err = errors.Wrapf(err, "failed to remove path: %v", err1) + } + } + } + }() + } + + ctx, t, err := o.ms.TransactionContext(ctx, true) + if err != nil { + return nil, err + } + + s, err := storage.CreateSnapshot(ctx, kind, key, parent, opts...) 
+ if err != nil { + if rerr := t.Rollback(); rerr != nil { + log.G(ctx).WithError(rerr).Warn("failed to rollback transaction") + } + return nil, errors.Wrap(err, "failed to create snapshot") + } + + if td != "" { + if len(s.ParentIDs) > 0 { + parent := o.getSnapshotDir(s.ParentIDs[0]) + xattrErrorHandler := func(dst, src, xattrKey string, copyErr error) error { + // security.* xattr cannot be copied in most cases (moby/buildkit#1189) + log.G(ctx).WithError(copyErr).Debugf("failed to copy xattr %q", xattrKey) + return nil + } + copyDirOpts := []fs.CopyDirOpt{ + fs.WithXAttrErrorHandler(xattrErrorHandler), + } + if err := fs.CopyDir(td, parent, copyDirOpts...); err != nil { + return nil, errors.Wrap(err, "copying of parent failed") + } + } + + path = o.getSnapshotDir(s.ID) + if err := os.Rename(td, path); err != nil { + if rerr := t.Rollback(); rerr != nil { + log.G(ctx).WithError(rerr).Warn("failed to rollback transaction") + } + return nil, errors.Wrap(err, "failed to rename") + } + td = "" + } + + if err := t.Commit(); err != nil { + return nil, errors.Wrap(err, "commit failed") + } + + return o.mounts(s), nil +} + +func (o *snapshotter) getSnapshotDir(id string) string { + return filepath.Join(o.root, "snapshots", id) +} + +func (o *snapshotter) mounts(s storage.Snapshot) []mount.Mount { + var ( + roFlag string + source string + ) + + if s.Kind == snapshots.KindView { + roFlag = "ro" + } else { + roFlag = "rw" + } + + if len(s.ParentIDs) == 0 || s.Kind == snapshots.KindActive { + source = o.getSnapshotDir(s.ID) + } else { + source = o.getSnapshotDir(s.ParentIDs[0]) + } + + return []mount.Mount{ + { + Source: source, + Type: "bind", + Options: []string{ + roFlag, + "rbind", + }, + }, + } +} + +// Close closes the snapshotter +func (o *snapshotter) Close() error { + return o.ms.Close() +} diff --git a/vendor/github.com/containerd/containerd/snapshots/overlay/check.go b/vendor/github.com/containerd/containerd/snapshots/overlay/check.go new file mode 100644 index 00000000..cec46df0 --- /dev/null +++ b/vendor/github.com/containerd/containerd/snapshots/overlay/check.go @@ -0,0 +1,88 @@ +// +build linux + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package overlay + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/mount" + "github.com/containerd/continuity/fs" + "github.com/pkg/errors" +) + +// supportsMultipleLowerDir checks if the system supports multiple lowerdirs, +// which is required for the overlay snapshotter. On 4.x kernels, multiple lowerdirs +// are always available (so this check isn't needed), and backported to RHEL and +// CentOS 3.x kernels (3.10.0-693.el7.x86_64 and up). This function is to detect +// support on those kernels, without doing a kernel version compare. +// +// Ported from moby overlay2. 
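+//
+// The probe mounts a scratch overlay with two lowerdirs under a temp
+// directory <td>, i.e. with options of the form:
+//
+//	lowerdir=<td>/lower2:<td>/lower1,upperdir=<td>/upper,workdir=<td>/work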
+func supportsMultipleLowerDir(d string) error { + td, err := ioutil.TempDir(d, "multiple-lowerdir-check") + if err != nil { + return err + } + defer func() { + if err := os.RemoveAll(td); err != nil { + log.L.WithError(err).Warnf("Failed to remove check directory %v", td) + } + }() + + for _, dir := range []string{"lower1", "lower2", "upper", "work", "merged"} { + if err := os.Mkdir(filepath.Join(td, dir), 0755); err != nil { + return err + } + } + + opts := fmt.Sprintf("lowerdir=%s:%s,upperdir=%s,workdir=%s", filepath.Join(td, "lower2"), filepath.Join(td, "lower1"), filepath.Join(td, "upper"), filepath.Join(td, "work")) + m := mount.Mount{ + Type: "overlay", + Source: "overlay", + Options: []string{opts}, + } + dest := filepath.Join(td, "merged") + if err := m.Mount(dest); err != nil { + return errors.Wrap(err, "failed to mount overlay") + } + if err := mount.UnmountAll(dest, 0); err != nil { + log.L.WithError(err).Warnf("Failed to unmount check directory %v", dest) + } + return nil +} + +// Supported returns nil when the overlayfs is functional on the system with the root directory. +// Supported is not called during plugin initialization, but exposed for downstream projects which uses +// this snapshotter as a library. +func Supported(root string) error { + if err := os.MkdirAll(root, 0700); err != nil { + return err + } + supportsDType, err := fs.SupportsDType(root) + if err != nil { + return err + } + if !supportsDType { + return fmt.Errorf("%s does not support d_type. If the backing filesystem is xfs, please reformat with ftype=1 to enable d_type support", root) + } + return supportsMultipleLowerDir(root) +} diff --git a/vendor/github.com/containerd/containerd/snapshots/overlay/overlay.go b/vendor/github.com/containerd/containerd/snapshots/overlay/overlay.go new file mode 100644 index 00000000..2222207a --- /dev/null +++ b/vendor/github.com/containerd/containerd/snapshots/overlay/overlay.go @@ -0,0 +1,513 @@ +// +build linux + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package overlay + +import ( + "context" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + "syscall" + + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/mount" + "github.com/containerd/containerd/platforms" + "github.com/containerd/containerd/plugin" + "github.com/containerd/containerd/snapshots" + "github.com/containerd/containerd/snapshots/storage" + "github.com/containerd/continuity/fs" + "github.com/pkg/errors" +) + +func init() { + plugin.Register(&plugin.Registration{ + Type: plugin.SnapshotPlugin, + ID: "overlayfs", + InitFn: func(ic *plugin.InitContext) (interface{}, error) { + ic.Meta.Platforms = append(ic.Meta.Platforms, platforms.DefaultSpec()) + ic.Meta.Exports["root"] = ic.Root + return NewSnapshotter(ic.Root, AsynchronousRemove) + }, + }) +} + +// SnapshotterConfig is used to configure the overlay snapshotter instance +type SnapshotterConfig struct { + asyncRemove bool +} + +// Opt is an option to configure the overlay snapshotter +type Opt func(config *SnapshotterConfig) error + +// AsynchronousRemove defers removal of filesystem content until +// the Cleanup method is called. Removals will make the snapshot +// referred to by the key unavailable and make the key immediately +// available for re-use. +func AsynchronousRemove(config *SnapshotterConfig) error { + config.asyncRemove = true + return nil +} + +type snapshotter struct { + root string + ms *storage.MetaStore + asyncRemove bool +} + +// NewSnapshotter returns a Snapshotter which uses overlayfs. The overlayfs +// diffs are stored under the provided root. A metadata file is stored under +// the root. +func NewSnapshotter(root string, opts ...Opt) (snapshots.Snapshotter, error) { + var config SnapshotterConfig + for _, opt := range opts { + if err := opt(&config); err != nil { + return nil, err + } + } + + if err := os.MkdirAll(root, 0700); err != nil { + return nil, err + } + supportsDType, err := fs.SupportsDType(root) + if err != nil { + return nil, err + } + if !supportsDType { + return nil, fmt.Errorf("%s does not support d_type. If the backing filesystem is xfs, please reformat with ftype=1 to enable d_type support", root) + } + ms, err := storage.NewMetaStore(filepath.Join(root, "metadata.db")) + if err != nil { + return nil, err + } + + if err := os.Mkdir(filepath.Join(root, "snapshots"), 0700); err != nil && !os.IsExist(err) { + return nil, err + } + + return &snapshotter{ + root: root, + ms: ms, + asyncRemove: config.asyncRemove, + }, nil +} + +// Stat returns the info for an active or committed snapshot by name or +// key. +// +// Should be used for parent resolution, existence checks and to discern +// the kind of snapshot. +func (o *snapshotter) Stat(ctx context.Context, key string) (snapshots.Info, error) { + ctx, t, err := o.ms.TransactionContext(ctx, false) + if err != nil { + return snapshots.Info{}, err + } + defer t.Rollback() + _, info, _, err := storage.GetInfo(ctx, key) + if err != nil { + return snapshots.Info{}, err + } + + return info, nil +} + +func (o *snapshotter) Update(ctx context.Context, info snapshots.Info, fieldpaths ...string) (snapshots.Info, error) { + ctx, t, err := o.ms.TransactionContext(ctx, true) + if err != nil { + return snapshots.Info{}, err + } + + info, err = storage.UpdateInfo(ctx, info, fieldpaths...) 
+	if err != nil {
+		t.Rollback()
+		return snapshots.Info{}, err
+	}
+
+	if err := t.Commit(); err != nil {
+		return snapshots.Info{}, err
+	}
+
+	return info, nil
+}
+
+// Usage returns the resources taken by the snapshot identified by key.
+//
+// For active snapshots, this will scan the usage of the overlay "diff" (aka
+// "upper") directory and may take some time.
+//
+// For committed snapshots, the value is returned from the metadata database.
+func (o *snapshotter) Usage(ctx context.Context, key string) (snapshots.Usage, error) {
+	ctx, t, err := o.ms.TransactionContext(ctx, false)
+	if err != nil {
+		return snapshots.Usage{}, err
+	}
+	id, info, usage, err := storage.GetInfo(ctx, key)
+	t.Rollback() // transaction no longer needed at this point.
+
+	if err != nil {
+		return snapshots.Usage{}, err
+	}
+
+	if info.Kind == snapshots.KindActive {
+		upperPath := o.upperPath(id)
+		du, err := fs.DiskUsage(ctx, upperPath)
+		if err != nil {
+			// TODO(stevvooe): Consider not reporting an error in this case.
+			return snapshots.Usage{}, err
+		}
+
+		usage = snapshots.Usage(du)
+	}
+
+	return usage, nil
+}
+
+func (o *snapshotter) Prepare(ctx context.Context, key, parent string, opts ...snapshots.Opt) ([]mount.Mount, error) {
+	return o.createSnapshot(ctx, snapshots.KindActive, key, parent, opts)
+}
+
+func (o *snapshotter) View(ctx context.Context, key, parent string, opts ...snapshots.Opt) ([]mount.Mount, error) {
+	return o.createSnapshot(ctx, snapshots.KindView, key, parent, opts)
+}
+
+// Mounts returns the mounts for the transaction identified by key. Can be
+// called on a read-write or readonly transaction.
+//
+// This can be used to recover mounts after calling View or Prepare.
+func (o *snapshotter) Mounts(ctx context.Context, key string) ([]mount.Mount, error) {
+	ctx, t, err := o.ms.TransactionContext(ctx, false)
+	if err != nil {
+		return nil, err
+	}
+	s, err := storage.GetSnapshot(ctx, key)
+	t.Rollback()
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to get active mount")
+	}
+	return o.mounts(s), nil
+}
+
+func (o *snapshotter) Commit(ctx context.Context, name, key string, opts ...snapshots.Opt) error {
+	ctx, t, err := o.ms.TransactionContext(ctx, true)
+	if err != nil {
+		return err
+	}
+
+	defer func() {
+		if err != nil {
+			if rerr := t.Rollback(); rerr != nil {
+				log.G(ctx).WithError(rerr).Warn("failed to rollback transaction")
+			}
+		}
+	}()
+
+	// grab the existing id
+	id, _, _, err := storage.GetInfo(ctx, key)
+	if err != nil {
+		return err
+	}
+
+	usage, err := fs.DiskUsage(ctx, o.upperPath(id))
+	if err != nil {
+		return err
+	}
+
+	if _, err = storage.CommitActive(ctx, key, name, snapshots.Usage(usage), opts...); err != nil {
+		return errors.Wrap(err, "failed to commit snapshot")
+	}
+	return t.Commit()
+}
+
+// Remove abandons the snapshot identified by key. The snapshot will
+// immediately become unavailable and unrecoverable. Disk space will
+// be freed up on the next call to `Cleanup`.
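+//
+// An illustrative cleanup sketch for snapshotters built with the
+// AsynchronousRemove option (assuming `sn` came from NewSnapshotter):
+//
+//	if err := sn.Remove(ctx, key); err != nil { ... }
+//	if c, ok := sn.(snapshots.Cleaner); ok {
+//		if err := c.Cleanup(ctx); err != nil { ... }
+//	}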
+func (o *snapshotter) Remove(ctx context.Context, key string) (err error) { + ctx, t, err := o.ms.TransactionContext(ctx, true) + if err != nil { + return err + } + defer func() { + if err != nil { + if rerr := t.Rollback(); rerr != nil { + log.G(ctx).WithError(rerr).Warn("failed to rollback transaction") + } + } + }() + + _, _, err = storage.Remove(ctx, key) + if err != nil { + return errors.Wrap(err, "failed to remove") + } + + if !o.asyncRemove { + var removals []string + removals, err = o.getCleanupDirectories(ctx, t) + if err != nil { + return errors.Wrap(err, "unable to get directories for removal") + } + + // Remove directories after the transaction is closed, failures must not + // return error since the transaction is committed with the removal + // key no longer available. + defer func() { + if err == nil { + for _, dir := range removals { + if err := os.RemoveAll(dir); err != nil { + log.G(ctx).WithError(err).WithField("path", dir).Warn("failed to remove directory") + } + } + } + }() + + } + + return t.Commit() +} + +// Walk the snapshots. +func (o *snapshotter) Walk(ctx context.Context, fn snapshots.WalkFunc, fs ...string) error { + ctx, t, err := o.ms.TransactionContext(ctx, false) + if err != nil { + return err + } + defer t.Rollback() + return storage.WalkInfo(ctx, fn, fs...) +} + +// Cleanup cleans up disk resources from removed or abandoned snapshots +func (o *snapshotter) Cleanup(ctx context.Context) error { + cleanup, err := o.cleanupDirectories(ctx) + if err != nil { + return err + } + + for _, dir := range cleanup { + if err := os.RemoveAll(dir); err != nil { + log.G(ctx).WithError(err).WithField("path", dir).Warn("failed to remove directory") + } + } + + return nil +} + +func (o *snapshotter) cleanupDirectories(ctx context.Context) ([]string, error) { + // Get a write transaction to ensure no other write transaction can be entered + // while the cleanup is scanning. 
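+	// Holding the write transaction means no snapshot can be created while
+	// the snapshot directory is listed, so any directory without an entry
+	// in the metadata ID map can safely be treated as garbage.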
+ ctx, t, err := o.ms.TransactionContext(ctx, true) + if err != nil { + return nil, err + } + + defer t.Rollback() + return o.getCleanupDirectories(ctx, t) +} + +func (o *snapshotter) getCleanupDirectories(ctx context.Context, t storage.Transactor) ([]string, error) { + ids, err := storage.IDMap(ctx) + if err != nil { + return nil, err + } + + snapshotDir := filepath.Join(o.root, "snapshots") + fd, err := os.Open(snapshotDir) + if err != nil { + return nil, err + } + defer fd.Close() + + dirs, err := fd.Readdirnames(0) + if err != nil { + return nil, err + } + + cleanup := []string{} + for _, d := range dirs { + if _, ok := ids[d]; ok { + continue + } + + cleanup = append(cleanup, filepath.Join(snapshotDir, d)) + } + + return cleanup, nil +} + +func (o *snapshotter) createSnapshot(ctx context.Context, kind snapshots.Kind, key, parent string, opts []snapshots.Opt) (_ []mount.Mount, err error) { + ctx, t, err := o.ms.TransactionContext(ctx, true) + if err != nil { + return nil, err + } + + var td, path string + defer func() { + if err != nil { + if td != "" { + if err1 := os.RemoveAll(td); err1 != nil { + log.G(ctx).WithError(err1).Warn("failed to cleanup temp snapshot directory") + } + } + if path != "" { + if err1 := os.RemoveAll(path); err1 != nil { + log.G(ctx).WithError(err1).WithField("path", path).Error("failed to reclaim snapshot directory, directory may need removal") + err = errors.Wrapf(err, "failed to remove path: %v", err1) + } + } + } + }() + + snapshotDir := filepath.Join(o.root, "snapshots") + td, err = o.prepareDirectory(ctx, snapshotDir, kind) + if err != nil { + if rerr := t.Rollback(); rerr != nil { + log.G(ctx).WithError(rerr).Warn("failed to rollback transaction") + } + return nil, errors.Wrap(err, "failed to create prepare snapshot dir") + } + rollback := true + defer func() { + if rollback { + if rerr := t.Rollback(); rerr != nil { + log.G(ctx).WithError(rerr).Warn("failed to rollback transaction") + } + } + }() + + s, err := storage.CreateSnapshot(ctx, kind, key, parent, opts...) 
+ if err != nil { + return nil, errors.Wrap(err, "failed to create snapshot") + } + + if len(s.ParentIDs) > 0 { + st, err := os.Stat(o.upperPath(s.ParentIDs[0])) + if err != nil { + return nil, errors.Wrap(err, "failed to stat parent") + } + + stat := st.Sys().(*syscall.Stat_t) + + if err := os.Lchown(filepath.Join(td, "fs"), int(stat.Uid), int(stat.Gid)); err != nil { + if rerr := t.Rollback(); rerr != nil { + log.G(ctx).WithError(rerr).Warn("failed to rollback transaction") + } + return nil, errors.Wrap(err, "failed to chown") + } + } + + path = filepath.Join(snapshotDir, s.ID) + if err = os.Rename(td, path); err != nil { + return nil, errors.Wrap(err, "failed to rename") + } + td = "" + + rollback = false + if err = t.Commit(); err != nil { + return nil, errors.Wrap(err, "commit failed") + } + + return o.mounts(s), nil +} + +func (o *snapshotter) prepareDirectory(ctx context.Context, snapshotDir string, kind snapshots.Kind) (string, error) { + td, err := ioutil.TempDir(snapshotDir, "new-") + if err != nil { + return "", errors.Wrap(err, "failed to create temp dir") + } + + if err := os.Mkdir(filepath.Join(td, "fs"), 0755); err != nil { + return td, err + } + + if kind == snapshots.KindActive { + if err := os.Mkdir(filepath.Join(td, "work"), 0711); err != nil { + return td, err + } + } + + return td, nil +} + +func (o *snapshotter) mounts(s storage.Snapshot) []mount.Mount { + if len(s.ParentIDs) == 0 { + // if we only have one layer/no parents then just return a bind mount as overlay + // will not work + roFlag := "rw" + if s.Kind == snapshots.KindView { + roFlag = "ro" + } + + return []mount.Mount{ + { + Source: o.upperPath(s.ID), + Type: "bind", + Options: []string{ + roFlag, + "rbind", + }, + }, + } + } + var options []string + + if s.Kind == snapshots.KindActive { + options = append(options, + fmt.Sprintf("workdir=%s", o.workPath(s.ID)), + fmt.Sprintf("upperdir=%s", o.upperPath(s.ID)), + ) + } else if len(s.ParentIDs) == 1 { + return []mount.Mount{ + { + Source: o.upperPath(s.ParentIDs[0]), + Type: "bind", + Options: []string{ + "ro", + "rbind", + }, + }, + } + } + + parentPaths := make([]string, len(s.ParentIDs)) + for i := range s.ParentIDs { + parentPaths[i] = o.upperPath(s.ParentIDs[i]) + } + + options = append(options, fmt.Sprintf("lowerdir=%s", strings.Join(parentPaths, ":"))) + return []mount.Mount{ + { + Type: "overlay", + Source: "overlay", + Options: options, + }, + } + +} + +func (o *snapshotter) upperPath(id string) string { + return filepath.Join(o.root, "snapshots", id, "fs") +} + +func (o *snapshotter) workPath(id string) string { + return filepath.Join(o.root, "snapshots", id, "work") +} + +// Close closes the snapshotter +func (o *snapshotter) Close() error { + return o.ms.Close() +} diff --git a/vendor/github.com/containerd/containerd/snapshots/snapshotter.go b/vendor/github.com/containerd/containerd/snapshots/snapshotter.go new file mode 100644 index 00000000..edf29d89 --- /dev/null +++ b/vendor/github.com/containerd/containerd/snapshots/snapshotter.go @@ -0,0 +1,381 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. +*/ + +package snapshots + +import ( + "context" + "encoding/json" + "strings" + "time" + + "github.com/containerd/containerd/mount" +) + +const ( + inheritedLabelsPrefix = "containerd.io/snapshot/" + labelSnapshotRef = "containerd.io/snapshot.ref" +) + +// Kind identifies the kind of snapshot. +type Kind uint8 + +// definitions of snapshot kinds +const ( + KindUnknown Kind = iota + KindView + KindActive + KindCommitted +) + +// ParseKind parses the provided string into a Kind +// +// If the string cannot be parsed KindUnknown is returned +func ParseKind(s string) Kind { + s = strings.ToLower(s) + switch s { + case "view": + return KindView + case "active": + return KindActive + case "committed": + return KindCommitted + } + + return KindUnknown +} + +// String returns the string representation of the Kind +func (k Kind) String() string { + switch k { + case KindView: + return "View" + case KindActive: + return "Active" + case KindCommitted: + return "Committed" + } + + return "Unknown" +} + +// MarshalJSON the Kind to JSON +func (k Kind) MarshalJSON() ([]byte, error) { + return json.Marshal(k.String()) +} + +// UnmarshalJSON the Kind from JSON +func (k *Kind) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + + *k = ParseKind(s) + return nil +} + +// Info provides information about a particular snapshot. +// JSON marshallability is supported for interactive with tools like ctr, +type Info struct { + Kind Kind // active or committed snapshot + Name string // name or key of snapshot + Parent string `json:",omitempty"` // name of parent snapshot + + // Labels for a snapshot. + // + // Note: only labels prefixed with `containerd.io/snapshot/` will be inherited by the + // snapshotter's `Prepare`, `View`, or `Commit` calls. + Labels map[string]string `json:",omitempty"` + Created time.Time `json:",omitempty"` // Created time + Updated time.Time `json:",omitempty"` // Last update time +} + +// Usage defines statistics for disk resources consumed by the snapshot. +// +// These resources only include the resources consumed by the snapshot itself +// and does not include resources usage by the parent. +type Usage struct { + Inodes int64 // number of inodes in use. + Size int64 // provides usage, in bytes, of snapshot +} + +// Add the provided usage to the current usage +func (u *Usage) Add(other Usage) { + u.Size += other.Size + + // TODO(stevvooe): assumes independent inodes, but provides and upper + // bound. This should be pretty close, assuming the inodes for a + // snapshot are roughly unique to it. Don't trust this assumption. + u.Inodes += other.Inodes +} + +// WalkFunc defines the callback for a snapshot walk. +type WalkFunc func(context.Context, Info) error + +// Snapshotter defines the methods required to implement a snapshot snapshotter for +// allocating, snapshotting and mounting filesystem changesets. The model works +// by building up sets of changes with parent-child relationships. +// +// A snapshot represents a filesystem state. Every snapshot has a parent, where +// the empty parent is represented by the empty string. A diff can be taken +// between a parent and its snapshot to generate a classic layer. +// +// An active snapshot is created by calling `Prepare`. After mounting, changes +// can be made to the snapshot. The act of committing creates a committed +// snapshot. 
The committed snapshot will get the parent of active snapshot. The +// committed snapshot can then be used as a parent. Active snapshots can never +// act as a parent. +// +// Snapshots are best understood by their lifecycle. Active snapshots are +// always created with Prepare or View. Committed snapshots are always created +// with Commit. Active snapshots never become committed snapshots and vice +// versa. All snapshots may be removed. +// +// For consistency, we define the following terms to be used throughout this +// interface for snapshotter implementations: +// +// `ctx` - refers to a context.Context +// `key` - refers to an active snapshot +// `name` - refers to a committed snapshot +// `parent` - refers to the parent in relation +// +// Most methods take various combinations of these identifiers. Typically, +// `name` and `parent` will be used in cases where a method *only* takes +// committed snapshots. `key` will be used to refer to active snapshots in most +// cases, except where noted. All variables used to access snapshots use the +// same key space. For example, an active snapshot may not share the same key +// with a committed snapshot. +// +// We cover several examples below to demonstrate the utility of a snapshot +// snapshotter. +// +// Importing a Layer +// +// To import a layer, we simply have the Snapshotter provide a list of +// mounts to be applied such that our dst will capture a changeset. We start +// out by getting a path to the layer tar file and creating a temp location to +// unpack it to: +// +// layerPath, tmpDir := getLayerPath(), mkTmpDir() // just a path to layer tar file. +// +// We start by using a Snapshotter to Prepare a new snapshot transaction, using a +// key and descending from the empty parent "". To prevent our layer from being +// garbage collected during unpacking, we add the `containerd.io/gc.root` label: +// +// noGcOpt := snapshots.WithLabels(map[string]string{ +// "containerd.io/gc.root": time.Now().UTC().Format(time.RFC3339), +// }) +// mounts, err := snapshotter.Prepare(ctx, key, "", noGcOpt) +// if err != nil { ... } +// +// We get back a list of mounts from Snapshotter.Prepare, with the key identifying +// the active snapshot. Mount this to the temporary location with the +// following: +// +// if err := mount.All(mounts, tmpDir); err != nil { ... } +// +// Once the mounts are performed, our temporary location is ready to capture +// a diff. In practice, this works similar to a filesystem transaction. The +// next step is to unpack the layer. We have a special function unpackLayer +// that applies the contents of the layer to target location and calculates the +// DiffID of the unpacked layer (this is a requirement for docker +// implementation): +// +// layer, err := os.Open(layerPath) +// if err != nil { ... } +// digest, err := unpackLayer(tmpLocation, layer) // unpack into layer location +// if err != nil { ... } +// +// When the above completes, we should have a filesystem the represents the +// contents of the layer. Careful implementations should verify that digest +// matches the expected DiffID. When completed, we unmount the mounts: +// +// unmount(mounts) // optional, for now +// +// Now that we've verified and unpacked our layer, we commit the active +// snapshot to a name. For this example, we are just going to use the layer +// digest, but in practice, this will probably be the ChainID. This also removes +// the active snapshot: +// +// if err := snapshotter.Commit(ctx, digest.String(), key, noGcOpt); err != nil { ... 
+//
+// Now, we have a layer in the Snapshotter that can be accessed with the digest
+// provided during commit.
+//
+// Importing the Next Layer
+//
+// Making a layer depend on the above is identical to the process described
+// above except that the parent is provided as parent when calling
+// Snapshotter.Prepare, assuming a clean, unique key identifier:
+//
+//    mounts, err := snapshotter.Prepare(ctx, key, parentDigest, noGcOpt)
+//
+// We then mount, apply and commit, as we did above. The new snapshot will be
+// based on the content of the previous one.
+//
+// Running a Container
+//
+// To run a container, we simply provide Snapshotter.Prepare the committed image
+// snapshot as the parent. After mounting, the prepared path can
+// be used directly as the container's filesystem:
+//
+//    mounts, err := snapshotter.Prepare(ctx, containerKey, imageRootFSChainID)
+//
+// The returned mounts can then be passed directly to the container runtime. If
+// one would like to create a new image from the filesystem, Snapshotter.Commit
+// is called:
+//
+//    if err := snapshotter.Commit(ctx, newImageSnapshot, containerKey); err != nil { ... }
+//
+// Alternatively, for most container runs, Snapshotter.Remove will be called to
+// signal the Snapshotter to abandon the changes.
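+//
+// For example, an end-to-end container run, sketched with the same
+// hypothetical helpers used above (error handling elided; runContainer and
+// containerRootFS are assumptions for illustration):
+//
+//    mounts, err := snapshotter.Prepare(ctx, containerKey, imageRootFSChainID)
+//    if err != nil { ... }
+//    if err := mount.All(mounts, containerRootFS); err != nil { ... }
+//    runContainer(containerRootFS) // run until the container exits
+//    unmount(mounts)
+//    if err := snapshotter.Remove(ctx, containerKey); err != nil { ... }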
+type Snapshotter interface {
+    // Stat returns the info for an active or committed snapshot by name or
+    // key.
+    //
+    // Should be used for parent resolution, existence checks and to discern
+    // the kind of snapshot.
+    Stat(ctx context.Context, key string) (Info, error)
+
+    // Update updates the info for a snapshot.
+    //
+    // Only mutable properties of a snapshot may be updated.
+    Update(ctx context.Context, info Info, fieldpaths ...string) (Info, error)
+
+    // Usage returns the resource usage of an active or committed snapshot
+    // excluding the usage of parent snapshots.
+    //
+    // The running time of this call for active snapshots is dependent on
+    // implementation, but may be proportional to the size of the resource.
+    // Callers should take this into consideration. Implementations should
+    // attempt to honor context cancellation and avoid taking locks when making
+    // the calculation.
+    Usage(ctx context.Context, key string) (Usage, error)
+
+    // Mounts returns the mounts for the active snapshot transaction identified
+    // by key. Can be called on a read-write or readonly transaction. This is
+    // available only for active snapshots.
+    //
+    // This can be used to recover mounts after calling View or Prepare.
+    Mounts(ctx context.Context, key string) ([]mount.Mount, error)
+
+    // Prepare creates an active snapshot identified by key descending from the
+    // provided parent. The returned mounts can be used to mount the snapshot
+    // to capture changes.
+    //
+    // If a parent is provided, after performing the mounts, the destination
+    // will start with the content of the parent. The parent must be a
+    // committed snapshot. Changes to the mounted destination will be captured
+    // in relation to the parent. The default parent, "", is an empty
+    // directory.
+    //
+    // The changes may be saved to a committed snapshot by calling Commit. When
+    // one is done with the transaction, Remove should be called on the key.
+    //
+    // Multiple calls to Prepare or View with the same key should fail.
+    Prepare(ctx context.Context, key, parent string, opts ...Opt) ([]mount.Mount, error)
+
+    // View behaves identically to Prepare except the result may not be
+    // committed back to the snapshotter. View returns a readonly view on
+    // the parent, with the active snapshot being tracked by the given key.
+    //
+    // This method operates identically to Prepare, except that Mounts returned
+    // may have the readonly flag set. Any modifications to the underlying
+    // filesystem will be ignored. Implementations may perform this in a more
+    // efficient manner that differs from what would be attempted with
+    // `Prepare`.
+    //
+    // Commit may not be called on the provided key and will return an error.
+    // To collect the resources associated with key, Remove must be called with
+    // key as the argument.
+    View(ctx context.Context, key, parent string, opts ...Opt) ([]mount.Mount, error)
+
+    // Commit captures the changes between key and its parent into a snapshot
+    // identified by name. The name can then be used with the snapshotter's other
+    // methods to create subsequent snapshots.
+    //
+    // A committed snapshot will be created under name with the parent of the
+    // active snapshot.
+    //
+    // After commit, the snapshot identified by key is removed.
+    Commit(ctx context.Context, name, key string, opts ...Opt) error
+
+    // Remove the committed or active snapshot by the provided key.
+    //
+    // All resources associated with the key will be removed.
+    //
+    // If the snapshot is a parent of another snapshot, its children must be
+    // removed before proceeding.
+    Remove(ctx context.Context, key string) error
+
+    // Walk will call the provided function for each snapshot in the
+    // snapshotter that matches the provided filters. If no filters are
+    // given all items will be walked.
+    // Filters:
+    //  name
+    //  parent
+    //  kind (active,view,committed)
+    //  labels.(label)
+    Walk(ctx context.Context, fn WalkFunc, filters ...string) error
+
+    // Close releases the internal resources.
+    //
+    // Close is expected to be called at the end of the lifecycle of the
+    // snapshotter, but is not mandatory.
+    //
+    // Close returns nil when it is already closed.
+    Close() error
+}
+
+// Cleaner defines a type capable of performing asynchronous resource cleanup.
+// The Cleaner interface should be used by snapshotters which implement fast
+// removal and deferred resource cleanup. This prevents snapshots from needing
+// to perform lengthy resource cleanup before acknowledging a snapshot key
+// has been removed and is available for re-use. This is also useful when
+// performing multi-key removal with the intent of cleaning up all the
+// resources after each snapshot key has been removed.
+type Cleaner interface {
+    Cleanup(ctx context.Context) error
+}
+
+// Opt allows setting mutable snapshot properties on creation
+type Opt func(info *Info) error
+
+// WithLabels adds labels to a created snapshot
+func WithLabels(labels map[string]string) Opt {
+    return func(info *Info) error {
+        info.Labels = labels
+        return nil
+    }
+}
+
+// FilterInheritedLabels filters the provided labels by removing any key which
+// isn't a snapshot label. Snapshot labels have a prefix of "containerd.io/snapshot/"
+// or are the "containerd.io/snapshot.ref" label.
+func FilterInheritedLabels(labels map[string]string) map[string]string { + if labels == nil { + return nil + } + + filtered := make(map[string]string) + for k, v := range labels { + if k == labelSnapshotRef || strings.HasPrefix(k, inheritedLabelsPrefix) { + filtered[k] = v + } + } + return filtered +} diff --git a/vendor/github.com/containerd/containerd/snapshots/storage/bolt.go b/vendor/github.com/containerd/containerd/snapshots/storage/bolt.go new file mode 100644 index 00000000..712c71f7 --- /dev/null +++ b/vendor/github.com/containerd/containerd/snapshots/storage/bolt.go @@ -0,0 +1,648 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package storage + +import ( + "context" + "encoding/binary" + "fmt" + "strings" + "time" + + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/filters" + "github.com/containerd/containerd/metadata/boltutil" + "github.com/containerd/containerd/snapshots" + "github.com/pkg/errors" + bolt "go.etcd.io/bbolt" +) + +var ( + bucketKeyStorageVersion = []byte("v1") + bucketKeySnapshot = []byte("snapshots") + bucketKeyParents = []byte("parents") + + bucketKeyID = []byte("id") + bucketKeyParent = []byte("parent") + bucketKeyKind = []byte("kind") + bucketKeyInodes = []byte("inodes") + bucketKeySize = []byte("size") + + // ErrNoTransaction is returned when an operation is attempted with + // a context which is not inside of a transaction. + ErrNoTransaction = errors.New("no transaction in context") +) + +// parentKey returns a composite key of the parent and child identifiers. The +// parts of the key are separated by a zero byte. +func parentKey(parent, child uint64) []byte { + b := make([]byte, binary.Size([]uint64{parent, child})+1) + i := binary.PutUvarint(b, parent) + j := binary.PutUvarint(b[i+1:], child) + return b[0 : i+j+1] +} + +// parentPrefixKey returns the parent part of the composite key with the +// zero byte separator. +func parentPrefixKey(parent uint64) []byte { + b := make([]byte, binary.Size(parent)+1) + i := binary.PutUvarint(b, parent) + return b[0 : i+1] +} + +// getParentPrefix returns the first part of the composite key which +// represents the parent identifier. +func getParentPrefix(b []byte) uint64 { + parent, _ := binary.Uvarint(b) + return parent +} + +// GetInfo returns the snapshot Info directly from the metadata. Requires a +// context with a storage transaction. 
+func GetInfo(ctx context.Context, key string) (string, snapshots.Info, snapshots.Usage, error) { + var ( + id uint64 + su snapshots.Usage + si = snapshots.Info{ + Name: key, + } + ) + err := withSnapshotBucket(ctx, key, func(ctx context.Context, bkt, pbkt *bolt.Bucket) error { + getUsage(bkt, &su) + return readSnapshot(bkt, &id, &si) + }) + if err != nil { + return "", snapshots.Info{}, snapshots.Usage{}, err + } + + return fmt.Sprintf("%d", id), si, su, nil +} + +// UpdateInfo updates an existing snapshot info's data +func UpdateInfo(ctx context.Context, info snapshots.Info, fieldpaths ...string) (snapshots.Info, error) { + updated := snapshots.Info{ + Name: info.Name, + } + err := withBucket(ctx, func(ctx context.Context, bkt, pbkt *bolt.Bucket) error { + sbkt := bkt.Bucket([]byte(info.Name)) + if sbkt == nil { + return errors.Wrap(errdefs.ErrNotFound, "snapshot does not exist") + } + if err := readSnapshot(sbkt, nil, &updated); err != nil { + return err + } + + if len(fieldpaths) > 0 { + for _, path := range fieldpaths { + if strings.HasPrefix(path, "labels.") { + if updated.Labels == nil { + updated.Labels = map[string]string{} + } + + key := strings.TrimPrefix(path, "labels.") + updated.Labels[key] = info.Labels[key] + continue + } + + switch path { + case "labels": + updated.Labels = info.Labels + default: + return errors.Wrapf(errdefs.ErrInvalidArgument, "cannot update %q field on snapshot %q", path, info.Name) + } + } + } else { + // Set mutable fields + updated.Labels = info.Labels + } + updated.Updated = time.Now().UTC() + if err := boltutil.WriteTimestamps(sbkt, updated.Created, updated.Updated); err != nil { + return err + } + + return boltutil.WriteLabels(sbkt, updated.Labels) + }) + if err != nil { + return snapshots.Info{}, err + } + return updated, nil +} + +// WalkInfo iterates through all metadata Info for the stored snapshots and +// calls the provided function for each. Requires a context with a storage +// transaction. +func WalkInfo(ctx context.Context, fn snapshots.WalkFunc, fs ...string) error { + filter, err := filters.ParseAll(fs...) + if err != nil { + return err + } + // TODO: allow indexes (name, parent, specific labels) + return withBucket(ctx, func(ctx context.Context, bkt, pbkt *bolt.Bucket) error { + return bkt.ForEach(func(k, v []byte) error { + // skip non buckets + if v != nil { + return nil + } + var ( + sbkt = bkt.Bucket(k) + si = snapshots.Info{ + Name: string(k), + } + ) + if err := readSnapshot(sbkt, nil, &si); err != nil { + return err + } + if !filter.Match(adaptSnapshot(si)) { + return nil + } + + return fn(ctx, si) + }) + }) +} + +// GetSnapshot returns the metadata for the active or view snapshot transaction +// referenced by the given key. Requires a context with a storage transaction. 
+func GetSnapshot(ctx context.Context, key string) (s Snapshot, err error) { + err = withBucket(ctx, func(ctx context.Context, bkt, pbkt *bolt.Bucket) error { + sbkt := bkt.Bucket([]byte(key)) + if sbkt == nil { + return errors.Wrap(errdefs.ErrNotFound, "snapshot does not exist") + } + + s.ID = fmt.Sprintf("%d", readID(sbkt)) + s.Kind = readKind(sbkt) + + if s.Kind != snapshots.KindActive && s.Kind != snapshots.KindView { + return errors.Wrapf(errdefs.ErrFailedPrecondition, "requested snapshot %v not active or view", key) + } + + if parentKey := sbkt.Get(bucketKeyParent); len(parentKey) > 0 { + spbkt := bkt.Bucket(parentKey) + if spbkt == nil { + return errors.Wrap(errdefs.ErrNotFound, "parent does not exist") + } + + s.ParentIDs, err = parents(bkt, spbkt, readID(spbkt)) + if err != nil { + return errors.Wrap(err, "failed to get parent chain") + } + } + return nil + }) + if err != nil { + return Snapshot{}, err + } + + return +} + +// CreateSnapshot inserts a record for an active or view snapshot with the provided parent. +func CreateSnapshot(ctx context.Context, kind snapshots.Kind, key, parent string, opts ...snapshots.Opt) (s Snapshot, err error) { + switch kind { + case snapshots.KindActive, snapshots.KindView: + default: + return Snapshot{}, errors.Wrapf(errdefs.ErrInvalidArgument, "snapshot type %v invalid; only snapshots of type Active or View can be created", kind) + } + var base snapshots.Info + for _, opt := range opts { + if err := opt(&base); err != nil { + return Snapshot{}, err + } + } + + err = createBucketIfNotExists(ctx, func(ctx context.Context, bkt, pbkt *bolt.Bucket) error { + var ( + spbkt *bolt.Bucket + ) + if parent != "" { + spbkt = bkt.Bucket([]byte(parent)) + if spbkt == nil { + return errors.Wrapf(errdefs.ErrNotFound, "missing parent %q bucket", parent) + } + + if readKind(spbkt) != snapshots.KindCommitted { + return errors.Wrapf(errdefs.ErrInvalidArgument, "parent %q is not committed snapshot", parent) + } + } + sbkt, err := bkt.CreateBucket([]byte(key)) + if err != nil { + if err == bolt.ErrBucketExists { + err = errors.Wrapf(errdefs.ErrAlreadyExists, "snapshot %v", key) + } + return err + } + + id, err := bkt.NextSequence() + if err != nil { + return errors.Wrapf(err, "unable to get identifier for snapshot %q", key) + } + + t := time.Now().UTC() + si := snapshots.Info{ + Parent: parent, + Kind: kind, + Labels: base.Labels, + Created: t, + Updated: t, + } + if err := putSnapshot(sbkt, id, si); err != nil { + return err + } + + if spbkt != nil { + pid := readID(spbkt) + + // Store a backlink from the key to the parent. Store the snapshot name + // as the value to allow following the backlink to the snapshot value. + if err := pbkt.Put(parentKey(pid, id), []byte(key)); err != nil { + return errors.Wrapf(err, "failed to write parent link for snapshot %q", key) + } + + s.ParentIDs, err = parents(bkt, spbkt, pid) + if err != nil { + return errors.Wrapf(err, "failed to get parent chain for snapshot %q", key) + } + } + + s.ID = fmt.Sprintf("%d", id) + s.Kind = kind + return nil + }) + if err != nil { + return Snapshot{}, err + } + + return +} + +// Remove removes a snapshot from the metastore. The string identifier for the +// snapshot is returned as well as the kind. The provided context must contain a +// writable transaction. 
+func Remove(ctx context.Context, key string) (string, snapshots.Kind, error) {
+    var (
+        id uint64
+        si snapshots.Info
+    )
+
+    if err := withBucket(ctx, func(ctx context.Context, bkt, pbkt *bolt.Bucket) error {
+        sbkt := bkt.Bucket([]byte(key))
+        if sbkt == nil {
+            return errors.Wrapf(errdefs.ErrNotFound, "snapshot %v", key)
+        }
+
+        if err := readSnapshot(sbkt, &id, &si); err != nil {
+            return errors.Wrapf(err, "failed to read snapshot %s", key)
+        }
+
+        if pbkt != nil {
+            k, _ := pbkt.Cursor().Seek(parentPrefixKey(id))
+            if getParentPrefix(k) == id {
+                return errors.Wrap(errdefs.ErrFailedPrecondition, "cannot remove snapshot with child")
+            }
+
+            if si.Parent != "" {
+                spbkt := bkt.Bucket([]byte(si.Parent))
+                if spbkt == nil {
+                    return errors.Wrapf(errdefs.ErrNotFound, "snapshot %v", key)
+                }
+
+                if err := pbkt.Delete(parentKey(readID(spbkt), id)); err != nil {
+                    return errors.Wrap(err, "failed to delete parent link")
+                }
+            }
+        }
+
+        if err := bkt.DeleteBucket([]byte(key)); err != nil {
+            return errors.Wrap(err, "failed to delete snapshot")
+        }
+
+        return nil
+    }); err != nil {
+        return "", 0, err
+    }
+
+    return fmt.Sprintf("%d", id), si.Kind, nil
+}
+
+// CommitActive renames the active snapshot transaction referenced by `key`
+// as a committed snapshot referenced by `name`. The resulting snapshot will be
+// committed and readonly. The `key` reference will no longer be available for
+// lookup or removal. The returned string identifier for the committed snapshot
+// is the same identifier of the original active snapshot. The provided context
+// must contain a writable transaction.
+func CommitActive(ctx context.Context, key, name string, usage snapshots.Usage, opts ...snapshots.Opt) (string, error) {
+    var (
+        id   uint64
+        base snapshots.Info
+    )
+    for _, opt := range opts {
+        if err := opt(&base); err != nil {
+            return "", err
+        }
+    }
+
+    if err := withBucket(ctx, func(ctx context.Context, bkt, pbkt *bolt.Bucket) error {
+        dbkt, err := bkt.CreateBucket([]byte(name))
+        if err != nil {
+            if err == bolt.ErrBucketExists {
+                err = errdefs.ErrAlreadyExists
+            }
+            return errors.Wrapf(err, "committed snapshot %v", name)
+        }
+        sbkt := bkt.Bucket([]byte(key))
+        if sbkt == nil {
+            return errors.Wrapf(errdefs.ErrNotFound, "failed to get active snapshot %q", key)
+        }
+
+        var si snapshots.Info
+        if err := readSnapshot(sbkt, &id, &si); err != nil {
+            return errors.Wrapf(err, "failed to read active snapshot %q", key)
+        }
+
+        if si.Kind != snapshots.KindActive {
+            return errors.Wrapf(errdefs.ErrFailedPrecondition, "snapshot %q is not active", key)
+        }
+        si.Kind = snapshots.KindCommitted
+        si.Created = time.Now().UTC()
+        si.Updated = si.Created
+
+        // Replace labels, do not inherit
+        si.Labels = base.Labels
+
+        if err := putSnapshot(dbkt, id, si); err != nil {
+            return err
+        }
+        if err := putUsage(dbkt, usage); err != nil {
+            return err
+        }
+        if err := bkt.DeleteBucket([]byte(key)); err != nil {
+            return errors.Wrapf(err, "failed to delete active snapshot %q", key)
+        }
+        if si.Parent != "" {
+            spbkt := bkt.Bucket([]byte(si.Parent))
+            if spbkt == nil {
+                return errors.Wrapf(errdefs.ErrNotFound, "missing parent %q of snapshot %q", si.Parent, key)
+            }
+            pid := readID(spbkt)
+
+            // Updates parent back link to use new key
+            if err := pbkt.Put(parentKey(pid, id), []byte(name)); err != nil {
+                return errors.Wrapf(err, "failed to update parent link %q from %q to %q", pid, key, name)
+            }
+        }
+
+        return nil
+    }); err != nil {
+        return "", err
+    }
+
+    return fmt.Sprintf("%d", id), nil
+}
+
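+// From a driver's perspective, Commit typically wraps CommitActive in a
+// writable transaction. A sketch (ms is a *MetaStore from this package;
+// diskUsage is a hypothetical helper computing the snapshots.Usage of the
+// active snapshot's directory):
+//
+//    ctx, t, err := ms.TransactionContext(ctx, true)
+//    if err != nil {
+//        return err
+//    }
+//    usage, err := diskUsage(activeDir)
+//    if err != nil {
+//        t.Rollback()
+//        return err
+//    }
+//    if _, err := storage.CommitActive(ctx, key, name, usage, opts...); err != nil {
+//        t.Rollback()
+//        return err
+//    }
+//    return t.Commit()
+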
+// IDMap returns all IDs mapped to their keys
+func IDMap(ctx context.Context) (map[string]string, error) {
+    m := map[string]string{}
+    if err := withBucket(ctx, func(ctx context.Context, bkt, _ *bolt.Bucket) error {
+        return bkt.ForEach(func(k, v []byte) error {
+            // skip non buckets
+            if v != nil {
+                return nil
+            }
+            id := readID(bkt.Bucket(k))
+            m[fmt.Sprintf("%d", id)] = string(k)
+            return nil
+        })
+    }); err != nil {
+        return nil, err
+    }
+
+    return m, nil
+}
+
+func withSnapshotBucket(ctx context.Context, key string, fn func(context.Context, *bolt.Bucket, *bolt.Bucket) error) error {
+    tx, ok := ctx.Value(transactionKey{}).(*bolt.Tx)
+    if !ok {
+        return ErrNoTransaction
+    }
+    vbkt := tx.Bucket(bucketKeyStorageVersion)
+    if vbkt == nil {
+        return errors.Wrap(errdefs.ErrNotFound, "bucket does not exist")
+    }
+    bkt := vbkt.Bucket(bucketKeySnapshot)
+    if bkt == nil {
+        return errors.Wrap(errdefs.ErrNotFound, "snapshots bucket does not exist")
+    }
+    bkt = bkt.Bucket([]byte(key))
+    if bkt == nil {
+        return errors.Wrap(errdefs.ErrNotFound, "snapshot does not exist")
+    }
+
+    return fn(ctx, bkt, vbkt.Bucket(bucketKeyParents))
+}
+
+func withBucket(ctx context.Context, fn func(context.Context, *bolt.Bucket, *bolt.Bucket) error) error {
+    tx, ok := ctx.Value(transactionKey{}).(*bolt.Tx)
+    if !ok {
+        return ErrNoTransaction
+    }
+    bkt := tx.Bucket(bucketKeyStorageVersion)
+    if bkt == nil {
+        return errors.Wrap(errdefs.ErrNotFound, "bucket does not exist")
+    }
+    return fn(ctx, bkt.Bucket(bucketKeySnapshot), bkt.Bucket(bucketKeyParents))
+}
+
+func createBucketIfNotExists(ctx context.Context, fn func(context.Context, *bolt.Bucket, *bolt.Bucket) error) error {
+    tx, ok := ctx.Value(transactionKey{}).(*bolt.Tx)
+    if !ok {
+        return ErrNoTransaction
+    }
+
+    bkt, err := tx.CreateBucketIfNotExists(bucketKeyStorageVersion)
+    if err != nil {
+        return errors.Wrap(err, "failed to create version bucket")
+    }
+    sbkt, err := bkt.CreateBucketIfNotExists(bucketKeySnapshot)
+    if err != nil {
+        return errors.Wrap(err, "failed to create snapshots bucket")
+    }
+    pbkt, err := bkt.CreateBucketIfNotExists(bucketKeyParents)
+    if err != nil {
+        return errors.Wrap(err, "failed to create parents bucket")
+    }
+    return fn(ctx, sbkt, pbkt)
+}
+
+func parents(bkt, pbkt *bolt.Bucket, parent uint64) (parents []string, err error) {
+    for {
+        parents = append(parents, fmt.Sprintf("%d", parent))
+
+        parentKey := pbkt.Get(bucketKeyParent)
+        if len(parentKey) == 0 {
+            return
+        }
+        pbkt = bkt.Bucket(parentKey)
+        if pbkt == nil {
+            return nil, errors.Wrap(errdefs.ErrNotFound, "missing parent")
+        }
+
+        parent = readID(pbkt)
+    }
+}
+
+func readKind(bkt *bolt.Bucket) (k snapshots.Kind) {
+    kind := bkt.Get(bucketKeyKind)
+    if len(kind) == 1 {
+        k = snapshots.Kind(kind[0])
+    }
+    return
+}
+
+func readID(bkt *bolt.Bucket) uint64 {
+    id, _ := binary.Uvarint(bkt.Get(bucketKeyID))
+    return id
+}
+
+func readSnapshot(bkt *bolt.Bucket, id *uint64, si *snapshots.Info) error {
+    if id != nil {
+        *id = readID(bkt)
+    }
+    if si != nil {
+        si.Kind = readKind(bkt)
+        si.Parent = string(bkt.Get(bucketKeyParent))
+
+        if err := boltutil.ReadTimestamps(bkt, &si.Created, &si.Updated); err != nil {
+            return err
+        }
+
+        labels, err := boltutil.ReadLabels(bkt)
+        if err != nil {
+            return err
+        }
+        si.Labels = labels
+    }
+
+    return nil
+}
+
+func putSnapshot(bkt *bolt.Bucket, id uint64, si snapshots.Info) error {
+    idEncoded, err := encodeID(id)
+    if err != nil {
+        return err
+    }
+
+    updates := [][2][]byte{
+        {bucketKeyID, idEncoded},
+        {bucketKeyKind, []byte{byte(si.Kind)}},
+    }
+    if si.Parent != "" {
+        updates = append(updates, [2][]byte{bucketKeyParent, []byte(si.Parent)})
+    }
+    for _, v := range updates {
+        if err := bkt.Put(v[0], v[1]); err != nil {
+            return err
+        }
+    }
+    if err := boltutil.WriteTimestamps(bkt, si.Created, si.Updated); err != nil {
+        return err
+    }
+    return boltutil.WriteLabels(bkt, si.Labels)
+}
+
+func getUsage(bkt *bolt.Bucket, usage *snapshots.Usage) {
+    usage.Inodes, _ = binary.Varint(bkt.Get(bucketKeyInodes))
+    usage.Size, _ = binary.Varint(bkt.Get(bucketKeySize))
+}
+
+func putUsage(bkt *bolt.Bucket, usage snapshots.Usage) error {
+    for _, v := range []struct {
+        key   []byte
+        value int64
+    }{
+        {bucketKeyInodes, usage.Inodes},
+        {bucketKeySize, usage.Size},
+    } {
+        e, err := encodeSize(v.value)
+        if err != nil {
+            return err
+        }
+        if err := bkt.Put(v.key, e); err != nil {
+            return err
+        }
+    }
+    return nil
+}
+
+func encodeSize(size int64) ([]byte, error) {
+    var (
+        buf         [binary.MaxVarintLen64]byte
+        sizeEncoded = buf[:]
+    )
+    sizeEncoded = sizeEncoded[:binary.PutVarint(sizeEncoded, size)]
+
+    if len(sizeEncoded) == 0 {
+        return nil, fmt.Errorf("failed encoding size = %v", size)
+    }
+    return sizeEncoded, nil
+}
+
+func encodeID(id uint64) ([]byte, error) {
+    var (
+        buf       [binary.MaxVarintLen64]byte
+        idEncoded = buf[:]
+    )
+    idEncoded = idEncoded[:binary.PutUvarint(idEncoded, id)]
+
+    if len(idEncoded) == 0 {
+        return nil, fmt.Errorf("failed encoding id = %v", id)
+    }
+    return idEncoded, nil
+}
+
+func adaptSnapshot(info snapshots.Info) filters.Adaptor {
+    return filters.AdapterFunc(func(fieldpath []string) (string, bool) {
+        if len(fieldpath) == 0 {
+            return "", false
+        }
+
+        switch fieldpath[0] {
+        case "kind":
+            switch info.Kind {
+            case snapshots.KindActive:
+                return "active", true
+            case snapshots.KindView:
+                return "view", true
+            case snapshots.KindCommitted:
+                return "committed", true
+            }
+        case "name":
+            return info.Name, true
+        case "parent":
+            return info.Parent, true
+        case "labels":
+            if len(info.Labels) == 0 {
+                return "", false
+            }
+
+            v, ok := info.Labels[strings.Join(fieldpath[1:], ".")]
+            return v, ok
+        }
+
+        return "", false
+    })
+}
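The metastore added below wraps these helpers in a BoltDB-backed transaction
context. As a rough sketch of how a snapshotter driver is expected to combine
the two, a driver's Stat might look like this (an illustration, not part of the
vendored code; it assumes the driver holds ms *storage.MetaStore):

    func (o *snapshotter) Stat(ctx context.Context, key string) (snapshots.Info, error) {
        ctx, t, err := o.ms.TransactionContext(ctx, false)
        if err != nil {
            return snapshots.Info{}, err
        }
        defer t.Rollback() // non-writable transactions must be rolled back

        _, info, _, err := storage.GetInfo(ctx, key)
        if err != nil {
            return snapshots.Info{}, err
        }
        return info, nil
    }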
diff --git a/vendor/github.com/containerd/containerd/snapshots/storage/metastore.go b/vendor/github.com/containerd/containerd/snapshots/storage/metastore.go
new file mode 100644
index 00000000..69ba3ea9
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/snapshots/storage/metastore.go
@@ -0,0 +1,115 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+// Package storage provides a metadata storage implementation for snapshot
+// drivers. Driver implementations are responsible for starting and managing
+// transactions using the defined context creator. This storage package uses
+// BoltDB for storing metadata. Access to the raw boltdb transaction is not
+// provided, but the stored object is provided by the proto subpackage.
+package storage
+
+import (
+    "context"
+    "sync"
+
+    "github.com/containerd/containerd/snapshots"
+    "github.com/pkg/errors"
+    bolt "go.etcd.io/bbolt"
+)
+
+// Transactor is used to finalize an active transaction.
+type Transactor interface {
+    // Commit commits any changes made during the transaction. On error a
+    // caller is expected to clean up any resources which would have relied
+    // on data mutated as part of this transaction. Only writable
+    // transactions can commit; non-writable transactions must call Rollback.
+    Commit() error
+
+    // Rollback rolls back any changes made during the transaction. This
+    // must be called on all non-writable transactions and aborted writable
+    // transactions.
+    Rollback() error
+}
+
+// Snapshot holds the metadata for an active or view snapshot transaction. The
+// ParentIDs hold the snapshot identifiers for the committed snapshots this
+// active or view is based on. The ParentIDs are ordered from the lowest base
+// to highest, meaning they should be applied in order from the first index to
+// the last index. The last index should always be considered the active
+// snapshot's immediate parent.
+type Snapshot struct {
+    Kind      snapshots.Kind
+    ID        string
+    ParentIDs []string
+}
+
+// MetaStore is used to store metadata related to a snapshot driver. The
+// MetaStore is intended to store metadata related to name, state and
+// parentage. Using the MetaStore is not required to implement a snapshot
+// driver but can be used to handle the persistence and transactional
+// complexities of a driver implementation.
+type MetaStore struct {
+    dbfile string
+
+    dbL sync.Mutex
+    db  *bolt.DB
+}
+
+// NewMetaStore returns a snapshot MetaStore for storage of metadata related to
+// a snapshot driver backed by a bolt file database. This implementation is
+// strongly consistent and performs all metadata changes in a transaction to
+// prevent process crashes from causing inconsistent metadata state.
+func NewMetaStore(dbfile string) (*MetaStore, error) {
+    return &MetaStore{
+        dbfile: dbfile,
+    }, nil
+}
+
+type transactionKey struct{}
+
+// TransactionContext creates a new transaction context. The writable value
+// should be set to true for transactions which are expected to mutate data.
+func (ms *MetaStore) TransactionContext(ctx context.Context, writable bool) (context.Context, Transactor, error) { + ms.dbL.Lock() + if ms.db == nil { + db, err := bolt.Open(ms.dbfile, 0600, nil) + if err != nil { + ms.dbL.Unlock() + return ctx, nil, errors.Wrap(err, "failed to open database file") + } + ms.db = db + } + ms.dbL.Unlock() + + tx, err := ms.db.Begin(writable) + if err != nil { + return ctx, nil, errors.Wrap(err, "failed to start transaction") + } + + ctx = context.WithValue(ctx, transactionKey{}, tx) + + return ctx, tx, nil +} + +// Close closes the metastore and any underlying database connections +func (ms *MetaStore) Close() error { + ms.dbL.Lock() + defer ms.dbL.Unlock() + if ms.db == nil { + return nil + } + return ms.db.Close() +} diff --git a/vendor/github.com/containerd/containerd/sys/mount_linux.go b/vendor/github.com/containerd/containerd/sys/mount_linux.go index a2104552..a9eee9b7 100644 --- a/vendor/github.com/containerd/containerd/sys/mount_linux.go +++ b/vendor/github.com/containerd/containerd/sys/mount_linux.go @@ -21,7 +21,6 @@ import ( "syscall" "unsafe" - "github.com/containerd/containerd/log" "github.com/pkg/errors" "golang.org/x/sys/unix" ) @@ -31,8 +30,9 @@ func FMountat(dirfd uintptr, source, target, fstype string, flags uintptr, data var ( sourceP, targetP, fstypeP, dataP *byte pid uintptr + ws unix.WaitStatus err error - errno, status syscall.Errno + errno syscall.Errno ) sourceP, err = syscall.BytePtrFromString(source) @@ -60,62 +60,37 @@ func FMountat(dirfd uintptr, source, target, fstype string, flags uintptr, data runtime.LockOSThread() defer runtime.UnlockOSThread() - var pipefds [2]int - if err := syscall.Pipe2(pipefds[:], syscall.O_CLOEXEC); err != nil { - return errors.Wrap(err, "failed to open pipe") - } - - defer func() { - // close both ends of the pipe in a deferred function, since open file - // descriptor table is shared with child - syscall.Close(pipefds[0]) - syscall.Close(pipefds[1]) - }() - pid, errno = forkAndMountat(dirfd, uintptr(unsafe.Pointer(sourceP)), uintptr(unsafe.Pointer(targetP)), uintptr(unsafe.Pointer(fstypeP)), flags, - uintptr(unsafe.Pointer(dataP)), - pipefds[1], - ) + uintptr(unsafe.Pointer(dataP))) if errno != 0 { return errors.Wrap(errno, "failed to fork thread") } - defer func() { - _, err := unix.Wait4(int(pid), nil, 0, nil) - for err == syscall.EINTR { - _, err = unix.Wait4(int(pid), nil, 0, nil) - } + _, err = unix.Wait4(int(pid), &ws, 0, nil) + for err == syscall.EINTR { + _, err = unix.Wait4(int(pid), &ws, 0, nil) + } - if err != nil { - log.L.WithError(err).Debugf("failed to find pid=%d process", pid) - } - }() + if err != nil { + return errors.Wrapf(err, "failed to find pid=%d process", pid) + } - _, _, errno = syscall.RawSyscall(syscall.SYS_READ, - uintptr(pipefds[0]), - uintptr(unsafe.Pointer(&status)), - unsafe.Sizeof(status)) + errno = syscall.Errno(ws.ExitStatus()) if errno != 0 { - return errors.Wrap(errno, "failed to read pipe") + return errors.Wrap(errno, "failed to mount") } - - if status != 0 { - return errors.Wrap(status, "failed to mount") - } - return nil } // forkAndMountat will fork thread, change working dir and mount. // // precondition: the runtime OS thread must be locked. 
-func forkAndMountat(dirfd uintptr, source, target, fstype, flags, data uintptr, pipefd int) (pid uintptr, errno syscall.Errno) { - +func forkAndMountat(dirfd uintptr, source, target, fstype, flags, data uintptr) (pid uintptr, errno syscall.Errno) { // block signal during clone beforeFork() @@ -139,7 +114,6 @@ func forkAndMountat(dirfd uintptr, source, target, fstype, flags, data uintptr, _, _, errno = syscall.RawSyscall6(syscall.SYS_MOUNT, source, target, fstype, flags, data, 0) childerr: - _, _, errno = syscall.RawSyscall(syscall.SYS_WRITE, uintptr(pipefd), uintptr(unsafe.Pointer(&errno)), unsafe.Sizeof(errno)) syscall.RawSyscall(syscall.SYS_EXIT, uintptr(errno), 0, 0) panic("unreachable") } diff --git a/vendor/github.com/containerd/containerd/sys/socket_unix.go b/vendor/github.com/containerd/containerd/sys/socket_unix.go index 90fa55c4..b67cc1fa 100644 --- a/vendor/github.com/containerd/containerd/sys/socket_unix.go +++ b/vendor/github.com/containerd/containerd/sys/socket_unix.go @@ -68,11 +68,11 @@ func GetLocalListener(path string, uid, gid int) (net.Listener, error) { } func mkdirAs(path string, uid, gid int) error { - if _, err := os.Stat(path); err == nil || !os.IsNotExist(err) { + if _, err := os.Stat(path); !os.IsNotExist(err) { return err } - if err := os.Mkdir(path, 0770); err != nil { + if err := os.MkdirAll(path, 0770); err != nil { return err } diff --git a/vendor/github.com/containerd/containerd/version/version.go b/vendor/github.com/containerd/containerd/version/version.go new file mode 100644 index 00000000..aa250aa5 --- /dev/null +++ b/vendor/github.com/containerd/containerd/version/version.go @@ -0,0 +1,34 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package version + +import "runtime" + +var ( + // Package is filled at linking time + Package = "github.com/containerd/containerd" + + // Version holds the complete version number. Filled in at linking time. + Version = "1.3.0+unknown" + + // Revision is filled with the VCS (e.g. git) revision being used to build + // the program at linking time. + Revision = "" + + // GoVersion is Go tree's version. 
+ GoVersion = runtime.Version() +) diff --git a/vendor/github.com/containerd/go-cni/.travis.yml b/vendor/github.com/containerd/go-cni/.travis.yml new file mode 100644 index 00000000..48fbb6dc --- /dev/null +++ b/vendor/github.com/containerd/go-cni/.travis.yml @@ -0,0 +1,23 @@ +language: go +go: + - 1.12.x + - tip + +go_import_path: github.com/containerd/go-cni + +install: + - go get -d + - env GO111MODULE=off go get -u github.com/vbatts/git-validation + - env GO111MODULE=off go get -u github.com/kunalkushwaha/ltag + +before_script: + - pushd ..; git clone https://github.com/containerd/project; popd + +script: + - DCO_VERBOSITY=-q ../project/script/validate/dco + - ../project/script/validate/fileheader ../project/ + - env GO111MODULE=on ../project/script/validate/vendor + - go test -race -coverprofile=coverage.txt -covermode=atomic + +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/containerd/go-cni/LICENSE b/vendor/github.com/containerd/go-cni/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/github.com/containerd/go-cni/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/containerd/go-cni/README.md b/vendor/github.com/containerd/go-cni/README.md new file mode 100644 index 00000000..3b1a4aa7 --- /dev/null +++ b/vendor/github.com/containerd/go-cni/README.md @@ -0,0 +1,60 @@ +[![Build Status](https://travis-ci.org/containerd/go-cni.svg?branch=master)](https://travis-ci.org/containerd/go-cni) [![GoDoc](https://godoc.org/github.com/containerd/go-cni?status.svg)](https://godoc.org/github.com/containerd/go-cni) + +# go-cni + +A generic CNI library to provide APIs for CNI plugin interactions. 
The library provides APIs to:

+
+- Load CNI network config from different sources
+- Set up networks for a container namespace
+- Remove networks from a container namespace
+- Query status of CNI network plugin initialization
+
+go-cni aims to support plugins that implement the [Container Network Interface](https://github.com/containernetworking/cni).
+
+## Usage
+```go
+package main
+
+import (
+	"context"
+	"fmt"
+	"log"
+
+	gocni "github.com/containerd/go-cni"
+)
+
+func main() {
+	id := "123456"
+	netns := "/proc/9999/ns/net"
+	defaultIfName := "eth0"
+
+	// Initialize library
+	l, err := gocni.New(gocni.WithMinNetworkCount(2),
+		gocni.WithPluginConfDir("/etc/mycni/net.d"),
+		gocni.WithPluginDir([]string{"/opt/mycni/bin", "/opt/cni/bin"}),
+		gocni.WithDefaultIfName(defaultIfName))
+	if err != nil {
+		log.Fatalf("failed to initialize cni library: %v", err)
+	}
+
+	// Load the cni configuration
+	if err := l.Load(gocni.WithLoNetwork, gocni.WithDefaultConf); err != nil {
+		log.Fatalf("failed to load cni configuration: %v", err)
+	}
+
+	// Setup network for namespace.
+	labels := map[string]string{
+		"K8S_POD_NAMESPACE":          "namespace1",
+		"K8S_POD_NAME":               "pod1",
+		"K8S_POD_INFRA_CONTAINER_ID": id,
+	}
+	result, err := l.Setup(context.Background(), id, netns, gocni.WithLabels(labels))
+	if err != nil {
+		log.Fatalf("failed to setup network for namespace %q: %v", id, err)
+	}
+
+	// Get IP of the default interface
+	ip := result.Interfaces[defaultIfName].IPConfigs[0].IP.String()
+	fmt.Printf("IP of the default interface %s: %s\n", defaultIfName, ip)
+}
+```
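+
+When the container is torn down, the same namespace information is passed to
+`Remove` (a sketch continuing the example above):
+
+```go
+// Teardown the network of the namespace.
+if err := l.Remove(context.Background(), id, netns, gocni.WithLabels(labels)); err != nil {
+	log.Fatalf("failed to teardown network for namespace %q: %v", id, err)
+}
+```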
+
+## Project details
+
+The go-cni is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE).
+As a containerd sub-project, you will find the:
+
+ * [Project governance](https://github.com/containerd/project/blob/master/GOVERNANCE.md),
+ * [Maintainers](https://github.com/containerd/project/blob/master/MAINTAINERS),
+ * and [Contributing guidelines](https://github.com/containerd/project/blob/master/CONTRIBUTING.md)
+
+information in our [`containerd/project`](https://github.com/containerd/project) repository.
diff --git a/vendor/github.com/containerd/go-cni/cni.go b/vendor/github.com/containerd/go-cni/cni.go
new file mode 100644
index 00000000..8acc83b3
--- /dev/null
+++ b/vendor/github.com/containerd/go-cni/cni.go
@@ -0,0 +1,220 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package cni
+
+import (
+    "context"
+    "fmt"
+    "strings"
+    "sync"
+
+    cnilibrary "github.com/containernetworking/cni/libcni"
+    "github.com/containernetworking/cni/pkg/types"
+    "github.com/containernetworking/cni/pkg/types/current"
+    "github.com/pkg/errors"
+)
+
+type CNI interface {
+    // Setup sets up the network for the namespace
+    Setup(ctx context.Context, id string, path string, opts ...NamespaceOpts) (*CNIResult, error)
+    // Remove tears down the network of the namespace.
+    Remove(ctx context.Context, id string, path string, opts ...NamespaceOpts) error
+    // Load loads the cni network config
+    Load(opts ...CNIOpt) error
+    // Status checks the status of the cni initialization
+    Status() error
+    // GetConfig returns a copy of the CNI plugin configurations as parsed by CNI
+    GetConfig() *ConfigResult
+}
+
+type ConfigResult struct {
+    PluginDirs       []string
+    PluginConfDir    string
+    PluginMaxConfNum int
+    Prefix           string
+    Networks         []*ConfNetwork
+}
+
+type ConfNetwork struct {
+    Config *NetworkConfList
+    IFName string
+}
+
+// NetworkConfList is a source bytes to string version of cnilibrary.NetworkConfigList
+type NetworkConfList struct {
+    Name       string
+    CNIVersion string
+    Plugins    []*NetworkConf
+    Source     string
+}
+
+// NetworkConf is a source bytes to string conversion of cnilibrary.NetworkConfig
+type NetworkConf struct {
+    Network *types.NetConf
+    Source  string
+}
+
+type libcni struct {
+    config
+
+    cniConfig    cnilibrary.CNI
+    networkCount int // minimum network plugin configurations needed to initialize cni
+    networks     []*Network
+    sync.RWMutex
+}
+
+func defaultCNIConfig() *libcni {
+    return &libcni{
+        config: config{
+            pluginDirs:       []string{DefaultCNIDir},
+            pluginConfDir:    DefaultNetDir,
+            pluginMaxConfNum: DefaultMaxConfNum,
+            prefix:           DefaultPrefix,
+        },
+        cniConfig: &cnilibrary.CNIConfig{
+            Path: []string{DefaultCNIDir},
+        },
+        networkCount: 1,
+    }
+}
+
+// New creates a new libcni instance.
+func New(config ...CNIOpt) (CNI, error) {
+    cni := defaultCNIConfig()
+    var err error
+    for _, c := range config {
+        if err = c(cni); err != nil {
+            return nil, err
+        }
+    }
+    return cni, nil
+}
+
+// Load loads the latest config from cni config files.
+func (c *libcni) Load(opts ...CNIOpt) error {
+    var err error
+    c.Lock()
+    defer c.Unlock()
+    // Reset the networks on a load operation to ensure
+    // config happens on a clean slate
+    c.reset()
+
+    for _, o := range opts {
+        if err = o(c); err != nil {
+            return errors.Wrapf(ErrLoad, fmt.Sprintf("cni config load failed: %v", err))
+        }
+    }
+    return nil
+}
+
+// Status returns the status of CNI initialization.
+func (c *libcni) Status() error {
+    c.RLock()
+    defer c.RUnlock()
+    if len(c.networks) < c.networkCount {
+        return ErrCNINotInitialized
+    }
+    return nil
+}
+
+// Networks returns all the configured networks.
+// NOTE: Caller MUST NOT modify anything in the returned array.
+func (c *libcni) Networks() []*Network {
+    c.RLock()
+    defer c.RUnlock()
+    return append([]*Network{}, c.networks...)
+}
+
+// Setup sets up the network in the namespace
+func (c *libcni) Setup(ctx context.Context, id string, path string, opts ...NamespaceOpts) (*CNIResult, error) {
+    if err := c.Status(); err != nil {
+        return nil, err
+    }
+    ns, err := newNamespace(id, path, opts...)
+    if err != nil {
+        return nil, err
+    }
+    var results []*current.Result
+    for _, network := range c.Networks() {
+        r, err := network.Attach(ctx, ns)
+        if err != nil {
+            return nil, err
+        }
+        results = append(results, r)
+    }
+    return c.GetCNIResultFromResults(results)
+}
+
+// Remove removes the network config from the namespace
+func (c *libcni) Remove(ctx context.Context, id string, path string, opts ...NamespaceOpts) error {
+    if err := c.Status(); err != nil {
+        return err
+    }
+    ns, err := newNamespace(id, path, opts...)
+    if err != nil {
+        return err
+    }
+    for _, network := range c.Networks() {
+        if err := network.Remove(ctx, ns); err != nil {
+            // Based on CNI spec v0.7.0, empty network namespace is allowed to
+            // do best effort cleanup.
However, it is not handled consistently + // right now: + // https://github.com/containernetworking/plugins/issues/210 + // TODO(random-liu): Remove the error handling when the issue is + // fixed and the CNI spec v0.6.0 support is deprecated. + if path == "" && strings.Contains(err.Error(), "no such file or directory") { + continue + } + return err + } + } + return nil +} + +// GetConfig returns a copy of the CNI plugin configurations as parsed by CNI +func (c *libcni) GetConfig() *ConfigResult { + c.RLock() + defer c.RUnlock() + r := &ConfigResult{ + PluginDirs: c.config.pluginDirs, + PluginConfDir: c.config.pluginConfDir, + PluginMaxConfNum: c.config.pluginMaxConfNum, + Prefix: c.config.prefix, + } + for _, network := range c.networks { + conf := &NetworkConfList{ + Name: network.config.Name, + CNIVersion: network.config.CNIVersion, + Source: string(network.config.Bytes), + } + for _, plugin := range network.config.Plugins { + conf.Plugins = append(conf.Plugins, &NetworkConf{ + Network: plugin.Network, + Source: string(plugin.Bytes), + }) + } + r.Networks = append(r.Networks, &ConfNetwork{ + Config: conf, + IFName: network.ifName, + }) + } + return r +} + +func (c *libcni) reset() { + c.networks = nil +} diff --git a/vendor/github.com/containerd/go-cni/errors.go b/vendor/github.com/containerd/go-cni/errors.go new file mode 100644 index 00000000..28761711 --- /dev/null +++ b/vendor/github.com/containerd/go-cni/errors.go @@ -0,0 +1,55 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/
+
+package cni
+
+import (
+    "github.com/pkg/errors"
+)
+
+var (
+    ErrCNINotInitialized = errors.New("cni plugin not initialized")
+    ErrInvalidConfig     = errors.New("invalid cni config")
+    ErrNotFound          = errors.New("not found")
+    ErrRead              = errors.New("failed to read config file")
+    ErrInvalidResult     = errors.New("invalid result")
+    ErrLoad              = errors.New("failed to load cni config")
+)
+
+// IsCNINotInitialized returns true if the error is due to cni config not being initialized
+func IsCNINotInitialized(err error) bool {
+    return errors.Cause(err) == ErrCNINotInitialized
+}
+
+// IsInvalidConfig returns true if the error is invalid cni config
+func IsInvalidConfig(err error) bool {
+    return errors.Cause(err) == ErrInvalidConfig
+}
+
+// IsNotFound returns true if the error is due to a missing config or result
+func IsNotFound(err error) bool {
+    return errors.Cause(err) == ErrNotFound
+}
+
+// IsReadFailure returns true if the error is a config read failure
+func IsReadFailure(err error) bool {
+    return errors.Cause(err) == ErrRead
+}
+
+// IsInvalidResult returns true if the error is due to an invalid cni result
+func IsInvalidResult(err error) bool {
+    return errors.Cause(err) == ErrInvalidResult
+}
diff --git a/vendor/github.com/containerd/go-cni/go.mod b/vendor/github.com/containerd/go-cni/go.mod
new file mode 100644
index 00000000..f3165f4a
--- /dev/null
+++ b/vendor/github.com/containerd/go-cni/go.mod
@@ -0,0 +1,14 @@
+module github.com/containerd/go-cni
+
+require (
+    github.com/containernetworking/cni v0.7.1
+    github.com/davecgh/go-spew v1.1.1 // indirect
+    github.com/onsi/ginkgo v1.10.3 // indirect
+    github.com/onsi/gomega v1.7.1 // indirect
+    github.com/pkg/errors v0.8.0
+    github.com/pmezard/go-difflib v1.0.0 // indirect
+    github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f // indirect
+    github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d
+)
+
+go 1.12
diff --git a/vendor/github.com/containerd/go-cni/go.sum b/vendor/github.com/containerd/go-cni/go.sum
new file mode 100644
index 00000000..a0890f99
--- /dev/null
+++ b/vendor/github.com/containerd/go-cni/go.sum
@@ -0,0 +1,39 @@
+github.com/containernetworking/cni v0.7.1 h1:fE3r16wpSEyaqY4Z4oFrLMmIGfBYIKpPrHK31EJ9FzE=
+github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.10.3 h1:OoxbjfXVZyod1fmWYhI7SEyaD8B00ynP3T+D5GiyHOY=
+github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/gomega v1.7.1 h1:K0jcRCwNQM3vFGh1ppMtDh/+7ApJrjldlX8fA0jDTLQ=
+github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
+github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw=
+github.com/pkg/errors v0.8.0/go.mod 
h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f h1:SrOsK2rwonEK9IsdNEU61zcTdKW68/PuV9wuHHpqngk= +github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d h1:YCdGqZILKLGzbyEYbdau30JBEXbKaKYmkBDU5JUW3D0= +github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e h1:o3PsSEY8E4eXWkXrIP9YJALUkVZqzHJT5DOasTyn8Vs= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/containerd/go-cni/helper.go b/vendor/github.com/containerd/go-cni/helper.go new file mode 100644 index 00000000..088cb9bc --- /dev/null +++ b/vendor/github.com/containerd/go-cni/helper.go @@ -0,0 +1,41 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package cni + +import ( + "fmt" + + "github.com/containernetworking/cni/pkg/types/current" +) + +func validateInterfaceConfig(ipConf *current.IPConfig, ifs int) error { + if ipConf == nil { + return fmt.Errorf("invalid IP configuration (nil)") + } + if ipConf.Interface != nil && *ipConf.Interface > ifs { + return fmt.Errorf("invalid IP configuration (interface number %d is > number of interfaces %d)", *ipConf.Interface, ifs) + } + return nil +} + +func getIfName(prefix string, i int) string { + return fmt.Sprintf("%s%d", prefix, i) +} + +func defaultInterface(prefix string) string { + return getIfName(prefix, 0) +} diff --git a/vendor/github.com/containerd/go-cni/namespace.go b/vendor/github.com/containerd/go-cni/namespace.go new file mode 100644 index 00000000..ff14b01c --- /dev/null +++ b/vendor/github.com/containerd/go-cni/namespace.go @@ -0,0 +1,77 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package cni + +import ( + "context" + + cnilibrary "github.com/containernetworking/cni/libcni" + "github.com/containernetworking/cni/pkg/types/current" +) + +type Network struct { + cni cnilibrary.CNI + config *cnilibrary.NetworkConfigList + ifName string +} + +func (n *Network) Attach(ctx context.Context, ns *Namespace) (*current.Result, error) { + r, err := n.cni.AddNetworkList(ctx, n.config, ns.config(n.ifName)) + if err != nil { + return nil, err + } + return current.NewResultFromResult(r) +} + +func (n *Network) Remove(ctx context.Context, ns *Namespace) error { + return n.cni.DelNetworkList(ctx, n.config, ns.config(n.ifName)) +} + +type Namespace struct { + id string + path string + capabilityArgs map[string]interface{} + args map[string]string +} + +func newNamespace(id, path string, opts ...NamespaceOpts) (*Namespace, error) { + ns := &Namespace{ + id: id, + path: path, + capabilityArgs: make(map[string]interface{}), + args: make(map[string]string), + } + for _, o := range opts { + if err := o(ns); err != nil { + return nil, err + } + } + return ns, nil +} + +func (ns *Namespace) config(ifName string) *cnilibrary.RuntimeConf { + c := &cnilibrary.RuntimeConf{ + ContainerID: ns.id, + NetNS: ns.path, + IfName: ifName, + } + for k, v := range ns.args { + c.Args = append(c.Args, [2]string{k, v}) + } + c.CapabilityArgs = ns.capabilityArgs + return c +} diff --git a/vendor/github.com/containerd/go-cni/namespace_opts.go b/vendor/github.com/containerd/go-cni/namespace_opts.go new file mode 100644 index 00000000..1fad5f69 --- /dev/null +++ b/vendor/github.com/containerd/go-cni/namespace_opts.go @@ -0,0 +1,75 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package cni
+
+type NamespaceOpts func(s *Namespace) error
+
+// Capabilities
+func WithCapabilityPortMap(portMapping []PortMapping) NamespaceOpts {
+	return func(c *Namespace) error {
+		c.capabilityArgs["portMappings"] = portMapping
+		return nil
+	}
+}
+
+func WithCapabilityIPRanges(ipRanges []IPRanges) NamespaceOpts {
+	return func(c *Namespace) error {
+		c.capabilityArgs["ipRanges"] = ipRanges
+		return nil
+	}
+}
+
+// WithCapabilityBandWidth adds support for traffic shaping:
+// https://github.com/heptio/cni-plugins/tree/master/plugins/meta/bandwidth
+func WithCapabilityBandWidth(bandWidth BandWidth) NamespaceOpts {
+	return func(c *Namespace) error {
+		c.capabilityArgs["bandwidth"] = bandWidth
+		return nil
+	}
+}
+
+// WithCapabilityDNS adds support for DNS
+func WithCapabilityDNS(dns DNS) NamespaceOpts {
+	return func(c *Namespace) error {
+		c.capabilityArgs["dns"] = dns
+		return nil
+	}
+}
+
+func WithCapability(name string, capability interface{}) NamespaceOpts {
+	return func(c *Namespace) error {
+		c.capabilityArgs[name] = capability
+		return nil
+	}
+}
+
+// Args
+func WithLabels(labels map[string]string) NamespaceOpts {
+	return func(c *Namespace) error {
+		for k, v := range labels {
+			c.args[k] = v
+		}
+		return nil
+	}
+}
+
+func WithArgs(k, v string) NamespaceOpts {
+	return func(c *Namespace) error {
+		c.args[k] = v
+		return nil
+	}
+}
diff --git a/vendor/github.com/containerd/go-cni/opts.go b/vendor/github.com/containerd/go-cni/opts.go
new file mode 100644
index 00000000..1dd7869a
--- /dev/null
+++ b/vendor/github.com/containerd/go-cni/opts.go
@@ -0,0 +1,263 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package cni
+
+import (
+	"sort"
+	"strings"
+
+	cnilibrary "github.com/containernetworking/cni/libcni"
+	"github.com/pkg/errors"
+)
+
+type CNIOpt func(c *libcni) error
+
+// WithInterfacePrefix sets the prefix for network interfaces,
+// e.g. eth or wlan
+func WithInterfacePrefix(prefix string) CNIOpt {
+	return func(c *libcni) error {
+		c.prefix = prefix
+		return nil
+	}
+}
+
+// WithPluginDir can be used to set the locations of
+// the cni plugin binaries
+func WithPluginDir(dirs []string) CNIOpt {
+	return func(c *libcni) error {
+		c.pluginDirs = dirs
+		c.cniConfig = &cnilibrary.CNIConfig{Path: dirs}
+		return nil
+	}
+}
+
+// WithPluginConfDir can be used to configure the
+// cni configuration directory.
+func WithPluginConfDir(dir string) CNIOpt {
+	return func(c *libcni) error {
+		c.pluginConfDir = dir
+		return nil
+	}
+}
+
+// WithPluginMaxConfNum can be used to configure the
+// maximum number of cni plugin config files to load.
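+// A value <= 0 is treated as no limit (see loadFromConfDir).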
+func WithPluginMaxConfNum(max int) CNIOpt {
+	return func(c *libcni) error {
+		c.pluginMaxConfNum = max
+		return nil
+	}
+}
+
+// WithMinNetworkCount can be used to configure the
+// minimum number of networks that must be configured and
+// initialized for the status to report success. By default it is 1.
+func WithMinNetworkCount(count int) CNIOpt {
+	return func(c *libcni) error {
+		c.networkCount = count
+		return nil
+	}
+}
+
+// WithLoNetwork can be used to load the loopback
+// network config.
+func WithLoNetwork(c *libcni) error {
+	loConfig, _ := cnilibrary.ConfListFromBytes([]byte(`{
+"cniVersion": "0.3.1",
+"name": "cni-loopback",
+"plugins": [{
+  "type": "loopback"
+}]
+}`))
+
+	c.networks = append(c.networks, &Network{
+		cni:    c.cniConfig,
+		config: loConfig,
+		ifName: "lo",
+	})
+	return nil
+}
+
+// WithConf can be used to load config directly
+// from bytes.
+func WithConf(bytes []byte) CNIOpt {
+	return WithConfIndex(bytes, 0)
+}
+
+// WithConfIndex can be used to load config directly
+// from bytes and set the index used for the interface name.
+func WithConfIndex(bytes []byte, index int) CNIOpt {
+	return func(c *libcni) error {
+		conf, err := cnilibrary.ConfFromBytes(bytes)
+		if err != nil {
+			return err
+		}
+		confList, err := cnilibrary.ConfListFromConf(conf)
+		if err != nil {
+			return err
+		}
+		c.networks = append(c.networks, &Network{
+			cni:    c.cniConfig,
+			config: confList,
+			ifName: getIfName(c.prefix, index),
+		})
+		return nil
+	}
+}
+
+// WithConfFile can be used to load network config
+// from a .conf file. Only an absolute file path is
+// supported.
+func WithConfFile(fileName string) CNIOpt {
+	return func(c *libcni) error {
+		conf, err := cnilibrary.ConfFromFile(fileName)
+		if err != nil {
+			return err
+		}
+		// upconvert to conf list
+		confList, err := cnilibrary.ConfListFromConf(conf)
+		if err != nil {
+			return err
+		}
+		c.networks = append(c.networks, &Network{
+			cni:    c.cniConfig,
+			config: confList,
+			ifName: getIfName(c.prefix, 0),
+		})
+		return nil
+	}
+}
+
+// WithConfListBytes can be used to load a network config list directly
+// from bytes.
+func WithConfListBytes(bytes []byte) CNIOpt {
+	return func(c *libcni) error {
+		confList, err := cnilibrary.ConfListFromBytes(bytes)
+		if err != nil {
+			return err
+		}
+		i := len(c.networks)
+		c.networks = append(c.networks, &Network{
+			cni:    c.cniConfig,
+			config: confList,
+			ifName: getIfName(c.prefix, i),
+		})
+		return nil
+	}
+}
+
+// WithConfListFile can be used to load network config
+// from a .conflist file. Only an absolute file path is
+// supported.
+func WithConfListFile(fileName string) CNIOpt {
+	return func(c *libcni) error {
+		confList, err := cnilibrary.ConfListFromFile(fileName)
+		if err != nil {
+			return err
+		}
+		i := len(c.networks)
+		c.networks = append(c.networks, &Network{
+			cni:    c.cniConfig,
+			config: confList,
+			ifName: getIfName(c.prefix, i),
+		})
+		return nil
+	}
+}
+
+// WithDefaultConf can be used to detect the default network
+// config file from the configured cni config directory and load
+// it.
+// Since the CNI spec does not specify a way to detect default networks,
+// the convention chosen is to treat the first network configuration in
+// the sorted list of network conf files as the default network.
+func WithDefaultConf(c *libcni) error {
+	return loadFromConfDir(c, c.pluginMaxConfNum)
+}
+
+// WithAllConf can be used to detect all network config
+// files from the configured cni config directory and load
+// them.
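+// Unlike WithDefaultConf, the number of loaded config files
+// is not capped by pluginMaxConfNum.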
+func WithAllConf(c *libcni) error {
+	return loadFromConfDir(c, 0)
+}
+
+// loadFromConfDir detects network config files in the
+// configured cni config directory and loads them. max is
+// the maximum number of network configs to load (max <= 0 means no limit).
+func loadFromConfDir(c *libcni, max int) error {
+	files, err := cnilibrary.ConfFiles(c.pluginConfDir, []string{".conf", ".conflist", ".json"})
+	switch {
+	case err != nil:
+		return errors.Wrapf(ErrRead, "failed to read config file: %v", err)
+	case len(files) == 0:
+		return errors.Wrapf(ErrCNINotInitialized, "no network config found in %s", c.pluginConfDir)
+	}
+
+	// files contains the network config files associated with the cni network.
+	// Sort them lexicographically to get a deterministic load order.
+	sort.Strings(files)
+	// Since the CNI spec does not specify a way to detect default networks,
+	// the convention chosen is - the first network configuration in the sorted
+	// list of network conf files as the default network and choose the default
+	// interface provided during init as the network interface for this default
+	// network. For every other network use a generated interface id.
+	i := 0
+	var networks []*Network
+	for _, confFile := range files {
+		var confList *cnilibrary.NetworkConfigList
+		if strings.HasSuffix(confFile, ".conflist") {
+			confList, err = cnilibrary.ConfListFromFile(confFile)
+			if err != nil {
+				return errors.Wrapf(ErrInvalidConfig, "failed to load CNI config list file %s: %v", confFile, err)
+			}
+		} else {
+			conf, err := cnilibrary.ConfFromFile(confFile)
+			if err != nil {
+				return errors.Wrapf(ErrInvalidConfig, "failed to load CNI config file %s: %v", confFile, err)
+			}
+			// Ensure the config has a "type" so we know what plugin to run.
+			// Also catches the case where somebody put a conflist into a conf file.
+			if conf.Network.Type == "" {
+				return errors.Wrapf(ErrInvalidConfig, "network type not found in %s", confFile)
+			}
+
+			confList, err = cnilibrary.ConfListFromConf(conf)
+			if err != nil {
+				return errors.Wrapf(ErrInvalidConfig, "failed to convert CNI config file %s to CNI config list: %v", confFile, err)
+			}
+		}
+		if len(confList.Plugins) == 0 {
+			return errors.Wrapf(ErrInvalidConfig, "CNI config list in config file %s has no networks", confFile)
+		}
+		networks = append(networks, &Network{
+			cni:    c.cniConfig,
+			config: confList,
+			ifName: getIfName(c.prefix, i),
+		})
+		i++
+		if i == max {
+			break
+		}
+	}
+	if len(networks) == 0 {
+		return errors.Wrapf(ErrCNINotInitialized, "no valid networks found in %s", c.pluginDirs)
+	}
+	c.networks = append(c.networks, networks...)
+	return nil
+}
diff --git a/vendor/github.com/containerd/go-cni/result.go b/vendor/github.com/containerd/go-cni/result.go
new file mode 100644
index 00000000..c2ac9486
--- /dev/null
+++ b/vendor/github.com/containerd/go-cni/result.go
@@ -0,0 +1,106 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package cni
+
+import (
+	"net"
+
+	"github.com/containernetworking/cni/pkg/types"
+	"github.com/containernetworking/cni/pkg/types/current"
+	"github.com/pkg/errors"
+)
+
+type IPConfig struct {
+	IP      net.IP
+	Gateway net.IP
+}
+
+type CNIResult struct {
+	Interfaces map[string]*Config
+	DNS        []types.DNS
+	Routes     []*types.Route
+}
+
+type Config struct {
+	IPConfigs []*IPConfig
+	Mac       string
+	Sandbox   string
+}
+
+// GetCNIResultFromResults returns structured data containing the
+// interface configuration for each of the interfaces created in the namespace.
+// Conforms with the CNI spec's Result:
+// a) Interfaces list. Depending on the plugin, this can include the sandbox
+//    (eg, container or hypervisor) interface name and/or the host interface
+//    name, the hardware addresses of each interface, and details about the
+//    sandbox (if any) the interface is in.
+// b) IP configuration assigned to each interface. The IPv4 and/or IPv6 addresses,
+//    gateways, and routes assigned to sandbox and/or host interfaces.
+// c) DNS information. Dictionary that includes DNS information for nameservers,
+//    domain, search domains and options.
+func (c *libcni) GetCNIResultFromResults(results []*current.Result) (*CNIResult, error) {
+	c.RLock()
+	defer c.RUnlock()
+
+	r := &CNIResult{
+		Interfaces: make(map[string]*Config),
+	}
+
+	// Plugins may not need to return Interfaces in the result when
+	// only a single interface is created. In that case all configs
+	// should be applied against the default interface.
+	r.Interfaces[defaultInterface(c.prefix)] = &Config{}
+
+	// Walk through all the results
+	for _, result := range results {
+		// Walk through all the interfaces in each result
+		for _, intf := range result.Interfaces {
+			r.Interfaces[intf.Name] = &Config{
+				Mac:     intf.Mac,
+				Sandbox: intf.Sandbox,
+			}
+		}
+		// Walk through all the IPs in the result and attach them to the
+		// corresponding interfaces
+		for _, ipConf := range result.IPs {
+			if err := validateInterfaceConfig(ipConf, len(result.Interfaces)); err != nil {
+				return nil, errors.Wrapf(ErrInvalidResult, "invalid interface config: %v", err)
+			}
+			name := c.getInterfaceName(result.Interfaces, ipConf)
+			r.Interfaces[name].IPConfigs = append(r.Interfaces[name].IPConfigs,
+				&IPConfig{IP: ipConf.Address.IP, Gateway: ipConf.Gateway})
+		}
+		r.DNS = append(r.DNS, result.DNS)
+		r.Routes = append(r.Routes, result.Routes...)
+	}
+	if _, ok := r.Interfaces[defaultInterface(c.prefix)]; !ok {
+		return nil, errors.Wrapf(ErrNotFound, "default network not found for: %s", defaultInterface(c.prefix))
+	}
+	return r, nil
+}
+
+// getInterfaceName returns the interface name if the plugin's
+// result includes associated interfaces. If no interface is
+// present then the default interface name is used.
+func (c *libcni) getInterfaceName(interfaces []*current.Interface,
+	ipConf *current.IPConfig) string {
+	if ipConf.Interface != nil {
+		return interfaces[*ipConf.Interface].Name
+	}
+	return defaultInterface(c.prefix)
+}
diff --git a/vendor/github.com/containerd/go-cni/testutils.go b/vendor/github.com/containerd/go-cni/testutils.go
new file mode 100644
index 00000000..d9453c8d
--- /dev/null
+++ b/vendor/github.com/containerd/go-cni/testutils.go
@@ -0,0 +1,78 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package cni + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "testing" +) + +func makeTmpDir(prefix string) (string, error) { + tmpDir, err := ioutil.TempDir(os.TempDir(), prefix) + if err != nil { + return "", err + } + return tmpDir, nil +} + +func makeFakeCNIConfig(t *testing.T) (string, string) { + cniDir, err := makeTmpDir("fakecni") + if err != nil { + t.Fatalf("Failed to create plugin config dir: %v", err) + } + + cniConfDir := path.Join(cniDir, "net.d") + err = os.MkdirAll(cniConfDir, 0777) + if err != nil { + t.Fatalf("Failed to create network config dir: %v", err) + } + + networkConfig1 := path.Join(cniConfDir, "mocknetwork1.conf") + f1, err := os.Create(networkConfig1) + if err != nil { + t.Fatalf("Failed to create network config %v: %v", f1, err) + } + networkConfig2 := path.Join(cniConfDir, "mocknetwork2.conf") + f2, err := os.Create(networkConfig2) + if err != nil { + t.Fatalf("Failed to create network config %v: %v", f2, err) + } + + cfg1 := fmt.Sprintf(`{ "name": "%s", "type": "%s", "capabilities": {"portMappings": true} }`, "plugin1", "fakecni") + _, err = f1.WriteString(cfg1) + if err != nil { + t.Fatalf("Failed to write network config file %v: %v", f1, err) + } + f1.Close() + cfg2 := fmt.Sprintf(`{ "name": "%s", "type": "%s", "capabilities": {"portMappings": true} }`, "plugin2", "fakecni") + _, err = f2.WriteString(cfg2) + if err != nil { + t.Fatalf("Failed to write network config file %v: %v", f2, err) + } + f2.Close() + return cniDir, cniConfDir +} + +func tearDownCNIConfig(t *testing.T, confDir string) { + err := os.RemoveAll(confDir) + if err != nil { + t.Fatalf("Failed to cleanup CNI configs: %v", err) + } +} diff --git a/vendor/github.com/containerd/go-cni/types.go b/vendor/github.com/containerd/go-cni/types.go new file mode 100644 index 00000000..0b7db1ee --- /dev/null +++ b/vendor/github.com/containerd/go-cni/types.go @@ -0,0 +1,65 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package cni + +const ( + CNIPluginName = "cni" + DefaultNetDir = "/etc/cni/net.d" + DefaultCNIDir = "/opt/cni/bin" + DefaultMaxConfNum = 1 + VendorCNIDirTemplate = "%s/opt/%s/bin" + DefaultPrefix = "eth" +) + +type config struct { + pluginDirs []string + pluginConfDir string + pluginMaxConfNum int + prefix string +} + +type PortMapping struct { + HostPort int32 + ContainerPort int32 + Protocol string + HostIP string +} + +type IPRanges struct { + Subnet string + RangeStart string + RangeEnd string + Gateway string +} + +// BandWidth defines the ingress/egress rate and burst limits +type BandWidth struct { + IngressRate uint64 + IngressBurst uint64 + EgressRate uint64 + EgressBurst uint64 +} + +// DNS defines the dns config +type DNS struct { + // List of DNS servers of the cluster. + Servers []string + // List of DNS search domains of the cluster. + Searches []string + // List of DNS options. + Options []string +} diff --git a/vendor/github.com/containerd/go-runc/.travis.yml b/vendor/github.com/containerd/go-runc/.travis.yml new file mode 100644 index 00000000..dd60e9ba --- /dev/null +++ b/vendor/github.com/containerd/go-runc/.travis.yml @@ -0,0 +1,20 @@ +language: go +go: + - 1.12.x + - 1.13.x + +install: + - go get -t ./... + - go get -u github.com/vbatts/git-validation + - go get -u github.com/kunalkushwaha/ltag + +before_script: + - pushd ..; git clone https://github.com/containerd/project; popd + +script: + - DCO_VERBOSITY=-q ../project/script/validate/dco + - ../project/script/validate/fileheader ../project/ + - go test -v -race -covermode=atomic -coverprofile=coverage.txt ./... + +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/containerd/go-runc/LICENSE b/vendor/github.com/containerd/go-runc/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/github.com/containerd/go-runc/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/containerd/go-runc/README.md b/vendor/github.com/containerd/go-runc/README.md
new file mode 100644
index 00000000..c899bdd7
--- /dev/null
+++ b/vendor/github.com/containerd/go-runc/README.md
@@ -0,0 +1,25 @@
+# go-runc
+
+[![Build Status](https://travis-ci.org/containerd/go-runc.svg?branch=master)](https://travis-ci.org/containerd/go-runc)
+[![codecov](https://codecov.io/gh/containerd/go-runc/branch/master/graph/badge.svg)](https://codecov.io/gh/containerd/go-runc)
+
+This is a package for consuming the [runc](https://github.com/opencontainers/runc) binary in your Go applications.
+It tries to expose all the settings and features of the runc CLI. If there is something missing then add it, it's open source!
+
+This needs runc @ [a9610f2c0](https://github.com/opencontainers/runc/commit/a9610f2c0237d2636d05a031ec8659a70e75ffeb)
+or greater.
+
+## Docs
+
+Docs can be found at [godoc.org](https://godoc.org/github.com/containerd/go-runc).
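+
+## Example
+
+A minimal, illustrative sketch of listing containers with this package (it assumes a `runc` binary is on `$PATH`; it is not part of the upstream README):
+
+```go
+package main
+
+import (
+	"context"
+	"fmt"
+
+	runc "github.com/containerd/go-runc"
+)
+
+func main() {
+	// The zero value uses DefaultCommand ("runc") and runc's default root.
+	r := &runc.Runc{}
+	containers, err := r.List(context.Background())
+	if err != nil {
+		panic(err)
+	}
+	for _, c := range containers {
+		fmt.Println(c.ID, c.Status)
+	}
+}
+```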
+ +## Project details + +The go-runc is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE). +As a containerd sub-project, you will find the: + + * [Project governance](https://github.com/containerd/project/blob/master/GOVERNANCE.md), + * [Maintainers](https://github.com/containerd/project/blob/master/MAINTAINERS), + * and [Contributing guidelines](https://github.com/containerd/project/blob/master/CONTRIBUTING.md) + +information in our [`containerd/project`](https://github.com/containerd/project) repository. diff --git a/vendor/github.com/containerd/go-runc/command_linux.go b/vendor/github.com/containerd/go-runc/command_linux.go new file mode 100644 index 00000000..8a30f679 --- /dev/null +++ b/vendor/github.com/containerd/go-runc/command_linux.go @@ -0,0 +1,56 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package runc + +import ( + "context" + "os" + "os/exec" + "strings" + "syscall" +) + +func (r *Runc) command(context context.Context, args ...string) *exec.Cmd { + command := r.Command + if command == "" { + command = DefaultCommand + } + cmd := exec.CommandContext(context, command, append(r.args(), args...)...) + cmd.SysProcAttr = &syscall.SysProcAttr{ + Setpgid: r.Setpgid, + } + cmd.Env = filterEnv(os.Environ(), "NOTIFY_SOCKET") // NOTIFY_SOCKET introduces a special behavior in runc but should only be set if invoked from systemd + if r.PdeathSignal != 0 { + cmd.SysProcAttr.Pdeathsig = r.PdeathSignal + } + + return cmd +} + +func filterEnv(in []string, names ...string) []string { + out := make([]string, 0, len(in)) +loop0: + for _, v := range in { + for _, k := range names { + if strings.HasPrefix(v, k+"=") { + continue loop0 + } + } + out = append(out, v) + } + return out +} diff --git a/vendor/github.com/containerd/go-runc/command_other.go b/vendor/github.com/containerd/go-runc/command_other.go new file mode 100644 index 00000000..b8fd4b86 --- /dev/null +++ b/vendor/github.com/containerd/go-runc/command_other.go @@ -0,0 +1,35 @@ +// +build !linux + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package runc + +import ( + "context" + "os" + "os/exec" +) + +func (r *Runc) command(context context.Context, args ...string) *exec.Cmd { + command := r.Command + if command == "" { + command = DefaultCommand + } + cmd := exec.CommandContext(context, command, append(r.args(), args...)...) 
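+	// Unlike the Linux implementation, the environment is passed through
+	// unchanged; NOTIFY_SOCKET filtering is only done on Linux, where runc
+	// may be invoked from systemd.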
+ cmd.Env = os.Environ() + return cmd +} diff --git a/vendor/github.com/containerd/go-runc/console.go b/vendor/github.com/containerd/go-runc/console.go new file mode 100644 index 00000000..ff223e42 --- /dev/null +++ b/vendor/github.com/containerd/go-runc/console.go @@ -0,0 +1,165 @@ +// +build !windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package runc + +import ( + "fmt" + "io/ioutil" + "net" + "os" + "path/filepath" + + "github.com/containerd/console" + "golang.org/x/sys/unix" +) + +// NewConsoleSocket creates a new unix socket at the provided path to accept a +// pty master created by runc for use by the container +func NewConsoleSocket(path string) (*Socket, error) { + abs, err := filepath.Abs(path) + if err != nil { + return nil, err + } + addr, err := net.ResolveUnixAddr("unix", abs) + if err != nil { + return nil, err + } + l, err := net.ListenUnix("unix", addr) + if err != nil { + return nil, err + } + return &Socket{ + l: l, + }, nil +} + +// NewTempConsoleSocket returns a temp console socket for use with a container +// On Close(), the socket is deleted +func NewTempConsoleSocket() (*Socket, error) { + runtimeDir := os.Getenv("XDG_RUNTIME_DIR") + dir, err := ioutil.TempDir(runtimeDir, "pty") + if err != nil { + return nil, err + } + abs, err := filepath.Abs(filepath.Join(dir, "pty.sock")) + if err != nil { + return nil, err + } + addr, err := net.ResolveUnixAddr("unix", abs) + if err != nil { + return nil, err + } + l, err := net.ListenUnix("unix", addr) + if err != nil { + return nil, err + } + if runtimeDir != "" { + if err := os.Chmod(abs, 0755|os.ModeSticky); err != nil { + return nil, err + } + } + return &Socket{ + l: l, + rmdir: true, + }, nil +} + +// Socket is a unix socket that accepts the pty master created by runc +type Socket struct { + rmdir bool + l *net.UnixListener +} + +// Path returns the path to the unix socket on disk +func (c *Socket) Path() string { + return c.l.Addr().String() +} + +// recvFd waits for a file descriptor to be sent over the given AF_UNIX +// socket. The file name of the remote file descriptor will be recreated +// locally (it is sent as non-auxiliary data in the same payload). +func recvFd(socket *net.UnixConn) (*os.File, error) { + const MaxNameLen = 4096 + var oobSpace = unix.CmsgSpace(4) + + name := make([]byte, MaxNameLen) + oob := make([]byte, oobSpace) + + n, oobn, _, _, err := socket.ReadMsgUnix(name, oob) + if err != nil { + return nil, err + } + + if n >= MaxNameLen || oobn != oobSpace { + return nil, fmt.Errorf("recvfd: incorrect number of bytes read (n=%d oobn=%d)", n, oobn) + } + + // Truncate. 
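+	// Keep only the bytes that were actually read; the remainder of the
+	// fixed-size buffers is unused.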
+	name = name[:n]
+	oob = oob[:oobn]
+
+	scms, err := unix.ParseSocketControlMessage(oob)
+	if err != nil {
+		return nil, err
+	}
+	if len(scms) != 1 {
+		return nil, fmt.Errorf("recvfd: number of SCMs is not 1: %d", len(scms))
+	}
+	scm := scms[0]
+
+	fds, err := unix.ParseUnixRights(&scm)
+	if err != nil {
+		return nil, err
+	}
+	if len(fds) != 1 {
+		return nil, fmt.Errorf("recvfd: number of fds is not 1: %d", len(fds))
+	}
+	fd := uintptr(fds[0])
+
+	return os.NewFile(fd, string(name)), nil
+}
+
+// ReceiveMaster blocks until the socket receives the pty master
+func (c *Socket) ReceiveMaster() (console.Console, error) {
+	conn, err := c.l.Accept()
+	if err != nil {
+		return nil, err
+	}
+	defer conn.Close()
+	uc, ok := conn.(*net.UnixConn)
+	if !ok {
+		return nil, fmt.Errorf("received connection which was not a unix socket")
+	}
+	f, err := recvFd(uc)
+	if err != nil {
+		return nil, err
+	}
+	return console.ConsoleFromFile(f)
+}
+
+// Close closes the unix socket
+func (c *Socket) Close() error {
+	err := c.l.Close()
+	if c.rmdir {
+		if rerr := os.RemoveAll(filepath.Dir(c.Path())); err == nil {
+			err = rerr
+		}
+	}
+	return err
+}
diff --git a/vendor/github.com/containerd/go-runc/container.go b/vendor/github.com/containerd/go-runc/container.go
new file mode 100644
index 00000000..107381a5
--- /dev/null
+++ b/vendor/github.com/containerd/go-runc/container.go
@@ -0,0 +1,30 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package runc
+
+import "time"
+
+// Container holds information for a runc container
+type Container struct {
+	ID          string            `json:"id"`
+	Pid         int               `json:"pid"`
+	Status      string            `json:"status"`
+	Bundle      string            `json:"bundle"`
+	Rootfs      string            `json:"rootfs"`
+	Created     time.Time         `json:"created"`
+	Annotations map[string]string `json:"annotations"`
+}
diff --git a/vendor/github.com/containerd/go-runc/events.go b/vendor/github.com/containerd/go-runc/events.go
new file mode 100644
index 00000000..d610aeb3
--- /dev/null
+++ b/vendor/github.com/containerd/go-runc/events.go
@@ -0,0 +1,100 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package runc
+
+type Event struct {
+	// Type is the event type generated by runc.
+	// If the type is "error" then check the Err field on the event for
+	// the actual error
+	Type  string `json:"type"`
+	ID    string `json:"id"`
+	Stats *Stats `json:"data,omitempty"`
+	// Err has a read error if we were unable to decode the event from runc
+	Err error `json:"-"`
+}
+
+type Stats struct {
+	Cpu     Cpu                `json:"cpu"`
+	Memory  Memory             `json:"memory"`
+	Pids    Pids               `json:"pids"`
+	Blkio   Blkio              `json:"blkio"`
+	Hugetlb map[string]Hugetlb `json:"hugetlb"`
+}
+
+type Hugetlb struct {
+	Usage   uint64 `json:"usage,omitempty"`
+	Max     uint64 `json:"max,omitempty"`
+	Failcnt uint64 `json:"failcnt"`
+}
+
+type BlkioEntry struct {
+	Major uint64 `json:"major,omitempty"`
+	Minor uint64 `json:"minor,omitempty"`
+	Op    string `json:"op,omitempty"`
+	Value uint64 `json:"value,omitempty"`
+}
+
+type Blkio struct {
+	IoServiceBytesRecursive []BlkioEntry `json:"ioServiceBytesRecursive,omitempty"`
+	IoServicedRecursive     []BlkioEntry `json:"ioServicedRecursive,omitempty"`
+	IoQueuedRecursive       []BlkioEntry `json:"ioQueueRecursive,omitempty"`
+	IoServiceTimeRecursive  []BlkioEntry `json:"ioServiceTimeRecursive,omitempty"`
+	IoWaitTimeRecursive     []BlkioEntry `json:"ioWaitTimeRecursive,omitempty"`
+	IoMergedRecursive       []BlkioEntry `json:"ioMergedRecursive,omitempty"`
+	IoTimeRecursive         []BlkioEntry `json:"ioTimeRecursive,omitempty"`
+	SectorsRecursive        []BlkioEntry `json:"sectorsRecursive,omitempty"`
+}
+
+type Pids struct {
+	Current uint64 `json:"current,omitempty"`
+	Limit   uint64 `json:"limit,omitempty"`
+}
+
+type Throttling struct {
+	Periods          uint64 `json:"periods,omitempty"`
+	ThrottledPeriods uint64 `json:"throttledPeriods,omitempty"`
+	ThrottledTime    uint64 `json:"throttledTime,omitempty"`
+}
+
+type CpuUsage struct {
+	// Units: nanoseconds.
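+	// Total is the cumulative CPU time consumed.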
+ Total uint64 `json:"total,omitempty"` + Percpu []uint64 `json:"percpu,omitempty"` + Kernel uint64 `json:"kernel"` + User uint64 `json:"user"` +} + +type Cpu struct { + Usage CpuUsage `json:"usage,omitempty"` + Throttling Throttling `json:"throttling,omitempty"` +} + +type MemoryEntry struct { + Limit uint64 `json:"limit"` + Usage uint64 `json:"usage,omitempty"` + Max uint64 `json:"max,omitempty"` + Failcnt uint64 `json:"failcnt"` +} + +type Memory struct { + Cache uint64 `json:"cache,omitempty"` + Usage MemoryEntry `json:"usage,omitempty"` + Swap MemoryEntry `json:"swap,omitempty"` + Kernel MemoryEntry `json:"kernel,omitempty"` + KernelTCP MemoryEntry `json:"kernelTCP,omitempty"` + Raw map[string]uint64 `json:"raw,omitempty"` +} diff --git a/vendor/github.com/containerd/go-runc/go.mod b/vendor/github.com/containerd/go-runc/go.mod new file mode 100644 index 00000000..d833ee16 --- /dev/null +++ b/vendor/github.com/containerd/go-runc/go.mod @@ -0,0 +1,10 @@ +module github.com/containerd/go-runc + +go 1.13 + +require ( + github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e + github.com/opencontainers/runtime-spec v1.0.1 + github.com/pkg/errors v0.8.1 + golang.org/x/sys v0.0.0-20191210023423-ac6580df4449 +) diff --git a/vendor/github.com/containerd/go-runc/go.sum b/vendor/github.com/containerd/go-runc/go.sum new file mode 100644 index 00000000..f7d00e37 --- /dev/null +++ b/vendor/github.com/containerd/go-runc/go.sum @@ -0,0 +1,9 @@ +github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e h1:GdiIYd8ZDOrT++e1NjhSD4rGt9zaJukHm4rt5F4mRQc= +github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= +github.com/opencontainers/runtime-spec v1.0.1 h1:wY4pOY8fBdSIvs9+IDHC55thBuEulhzfSgKeC1yFvzQ= +github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191210023423-ac6580df4449 h1:gSbV7h1NRL2G1xTg/owz62CST1oJBmxy4QpMMregXVQ= +golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/containerd/go-runc/io.go b/vendor/github.com/containerd/go-runc/io.go new file mode 100644 index 00000000..6cf0410c --- /dev/null +++ b/vendor/github.com/containerd/go-runc/io.go @@ -0,0 +1,218 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package runc + +import ( + "io" + "os" + "os/exec" +) + +type IO interface { + io.Closer + Stdin() io.WriteCloser + Stdout() io.ReadCloser + Stderr() io.ReadCloser + Set(*exec.Cmd) +} + +type StartCloser interface { + CloseAfterStart() error +} + +// IOOpt sets I/O creation options +type IOOpt func(*IOOption) + +// IOOption holds I/O creation options +type IOOption struct { + OpenStdin bool + OpenStdout bool + OpenStderr bool +} + +func defaultIOOption() *IOOption { + return &IOOption{ + OpenStdin: true, + OpenStdout: true, + OpenStderr: true, + } +} + +func newPipe() (*pipe, error) { + r, w, err := os.Pipe() + if err != nil { + return nil, err + } + return &pipe{ + r: r, + w: w, + }, nil +} + +type pipe struct { + r *os.File + w *os.File +} + +func (p *pipe) Close() error { + err := p.w.Close() + if rerr := p.r.Close(); err == nil { + err = rerr + } + return err +} + +type pipeIO struct { + in *pipe + out *pipe + err *pipe +} + +func (i *pipeIO) Stdin() io.WriteCloser { + if i.in == nil { + return nil + } + return i.in.w +} + +func (i *pipeIO) Stdout() io.ReadCloser { + if i.out == nil { + return nil + } + return i.out.r +} + +func (i *pipeIO) Stderr() io.ReadCloser { + if i.err == nil { + return nil + } + return i.err.r +} + +func (i *pipeIO) Close() error { + var err error + for _, v := range []*pipe{ + i.in, + i.out, + i.err, + } { + if v != nil { + if cerr := v.Close(); err == nil { + err = cerr + } + } + } + return err +} + +func (i *pipeIO) CloseAfterStart() error { + for _, f := range []*pipe{ + i.out, + i.err, + } { + if f != nil { + f.w.Close() + } + } + return nil +} + +// Set sets the io to the exec.Cmd +func (i *pipeIO) Set(cmd *exec.Cmd) { + if i.in != nil { + cmd.Stdin = i.in.r + } + if i.out != nil { + cmd.Stdout = i.out.w + } + if i.err != nil { + cmd.Stderr = i.err.w + } +} + +func NewSTDIO() (IO, error) { + return &stdio{}, nil +} + +type stdio struct { +} + +func (s *stdio) Close() error { + return nil +} + +func (s *stdio) Set(cmd *exec.Cmd) { + cmd.Stdin = os.Stdin + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr +} + +func (s *stdio) Stdin() io.WriteCloser { + return os.Stdin +} + +func (s *stdio) Stdout() io.ReadCloser { + return os.Stdout +} + +func (s *stdio) Stderr() io.ReadCloser { + return os.Stderr +} + +// NewNullIO returns IO setup for /dev/null use with runc +func NewNullIO() (IO, error) { + f, err := os.Open(os.DevNull) + if err != nil { + return nil, err + } + return &nullIO{ + devNull: f, + }, nil +} + +type nullIO struct { + devNull *os.File +} + +func (n *nullIO) Close() error { + // this should be closed after start but if not + // make sure we close the file but don't return the error + n.devNull.Close() + return nil +} + +func (n *nullIO) Stdin() io.WriteCloser { + return nil +} + +func (n *nullIO) Stdout() io.ReadCloser { + return nil +} + +func (n *nullIO) Stderr() io.ReadCloser { + return nil +} + +func (n *nullIO) Set(c *exec.Cmd) { + // don't set STDIN here + c.Stdout = n.devNull + c.Stderr = n.devNull +} + +func (n *nullIO) CloseAfterStart() error { + return n.devNull.Close() +} diff --git a/vendor/github.com/containerd/go-runc/io_unix.go b/vendor/github.com/containerd/go-runc/io_unix.go new file mode 100644 index 00000000..567cd072 --- /dev/null +++ b/vendor/github.com/containerd/go-runc/io_unix.go @@ -0,0 +1,76 @@ +// +build !windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package runc + +import ( + "github.com/pkg/errors" + "golang.org/x/sys/unix" +) + +// NewPipeIO creates pipe pairs to be used with runc +func NewPipeIO(uid, gid int, opts ...IOOpt) (i IO, err error) { + option := defaultIOOption() + for _, o := range opts { + o(option) + } + var ( + pipes []*pipe + stdin, stdout, stderr *pipe + ) + // cleanup in case of an error + defer func() { + if err != nil { + for _, p := range pipes { + p.Close() + } + } + }() + if option.OpenStdin { + if stdin, err = newPipe(); err != nil { + return nil, err + } + pipes = append(pipes, stdin) + if err = unix.Fchown(int(stdin.r.Fd()), uid, gid); err != nil { + return nil, errors.Wrap(err, "failed to chown stdin") + } + } + if option.OpenStdout { + if stdout, err = newPipe(); err != nil { + return nil, err + } + pipes = append(pipes, stdout) + if err = unix.Fchown(int(stdout.w.Fd()), uid, gid); err != nil { + return nil, errors.Wrap(err, "failed to chown stdout") + } + } + if option.OpenStderr { + if stderr, err = newPipe(); err != nil { + return nil, err + } + pipes = append(pipes, stderr) + if err = unix.Fchown(int(stderr.w.Fd()), uid, gid); err != nil { + return nil, errors.Wrap(err, "failed to chown stderr") + } + } + return &pipeIO{ + in: stdin, + out: stdout, + err: stderr, + }, nil +} diff --git a/vendor/github.com/containerd/go-runc/io_windows.go b/vendor/github.com/containerd/go-runc/io_windows.go new file mode 100644 index 00000000..fc56ac4f --- /dev/null +++ b/vendor/github.com/containerd/go-runc/io_windows.go @@ -0,0 +1,62 @@ +// +build windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package runc + +// NewPipeIO creates pipe pairs to be used with runc +func NewPipeIO(opts ...IOOpt) (i IO, err error) { + option := defaultIOOption() + for _, o := range opts { + o(option) + } + var ( + pipes []*pipe + stdin, stdout, stderr *pipe + ) + // cleanup in case of an error + defer func() { + if err != nil { + for _, p := range pipes { + p.Close() + } + } + }() + if option.OpenStdin { + if stdin, err = newPipe(); err != nil { + return nil, err + } + pipes = append(pipes, stdin) + } + if option.OpenStdout { + if stdout, err = newPipe(); err != nil { + return nil, err + } + pipes = append(pipes, stdout) + } + if option.OpenStderr { + if stderr, err = newPipe(); err != nil { + return nil, err + } + pipes = append(pipes, stderr) + } + return &pipeIO{ + in: stdin, + out: stdout, + err: stderr, + }, nil +} diff --git a/vendor/github.com/containerd/go-runc/monitor.go b/vendor/github.com/containerd/go-runc/monitor.go new file mode 100644 index 00000000..ff06a3fc --- /dev/null +++ b/vendor/github.com/containerd/go-runc/monitor.go @@ -0,0 +1,76 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package runc + +import ( + "os/exec" + "syscall" + "time" +) + +var Monitor ProcessMonitor = &defaultMonitor{} + +type Exit struct { + Timestamp time.Time + Pid int + Status int +} + +// ProcessMonitor is an interface for process monitoring +// +// It allows daemons using go-runc to have a SIGCHLD handler +// to handle exits without introducing races between the handler +// and go's exec.Cmd +// These methods should match the methods exposed by exec.Cmd to provide +// a consistent experience for the caller +type ProcessMonitor interface { + Start(*exec.Cmd) (chan Exit, error) + Wait(*exec.Cmd, chan Exit) (int, error) +} + +type defaultMonitor struct { +} + +func (m *defaultMonitor) Start(c *exec.Cmd) (chan Exit, error) { + if err := c.Start(); err != nil { + return nil, err + } + ec := make(chan Exit, 1) + go func() { + var status int + if err := c.Wait(); err != nil { + status = 255 + if exitErr, ok := err.(*exec.ExitError); ok { + if ws, ok := exitErr.Sys().(syscall.WaitStatus); ok { + status = ws.ExitStatus() + } + } + } + ec <- Exit{ + Timestamp: time.Now(), + Pid: c.Process.Pid, + Status: status, + } + close(ec) + }() + return ec, nil +} + +func (m *defaultMonitor) Wait(c *exec.Cmd, ec chan Exit) (int, error) { + e := <-ec + return e.Status, nil +} diff --git a/vendor/github.com/containerd/go-runc/runc.go b/vendor/github.com/containerd/go-runc/runc.go new file mode 100644 index 00000000..c3a95af2 --- /dev/null +++ b/vendor/github.com/containerd/go-runc/runc.go @@ -0,0 +1,715 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package runc
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"syscall"
+	"time"
+
+	specs "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+// Format is the type of log formatting options available
+type Format string
+
+// TopResults represents the structured data of the full ps output
+type TopResults struct {
+	// Processes running in the container, where each process is an array of values corresponding to the headers
+	Processes [][]string `json:"Processes"`
+
+	// Headers are the names of the columns
+	Headers []string `json:"Headers"`
+}
+
+const (
+	none Format = ""
+	JSON Format = "json"
+	Text Format = "text"
+	// DefaultCommand is the default command for Runc
+	DefaultCommand = "runc"
+)
+
+// Runc is the client to the runc cli
+type Runc struct {
+	// If Command is empty, DefaultCommand is used
+	Command       string
+	Root          string
+	Debug         bool
+	Log           string
+	LogFormat     Format
+	PdeathSignal  syscall.Signal
+	Setpgid       bool
+	Criu          string
+	SystemdCgroup bool
+	Rootless      *bool // nil stands for "auto"
+}
+
+// List returns all containers created inside the provided runc root directory
+func (r *Runc) List(context context.Context) ([]*Container, error) {
+	data, err := cmdOutput(r.command(context, "list", "--format=json"), false)
+	defer putBuf(data)
+	if err != nil {
+		return nil, err
+	}
+	var out []*Container
+	if err := json.Unmarshal(data.Bytes(), &out); err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// State returns the state for the container provided by id
+func (r *Runc) State(context context.Context, id string) (*Container, error) {
+	data, err := cmdOutput(r.command(context, "state", id), true)
+	defer putBuf(data)
+	if err != nil {
+		return nil, fmt.Errorf("%s: %s", err, data.String())
+	}
+	var c Container
+	if err := json.Unmarshal(data.Bytes(), &c); err != nil {
+		return nil, err
+	}
+	return &c, nil
+}
+
+type ConsoleSocket interface {
+	Path() string
+}
+
+type CreateOpts struct {
+	IO
+	// PidFile is a path to where a pid file should be created
+	PidFile       string
+	ConsoleSocket ConsoleSocket
+	Detach        bool
+	NoPivot       bool
+	NoNewKeyring  bool
+	ExtraFiles    []*os.File
+}
+
+func (o *CreateOpts) args() (out []string, err error) {
+	if o.PidFile != "" {
+		abs, err := filepath.Abs(o.PidFile)
+		if err != nil {
+			return nil, err
+		}
+		out = append(out, "--pid-file", abs)
+	}
+	if o.ConsoleSocket != nil {
+		out = append(out, "--console-socket", o.ConsoleSocket.Path())
+	}
+	if o.NoPivot {
+		out = append(out, "--no-pivot")
+	}
+	if o.NoNewKeyring {
+		out = append(out, "--no-new-keyring")
+	}
+	if o.Detach {
+		out = append(out, "--detach")
+	}
+	if o.ExtraFiles != nil {
+		out = append(out, "--preserve-fds", strconv.Itoa(len(o.ExtraFiles)))
+	}
+	return out, nil
+}
+
+// Create creates a new container inside the provided bundle
+func (r *Runc) Create(context context.Context, id, bundle string, opts *CreateOpts) error {
+	args := []string{"create", "--bundle", bundle}
+	if opts != nil {
+		oargs, err := opts.args()
+		if err != nil {
+			return err
+		}
+		args = append(args, oargs...)
+	}
+	cmd := r.command(context, append(args, id)...)
+	if opts != nil && opts.IO != nil {
+		opts.Set(cmd)
+	}
+	if opts != nil {
+		// guard against a nil opts pointer before copying ExtraFiles
+		cmd.ExtraFiles = opts.ExtraFiles
+	}
+
+	if cmd.Stdout == nil && cmd.Stderr == nil {
+		data, err := cmdOutput(cmd, true)
+		defer putBuf(data)
+		if err != nil {
+			return fmt.Errorf("%s: %s", err, data.String())
+		}
+		return nil
+	}
+	ec, err := Monitor.Start(cmd)
+	if err != nil {
+		return err
+	}
+	if opts != nil && opts.IO != nil {
+		if c, ok := opts.IO.(StartCloser); ok {
+			if err := c.CloseAfterStart(); err != nil {
+				return err
+			}
+		}
+	}
+	status, err := Monitor.Wait(cmd, ec)
+	if err == nil && status != 0 {
+		err = fmt.Errorf("%s did not terminate successfully", cmd.Args[0])
+	}
+	return err
+}
+
+// Start will start an already created container
+func (r *Runc) Start(context context.Context, id string) error {
+	return r.runOrError(r.command(context, "start", id))
+}
+
+type ExecOpts struct {
+	IO
+	PidFile       string
+	ConsoleSocket ConsoleSocket
+	Detach        bool
+}
+
+func (o *ExecOpts) args() (out []string, err error) {
+	if o.ConsoleSocket != nil {
+		out = append(out, "--console-socket", o.ConsoleSocket.Path())
+	}
+	if o.Detach {
+		out = append(out, "--detach")
+	}
+	if o.PidFile != "" {
+		abs, err := filepath.Abs(o.PidFile)
+		if err != nil {
+			return nil, err
+		}
+		out = append(out, "--pid-file", abs)
+	}
+	return out, nil
+}
+
+// Exec executes an additional process inside the container based on a full
+// OCI Process specification
+func (r *Runc) Exec(context context.Context, id string, spec specs.Process, opts *ExecOpts) error {
+	f, err := ioutil.TempFile(os.Getenv("XDG_RUNTIME_DIR"), "runc-process")
+	if err != nil {
+		return err
+	}
+	defer os.Remove(f.Name())
+	err = json.NewEncoder(f).Encode(spec)
+	f.Close()
+	if err != nil {
+		return err
+	}
+	args := []string{"exec", "--process", f.Name()}
+	if opts != nil {
+		oargs, err := opts.args()
+		if err != nil {
+			return err
+		}
+		args = append(args, oargs...)
+	}
+	cmd := r.command(context, append(args, id)...)
+	if opts != nil && opts.IO != nil {
+		opts.Set(cmd)
+	}
+	if cmd.Stdout == nil && cmd.Stderr == nil {
+		data, err := cmdOutput(cmd, true)
+		defer putBuf(data)
+		if err != nil {
+			return fmt.Errorf("%s: %s", err, data.String())
+		}
+		return nil
+	}
+	ec, err := Monitor.Start(cmd)
+	if err != nil {
+		return err
+	}
+	if opts != nil && opts.IO != nil {
+		if c, ok := opts.IO.(StartCloser); ok {
+			if err := c.CloseAfterStart(); err != nil {
+				return err
+			}
+		}
+	}
+	status, err := Monitor.Wait(cmd, ec)
+	if err == nil && status != 0 {
+		err = fmt.Errorf("%s did not terminate successfully", cmd.Args[0])
+	}
+	return err
+}
+
+// Run runs the create, start, delete lifecycle of the container
+// and returns its exit status after it has exited
+func (r *Runc) Run(context context.Context, id, bundle string, opts *CreateOpts) (int, error) {
+	args := []string{"run", "--bundle", bundle}
+	if opts != nil {
+		oargs, err := opts.args()
+		if err != nil {
+			return -1, err
+		}
+		args = append(args, oargs...)
+	}
+	cmd := r.command(context, append(args, id)...)
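+	// attach any caller-provided IO before the command is started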
+	if opts != nil && opts.IO != nil {
+		opts.Set(cmd)
+	}
+	ec, err := Monitor.Start(cmd)
+	if err != nil {
+		return -1, err
+	}
+	status, err := Monitor.Wait(cmd, ec)
+	if err == nil && status != 0 {
+		err = fmt.Errorf("%s did not terminate successfully", cmd.Args[0])
+	}
+	return status, err
+}
+
+type DeleteOpts struct {
+	Force bool
+}
+
+func (o *DeleteOpts) args() (out []string) {
+	if o.Force {
+		out = append(out, "--force")
+	}
+	return out
+}
+
+// Delete deletes the container
+func (r *Runc) Delete(context context.Context, id string, opts *DeleteOpts) error {
+	args := []string{"delete"}
+	if opts != nil {
+		args = append(args, opts.args()...)
+	}
+	return r.runOrError(r.command(context, append(args, id)...))
+}
+
+// KillOpts specifies options for killing a container and its processes
+type KillOpts struct {
+	All bool
+}
+
+func (o *KillOpts) args() (out []string) {
+	if o.All {
+		out = append(out, "--all")
+	}
+	return out
+}
+
+// Kill sends the specified signal to the container
+func (r *Runc) Kill(context context.Context, id string, sig int, opts *KillOpts) error {
+	args := []string{
+		"kill",
+	}
+	if opts != nil {
+		args = append(args, opts.args()...)
+	}
+	return r.runOrError(r.command(context, append(args, id, strconv.Itoa(sig))...))
+}
+
+// Stats returns the stats for a container, such as cpu, memory, and io
+func (r *Runc) Stats(context context.Context, id string) (*Stats, error) {
+	cmd := r.command(context, "events", "--stats", id)
+	rd, err := cmd.StdoutPipe()
+	if err != nil {
+		return nil, err
+	}
+	ec, err := Monitor.Start(cmd)
+	if err != nil {
+		return nil, err
+	}
+	defer func() {
+		rd.Close()
+		Monitor.Wait(cmd, ec)
+	}()
+	var e Event
+	if err := json.NewDecoder(rd).Decode(&e); err != nil {
+		return nil, err
+	}
+	return e.Stats, nil
+}
+
+// Events returns an event stream from runc for a container with stats and OOM notifications
+func (r *Runc) Events(context context.Context, id string, interval time.Duration) (chan *Event, error) {
+	cmd := r.command(context, "events", fmt.Sprintf("--interval=%ds", int(interval.Seconds())), id)
+	rd, err := cmd.StdoutPipe()
+	if err != nil {
+		return nil, err
+	}
+	ec, err := Monitor.Start(cmd)
+	if err != nil {
+		rd.Close()
+		return nil, err
+	}
+	var (
+		dec = json.NewDecoder(rd)
+		c   = make(chan *Event, 128)
+	)
+	go func() {
+		defer func() {
+			close(c)
+			rd.Close()
+			Monitor.Wait(cmd, ec)
+		}()
+		for {
+			var e Event
+			if err := dec.Decode(&e); err != nil {
+				if err == io.EOF {
+					return
+				}
+				e = Event{
+					Type: "error",
+					Err:  err,
+				}
+			}
+			c <- &e
+		}
+	}()
+	return c, nil
+}
+
+// Pause pauses the container with the provided id
+func (r *Runc) Pause(context context.Context, id string) error {
+	return r.runOrError(r.command(context, "pause", id))
+}
+
+// Resume resumes the container with the provided id
+func (r *Runc) Resume(context context.Context, id string) error {
+	return r.runOrError(r.command(context, "resume", id))
+}
+
+// Ps lists all the processes inside the container, returning their pids
+func (r *Runc) Ps(context context.Context, id string) ([]int, error) {
+	data, err := cmdOutput(r.command(context, "ps", "--format", "json", id), true)
+	defer putBuf(data)
+	if err != nil {
+		return nil, fmt.Errorf("%s: %s", err, data.String())
+	}
+	var pids []int
+	if err := json.Unmarshal(data.Bytes(), &pids); err != nil {
+		return nil, err
+	}
+	return pids, nil
+}
+
+// Top lists all the processes inside the container, returning the full ps data
+func (r *Runc) Top(context context.Context, id string, psOptions string) (*TopResults, error) {
+	data, err := cmdOutput(r.command(context, "ps", "--format", "table", id, psOptions), true)
+	defer putBuf(data)
+	if err != nil {
+		return nil, fmt.Errorf("%s: %s", err, data.String())
+	}
+
+	topResults, err := ParsePSOutput(data.Bytes())
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse ps output: %s", err)
+	}
+	return topResults, nil
+}
+
+type CheckpointOpts struct {
+	// ImagePath is the path for saving the criu image file
+	ImagePath string
+	// WorkDir is the working directory for criu
+	WorkDir string
+	// ParentPath is the path for previous image files from a pre-dump
+	ParentPath string
+	// AllowOpenTCP allows open tcp connections to be checkpointed
+	AllowOpenTCP bool
+	// AllowExternalUnixSockets allows external unix sockets to be checkpointed
+	AllowExternalUnixSockets bool
+	// AllowTerminal allows the terminal (pty) to be checkpointed with a container
+	AllowTerminal bool
+	// CriuPageServer is the address:port for the criu page server
+	CriuPageServer string
+	// FileLocks handles file locks held by the container
+	FileLocks bool
+	// Cgroups is the cgroup mode for handling the container's cgroups during checkpoint
+	Cgroups CgroupMode
+	// EmptyNamespaces creates a namespace for the container but does not save its properties
+	// Provide the namespaces you wish to be checkpointed without their settings on restore
+	EmptyNamespaces []string
+}
+
+type CgroupMode string
+
+const (
+	Soft   CgroupMode = "soft"
+	Full   CgroupMode = "full"
+	Strict CgroupMode = "strict"
+)
+
+func (o *CheckpointOpts) args() (out []string) {
+	if o.ImagePath != "" {
+		out = append(out, "--image-path", o.ImagePath)
+	}
+	if o.WorkDir != "" {
+		out = append(out, "--work-path", o.WorkDir)
+	}
+	if o.ParentPath != "" {
+		out = append(out, "--parent-path", o.ParentPath)
+	}
+	if o.AllowOpenTCP {
+		out = append(out, "--tcp-established")
+	}
+	if o.AllowExternalUnixSockets {
+		out = append(out, "--ext-unix-sk")
+	}
+	if o.AllowTerminal {
+		out = append(out, "--shell-job")
+	}
+	if o.CriuPageServer != "" {
+		out = append(out, "--page-server", o.CriuPageServer)
+	}
+	if o.FileLocks {
+		out = append(out, "--file-locks")
+	}
+	if string(o.Cgroups) != "" {
+		out = append(out, "--manage-cgroups-mode", string(o.Cgroups))
+	}
+	for _, ns := range o.EmptyNamespaces {
+		out = append(out, "--empty-ns", ns)
+	}
+	return out
+}
+
+type CheckpointAction func([]string) []string
+
+// LeaveRunning keeps the container running after the checkpoint has been completed
+func LeaveRunning(args []string) []string {
+	return append(args, "--leave-running")
+}
+
+// PreDump allows a pre-dump of the checkpoint to be made and completed later
+func PreDump(args []string) []string {
+	return append(args, "--pre-dump")
+}
+
+// Checkpoint allows you to checkpoint a container using criu
+func (r *Runc) Checkpoint(context context.Context, id string, opts *CheckpointOpts, actions ...CheckpointAction) error {
+	args := []string{"checkpoint"}
+	if opts != nil {
+		args = append(args, opts.args()...)
+	}
+	for _, a := range actions {
+		args = a(args)
+	}
+	return r.runOrError(r.command(context, append(args, id)...))
+}
+
+type RestoreOpts struct {
+	CheckpointOpts
+	IO
+
+	Detach        bool
+	PidFile       string
+	NoSubreaper   bool
+	NoPivot       bool
+	ConsoleSocket ConsoleSocket
+}
+
+func (o *RestoreOpts) args() ([]string, error) {
+	out := o.CheckpointOpts.args()
+	if o.Detach {
+		out = append(out, "--detach")
+	}
+	if o.PidFile != "" {
+		abs, err := filepath.Abs(o.PidFile)
+		if err != nil {
+			return nil, err
+		}
+		out = append(out, "--pid-file", abs)
+	}
+	if o.ConsoleSocket != nil {
+		out = append(out, "--console-socket", o.ConsoleSocket.Path())
+	}
+	if o.NoPivot {
+		out = append(out, "--no-pivot")
+	}
+	if o.NoSubreaper {
+		out = append(out, "-no-subreaper")
+	}
+	return out, nil
+}
+
+// Restore restores a container with the provided id from an existing checkpoint
+func (r *Runc) Restore(context context.Context, id, bundle string, opts *RestoreOpts) (int, error) {
+	args := []string{"restore"}
+	if opts != nil {
+		oargs, err := opts.args()
+		if err != nil {
+			return -1, err
+		}
+		args = append(args, oargs...)
+	}
+	args = append(args, "--bundle", bundle)
+	cmd := r.command(context, append(args, id)...)
+	if opts != nil && opts.IO != nil {
+		opts.Set(cmd)
+	}
+	ec, err := Monitor.Start(cmd)
+	if err != nil {
+		return -1, err
+	}
+	if opts != nil && opts.IO != nil {
+		if c, ok := opts.IO.(StartCloser); ok {
+			if err := c.CloseAfterStart(); err != nil {
+				return -1, err
+			}
+		}
+	}
+	status, err := Monitor.Wait(cmd, ec)
+	if err == nil && status != 0 {
+		err = fmt.Errorf("%s did not terminate successfully", cmd.Args[0])
+	}
+	return status, err
+}
+
+// Update updates the current container with the provided resource spec
+func (r *Runc) Update(context context.Context, id string, resources *specs.LinuxResources) error {
+	buf := getBuf()
+	defer putBuf(buf)
+
+	if err := json.NewEncoder(buf).Encode(resources); err != nil {
+		return err
+	}
+	args := []string{"update", "--resources", "-", id}
+	cmd := r.command(context, args...)
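+	// the "-" value passed to --resources makes runc read the encoded resources from stdin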
+	cmd.Stdin = buf
+	return r.runOrError(cmd)
+}
+
+var ErrParseRuncVersion = errors.New("unable to parse runc version")
+
+type Version struct {
+	Runc   string
+	Commit string
+	Spec   string
+}
+
+// Version returns the runc and runtime-spec versions
+func (r *Runc) Version(context context.Context) (Version, error) {
+	data, err := cmdOutput(r.command(context, "--version"), false)
+	defer putBuf(data)
+	if err != nil {
+		return Version{}, err
+	}
+	return parseVersion(data.Bytes())
+}
+
+func parseVersion(data []byte) (Version, error) {
+	var v Version
+	parts := strings.Split(strings.TrimSpace(string(data)), "\n")
+
+	if len(parts) > 0 {
+		if !strings.HasPrefix(parts[0], "runc version ") {
+			return v, nil
+		}
+		v.Runc = parts[0][13:]
+
+		for _, part := range parts[1:] {
+			if strings.HasPrefix(part, "commit: ") {
+				v.Commit = part[8:]
+			} else if strings.HasPrefix(part, "spec: ") {
+				v.Spec = part[6:]
+			}
+		}
+	}
+
+	return v, nil
+}
+
+func (r *Runc) args() (out []string) {
+	if r.Root != "" {
+		out = append(out, "--root", r.Root)
+	}
+	if r.Debug {
+		out = append(out, "--debug")
+	}
+	if r.Log != "" {
+		out = append(out, "--log", r.Log)
+	}
+	if r.LogFormat != none {
+		out = append(out, "--log-format", string(r.LogFormat))
+	}
+	if r.Criu != "" {
+		out = append(out, "--criu", r.Criu)
+	}
+	if r.SystemdCgroup {
+		out = append(out, "--systemd-cgroup")
+	}
+	if r.Rootless != nil {
+		// nil stands for "auto" (differs from explicit "false")
+		out = append(out, "--rootless="+strconv.FormatBool(*r.Rootless))
+	}
+	return out
+}
+
+// runOrError will run the provided command. If an error is
+// encountered and neither Stdout nor Stderr was set, the error and the
+// stderr of the command will be returned in the format of
+// "<error>: <stderr>"
+func (r *Runc) runOrError(cmd *exec.Cmd) error {
+	if cmd.Stdout != nil || cmd.Stderr != nil {
+		ec, err := Monitor.Start(cmd)
+		if err != nil {
+			return err
+		}
+		status, err := Monitor.Wait(cmd, ec)
+		if err == nil && status != 0 {
+			err = fmt.Errorf("%s did not terminate successfully", cmd.Args[0])
+		}
+		return err
+	}
+	data, err := cmdOutput(cmd, true)
+	defer putBuf(data)
+	if err != nil {
+		return fmt.Errorf("%s: %s", err, data.String())
+	}
+	return nil
+}
+
+// callers of cmdOutput are expected to call putBuf on the returned Buffer
+// to ensure it is released back to the shared pool after use.
+func cmdOutput(cmd *exec.Cmd, combined bool) (*bytes.Buffer, error) {
+	b := getBuf()
+
+	cmd.Stdout = b
+	if combined {
+		cmd.Stderr = b
+	}
+	ec, err := Monitor.Start(cmd)
+	if err != nil {
+		return nil, err
+	}
+
+	status, err := Monitor.Wait(cmd, ec)
+	if err == nil && status != 0 {
+		err = fmt.Errorf("%s did not terminate successfully", cmd.Args[0])
+	}
+
+	return b, err
+}
diff --git a/vendor/github.com/containerd/go-runc/utils.go b/vendor/github.com/containerd/go-runc/utils.go
new file mode 100644
index 00000000..948b6336
--- /dev/null
+++ b/vendor/github.com/containerd/go-runc/utils.go
@@ -0,0 +1,111 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package runc
+
+import (
+	"bytes"
+	"errors"
+	"io/ioutil"
+	"strconv"
+	"strings"
+	"sync"
+	"syscall"
+)
+
+// ReadPidFile reads the pid file at the provided path and returns
+// the pid or an error if the read or conversion fails
+func ReadPidFile(path string) (int, error) {
+	data, err := ioutil.ReadFile(path)
+	if err != nil {
+		return -1, err
+	}
+	return strconv.Atoi(string(data))
+}
+
+const exitSignalOffset = 128
+
+// exitStatus returns the correct exit status for a process based on if it
+// was signaled or exited cleanly
+func exitStatus(status syscall.WaitStatus) int {
+	if status.Signaled() {
+		return exitSignalOffset + int(status.Signal())
+	}
+	return status.ExitStatus()
+}
+
+var bytesBufferPool = sync.Pool{
+	New: func() interface{} {
+		return bytes.NewBuffer(nil)
+	},
+}
+
+func getBuf() *bytes.Buffer {
+	return bytesBufferPool.Get().(*bytes.Buffer)
+}
+
+func putBuf(b *bytes.Buffer) {
+	if b == nil {
+		return
+	}
+
+	b.Reset()
+	bytesBufferPool.Put(b)
+}
+
+// fieldsASCII is similar to strings.Fields but only allows ASCII whitespaces
+func fieldsASCII(s string) []string {
+	fn := func(r rune) bool {
+		switch r {
+		case '\t', '\n', '\f', '\r', ' ':
+			return true
+		}
+		return false
+	}
+	return strings.FieldsFunc(s, fn)
+}
+
+// ParsePSOutput parses the runtime's ps raw output and returns a TopResults
+func ParsePSOutput(output []byte) (*TopResults, error) {
+	topResults := &TopResults{}
+
+	lines := strings.Split(string(output), "\n")
+	topResults.Headers = fieldsASCII(lines[0])
+
+	pidIndex := -1
+	for i, name := range topResults.Headers {
+		if name == "PID" {
+			pidIndex = i
+		}
+	}
+	// guard against ps output without a PID column, which would otherwise
+	// panic when indexing the fields below
+	if pidIndex == -1 {
+		return nil, errors.New("could not find PID header in ps output")
+	}
+
+	for _, line := range lines[1:] {
+		if len(line) == 0 {
+			continue
+		}
+
+		fields := fieldsASCII(line)
+
+		if fields[pidIndex] == "-" {
+			continue
+		}
+
+		process := fields[:len(topResults.Headers)-1]
+		process = append(process, strings.Join(fields[len(topResults.Headers)-1:], " "))
+		topResults.Processes = append(topResults.Processes, process)
+
+	}
+	return topResults, nil
+}
diff --git a/vendor/github.com/containerd/ttrpc/.gitignore b/vendor/github.com/containerd/ttrpc/.gitignore
new file mode 100644
index 00000000..a1338d68
--- /dev/null
+++ b/vendor/github.com/containerd/ttrpc/.gitignore
@@ -0,0 +1,14 @@
+# Binaries for programs and plugins
+*.exe
+*.dll
+*.so
+*.dylib
+
+# Test binary, build with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
+.glide/
diff --git a/vendor/github.com/containerd/ttrpc/.travis.yml b/vendor/github.com/containerd/ttrpc/.travis.yml
new file mode 100644
index 00000000..345d15d1
--- /dev/null
+++ b/vendor/github.com/containerd/ttrpc/.travis.yml
@@ -0,0 +1,23 @@
+dist: bionic
+language: go
+
+go:
+  - "1.13.x"
+
+install:
+  # Don't change local go.{mod, sum} by go get tools.
+  #
+  # ref: https://github.com/golang/go/issues/27643
+  - pushd ..; go get -u github.com/vbatts/git-validation; popd
+  - pushd ..; go get -u github.com/kunalkushwaha/ltag; popd
+
+before_script:
+  - pushd ..; git clone https://github.com/containerd/project; popd
+
+script:
+  - DCO_VERBOSITY=-q ../project/script/validate/dco
+  - ../project/script/validate/fileheader ../project/
+  - go test -v -race -covermode=atomic -coverprofile=coverage.txt ./...
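  # the coverage.txt profile produced above is what after_success uploads to codecov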
+ +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/containerd/ttrpc/LICENSE b/vendor/github.com/containerd/ttrpc/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/github.com/containerd/ttrpc/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. 
Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/containerd/ttrpc/README.md b/vendor/github.com/containerd/ttrpc/README.md
new file mode 100644
index 00000000..c345c844
--- /dev/null
+++ b/vendor/github.com/containerd/ttrpc/README.md
@@ -0,0 +1,62 @@
+# ttrpc
+
+[![Build Status](https://travis-ci.org/containerd/ttrpc.svg?branch=master)](https://travis-ci.org/containerd/ttrpc)
+
+GRPC for low-memory environments.
+
+The existing grpc-go project carries a lot of memory overhead, both from the
+packages it imports and at runtime. While this is acceptable for many services
+with low density requirements, it becomes a problem when running a large number
+of services on a single machine or on a machine with a small amount of memory.
+
+Using the same GRPC definitions, this project reduces the binary size and
+protocol overhead required. We do this by eliding the `net/http`, `net/http2`
+and `grpc` packages used by grpc and replacing them with a lightweight framing
+protocol. The result is smaller binaries that use less resident memory, with
+the same ease of use as GRPC.
+
+Please note that while this project supports generating either end of the
+protocol, the generated service definitions are incompatible with regular GRPC
+services, as they do not speak the same protocol.
+
+# Usage
+
+Create a gogo vanity binary (see
+[`cmd/protoc-gen-gogottrpc/main.go`](cmd/protoc-gen-gogottrpc/main.go) for an
+example with the ttrpc plugin enabled). A sketch of the wire format is included
+at the end of this README.
+
+It's recommended to use [`protobuild`](https://github.com/stevvooe/protobuild)
+to build the protobufs for this project, but this will work with protoc
+directly, if required.
+
+# Differences from GRPC
+
+- The protocol stack has been replaced with a lighter protocol that doesn't
+  require http, http2 and tls.
+- The client and server interfaces are identical, whereas GRPC uses different
+  interfaces for the client and the server.
+- The Go stdlib context package is used instead of `golang.org/x/net/context`.
+- No support for streams yet.
+
+# Status
+
+Very new. YMMV.
+
+TODO:
+
+- [X] Plumb error codes and GRPC status
+- [X] Remove use of any type and dependency on typeurl package
+- [X] Ensure that protocol can support streaming in the future
+- [ ] Document protocol layout
+- [ ] Add testing under concurrent load
+- [ ] Verify connection error handling
+
+# Project details
+
+ttrpc is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE).
+As a containerd sub-project, you will find the:
+ * [Project governance](https://github.com/containerd/project/blob/master/GOVERNANCE.md),
+ * [Maintainers](https://github.com/containerd/project/blob/master/MAINTAINERS),
+ * and [Contributing guidelines](https://github.com/containerd/project/blob/master/CONTRIBUTING.md)
+
+information in our [`containerd/project`](https://github.com/containerd/project) repository.
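+
+# Wire format sketch
+
+The framing protocol is small enough to sketch by hand. The program below is an
+illustrative sketch, not part of upstream ttrpc; the stream ID and payload are
+made up. It frames a request using the layout defined in `channel.go`: a
+10-byte big-endian header (4-byte payload length, 4-byte stream ID, 1-byte
+message type, 1-byte flags) followed by the payload:
+
+```go
+package main
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+)
+
+func main() {
+	payload := []byte("protobuf-encoded request would go here")
+
+	var hdr [10]byte
+	binary.BigEndian.PutUint32(hdr[0:4], uint32(len(payload))) // length, excluding the header itself
+	binary.BigEndian.PutUint32(hdr[4:8], 1)                    // stream ID; client-initiated streams use odd IDs
+	hdr[8] = 0x1                                               // message type: 0x1 request, 0x2 response
+	hdr[9] = 0x0                                               // flags, currently reserved
+
+	var frame bytes.Buffer
+	frame.Write(hdr[:])
+	frame.Write(payload)
+	fmt.Printf("% x\n", frame.Bytes())
+}
+```
diff --git a/vendor/github.com/containerd/ttrpc/channel.go b/vendor/github.com/containerd/ttrpc/channel.go
new file mode 100644
index 00000000..aa8c9541
--- /dev/null
+++ b/vendor/github.com/containerd/ttrpc/channel.go
@@ -0,0 +1,153 @@
+/*
+ Copyright The containerd Authors.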
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package ttrpc
+
+import (
+	"bufio"
+	"encoding/binary"
+	"io"
+	"net"
+	"sync"
+
+	"github.com/pkg/errors"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+const (
+	messageHeaderLength = 10
+	messageLengthMax    = 4 << 20
+)
+
+type messageType uint8
+
+const (
+	messageTypeRequest  messageType = 0x1
+	messageTypeResponse messageType = 0x2
+)
+
+// messageHeader represents the fixed-length message header of 10 bytes sent
+// with every message.
+type messageHeader struct {
+	Length   uint32      // length excluding this header. b[:4]
+	StreamID uint32      // identifies which request stream message is a part of. b[4:8]
+	Type     messageType // message type b[8]
+	Flags    uint8       // reserved b[9]
+}
+
+func readMessageHeader(p []byte, r io.Reader) (messageHeader, error) {
+	_, err := io.ReadFull(r, p[:messageHeaderLength])
+	if err != nil {
+		return messageHeader{}, err
+	}
+
+	return messageHeader{
+		Length:   binary.BigEndian.Uint32(p[:4]),
+		StreamID: binary.BigEndian.Uint32(p[4:8]),
+		Type:     messageType(p[8]),
+		Flags:    p[9],
+	}, nil
+}
+
+func writeMessageHeader(w io.Writer, p []byte, mh messageHeader) error {
+	binary.BigEndian.PutUint32(p[:4], mh.Length)
+	binary.BigEndian.PutUint32(p[4:8], mh.StreamID)
+	p[8] = byte(mh.Type)
+	p[9] = mh.Flags
+
+	_, err := w.Write(p[:])
+	return err
+}
+
+var buffers sync.Pool
+
+type channel struct {
+	conn  net.Conn
+	bw    *bufio.Writer
+	br    *bufio.Reader
+	hrbuf [messageHeaderLength]byte // avoid alloc when reading header
+	hwbuf [messageHeaderLength]byte
+}
+
+func newChannel(conn net.Conn) *channel {
+	return &channel{
+		conn: conn,
+		bw:   bufio.NewWriter(conn),
+		br:   bufio.NewReader(conn),
+	}
+}
+
+// recv reads a message from the channel. The returned buffer contains the
+// message payload.
+//
+// If recv returns an error that carries a valid grpc status, the returned
+// message header is still valid and the caller should forward the status to
+// the correct consumer. The bytes on the underlying channel are discarded.
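+// The payload buffer is drawn from an internal pool; callers are expected to
+// hand it back via putmbuf once the message has been decoded.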
+func (ch *channel) recv() (messageHeader, []byte, error) { + mh, err := readMessageHeader(ch.hrbuf[:], ch.br) + if err != nil { + return messageHeader{}, nil, err + } + + if mh.Length > uint32(messageLengthMax) { + if _, err := ch.br.Discard(int(mh.Length)); err != nil { + return mh, nil, errors.Wrapf(err, "failed to discard after receiving oversized message") + } + + return mh, nil, status.Errorf(codes.ResourceExhausted, "message length %v exceed maximum message size of %v", mh.Length, messageLengthMax) + } + + p := ch.getmbuf(int(mh.Length)) + if _, err := io.ReadFull(ch.br, p); err != nil { + return messageHeader{}, nil, errors.Wrapf(err, "failed reading message") + } + + return mh, p, nil +} + +func (ch *channel) send(streamID uint32, t messageType, p []byte) error { + if err := writeMessageHeader(ch.bw, ch.hwbuf[:], messageHeader{Length: uint32(len(p)), StreamID: streamID, Type: t}); err != nil { + return err + } + + _, err := ch.bw.Write(p) + if err != nil { + return err + } + + return ch.bw.Flush() +} + +func (ch *channel) getmbuf(size int) []byte { + // we can't use the standard New method on pool because we want to allocate + // based on size. + b, ok := buffers.Get().(*[]byte) + if !ok || cap(*b) < size { + // TODO(stevvooe): It may be better to allocate these in fixed length + // buckets to reduce fragmentation but its not clear that would help + // with performance. An ilogb approach or similar would work well. + bb := make([]byte, size) + b = &bb + } else { + *b = (*b)[:size] + } + return *b +} + +func (ch *channel) putmbuf(p []byte) { + buffers.Put(&p) +} diff --git a/vendor/github.com/containerd/ttrpc/client.go b/vendor/github.com/containerd/ttrpc/client.go new file mode 100644 index 00000000..e8169413 --- /dev/null +++ b/vendor/github.com/containerd/ttrpc/client.go @@ -0,0 +1,353 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package ttrpc + +import ( + "context" + "io" + "net" + "os" + "strings" + "sync" + "syscall" + "time" + + "github.com/gogo/protobuf/proto" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// ErrClosed is returned by client methods when the underlying connection is +// closed. 
+var ErrClosed = errors.New("ttrpc: closed") + +// Client for a ttrpc server +type Client struct { + codec codec + conn net.Conn + channel *channel + calls chan *callRequest + + ctx context.Context + closed func() + + closeOnce sync.Once + userCloseFunc func() + + errOnce sync.Once + err error + interceptor UnaryClientInterceptor +} + +// ClientOpts configures a client +type ClientOpts func(c *Client) + +// WithOnClose sets the close func whenever the client's Close() method is called +func WithOnClose(onClose func()) ClientOpts { + return func(c *Client) { + c.userCloseFunc = onClose + } +} + +// WithUnaryClientInterceptor sets the provided client interceptor +func WithUnaryClientInterceptor(i UnaryClientInterceptor) ClientOpts { + return func(c *Client) { + c.interceptor = i + } +} + +func NewClient(conn net.Conn, opts ...ClientOpts) *Client { + ctx, cancel := context.WithCancel(context.Background()) + c := &Client{ + codec: codec{}, + conn: conn, + channel: newChannel(conn), + calls: make(chan *callRequest), + closed: cancel, + ctx: ctx, + userCloseFunc: func() {}, + interceptor: defaultClientInterceptor, + } + + for _, o := range opts { + o(c) + } + + go c.run() + return c +} + +type callRequest struct { + ctx context.Context + req *Request + resp *Response // response will be written back here + errs chan error // error written here on completion +} + +func (c *Client) Call(ctx context.Context, service, method string, req, resp interface{}) error { + payload, err := c.codec.Marshal(req) + if err != nil { + return err + } + + var ( + creq = &Request{ + Service: service, + Method: method, + Payload: payload, + } + + cresp = &Response{} + ) + + if metadata, ok := GetMetadata(ctx); ok { + metadata.setRequest(creq) + } + + if dl, ok := ctx.Deadline(); ok { + creq.TimeoutNano = dl.Sub(time.Now()).Nanoseconds() + } + + info := &UnaryClientInfo{ + FullMethod: fullPath(service, method), + } + if err := c.interceptor(ctx, creq, cresp, info, c.dispatch); err != nil { + return err + } + + if err := c.codec.Unmarshal(cresp.Payload, resp); err != nil { + return err + } + + if cresp.Status != nil && cresp.Status.Code != int32(codes.OK) { + return status.ErrorProto(cresp.Status) + } + return nil +} + +func (c *Client) dispatch(ctx context.Context, req *Request, resp *Response) error { + errs := make(chan error, 1) + call := &callRequest{ + ctx: ctx, + req: req, + resp: resp, + errs: errs, + } + + select { + case <-ctx.Done(): + return ctx.Err() + case c.calls <- call: + case <-c.ctx.Done(): + return c.error() + } + + select { + case <-ctx.Done(): + return ctx.Err() + case err := <-errs: + return filterCloseErr(err) + case <-c.ctx.Done(): + return c.error() + } +} + +func (c *Client) Close() error { + c.closeOnce.Do(func() { + c.closed() + }) + return nil +} + +type message struct { + messageHeader + p []byte + err error +} + +type receiver struct { + wg *sync.WaitGroup + messages chan *message + err error +} + +func (r *receiver) run(ctx context.Context, c *channel) { + defer r.wg.Done() + + for { + select { + case <-ctx.Done(): + r.err = ctx.Err() + return + default: + mh, p, err := c.recv() + if err != nil { + _, ok := status.FromError(err) + if !ok { + // treat all errors that are not an rpc status as terminal. + // all others poison the connection. 
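+					// filterCloseErr maps io.EOF and closed-connection errors to ErrClosed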
+ r.err = filterCloseErr(err) + return + } + } + select { + case r.messages <- &message{ + messageHeader: mh, + p: p[:mh.Length], + err: err, + }: + case <-ctx.Done(): + r.err = ctx.Err() + return + } + } + } +} + +func (c *Client) run() { + var ( + streamID uint32 = 1 + waiters = make(map[uint32]*callRequest) + calls = c.calls + incoming = make(chan *message) + receiversDone = make(chan struct{}) + wg sync.WaitGroup + ) + + // broadcast the shutdown error to the remaining waiters. + abortWaiters := func(wErr error) { + for _, waiter := range waiters { + waiter.errs <- wErr + } + } + recv := &receiver{ + wg: &wg, + messages: incoming, + } + wg.Add(1) + + go func() { + wg.Wait() + close(receiversDone) + }() + go recv.run(c.ctx, c.channel) + + defer func() { + c.conn.Close() + c.userCloseFunc() + }() + + for { + select { + case call := <-calls: + if err := c.send(streamID, messageTypeRequest, call.req); err != nil { + call.errs <- err + continue + } + + waiters[streamID] = call + streamID += 2 // enforce odd client initiated request ids + case msg := <-incoming: + call, ok := waiters[msg.StreamID] + if !ok { + logrus.Errorf("ttrpc: received message for unknown channel %v", msg.StreamID) + continue + } + + call.errs <- c.recv(call.resp, msg) + delete(waiters, msg.StreamID) + case <-receiversDone: + // all the receivers have exited + if recv.err != nil { + c.setError(recv.err) + } + // don't return out, let the close of the context trigger the abort of waiters + c.Close() + case <-c.ctx.Done(): + abortWaiters(c.error()) + return + } + } +} + +func (c *Client) error() error { + c.errOnce.Do(func() { + if c.err == nil { + c.err = ErrClosed + } + }) + return c.err +} + +func (c *Client) setError(err error) { + c.errOnce.Do(func() { + c.err = err + }) +} + +func (c *Client) send(streamID uint32, mtype messageType, msg interface{}) error { + p, err := c.codec.Marshal(msg) + if err != nil { + return err + } + + return c.channel.send(streamID, mtype, p) +} + +func (c *Client) recv(resp *Response, msg *message) error { + if msg.err != nil { + return msg.err + } + + if msg.Type != messageTypeResponse { + return errors.New("unknown message type received") + } + + defer c.channel.putmbuf(msg.p) + return proto.Unmarshal(msg.p, resp) +} + +// filterCloseErr rewrites EOF and EPIPE errors to ErrClosed. Use when +// returning from call or handling errors from main read loop. +// +// This purposely ignores errors with a wrapped cause. +func filterCloseErr(err error) error { + switch { + case err == nil: + return nil + case err == io.EOF: + return ErrClosed + case errors.Cause(err) == io.EOF: + return ErrClosed + case strings.Contains(err.Error(), "use of closed network connection"): + return ErrClosed + default: + // if we have an epipe on a write or econnreset on a read , we cast to errclosed + if oerr, ok := err.(*net.OpError); ok && (oerr.Op == "write" || oerr.Op == "read") { + serr, sok := oerr.Err.(*os.SyscallError) + if sok && ((serr.Err == syscall.EPIPE && oerr.Op == "write") || + (serr.Err == syscall.ECONNRESET && oerr.Op == "read")) { + + return ErrClosed + } + } + } + + return err +} diff --git a/vendor/github.com/containerd/ttrpc/codec.go b/vendor/github.com/containerd/ttrpc/codec.go new file mode 100644 index 00000000..b4aac2fa --- /dev/null +++ b/vendor/github.com/containerd/ttrpc/codec.go @@ -0,0 +1,42 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package ttrpc + +import ( + "github.com/gogo/protobuf/proto" + "github.com/pkg/errors" +) + +type codec struct{} + +func (c codec) Marshal(msg interface{}) ([]byte, error) { + switch v := msg.(type) { + case proto.Message: + return proto.Marshal(v) + default: + return nil, errors.Errorf("ttrpc: cannot marshal unknown type: %T", msg) + } +} + +func (c codec) Unmarshal(p []byte, msg interface{}) error { + switch v := msg.(type) { + case proto.Message: + return proto.Unmarshal(p, v) + default: + return errors.Errorf("ttrpc: cannot unmarshal into unknown type: %T", msg) + } +} diff --git a/vendor/github.com/containerd/ttrpc/config.go b/vendor/github.com/containerd/ttrpc/config.go new file mode 100644 index 00000000..6a53c112 --- /dev/null +++ b/vendor/github.com/containerd/ttrpc/config.go @@ -0,0 +1,52 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package ttrpc + +import "github.com/pkg/errors" + +type serverConfig struct { + handshaker Handshaker + interceptor UnaryServerInterceptor +} + +// ServerOpt for configuring a ttrpc server +type ServerOpt func(*serverConfig) error + +// WithServerHandshaker can be passed to NewServer to ensure that the +// handshaker is called before every connection attempt. +// +// Only one handshaker is allowed per server. 
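+// Configuring a second handshaker returns an error.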
+func WithServerHandshaker(handshaker Handshaker) ServerOpt { + return func(c *serverConfig) error { + if c.handshaker != nil { + return errors.New("only one handshaker allowed per server") + } + c.handshaker = handshaker + return nil + } +} + +// WithUnaryServerInterceptor sets the provided interceptor on the server +func WithUnaryServerInterceptor(i UnaryServerInterceptor) ServerOpt { + return func(c *serverConfig) error { + if c.interceptor != nil { + return errors.New("only one interceptor allowed per server") + } + c.interceptor = i + return nil + } +} diff --git a/vendor/github.com/containerd/ttrpc/go.mod b/vendor/github.com/containerd/ttrpc/go.mod new file mode 100644 index 00000000..4ed7512f --- /dev/null +++ b/vendor/github.com/containerd/ttrpc/go.mod @@ -0,0 +1,14 @@ +module github.com/containerd/ttrpc + +go 1.13 + +require ( + github.com/gogo/protobuf v1.3.1 + github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect + github.com/pkg/errors v0.9.1 + github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1 + github.com/sirupsen/logrus v1.4.2 + golang.org/x/sys v0.0.0-20200120151820-655fe14d7479 + google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24 + google.golang.org/grpc v1.26.0 +) diff --git a/vendor/github.com/containerd/ttrpc/go.sum b/vendor/github.com/containerd/ttrpc/go.sum new file mode 100644 index 00000000..8f641f4f --- /dev/null +++ b/vendor/github.com/containerd/ttrpc/go.sum @@ -0,0 +1,85 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/gotool v1.0.0/go.mod 
h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1 h1:Lo6mRUjdS99f3zxYOUalftWHUoOGaDRqFk1+j0Q57/I= +github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys 
v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190522044717-8097e1b27ff5 h1:f005F/Jl5JLP036x7QIvUVhNTqxvSYwFIiyOh2q12iU= +golang.org/x/sys v0.0.0-20190522044717-8097e1b27ff5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200120151820-655fe14d7479 h1:LhLiKguPgZL+Tglay4GhVtfF0kb8cvOJ0dHTCBO8YNI= +golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190522204451-c2c4e71fbf69 h1:4rNOqY4ULrKzS6twXa619uQgI7h9PaVd4ZhjFQ7C5zs= +google.golang.org/genproto v0.0.0-20190522204451-c2c4e71fbf69/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24 h1:wDju+RU97qa0FZT0QnZDg9Uc2dH0Ql513kFvHocz+WM= +google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.21.0 h1:G+97AoqBnmZIT91cLG/EkCoK9NSelj64P8bOHHNmGn0= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.26.0 h1:2dTRdpdFEEhJYQD8EMLB61nnrzSCTbG38PhqdhvOltg= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/github.com/containerd/ttrpc/handshake.go b/vendor/github.com/containerd/ttrpc/handshake.go new file mode 100644 index 00000000..a424b67a --- /dev/null +++ b/vendor/github.com/containerd/ttrpc/handshake.go @@ -0,0 +1,50 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package ttrpc
+
+import (
+	"context"
+	"net"
+)
+
+// Handshaker defines the interface for connection handshakes performed on the
+// server or client when first connecting.
+type Handshaker interface {
+	// Handshake should confirm or decorate a connection that may be incoming
+	// to a server or outgoing from a client.
+	//
+	// If this returns without an error, the caller should use the connection
+	// in place of the original connection.
+	//
+	// The second return value can contain credential specific data, such as
+	// unix socket credentials or TLS information.
+	//
+	// While we currently only have implementations on the server-side, this
+	// interface should be sufficient to implement similar handshakes on the
+	// client-side.
+	Handshake(ctx context.Context, conn net.Conn) (net.Conn, interface{}, error)
+}
+
+type handshakerFunc func(ctx context.Context, conn net.Conn) (net.Conn, interface{}, error)
+
+func (fn handshakerFunc) Handshake(ctx context.Context, conn net.Conn) (net.Conn, interface{}, error) {
+	return fn(ctx, conn)
+}
+
+func noopHandshake(ctx context.Context, conn net.Conn) (net.Conn, interface{}, error) {
+	return conn, nil, nil
+}
diff --git a/vendor/github.com/containerd/ttrpc/interceptor.go b/vendor/github.com/containerd/ttrpc/interceptor.go
new file mode 100644
index 00000000..c1219dac
--- /dev/null
+++ b/vendor/github.com/containerd/ttrpc/interceptor.go
@@ -0,0 +1,50 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/ + +package ttrpc + +import "context" + +// UnaryServerInfo provides information about the server request +type UnaryServerInfo struct { + FullMethod string +} + +// UnaryClientInfo provides information about the client request +type UnaryClientInfo struct { + FullMethod string +} + +// Unmarshaler contains the server request data and allows it to be unmarshaled +// into a concrete type +type Unmarshaler func(interface{}) error + +// Invoker invokes the client's request and response from the ttrpc server +type Invoker func(context.Context, *Request, *Response) error + +// UnaryServerInterceptor specifies the interceptor function for server request/response +type UnaryServerInterceptor func(context.Context, Unmarshaler, *UnaryServerInfo, Method) (interface{}, error) + +// UnaryClientInterceptor specifies the interceptor function for client request/response +type UnaryClientInterceptor func(context.Context, *Request, *Response, *UnaryClientInfo, Invoker) error + +func defaultServerInterceptor(ctx context.Context, unmarshal Unmarshaler, info *UnaryServerInfo, method Method) (interface{}, error) { + return method(ctx, unmarshal) +} + +func defaultClientInterceptor(ctx context.Context, req *Request, resp *Response, _ *UnaryClientInfo, invoker Invoker) error { + return invoker(ctx, req, resp) +} diff --git a/vendor/github.com/containerd/ttrpc/metadata.go b/vendor/github.com/containerd/ttrpc/metadata.go new file mode 100644 index 00000000..ce8c0d13 --- /dev/null +++ b/vendor/github.com/containerd/ttrpc/metadata.go @@ -0,0 +1,107 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package ttrpc + +import ( + "context" + "strings" +) + +// MD is the user type for ttrpc metadata +type MD map[string][]string + +// Get returns the metadata for a given key when they exist. +// If there is no metadata, a nil slice and false are returned. +func (m MD) Get(key string) ([]string, bool) { + key = strings.ToLower(key) + list, ok := m[key] + if !ok || len(list) == 0 { + return nil, false + } + + return list, true +} + +// Set sets the provided values for a given key. +// The values will overwrite any existing values. +// If no values provided, a key will be deleted. +func (m MD) Set(key string, values ...string) { + key = strings.ToLower(key) + if len(values) == 0 { + delete(m, key) + return + } + m[key] = values +} + +// Append appends additional values to the given key. +func (m MD) Append(key string, values ...string) { + key = strings.ToLower(key) + if len(values) == 0 { + return + } + current, ok := m[key] + if ok { + m.Set(key, append(current, values...)...) + } else { + m.Set(key, values...) 
+ } +} + +func (m MD) setRequest(r *Request) { + for k, values := range m { + for _, v := range values { + r.Metadata = append(r.Metadata, &KeyValue{ + Key: k, + Value: v, + }) + } + } +} + +func (m MD) fromRequest(r *Request) { + for _, kv := range r.Metadata { + m[kv.Key] = append(m[kv.Key], kv.Value) + } +} + +type metadataKey struct{} + +// GetMetadata retrieves metadata from context.Context (previously attached with WithMetadata) +func GetMetadata(ctx context.Context) (MD, bool) { + metadata, ok := ctx.Value(metadataKey{}).(MD) + return metadata, ok +} + +// GetMetadataValue gets a specific metadata value by name from context.Context +func GetMetadataValue(ctx context.Context, name string) (string, bool) { + metadata, ok := GetMetadata(ctx) + if !ok { + return "", false + } + + if list, ok := metadata.Get(name); ok { + return list[0], true + } + + return "", false +} + +// WithMetadata attaches metadata map to a context.Context +func WithMetadata(ctx context.Context, md MD) context.Context { + return context.WithValue(ctx, metadataKey{}, md) +} diff --git a/vendor/github.com/containerd/ttrpc/server.go b/vendor/github.com/containerd/ttrpc/server.go new file mode 100644 index 00000000..1d4f1df6 --- /dev/null +++ b/vendor/github.com/containerd/ttrpc/server.go @@ -0,0 +1,485 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package ttrpc + +import ( + "context" + "io" + "math/rand" + "net" + "sync" + "sync/atomic" + "time" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +var ( + ErrServerClosed = errors.New("ttrpc: server closed") +) + +type Server struct { + config *serverConfig + services *serviceSet + codec codec + + mu sync.Mutex + listeners map[net.Listener]struct{} + connections map[*serverConn]struct{} // all connections to current state + done chan struct{} // marks point at which we stop serving requests +} + +func NewServer(opts ...ServerOpt) (*Server, error) { + config := &serverConfig{} + for _, opt := range opts { + if err := opt(config); err != nil { + return nil, err + } + } + if config.interceptor == nil { + config.interceptor = defaultServerInterceptor + } + + return &Server{ + config: config, + services: newServiceSet(config.interceptor), + done: make(chan struct{}), + listeners: make(map[net.Listener]struct{}), + connections: make(map[*serverConn]struct{}), + }, nil +} + +func (s *Server) Register(name string, methods map[string]Method) { + s.services.register(name, methods) +} + +func (s *Server) Serve(ctx context.Context, l net.Listener) error { + s.addListener(l) + defer s.closeListener(l) + + var ( + backoff time.Duration + handshaker = s.config.handshaker + ) + + if handshaker == nil { + handshaker = handshakerFunc(noopHandshake) + } + + for { + conn, err := l.Accept() + if err != nil { + select { + case <-s.done: + return ErrServerClosed + default: + } + + if terr, ok := err.(interface { + Temporary() bool + }); ok && terr.Temporary() { + if backoff == 0 { + backoff = time.Millisecond + } else { + backoff *= 2 + } + + if max := time.Second; backoff > max { + backoff = max + } + + sleep := time.Duration(rand.Int63n(int64(backoff))) + logrus.WithError(err).Errorf("ttrpc: failed accept; backoff %v", sleep) + time.Sleep(sleep) + continue + } + + return err + } + + backoff = 0 + + approved, handshake, err := handshaker.Handshake(ctx, conn) + if err != nil { + logrus.WithError(err).Errorf("ttrpc: refusing connection after handshake") + conn.Close() + continue + } + + sc := s.newConn(approved, handshake) + go sc.run(ctx) + } +} + +func (s *Server) Shutdown(ctx context.Context) error { + s.mu.Lock() + select { + case <-s.done: + default: + // protected by mutex + close(s.done) + } + lnerr := s.closeListeners() + s.mu.Unlock() + + ticker := time.NewTicker(200 * time.Millisecond) + defer ticker.Stop() + for { + if s.closeIdleConns() { + return lnerr + } + select { + case <-ctx.Done(): + return ctx.Err() + case <-ticker.C: + } + } +} + +// Close the server without waiting for active connections. 
+func (s *Server) Close() error { + s.mu.Lock() + defer s.mu.Unlock() + + select { + case <-s.done: + default: + // protected by mutex + close(s.done) + } + + err := s.closeListeners() + for c := range s.connections { + c.close() + delete(s.connections, c) + } + + return err +} + +func (s *Server) addListener(l net.Listener) { + s.mu.Lock() + defer s.mu.Unlock() + s.listeners[l] = struct{}{} +} + +func (s *Server) closeListener(l net.Listener) error { + s.mu.Lock() + defer s.mu.Unlock() + + return s.closeListenerLocked(l) +} + +func (s *Server) closeListenerLocked(l net.Listener) error { + defer delete(s.listeners, l) + return l.Close() +} + +func (s *Server) closeListeners() error { + var err error + for l := range s.listeners { + if cerr := s.closeListenerLocked(l); cerr != nil && err == nil { + err = cerr + } + } + return err +} + +func (s *Server) addConnection(c *serverConn) { + s.mu.Lock() + defer s.mu.Unlock() + + s.connections[c] = struct{}{} +} + +func (s *Server) closeIdleConns() bool { + s.mu.Lock() + defer s.mu.Unlock() + quiescent := true + for c := range s.connections { + st, ok := c.getState() + if !ok || st != connStateIdle { + quiescent = false + continue + } + c.close() + delete(s.connections, c) + } + return quiescent +} + +type connState int + +const ( + connStateActive = iota + 1 // outstanding requests + connStateIdle // no requests + connStateClosed // closed connection +) + +func (cs connState) String() string { + switch cs { + case connStateActive: + return "active" + case connStateIdle: + return "idle" + case connStateClosed: + return "closed" + default: + return "unknown" + } +} + +func (s *Server) newConn(conn net.Conn, handshake interface{}) *serverConn { + c := &serverConn{ + server: s, + conn: conn, + handshake: handshake, + shutdown: make(chan struct{}), + } + c.setState(connStateIdle) + s.addConnection(c) + return c +} + +type serverConn struct { + server *Server + conn net.Conn + handshake interface{} // data from handshake, not used for now + state atomic.Value + + shutdownOnce sync.Once + shutdown chan struct{} // forced shutdown, used by close +} + +func (c *serverConn) getState() (connState, bool) { + cs, ok := c.state.Load().(connState) + return cs, ok +} + +func (c *serverConn) setState(newstate connState) { + c.state.Store(newstate) +} + +func (c *serverConn) close() error { + c.shutdownOnce.Do(func() { + close(c.shutdown) + }) + + return nil +} + +func (c *serverConn) run(sctx context.Context) { + type ( + request struct { + id uint32 + req *Request + } + + response struct { + id uint32 + resp *Response + } + ) + + var ( + ch = newChannel(c.conn) + ctx, cancel = context.WithCancel(sctx) + active int + state connState = connStateIdle + responses = make(chan response) + requests = make(chan request) + recvErr = make(chan error, 1) + shutdown = c.shutdown + done = make(chan struct{}) + ) + + defer c.conn.Close() + defer cancel() + defer close(done) + + go func(recvErr chan error) { + defer close(recvErr) + sendImmediate := func(id uint32, st *status.Status) bool { + select { + case responses <- response{ + // even though we've had an invalid stream id, we send it + // back on the same stream id so the client knows which + // stream id was bad. 
+ id: id, + resp: &Response{ + Status: st.Proto(), + }, + }: + return true + case <-c.shutdown: + return false + case <-done: + return false + } + } + + for { + select { + case <-c.shutdown: + return + case <-done: + return + default: // proceed + } + + mh, p, err := ch.recv() + if err != nil { + status, ok := status.FromError(err) + if !ok { + recvErr <- err + return + } + + // in this case, we send an error for that particular message + // when the status is defined. + if !sendImmediate(mh.StreamID, status) { + return + } + + continue + } + + if mh.Type != messageTypeRequest { + // we must ignore this for future compat. + continue + } + + var req Request + if err := c.server.codec.Unmarshal(p, &req); err != nil { + ch.putmbuf(p) + if !sendImmediate(mh.StreamID, status.Newf(codes.InvalidArgument, "unmarshal request error: %v", err)) { + return + } + continue + } + ch.putmbuf(p) + + if mh.StreamID%2 != 1 { + // enforce odd client initiated identifiers. + if !sendImmediate(mh.StreamID, status.Newf(codes.InvalidArgument, "StreamID must be odd for client initiated streams")) { + return + } + continue + } + + // Forward the request to the main loop. We don't wait on s.done + // because we have already accepted the client request. + select { + case requests <- request{ + id: mh.StreamID, + req: &req, + }: + case <-done: + return + } + } + }(recvErr) + + for { + newstate := state + switch { + case active > 0: + newstate = connStateActive + shutdown = nil + case active == 0: + newstate = connStateIdle + shutdown = c.shutdown // only enable this branch in idle mode + } + + if newstate != state { + c.setState(newstate) + state = newstate + } + + select { + case request := <-requests: + active++ + go func(id uint32) { + ctx, cancel := getRequestContext(ctx, request.req) + defer cancel() + + p, status := c.server.services.call(ctx, request.req.Service, request.req.Method, request.req.Payload) + resp := &Response{ + Status: status.Proto(), + Payload: p, + } + + select { + case responses <- response{ + id: id, + resp: resp, + }: + case <-done: + } + }(request.id) + case response := <-responses: + p, err := c.server.codec.Marshal(response.resp) + if err != nil { + logrus.WithError(err).Error("failed marshaling response") + return + } + + if err := ch.send(response.id, messageTypeResponse, p); err != nil { + logrus.WithError(err).Error("failed sending message on channel") + return + } + + active-- + case err := <-recvErr: + // TODO(stevvooe): Not wildly clear what we should do in this + // branch. Basically, it means that we are no longer receiving + // requests due to a terminal error. 
+ recvErr = nil // connection is now "closing" + if err == io.EOF || err == io.ErrUnexpectedEOF { + // The client went away and we should stop processing + // requests, so that the client connection is closed + return + } + if err != nil { + logrus.WithError(err).Error("error receiving message") + } + case <-shutdown: + return + } + } +} + +var noopFunc = func() {} + +func getRequestContext(ctx context.Context, req *Request) (retCtx context.Context, cancel func()) { + if len(req.Metadata) > 0 { + md := MD{} + md.fromRequest(req) + ctx = WithMetadata(ctx, md) + } + + cancel = noopFunc + if req.TimeoutNano == 0 { + return ctx, cancel + } + + ctx, cancel = context.WithTimeout(ctx, time.Duration(req.TimeoutNano)) + return ctx, cancel +} diff --git a/vendor/github.com/containerd/ttrpc/services.go b/vendor/github.com/containerd/ttrpc/services.go new file mode 100644 index 00000000..2a83ba88 --- /dev/null +++ b/vendor/github.com/containerd/ttrpc/services.go @@ -0,0 +1,165 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package ttrpc + +import ( + "context" + "io" + "os" + "path" + "unsafe" + + "github.com/gogo/protobuf/proto" + "github.com/pkg/errors" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +type Method func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) + +type ServiceDesc struct { + Methods map[string]Method + + // TODO(stevvooe): Add stream support. 
+} + +type serviceSet struct { + services map[string]ServiceDesc + interceptor UnaryServerInterceptor +} + +func newServiceSet(interceptor UnaryServerInterceptor) *serviceSet { + return &serviceSet{ + services: make(map[string]ServiceDesc), + interceptor: interceptor, + } +} + +func (s *serviceSet) register(name string, methods map[string]Method) { + if _, ok := s.services[name]; ok { + panic(errors.Errorf("duplicate service %v registered", name)) + } + + s.services[name] = ServiceDesc{ + Methods: methods, + } +} + +func (s *serviceSet) call(ctx context.Context, serviceName, methodName string, p []byte) ([]byte, *status.Status) { + p, err := s.dispatch(ctx, serviceName, methodName, p) + st, ok := status.FromError(err) + if !ok { + st = status.New(convertCode(err), err.Error()) + } + + return p, st +} + +func (s *serviceSet) dispatch(ctx context.Context, serviceName, methodName string, p []byte) ([]byte, error) { + method, err := s.resolve(serviceName, methodName) + if err != nil { + return nil, err + } + + unmarshal := func(obj interface{}) error { + switch v := obj.(type) { + case proto.Message: + if err := proto.Unmarshal(p, v); err != nil { + return status.Errorf(codes.Internal, "ttrpc: error unmarshalling payload: %v", err.Error()) + } + default: + return status.Errorf(codes.Internal, "ttrpc: error unsupported request type: %T", v) + } + return nil + } + + info := &UnaryServerInfo{ + FullMethod: fullPath(serviceName, methodName), + } + + resp, err := s.interceptor(ctx, unmarshal, info, method) + if err != nil { + return nil, err + } + + if isNil(resp) { + return nil, errors.New("ttrpc: marshal called with nil") + } + + switch v := resp.(type) { + case proto.Message: + r, err := proto.Marshal(v) + if err != nil { + return nil, status.Errorf(codes.Internal, "ttrpc: error marshaling payload: %v", err.Error()) + } + + return r, nil + default: + return nil, status.Errorf(codes.Internal, "ttrpc: error unsupported response type: %T", v) + } +} + +func (s *serviceSet) resolve(service, method string) (Method, error) { + srv, ok := s.services[service] + if !ok { + return nil, status.Errorf(codes.NotFound, "service %v", service) + } + + mthd, ok := srv.Methods[method] + if !ok { + return nil, status.Errorf(codes.NotFound, "method %v", method) + } + + return mthd, nil +} + +// convertCode maps stdlib go errors into grpc space. +// +// This is ripped from the grpc-go code base. +func convertCode(err error) codes.Code { + switch err { + case nil: + return codes.OK + case io.EOF: + return codes.OutOfRange + case io.ErrClosedPipe, io.ErrNoProgress, io.ErrShortBuffer, io.ErrShortWrite, io.ErrUnexpectedEOF: + return codes.FailedPrecondition + case os.ErrInvalid: + return codes.InvalidArgument + case context.Canceled: + return codes.Canceled + case context.DeadlineExceeded: + return codes.DeadlineExceeded + } + switch { + case os.IsExist(err): + return codes.AlreadyExists + case os.IsNotExist(err): + return codes.NotFound + case os.IsPermission(err): + return codes.PermissionDenied + } + return codes.Unknown +} + +func fullPath(service, method string) string { + return "/" + path.Join(service, method) +} + +func isNil(resp interface{}) bool { + return (*[2]uintptr)(unsafe.Pointer(&resp))[1] == 0 +} diff --git a/vendor/github.com/containerd/ttrpc/types.go b/vendor/github.com/containerd/ttrpc/types.go new file mode 100644 index 00000000..9a1c19a7 --- /dev/null +++ b/vendor/github.com/containerd/ttrpc/types.go @@ -0,0 +1,63 @@ +/* + Copyright The containerd Authors. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package ttrpc + +import ( + "fmt" + + spb "google.golang.org/genproto/googleapis/rpc/status" +) + +type Request struct { + Service string `protobuf:"bytes,1,opt,name=service,proto3"` + Method string `protobuf:"bytes,2,opt,name=method,proto3"` + Payload []byte `protobuf:"bytes,3,opt,name=payload,proto3"` + TimeoutNano int64 `protobuf:"varint,4,opt,name=timeout_nano,proto3"` + Metadata []*KeyValue `protobuf:"bytes,5,rep,name=metadata,proto3"` +} + +func (r *Request) Reset() { *r = Request{} } +func (r *Request) String() string { return fmt.Sprintf("%+#v", r) } +func (r *Request) ProtoMessage() {} + +type Response struct { + Status *spb.Status `protobuf:"bytes,1,opt,name=status,proto3"` + Payload []byte `protobuf:"bytes,2,opt,name=payload,proto3"` +} + +func (r *Response) Reset() { *r = Response{} } +func (r *Response) String() string { return fmt.Sprintf("%+#v", r) } +func (r *Response) ProtoMessage() {} + +type StringList struct { + List []string `protobuf:"bytes,1,rep,name=list,proto3"` +} + +func (r *StringList) Reset() { *r = StringList{} } +func (r *StringList) String() string { return fmt.Sprintf("%+#v", r) } +func (r *StringList) ProtoMessage() {} + +func makeStringList(item ...string) StringList { return StringList{List: item} } + +type KeyValue struct { + Key string `protobuf:"bytes,1,opt,name=key,proto3"` + Value string `protobuf:"bytes,2,opt,name=value,proto3"` +} + +func (m *KeyValue) Reset() { *m = KeyValue{} } +func (*KeyValue) ProtoMessage() {} +func (m *KeyValue) String() string { return fmt.Sprintf("%+#v", m) } diff --git a/vendor/github.com/containerd/ttrpc/unixcreds_linux.go b/vendor/github.com/containerd/ttrpc/unixcreds_linux.go new file mode 100644 index 00000000..a471bd36 --- /dev/null +++ b/vendor/github.com/containerd/ttrpc/unixcreds_linux.go @@ -0,0 +1,108 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/
+
+package ttrpc
+
+import (
+	"context"
+	"net"
+	"os"
+	"syscall"
+
+	"github.com/pkg/errors"
+	"golang.org/x/sys/unix"
+)
+
+type UnixCredentialsFunc func(*unix.Ucred) error
+
+func (fn UnixCredentialsFunc) Handshake(ctx context.Context, conn net.Conn) (net.Conn, interface{}, error) {
+	uc, err := requireUnixSocket(conn)
+	if err != nil {
+		return nil, nil, errors.Wrap(err, "ttrpc.UnixCredentialsFunc: require unix socket")
+	}
+
+	rs, err := uc.SyscallConn()
+	if err != nil {
+		return nil, nil, errors.Wrap(err, "ttrpc.UnixCredentialsFunc: (net.UnixConn).SyscallConn failed")
+	}
+	var (
+		ucred    *unix.Ucred
+		ucredErr error
+	)
+	if err := rs.Control(func(fd uintptr) {
+		ucred, ucredErr = unix.GetsockoptUcred(int(fd), unix.SOL_SOCKET, unix.SO_PEERCRED)
+	}); err != nil {
+		return nil, nil, errors.Wrapf(err, "ttrpc.UnixCredentialsFunc: (*syscall.RawConn).Control failed")
+	}
+
+	if ucredErr != nil {
+		// GetsockoptUcred failed inside Control; surface that error.
+		return nil, nil, errors.Wrapf(ucredErr, "ttrpc.UnixCredentialsFunc: failed to retrieve socket peer credentials")
+	}
+
+	if err := fn(ucred); err != nil {
+		return nil, nil, errors.Wrapf(err, "ttrpc.UnixCredentialsFunc: credential check failed")
+	}
+
+	return uc, ucred, nil
+}
+
+// UnixSocketRequireUidGid requires specific *effective* UID/GID, rather than the real UID/GID.
+//
+// For example, if a daemon binary is owned by the root (UID 0) with SUID bit but running as an
+// unprivileged user (UID 1001), the effective UID becomes 0, and the real UID becomes 1001.
+// So calling this function with uid=0 allows a connection from effective UID 0 but rejects
+// a connection from effective UID 1001.
+//
+// See socket(7), SO_PEERCRED: "The returned credentials are those that were in effect at the time of the call to connect(2) or socketpair(2)."
+func UnixSocketRequireUidGid(uid, gid int) UnixCredentialsFunc {
+	return func(ucred *unix.Ucred) error {
+		return requireUidGid(ucred, uid, gid)
+	}
+}
+
+func UnixSocketRequireRoot() UnixCredentialsFunc {
+	return UnixSocketRequireUidGid(0, 0)
+}
+
+// UnixSocketRequireSameUser resolves the current effective unix user and returns a
+// UnixCredentialsFunc that will validate incoming unix connections against the
+// current credentials.
+//
+// This is useful when using abstract sockets that are accessible by all users.
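+//
+// An editor's sketch (not part of the upstream file) of typical use:
+//
+//	srv, err := ttrpc.NewServer(
+//		ttrpc.WithServerHandshaker(ttrpc.UnixSocketRequireSameUser()),
+//	)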
+func UnixSocketRequireSameUser() UnixCredentialsFunc { + euid, egid := os.Geteuid(), os.Getegid() + return UnixSocketRequireUidGid(euid, egid) +} + +func requireRoot(ucred *unix.Ucred) error { + return requireUidGid(ucred, 0, 0) +} + +func requireUidGid(ucred *unix.Ucred, uid, gid int) error { + if (uid != -1 && uint32(uid) != ucred.Uid) || (gid != -1 && uint32(gid) != ucred.Gid) { + return errors.Wrap(syscall.EPERM, "ttrpc: invalid credentials") + } + return nil +} + +func requireUnixSocket(conn net.Conn) (*net.UnixConn, error) { + uc, ok := conn.(*net.UnixConn) + if !ok { + return nil, errors.New("a unix socket connection is required") + } + + return uc, nil +} diff --git a/vendor/github.com/containerd/typeurl/.gitignore b/vendor/github.com/containerd/typeurl/.gitignore new file mode 100644 index 00000000..d5384677 --- /dev/null +++ b/vendor/github.com/containerd/typeurl/.gitignore @@ -0,0 +1,2 @@ +*.test +coverage.txt diff --git a/vendor/github.com/containerd/typeurl/.travis.yml b/vendor/github.com/containerd/typeurl/.travis.yml new file mode 100644 index 00000000..9b55164e --- /dev/null +++ b/vendor/github.com/containerd/typeurl/.travis.yml @@ -0,0 +1,19 @@ +dist: bionic +language: go +go: + - 1.13.x + +install: + - go get -u github.com/vbatts/git-validation + - go get -u github.com/kunalkushwaha/ltag + +before_script: + - pushd ..; git clone https://github.com/containerd/project; popd + +script: + - DCO_VERBOSITY=-q ../project/script/validate/dco + - ../project/script/validate/fileheader ../project/ + - go test -v -race -coverprofile=coverage.txt -covermode=atomic ./... + +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/containerd/typeurl/LICENSE b/vendor/github.com/containerd/typeurl/LICENSE new file mode 100644 index 00000000..584149b6 --- /dev/null +++ b/vendor/github.com/containerd/typeurl/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright The containerd Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/containerd/typeurl/README.md b/vendor/github.com/containerd/typeurl/README.md new file mode 100644 index 00000000..a663f43d --- /dev/null +++ b/vendor/github.com/containerd/typeurl/README.md @@ -0,0 +1,19 @@ +# typeurl + +[![Build Status](https://travis-ci.org/containerd/typeurl.svg?branch=master)](https://travis-ci.org/containerd/typeurl) + +[![codecov](https://codecov.io/gh/containerd/typeurl/branch/master/graph/badge.svg)](https://codecov.io/gh/containerd/typeurl) + +A Go package for managing the registration, marshaling, and unmarshaling of encoded types. + +This package helps when types are sent over a GRPC API and marshaled as a [protobuf.Any](https://github.com/gogo/protobuf/blob/master/protobuf/google/protobuf/any.proto). + +## Project details + +**typeurl** is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE). +As a containerd sub-project, you will find the: + * [Project governance](https://github.com/containerd/project/blob/master/GOVERNANCE.md), + * [Maintainers](https://github.com/containerd/project/blob/master/MAINTAINERS), + * and [Contributing guidelines](https://github.com/containerd/project/blob/master/CONTRIBUTING.md) + +information in our [`containerd/project`](https://github.com/containerd/project) repository. 
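+
+## Example
+
+An editor's sketch of the full round trip; the `Foo` type and the
+`example.com/Foo` URL below are illustrative only, not part of the upstream
+README:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/containerd/typeurl"
+)
+
+type Foo struct {
+	Field string
+}
+
+func main() {
+	// Foo is not a proto.Message, so it will be marshaled as JSON.
+	typeurl.Register(&Foo{}, "example.com", "Foo")
+
+	anyFoo, err := typeurl.MarshalAny(&Foo{Field: "bar"})
+	if err != nil {
+		panic(err)
+	}
+
+	v, err := typeurl.UnmarshalAny(anyFoo)
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(v.(*Foo).Field) // prints "bar"
+}
+```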
diff --git a/vendor/github.com/containerd/typeurl/doc.go b/vendor/github.com/containerd/typeurl/doc.go new file mode 100644 index 00000000..c0d0fd20 --- /dev/null +++ b/vendor/github.com/containerd/typeurl/doc.go @@ -0,0 +1,83 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package typeurl + +// Package typeurl assists with managing the registration, marshaling, and +// unmarshaling of types encoded as protobuf.Any. +// +// A protobuf.Any is a proto message that can contain any arbitrary data. It +// consists of two components, a TypeUrl and a Value, and its proto definition +// looks like this: +// +// message Any { +// string type_url = 1; +// bytes value = 2; +// } +// +// The TypeUrl is used to distinguish the contents from other proto.Any +// messages. This typeurl library manages these URLs to enable automagic +// marshaling and unmarshaling of the contents. +// +// For example, consider this go struct: +// +// type Foo struct { +// Field1 string +// Field2 string +// } +// +// To use typeurl, types must first be registered. This is typically done in +// the init function +// +// func init() { +// typeurl.Register(&Foo{}, "Foo") +// } +// +// This will register the type Foo with the url path "Foo". The arguments to +// Register are variadic, and are used to construct a url path. Consider this +// example, from the github.com/containerd/containerd/client package: +// +// func init() { +// const prefix = "types.containerd.io" +// // register TypeUrls for commonly marshaled external types +// major := strconv.Itoa(specs.VersionMajor) +// typeurl.Register(&specs.Spec{}, prefix, "opencontainers/runtime-spec", major, "Spec") +// // this function has more Register calls, which are elided. +// } +// +// This registers several types under a more complex url, which ends up mapping +// to `types.containerd.io/opencontainers/runtime-spec/1/Spec` (or some other +// value for major). +// +// Once a type is registered, it can be marshaled to a proto.Any message simply +// by calling `MarshalAny`, like this: +// +// foo := &Foo{Field1: "value1", Field2: "value2"} +// anyFoo, err := typeurl.MarshalAny(foo) +// +// MarshalAny will resolve the correct URL for the type. If the type in +// question implements the proto.Message interface, then it will be marshaled +// as a proto message. Otherwise, it will be marshaled as json. This means that +// typeurl will work on any arbitrary data, whether or not it has a proto +// definition, as long as it can be serialized to json. +// +// To unmarshal, the process is simply inverse: +// +// iface, err := typeurl.UnmarshalAny(anyFoo) +// foo := iface.(*Foo) +// +// The correct type is automatically chosen from the type registry, and the +// returned interface can be cast straight to that type. 
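+//
+// An editor's note, not part of the upstream comment: a type url can also be
+// checked without unmarshaling, via Is:
+//
+//	if typeurl.Is(anyFoo, &Foo{}) {
+//		// anyFoo carries a serialized Foo
+//	}
+//
+// or recovered directly with typeurl.TypeURL(&Foo{}).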
diff --git a/vendor/github.com/containerd/typeurl/go.mod b/vendor/github.com/containerd/typeurl/go.mod
new file mode 100644
index 00000000..cfcee31f
--- /dev/null
+++ b/vendor/github.com/containerd/typeurl/go.mod
@@ -0,0 +1,8 @@
+module github.com/containerd/typeurl
+
+go 1.13
+
+require (
+	github.com/gogo/protobuf v1.3.1
+	github.com/pkg/errors v0.9.1
+)
diff --git a/vendor/github.com/containerd/typeurl/go.sum b/vendor/github.com/containerd/typeurl/go.sum
new file mode 100644
index 00000000..50815ccb
--- /dev/null
+++ b/vendor/github.com/containerd/typeurl/go.sum
@@ -0,0 +1,7 @@
+github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
+github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
diff --git a/vendor/github.com/containerd/typeurl/types.go b/vendor/github.com/containerd/typeurl/types.go
new file mode 100644
index 00000000..b70314fd
--- /dev/null
+++ b/vendor/github.com/containerd/typeurl/types.go
@@ -0,0 +1,168 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package typeurl
+
+import (
+	"encoding/json"
+	"path"
+	"reflect"
+	"sync"
+
+	"github.com/gogo/protobuf/proto"
+	"github.com/gogo/protobuf/types"
+	"github.com/pkg/errors"
+)
+
+var (
+	mu       sync.Mutex
+	registry = make(map[reflect.Type]string)
+)
+
+var ErrNotFound = errors.New("not found")
+
+// Register a type with a base URL for JSON marshaling. When the MarshalAny and
+// UnmarshalAny functions are called they will treat the Any type value as JSON.
+// To use protocol buffers for handling the Any value the proto.Register
+// function should be used instead of this function.
+func Register(v interface{}, args ...string) {
+	var (
+		t = tryDereference(v)
+		p = path.Join(args...)
+	)
+	mu.Lock()
+	defer mu.Unlock()
+	if et, ok := registry[t]; ok {
+		if et != p {
+			panic(errors.Errorf("type registered with alternate path %q != %q", et, p))
+		}
+		return
+	}
+	registry[t] = p
+}
+
+// TypeURL returns the type url for a registered type.
+func TypeURL(v interface{}) (string, error) {
+	mu.Lock()
+	u, ok := registry[tryDereference(v)]
+	mu.Unlock()
+	if !ok {
+		// fallback to the proto registry if it is a proto message
+		pb, ok := v.(proto.Message)
+		if !ok {
+			return "", errors.Wrapf(ErrNotFound, "type %s", reflect.TypeOf(v))
+		}
+		return proto.MessageName(pb), nil
+	}
+	return u, nil
+}
+
+// Is returns true if the type of the Any is the same as v.
+func Is(any *types.Any, v interface{}) bool { + // call to check that v is a pointer + tryDereference(v) + url, err := TypeURL(v) + if err != nil { + return false + } + return any.TypeUrl == url +} + +// MarshalAny marshals the value v into an any with the correct TypeUrl. +// If the provided object is already a proto.Any message, then it will be +// returned verbatim. If it is of type proto.Message, it will be marshaled as a +// protocol buffer. Otherwise, the object will be marshaled to json. +func MarshalAny(v interface{}) (*types.Any, error) { + var marshal func(v interface{}) ([]byte, error) + switch t := v.(type) { + case *types.Any: + // avoid reserializing the type if we have an any. + return t, nil + case proto.Message: + marshal = func(v interface{}) ([]byte, error) { + return proto.Marshal(t) + } + default: + marshal = json.Marshal + } + + url, err := TypeURL(v) + if err != nil { + return nil, err + } + + data, err := marshal(v) + if err != nil { + return nil, err + } + return &types.Any{ + TypeUrl: url, + Value: data, + }, nil +} + +// UnmarshalAny unmarshals the any type into a concrete type. +func UnmarshalAny(any *types.Any) (interface{}, error) { + return UnmarshalByTypeURL(any.TypeUrl, any.Value) +} + +func UnmarshalByTypeURL(typeURL string, value []byte) (interface{}, error) { + t, err := getTypeByUrl(typeURL) + if err != nil { + return nil, err + } + v := reflect.New(t.t).Interface() + if t.isProto { + err = proto.Unmarshal(value, v.(proto.Message)) + } else { + err = json.Unmarshal(value, v) + } + return v, err +} + +type urlType struct { + t reflect.Type + isProto bool +} + +func getTypeByUrl(url string) (urlType, error) { + for t, u := range registry { + if u == url { + return urlType{ + t: t, + }, nil + } + } + // fallback to proto registry + t := proto.MessageType(url) + if t != nil { + return urlType{ + // get the underlying Elem because proto returns a pointer to the type + t: t.Elem(), + isProto: true, + }, nil + } + return urlType{}, errors.Wrapf(ErrNotFound, "type with url %s", url) +} + +func tryDereference(v interface{}) reflect.Type { + t := reflect.TypeOf(v) + if t.Kind() == reflect.Ptr { + // require check of pointer but dereference to register + return t.Elem() + } + panic("v is not a pointer to a type") +} diff --git a/vendor/github.com/containernetworking/cni/LICENSE b/vendor/github.com/containernetworking/cni/LICENSE new file mode 100644 index 00000000..8f71f43f --- /dev/null +++ b/vendor/github.com/containernetworking/cni/LICENSE @@ -0,0 +1,202 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
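A minimal sketch of how the vendored typeurl package above is consumed, assuming a hypothetical Config type and an illustrative type URL (neither is part of this patch); non-proto types round-trip through JSON:

package main

import (
	"fmt"

	"github.com/containerd/typeurl"
)

// Config is a hypothetical payload type used only for this sketch.
type Config struct {
	Image string `json:"image"`
}

func main() {
	// Register maps the concrete type to a type URL; non-proto types are
	// marshaled to JSON by MarshalAny and recovered by UnmarshalAny.
	typeurl.Register(&Config{}, "example.com", "Config")

	any, err := typeurl.MarshalAny(&Config{Image: "alpine:3.12"})
	if err != nil {
		panic(err)
	}
	fmt.Println(any.TypeUrl) // example.com/Config

	v, err := typeurl.UnmarshalAny(any)
	if err != nil {
		panic(err)
	}
	fmt.Println(v.(*Config).Image) // alpine:3.12
}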
+
diff --git a/vendor/github.com/containernetworking/cni/libcni/api.go b/vendor/github.com/containernetworking/cni/libcni/api.go
new file mode 100644
index 00000000..0f14d342
--- /dev/null
+++ b/vendor/github.com/containernetworking/cni/libcni/api.go
@@ -0,0 +1,497 @@
+// Copyright 2015 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package libcni
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/containernetworking/cni/pkg/invoke"
+	"github.com/containernetworking/cni/pkg/types"
+	"github.com/containernetworking/cni/pkg/version"
+)
+
+var (
+	CacheDir = "/var/lib/cni"
+)
+
+// A RuntimeConf holds the arguments to one invocation of a CNI plugin
+// excepting the network configuration, with the nested exception that
+// the `runtimeConfig` from the network configuration is included
+// here.
+type RuntimeConf struct {
+	ContainerID string
+	NetNS       string
+	IfName      string
+	Args        [][2]string
+	// A dictionary of capability-specific data passed by the runtime
+	// to plugins as top-level keys in the 'runtimeConfig' dictionary
+	// of the plugin's stdin data. libcni will ensure that only keys
+	// in this map which match the capabilities of the plugin are passed
+	// to the plugin
+	CapabilityArgs map[string]interface{}
+
+	// A cache directory in which to store library data. Defaults to CacheDir
+	CacheDir string
+}
+
+type NetworkConfig struct {
+	Network *types.NetConf
+	Bytes   []byte
+}
+
+type NetworkConfigList struct {
+	Name         string
+	CNIVersion   string
+	DisableCheck bool
+	Plugins      []*NetworkConfig
+	Bytes        []byte
+}
+
+type CNI interface {
+	AddNetworkList(ctx context.Context, net *NetworkConfigList, rt *RuntimeConf) (types.Result, error)
+	CheckNetworkList(ctx context.Context, net *NetworkConfigList, rt *RuntimeConf) error
+	DelNetworkList(ctx context.Context, net *NetworkConfigList, rt *RuntimeConf) error
+	GetNetworkListCachedResult(net *NetworkConfigList, rt *RuntimeConf) (types.Result, error)
+
+	AddNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) (types.Result, error)
+	CheckNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error
+	DelNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error
+	GetNetworkCachedResult(net *NetworkConfig, rt *RuntimeConf) (types.Result, error)
+
+	ValidateNetworkList(ctx context.Context, net *NetworkConfigList) ([]string, error)
+	ValidateNetwork(ctx context.Context, net *NetworkConfig) ([]string, error)
+}
+
+type CNIConfig struct {
+	Path []string
+	exec invoke.Exec
+}
+
+// CNIConfig implements the CNI interface
+var _ CNI = &CNIConfig{}
+
+// NewCNIConfig returns a new CNIConfig object that will search for plugins
+// in the given paths and use the given exec interface to run those plugins,
+// or if the exec interface is not given, will use a default exec handler.
+func NewCNIConfig(path []string, exec invoke.Exec) *CNIConfig {
+	return &CNIConfig{
+		Path: path,
+		exec: exec,
+	}
+}
+
+func buildOneConfig(name, cniVersion string, orig *NetworkConfig, prevResult types.Result, rt *RuntimeConf) (*NetworkConfig, error) {
+	var err error
+
+	inject := map[string]interface{}{
+		"name":       name,
+		"cniVersion": cniVersion,
+	}
+	// Add previous plugin result
+	if prevResult != nil {
+		inject["prevResult"] = prevResult
+	}
+
+	// Ensure every config uses the same name and version
+	orig, err = InjectConf(orig, inject)
+	if err != nil {
+		return nil, err
+	}
+
+	return injectRuntimeConfig(orig, rt)
+}
+
+// This function takes a libcni RuntimeConf structure and injects values into
+// a "runtimeConfig" dictionary in the CNI network configuration JSON that
+// will be passed to the plugin on stdin.
+//
+// Only "capabilities arguments" passed by the runtime are currently injected.
+// These capabilities arguments are filtered through the plugin's advertised
+// capabilities from its config JSON, and any keys in the CapabilityArgs
+// matching plugin capabilities are added to the "runtimeConfig" dictionary
+// sent to the plugin via JSON on stdin. For example, if the plugin's
+// capabilities include "portMappings", and the CapabilityArgs map includes a
+// "portMappings" key, that key and its value are added to the "runtimeConfig"
+// dictionary to be passed to the plugin's stdin.
+func injectRuntimeConfig(orig *NetworkConfig, rt *RuntimeConf) (*NetworkConfig, error) {
+	var err error
+
+	rc := make(map[string]interface{})
+	for capability, supported := range orig.Network.Capabilities {
+		if !supported {
+			continue
+		}
+		if data, ok := rt.CapabilityArgs[capability]; ok {
+			rc[capability] = data
+		}
+	}
+
+	if len(rc) > 0 {
+		orig, err = InjectConf(orig, map[string]interface{}{"runtimeConfig": rc})
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return orig, nil
+}
+
+// ensure we have a usable exec if the CNIConfig was not given one
+func (c *CNIConfig) ensureExec() invoke.Exec {
+	if c.exec == nil {
+		c.exec = &invoke.DefaultExec{
+			RawExec:       &invoke.RawExec{Stderr: os.Stderr},
+			PluginDecoder: version.PluginDecoder{},
+		}
+	}
+	return c.exec
+}
+
+func getResultCacheFilePath(netName string, rt *RuntimeConf) string {
+	cacheDir := rt.CacheDir
+	if cacheDir == "" {
+		cacheDir = CacheDir
+	}
+	return filepath.Join(cacheDir, "results", fmt.Sprintf("%s-%s-%s", netName, rt.ContainerID, rt.IfName))
+}
+
+func setCachedResult(result types.Result, netName string, rt *RuntimeConf) error {
+	data, err := json.Marshal(result)
+	if err != nil {
+		return err
+	}
+	fname := getResultCacheFilePath(netName, rt)
+	if err := os.MkdirAll(filepath.Dir(fname), 0700); err != nil {
+		return err
+	}
+	return ioutil.WriteFile(fname, data, 0600)
+}
+
+func delCachedResult(netName string, rt *RuntimeConf) error {
+	fname := getResultCacheFilePath(netName, rt)
+	return os.Remove(fname)
+}
+
+func getCachedResult(netName, cniVersion string, rt *RuntimeConf) (types.Result, error) {
+	fname := getResultCacheFilePath(netName, rt)
+	data, err := ioutil.ReadFile(fname)
+	if err != nil {
+		// Ignore read errors; the cached result may not exist on-disk
+		return nil, nil
+	}
+
+	// Read the version of the cached result
+	decoder := version.ConfigDecoder{}
+	resultCniVersion, err := decoder.Decode(data)
+	if err != nil {
+		return nil, err
+	}
+
+	// Ensure we can understand the result
+	result, err := version.NewResult(resultCniVersion, data)
+	if err != nil {
+		return nil, err
+	}
+
+	// Convert to the config version to ensure plugins get prevResult
+	// in the same version as the config. The cached result version
+	// should match the config version unless the config was changed
+	// while the container was running.
+	result, err = result.GetAsVersion(cniVersion)
+	if err != nil && resultCniVersion != cniVersion {
+		return nil, fmt.Errorf("failed to convert cached result version %q to config version %q: %v", resultCniVersion, cniVersion, err)
+	}
+	return result, err
+}
+
+// GetNetworkListCachedResult returns the cached Result of the previous
+// AddNetworkList() operation for a network list, or an error.
+func (c *CNIConfig) GetNetworkListCachedResult(list *NetworkConfigList, rt *RuntimeConf) (types.Result, error) {
+	return getCachedResult(list.Name, list.CNIVersion, rt)
+}
+
+// GetNetworkCachedResult returns the cached Result of the previous
+// AddNetwork() operation for a network, or an error.
+func (c *CNIConfig) GetNetworkCachedResult(net *NetworkConfig, rt *RuntimeConf) (types.Result, error) {
+	return getCachedResult(net.Network.Name, net.Network.CNIVersion, rt)
+}
+
+func (c *CNIConfig) addNetwork(ctx context.Context, name, cniVersion string, net *NetworkConfig, prevResult types.Result, rt *RuntimeConf) (types.Result, error) {
+	c.ensureExec()
+	pluginPath, err := c.exec.FindInPath(net.Network.Type, c.Path)
+	if err != nil {
+		return nil, err
+	}
+
+	newConf, err := buildOneConfig(name, cniVersion, net, prevResult, rt)
+	if err != nil {
+		return nil, err
+	}
+
+	return invoke.ExecPluginWithResult(ctx, pluginPath, newConf.Bytes, c.args("ADD", rt), c.exec)
+}
+
+// AddNetworkList executes a sequence of plugins with the ADD command
+func (c *CNIConfig) AddNetworkList(ctx context.Context, list *NetworkConfigList, rt *RuntimeConf) (types.Result, error) {
+	var err error
+	var result types.Result
+	for _, net := range list.Plugins {
+		result, err = c.addNetwork(ctx, list.Name, list.CNIVersion, net, result, rt)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	if err = setCachedResult(result, list.Name, rt); err != nil {
+		return nil, fmt.Errorf("failed to set network %q cached result: %v", list.Name, err)
+	}
+
+	return result, nil
+}
+
+func (c *CNIConfig) checkNetwork(ctx context.Context, name, cniVersion string, net *NetworkConfig, prevResult types.Result, rt *RuntimeConf) error {
+	c.ensureExec()
+	pluginPath, err := c.exec.FindInPath(net.Network.Type, c.Path)
+	if err != nil {
+		return err
+	}
+
+	newConf, err := buildOneConfig(name, cniVersion, net, prevResult, rt)
+	if err != nil {
+		return err
+	}
+
+	return invoke.ExecPluginWithoutResult(ctx, pluginPath, newConf.Bytes, c.args("CHECK", rt), c.exec)
+}
+
+// CheckNetworkList executes a sequence of plugins with the CHECK command
+func (c *CNIConfig) CheckNetworkList(ctx context.Context, list *NetworkConfigList, rt *RuntimeConf) error {
+	// CHECK was added in CNI spec version 0.4.0 and higher
+	if gtet, err := version.GreaterThanOrEqualTo(list.CNIVersion, "0.4.0"); err != nil {
+		return err
+	} else if !gtet {
+		return fmt.Errorf("configuration version %q does not support the CHECK command", list.CNIVersion)
+	}
+
+	if list.DisableCheck {
+		return nil
+	}
+
+	cachedResult, err := getCachedResult(list.Name, list.CNIVersion, rt)
+	if err != nil {
+		return fmt.Errorf("failed to get network %q cached result: %v", list.Name, err)
+	}
+
+	for _, net := range list.Plugins {
+		if err := c.checkNetwork(ctx, list.Name, list.CNIVersion, net, cachedResult, rt); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
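+// As an illustrative sketch only (the paths, network name, and runtime
+// values here are assumptions, not part of the vendored API): a runtime
+// typically drives the list-based calls above roughly like this, with
+// error handling elided:
+//
+//	cni := libcni.NewCNIConfig([]string{"/opt/cni/bin"}, nil)
+//	list, err := libcni.LoadConfList("/etc/cni/net.d", "mynet")
+//	rt := &libcni.RuntimeConf{
+//		ContainerID: "example-id",
+//		NetNS:       "/var/run/netns/example",
+//		IfName:      "eth0",
+//	}
+//	result, err := cni.AddNetworkList(ctx, list, rt)
+//	err = cni.CheckNetworkList(ctx, list, rt)
+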
+func (c *CNIConfig) delNetwork(ctx context.Context, name, cniVersion string, net *NetworkConfig, prevResult types.Result, rt *RuntimeConf) error { + c.ensureExec() + pluginPath, err := c.exec.FindInPath(net.Network.Type, c.Path) + if err != nil { + return err + } + + newConf, err := buildOneConfig(name, cniVersion, net, prevResult, rt) + if err != nil { + return err + } + + return invoke.ExecPluginWithoutResult(ctx, pluginPath, newConf.Bytes, c.args("DEL", rt), c.exec) +} + +// DelNetworkList executes a sequence of plugins with the DEL command +func (c *CNIConfig) DelNetworkList(ctx context.Context, list *NetworkConfigList, rt *RuntimeConf) error { + var cachedResult types.Result + + // Cached result on DEL was added in CNI spec version 0.4.0 and higher + if gtet, err := version.GreaterThanOrEqualTo(list.CNIVersion, "0.4.0"); err != nil { + return err + } else if gtet { + cachedResult, err = getCachedResult(list.Name, list.CNIVersion, rt) + if err != nil { + return fmt.Errorf("failed to get network %q cached result: %v", list.Name, err) + } + } + + for i := len(list.Plugins) - 1; i >= 0; i-- { + net := list.Plugins[i] + if err := c.delNetwork(ctx, list.Name, list.CNIVersion, net, cachedResult, rt); err != nil { + return err + } + } + _ = delCachedResult(list.Name, rt) + + return nil +} + +// AddNetwork executes the plugin with the ADD command +func (c *CNIConfig) AddNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) (types.Result, error) { + result, err := c.addNetwork(ctx, net.Network.Name, net.Network.CNIVersion, net, nil, rt) + if err != nil { + return nil, err + } + + if err = setCachedResult(result, net.Network.Name, rt); err != nil { + return nil, fmt.Errorf("failed to set network %q cached result: %v", net.Network.Name, err) + } + + return result, nil +} + +// CheckNetwork executes the plugin with the CHECK command +func (c *CNIConfig) CheckNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error { + // CHECK was added in CNI spec version 0.4.0 and higher + if gtet, err := version.GreaterThanOrEqualTo(net.Network.CNIVersion, "0.4.0"); err != nil { + return err + } else if !gtet { + return fmt.Errorf("configuration version %q does not support the CHECK command", net.Network.CNIVersion) + } + + cachedResult, err := getCachedResult(net.Network.Name, net.Network.CNIVersion, rt) + if err != nil { + return fmt.Errorf("failed to get network %q cached result: %v", net.Network.Name, err) + } + return c.checkNetwork(ctx, net.Network.Name, net.Network.CNIVersion, net, cachedResult, rt) +} + +// DelNetwork executes the plugin with the DEL command +func (c *CNIConfig) DelNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error { + var cachedResult types.Result + + // Cached result on DEL was added in CNI spec version 0.4.0 and higher + if gtet, err := version.GreaterThanOrEqualTo(net.Network.CNIVersion, "0.4.0"); err != nil { + return err + } else if gtet { + cachedResult, err = getCachedResult(net.Network.Name, net.Network.CNIVersion, rt) + if err != nil { + return fmt.Errorf("failed to get network %q cached result: %v", net.Network.Name, err) + } + } + + if err := c.delNetwork(ctx, net.Network.Name, net.Network.CNIVersion, net, cachedResult, rt); err != nil { + return err + } + _ = delCachedResult(net.Network.Name, rt) + return nil +} + +// ValidateNetworkList checks that a configuration is reasonably valid. +// - all the specified plugins exist on disk +// - every plugin supports the desired version. 
+//
+// Returns a list of all capabilities supported by the configuration, or error
+func (c *CNIConfig) ValidateNetworkList(ctx context.Context, list *NetworkConfigList) ([]string, error) {
+	version := list.CNIVersion
+
+	// holding map for seen caps (in case of duplicates)
+	caps := map[string]interface{}{}
+
+	errs := []error{}
+	for _, net := range list.Plugins {
+		if err := c.validatePlugin(ctx, net.Network.Type, version); err != nil {
+			errs = append(errs, err)
+		}
+		for c, enabled := range net.Network.Capabilities {
+			if !enabled {
+				continue
+			}
+			caps[c] = struct{}{}
+		}
+	}
+
+	if len(errs) > 0 {
+		return nil, fmt.Errorf("%v", errs)
+	}
+
+	// make caps list
+	cc := make([]string, 0, len(caps))
+	for c := range caps {
+		cc = append(cc, c)
+	}
+
+	return cc, nil
+}
+
+// ValidateNetwork checks that a configuration is reasonably valid.
+// It uses the same logic as ValidateNetworkList()
+// Returns a list of capabilities
+func (c *CNIConfig) ValidateNetwork(ctx context.Context, net *NetworkConfig) ([]string, error) {
+	caps := []string{}
+	for c, ok := range net.Network.Capabilities {
+		if ok {
+			caps = append(caps, c)
+		}
+	}
+	if err := c.validatePlugin(ctx, net.Network.Type, net.Network.CNIVersion); err != nil {
+		return nil, err
+	}
+	return caps, nil
+}
+
+// validatePlugin checks that an individual plugin's configuration is sane
+func (c *CNIConfig) validatePlugin(ctx context.Context, pluginName, expectedVersion string) error {
+	pluginPath, err := invoke.FindInPath(pluginName, c.Path)
+	if err != nil {
+		return err
+	}
+
+	vi, err := invoke.GetVersionInfo(ctx, pluginPath, c.exec)
+	if err != nil {
+		return err
+	}
+	for _, vers := range vi.SupportedVersions() {
+		if vers == expectedVersion {
+			return nil
+		}
+	}
+	return fmt.Errorf("plugin %s does not support config version %q", pluginName, expectedVersion)
+}
+
+// GetVersionInfo reports which versions of the CNI spec are supported by
+// the given plugin.
+func (c *CNIConfig) GetVersionInfo(ctx context.Context, pluginType string) (version.PluginInfo, error) {
+	c.ensureExec()
+	pluginPath, err := c.exec.FindInPath(pluginType, c.Path)
+	if err != nil {
+		return nil, err
+	}
+
+	return invoke.GetVersionInfo(ctx, pluginPath, c.exec)
+}
+
+// =====
+func (c *CNIConfig) args(action string, rt *RuntimeConf) *invoke.Args {
+	return &invoke.Args{
+		Command:     action,
+		ContainerID: rt.ContainerID,
+		NetNS:       rt.NetNS,
+		PluginArgs:  rt.Args,
+		IfName:      rt.IfName,
+		Path:        strings.Join(c.Path, string(os.PathListSeparator)),
+	}
+}
diff --git a/vendor/github.com/containernetworking/cni/libcni/conf.go b/vendor/github.com/containernetworking/cni/libcni/conf.go
new file mode 100644
index 00000000..ea56c509
--- /dev/null
+++ b/vendor/github.com/containernetworking/cni/libcni/conf.go
@@ -0,0 +1,268 @@
+// Copyright 2015 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package libcni + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sort" +) + +type NotFoundError struct { + Dir string + Name string +} + +func (e NotFoundError) Error() string { + return fmt.Sprintf(`no net configuration with name "%s" in %s`, e.Name, e.Dir) +} + +type NoConfigsFoundError struct { + Dir string +} + +func (e NoConfigsFoundError) Error() string { + return fmt.Sprintf(`no net configurations found in %s`, e.Dir) +} + +func ConfFromBytes(bytes []byte) (*NetworkConfig, error) { + conf := &NetworkConfig{Bytes: bytes} + if err := json.Unmarshal(bytes, &conf.Network); err != nil { + return nil, fmt.Errorf("error parsing configuration: %s", err) + } + if conf.Network.Type == "" { + return nil, fmt.Errorf("error parsing configuration: missing 'type'") + } + return conf, nil +} + +func ConfFromFile(filename string) (*NetworkConfig, error) { + bytes, err := ioutil.ReadFile(filename) + if err != nil { + return nil, fmt.Errorf("error reading %s: %s", filename, err) + } + return ConfFromBytes(bytes) +} + +func ConfListFromBytes(bytes []byte) (*NetworkConfigList, error) { + rawList := make(map[string]interface{}) + if err := json.Unmarshal(bytes, &rawList); err != nil { + return nil, fmt.Errorf("error parsing configuration list: %s", err) + } + + rawName, ok := rawList["name"] + if !ok { + return nil, fmt.Errorf("error parsing configuration list: no name") + } + name, ok := rawName.(string) + if !ok { + return nil, fmt.Errorf("error parsing configuration list: invalid name type %T", rawName) + } + + var cniVersion string + rawVersion, ok := rawList["cniVersion"] + if ok { + cniVersion, ok = rawVersion.(string) + if !ok { + return nil, fmt.Errorf("error parsing configuration list: invalid cniVersion type %T", rawVersion) + } + } + + disableCheck := false + if rawDisableCheck, ok := rawList["disableCheck"]; ok { + disableCheck, ok = rawDisableCheck.(bool) + if !ok { + return nil, fmt.Errorf("error parsing configuration list: invalid disableCheck type %T", rawDisableCheck) + } + } + + list := &NetworkConfigList{ + Name: name, + DisableCheck: disableCheck, + CNIVersion: cniVersion, + Bytes: bytes, + } + + var plugins []interface{} + plug, ok := rawList["plugins"] + if !ok { + return nil, fmt.Errorf("error parsing configuration list: no 'plugins' key") + } + plugins, ok = plug.([]interface{}) + if !ok { + return nil, fmt.Errorf("error parsing configuration list: invalid 'plugins' type %T", plug) + } + if len(plugins) == 0 { + return nil, fmt.Errorf("error parsing configuration list: no plugins in list") + } + + for i, conf := range plugins { + newBytes, err := json.Marshal(conf) + if err != nil { + return nil, fmt.Errorf("Failed to marshal plugin config %d: %v", i, err) + } + netConf, err := ConfFromBytes(newBytes) + if err != nil { + return nil, fmt.Errorf("Failed to parse plugin config %d: %v", i, err) + } + list.Plugins = append(list.Plugins, netConf) + } + + return list, nil +} + +func ConfListFromFile(filename string) (*NetworkConfigList, error) { + bytes, err := ioutil.ReadFile(filename) + if err != nil { + return nil, fmt.Errorf("error reading %s: %s", filename, err) + } + return ConfListFromBytes(bytes) +} + +func ConfFiles(dir string, extensions []string) ([]string, error) { + // In part, adapted from rkt/networking/podenv.go#listFiles + files, err := ioutil.ReadDir(dir) + switch { + case err == nil: // break + case os.IsNotExist(err): + return nil, nil + default: + return nil, err + } + + confFiles := []string{} + for _, f := range files { + if 
f.IsDir() { + continue + } + fileExt := filepath.Ext(f.Name()) + for _, ext := range extensions { + if fileExt == ext { + confFiles = append(confFiles, filepath.Join(dir, f.Name())) + } + } + } + return confFiles, nil +} + +func LoadConf(dir, name string) (*NetworkConfig, error) { + files, err := ConfFiles(dir, []string{".conf", ".json"}) + switch { + case err != nil: + return nil, err + case len(files) == 0: + return nil, NoConfigsFoundError{Dir: dir} + } + sort.Strings(files) + + for _, confFile := range files { + conf, err := ConfFromFile(confFile) + if err != nil { + return nil, err + } + if conf.Network.Name == name { + return conf, nil + } + } + return nil, NotFoundError{dir, name} +} + +func LoadConfList(dir, name string) (*NetworkConfigList, error) { + files, err := ConfFiles(dir, []string{".conflist"}) + if err != nil { + return nil, err + } + sort.Strings(files) + + for _, confFile := range files { + conf, err := ConfListFromFile(confFile) + if err != nil { + return nil, err + } + if conf.Name == name { + return conf, nil + } + } + + // Try and load a network configuration file (instead of list) + // from the same name, then upconvert. + singleConf, err := LoadConf(dir, name) + if err != nil { + // A little extra logic so the error makes sense + if _, ok := err.(NoConfigsFoundError); len(files) != 0 && ok { + // Config lists found but no config files found + return nil, NotFoundError{dir, name} + } + + return nil, err + } + return ConfListFromConf(singleConf) +} + +func InjectConf(original *NetworkConfig, newValues map[string]interface{}) (*NetworkConfig, error) { + config := make(map[string]interface{}) + err := json.Unmarshal(original.Bytes, &config) + if err != nil { + return nil, fmt.Errorf("unmarshal existing network bytes: %s", err) + } + + for key, value := range newValues { + if key == "" { + return nil, fmt.Errorf("keys cannot be empty") + } + + if value == nil { + return nil, fmt.Errorf("key '%s' value must not be nil", key) + } + + config[key] = value + } + + newBytes, err := json.Marshal(config) + if err != nil { + return nil, err + } + + return ConfFromBytes(newBytes) +} + +// ConfListFromConf "upconverts" a network config in to a NetworkConfigList, +// with the single network as the only entry in the list. +func ConfListFromConf(original *NetworkConfig) (*NetworkConfigList, error) { + // Re-deserialize the config's json, then make a raw map configlist. + // This may seem a bit strange, but it's to make the Bytes fields + // actually make sense. Otherwise, the generated json is littered with + // golang default values. + + rawConfig := make(map[string]interface{}) + if err := json.Unmarshal(original.Bytes, &rawConfig); err != nil { + return nil, err + } + + rawConfigList := map[string]interface{}{ + "name": original.Network.Name, + "cniVersion": original.Network.CNIVersion, + "plugins": []interface{}{rawConfig}, + } + + b, err := json.Marshal(rawConfigList) + if err != nil { + return nil, err + } + return ConfListFromBytes(b) +} diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/args.go b/vendor/github.com/containernetworking/cni/pkg/invoke/args.go new file mode 100644 index 00000000..913528c1 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/invoke/args.go @@ -0,0 +1,128 @@ +// Copyright 2015 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package invoke
+
+import (
+	"fmt"
+	"os"
+	"strings"
+)
+
+type CNIArgs interface {
+	// For use with os/exec; i.e., return nil to inherit the
+	// environment from this process
+	// For use in delegation; inherit the environment from this
+	// process and allow overrides
+	AsEnv() []string
+}
+
+type inherited struct{}
+
+var inheritArgsFromEnv inherited
+
+func (_ *inherited) AsEnv() []string {
+	return nil
+}
+
+func ArgsFromEnv() CNIArgs {
+	return &inheritArgsFromEnv
+}
+
+type Args struct {
+	Command       string
+	ContainerID   string
+	NetNS         string
+	PluginArgs    [][2]string
+	PluginArgsStr string
+	IfName        string
+	Path          string
+}
+
+// Args implements the CNIArgs interface
+var _ CNIArgs = &Args{}
+
+func (args *Args) AsEnv() []string {
+	env := os.Environ()
+	pluginArgsStr := args.PluginArgsStr
+	if pluginArgsStr == "" {
+		pluginArgsStr = stringify(args.PluginArgs)
+	}
+
+	// Duplicated values which come first will be overridden, so we must put the
+	// custom values at the end to avoid being overridden by the process environments.
+	env = append(env,
+		"CNI_COMMAND="+args.Command,
+		"CNI_CONTAINERID="+args.ContainerID,
+		"CNI_NETNS="+args.NetNS,
+		"CNI_ARGS="+pluginArgsStr,
+		"CNI_IFNAME="+args.IfName,
+		"CNI_PATH="+args.Path,
+	)
+	return dedupEnv(env)
+}
+
+// taken from rkt/networking/net_plugin.go
+func stringify(pluginArgs [][2]string) string {
+	entries := make([]string, len(pluginArgs))
+
+	for i, kv := range pluginArgs {
+		entries[i] = strings.Join(kv[:], "=")
+	}
+
+	return strings.Join(entries, ";")
+}
+
+// DelegateArgs implements the CNIArgs interface
+// used for delegation to inherit from environments
+// and allow some overrides like CNI_COMMAND
+var _ CNIArgs = &DelegateArgs{}
+
+type DelegateArgs struct {
+	Command string
+}
+
+func (d *DelegateArgs) AsEnv() []string {
+	env := os.Environ()
+
+	// The custom values should come at the end to override the existing
+	// process environment of the same key.
+	env = append(env,
+		"CNI_COMMAND="+d.Command,
+	)
+	return dedupEnv(env)
+}
+
+// dedupEnv returns a copy of env with any duplicates removed, in favor of later values.
+// Items not of the normal environment "key=value" form are preserved unchanged.
+func dedupEnv(env []string) []string {
+	out := make([]string, 0, len(env))
+	envMap := map[string]string{}
+
+	for _, kv := range env {
+		// find the first "=" in environment, if not, just keep it
+		eq := strings.Index(kv, "=")
+		if eq < 0 {
+			out = append(out, kv)
+			continue
+		}
+		envMap[kv[:eq]] = kv[eq+1:]
+	}
+
+	for k, v := range envMap {
+		out = append(out, fmt.Sprintf("%s=%s", k, v))
+	}
+
+	return out
+}
diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go b/vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go
new file mode 100644
index 00000000..8defe4dd
--- /dev/null
+++ b/vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go
@@ -0,0 +1,80 @@
+// Copyright 2016 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package invoke + +import ( + "context" + "os" + "path/filepath" + + "github.com/containernetworking/cni/pkg/types" +) + +func delegateCommon(delegatePlugin string, exec Exec) (string, Exec, error) { + if exec == nil { + exec = defaultExec + } + + paths := filepath.SplitList(os.Getenv("CNI_PATH")) + pluginPath, err := exec.FindInPath(delegatePlugin, paths) + if err != nil { + return "", nil, err + } + + return pluginPath, exec, nil +} + +// DelegateAdd calls the given delegate plugin with the CNI ADD action and +// JSON configuration +func DelegateAdd(ctx context.Context, delegatePlugin string, netconf []byte, exec Exec) (types.Result, error) { + pluginPath, realExec, err := delegateCommon(delegatePlugin, exec) + if err != nil { + return nil, err + } + + // DelegateAdd will override the original "CNI_COMMAND" env from process with ADD + return ExecPluginWithResult(ctx, pluginPath, netconf, delegateArgs("ADD"), realExec) +} + +// DelegateCheck calls the given delegate plugin with the CNI CHECK action and +// JSON configuration +func DelegateCheck(ctx context.Context, delegatePlugin string, netconf []byte, exec Exec) error { + pluginPath, realExec, err := delegateCommon(delegatePlugin, exec) + if err != nil { + return err + } + + // DelegateCheck will override the original CNI_COMMAND env from process with CHECK + return ExecPluginWithoutResult(ctx, pluginPath, netconf, delegateArgs("CHECK"), realExec) +} + +// DelegateDel calls the given delegate plugin with the CNI DEL action and +// JSON configuration +func DelegateDel(ctx context.Context, delegatePlugin string, netconf []byte, exec Exec) error { + pluginPath, realExec, err := delegateCommon(delegatePlugin, exec) + if err != nil { + return err + } + + // DelegateDel will override the original CNI_COMMAND env from process with DEL + return ExecPluginWithoutResult(ctx, pluginPath, netconf, delegateArgs("DEL"), realExec) +} + +// return CNIArgs used by delegation +func delegateArgs(action string) *DelegateArgs { + return &DelegateArgs{ + Command: action, + } +} diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go b/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go new file mode 100644 index 00000000..8e6d30b8 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go @@ -0,0 +1,144 @@ +// Copyright 2015 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
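+
+// Illustrative sketch for the delegate helpers in delegate.go above (the
+// plugin name and netconf bytes are assumptions for the example): a plugin
+// delegating IPAM to another plugin found via CNI_PATH might call:
+//
+//	// passing a nil Exec selects the package-level defaultExec
+//	result, err := invoke.DelegateAdd(ctx, "host-local", netconfBytes, nil)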
+
+package invoke
+
+import (
+	"context"
+	"fmt"
+	"os"
+
+	"github.com/containernetworking/cni/pkg/types"
+	"github.com/containernetworking/cni/pkg/version"
+)
+
+// Exec is an interface that encapsulates all operations that deal with finding
+// and executing a CNI plugin. Tests may provide a fake implementation
+// to avoid writing fake plugins to temporary directories during the test.
+type Exec interface {
+	ExecPlugin(ctx context.Context, pluginPath string, stdinData []byte, environ []string) ([]byte, error)
+	FindInPath(plugin string, paths []string) (string, error)
+	Decode(jsonBytes []byte) (version.PluginInfo, error)
+}
+
+// For example, a testcase could pass an instance of the following fakeExec
+// object to ExecPluginWithResult() to verify the incoming stdin and environment
+// and provide a tailored response:
+//
+//import (
+//	"context"
+//	"encoding/json"
+//	"path"
+//	"strings"
+//)
+//
+//type fakeExec struct {
+//	version.PluginDecoder
+//}
+//
+//func (f *fakeExec) ExecPlugin(ctx context.Context, pluginPath string, stdinData []byte, environ []string) ([]byte, error) {
+//	net := &types.NetConf{}
+//	err := json.Unmarshal(stdinData, net)
+//	if err != nil {
+//		return nil, fmt.Errorf("failed to unmarshal configuration: %v", err)
+//	}
+//	pluginName := path.Base(pluginPath)
+//	if pluginName != net.Type {
+//		return nil, fmt.Errorf("plugin name %q did not match config type %q", pluginName, net.Type)
+//	}
+//	for _, e := range environ {
+//		// Check environment for forced failure request
+//		parts := strings.Split(e, "=")
+//		if len(parts) > 0 && parts[0] == "FAIL" {
+//			return nil, fmt.Errorf("failed to execute plugin %s", pluginName)
+//		}
+//	}
+//	return []byte("{\"CNIVersion\":\"0.4.0\"}"), nil
+//}
+//
+//func (f *fakeExec) FindInPath(plugin string, paths []string) (string, error) {
+//	if len(paths) > 0 {
+//		return path.Join(paths[0], plugin), nil
+//	}
+//	return "", fmt.Errorf("failed to find plugin %s in paths %v", plugin, paths)
+//}
+
+func ExecPluginWithResult(ctx context.Context, pluginPath string, netconf []byte, args CNIArgs, exec Exec) (types.Result, error) {
+	if exec == nil {
+		exec = defaultExec
+	}
+
+	stdoutBytes, err := exec.ExecPlugin(ctx, pluginPath, netconf, args.AsEnv())
+	if err != nil {
+		return nil, err
+	}
+
+	// Plugin must return result in same version as specified in netconf
+	versionDecoder := &version.ConfigDecoder{}
+	confVersion, err := versionDecoder.Decode(netconf)
+	if err != nil {
+		return nil, err
+	}
+
+	return version.NewResult(confVersion, stdoutBytes)
+}
+
+func ExecPluginWithoutResult(ctx context.Context, pluginPath string, netconf []byte, args CNIArgs, exec Exec) error {
+	if exec == nil {
+		exec = defaultExec
+	}
+	_, err := exec.ExecPlugin(ctx, pluginPath, netconf, args.AsEnv())
+	return err
+}
+
+// GetVersionInfo returns the version information available about the plugin.
+// For recent-enough plugins, it uses the information returned by the VERSION
+// command. For older plugins which do not recognize that command, it reports
+// version 0.1.0
+func GetVersionInfo(ctx context.Context, pluginPath string, exec Exec) (version.PluginInfo, error) {
+	if exec == nil {
+		exec = defaultExec
+	}
+	args := &Args{
+		Command: "VERSION",
+
+		// set fake values required by plugins built against an older version of skel
+		NetNS:  "dummy",
+		IfName: "dummy",
+		Path:   "dummy",
+	}
+	stdin := []byte(fmt.Sprintf(`{"cniVersion":%q}`, version.Current()))
+	stdoutBytes, err := exec.ExecPlugin(ctx, pluginPath, stdin, args.AsEnv())
+	if err != nil {
+		if err.Error() == "unknown CNI_COMMAND: VERSION" {
+			return version.PluginSupports("0.1.0"), nil
+		}
+		return nil, err
+	}
+
+	return exec.Decode(stdoutBytes)
+}
+
+// DefaultExec is an object that implements the Exec interface which looks
+// for and executes plugins from disk.
+type DefaultExec struct {
+	*RawExec
+	version.PluginDecoder
+}
+
+// DefaultExec implements the Exec interface
+var _ Exec = &DefaultExec{}
+
+var defaultExec = &DefaultExec{
+	RawExec: &RawExec{Stderr: os.Stderr},
+}
diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/find.go b/vendor/github.com/containernetworking/cni/pkg/invoke/find.go
new file mode 100644
index 00000000..e815404c
--- /dev/null
+++ b/vendor/github.com/containernetworking/cni/pkg/invoke/find.go
@@ -0,0 +1,43 @@
+// Copyright 2015 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package invoke
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+)
+
+// FindInPath returns the full path of the plugin by searching in the provided path
+func FindInPath(plugin string, paths []string) (string, error) {
+	if plugin == "" {
+		return "", fmt.Errorf("no plugin name provided")
+	}
+
+	if len(paths) == 0 {
+		return "", fmt.Errorf("no paths provided")
+	}
+
+	for _, path := range paths {
+		for _, fe := range ExecutableFileExtensions {
+			fullpath := filepath.Join(path, plugin) + fe
+			if fi, err := os.Stat(fullpath); err == nil && fi.Mode().IsRegular() {
+				return fullpath, nil
+			}
+		}
+	}
+
+	return "", fmt.Errorf("failed to find plugin %q in path %s", plugin, paths)
+}
diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go b/vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go
new file mode 100644
index 00000000..9bcfb455
--- /dev/null
+++ b/vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go
@@ -0,0 +1,20 @@
+// Copyright 2016 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
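+
+// Illustrative sketch for FindInPath in find.go above (the search path is an
+// assumption): it joins each path with the plugin name plus each extension in
+// ExecutableFileExtensions below, returning the first regular file found:
+//
+//	path, err := invoke.FindInPath("bridge", []string{"/opt/cni/bin"})
+//	// on unix: path == "/opt/cni/bin/bridge" when that file exists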
+ +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package invoke + +// Valid file extensions for plugin executables. +var ExecutableFileExtensions = []string{""} diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/os_windows.go b/vendor/github.com/containernetworking/cni/pkg/invoke/os_windows.go new file mode 100644 index 00000000..7665125b --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/invoke/os_windows.go @@ -0,0 +1,18 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package invoke + +// Valid file extensions for plugin executables. +var ExecutableFileExtensions = []string{".exe", ""} diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go b/vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go new file mode 100644 index 00000000..ad8498ba --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go @@ -0,0 +1,62 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
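+
+// Illustrative sketch for RawExec below (pluginPath, netconfBytes, and args
+// are assumptions): the config is fed to the plugin on stdin, the environment
+// comes from CNIArgs.AsEnv(), and JSON error output from the plugin is
+// converted into a *types.Error by pluginErr:
+//
+//	e := &invoke.RawExec{Stderr: os.Stderr}
+//	stdout, err := e.ExecPlugin(ctx, pluginPath, netconfBytes, args.AsEnv())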
+ +package invoke + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "os/exec" + + "github.com/containernetworking/cni/pkg/types" +) + +type RawExec struct { + Stderr io.Writer +} + +func (e *RawExec) ExecPlugin(ctx context.Context, pluginPath string, stdinData []byte, environ []string) ([]byte, error) { + stdout := &bytes.Buffer{} + c := exec.CommandContext(ctx, pluginPath) + c.Env = environ + c.Stdin = bytes.NewBuffer(stdinData) + c.Stdout = stdout + c.Stderr = e.Stderr + if err := c.Run(); err != nil { + return nil, pluginErr(err, stdout.Bytes()) + } + + return stdout.Bytes(), nil +} + +func pluginErr(err error, output []byte) error { + if _, ok := err.(*exec.ExitError); ok { + emsg := types.Error{} + if len(output) == 0 { + emsg.Msg = "netplugin failed with no error message" + } else if perr := json.Unmarshal(output, &emsg); perr != nil { + emsg.Msg = fmt.Sprintf("netplugin failed but error parsing its diagnostic message %q: %v", string(output), perr) + } + return &emsg + } + + return err +} + +func (e *RawExec) FindInPath(plugin string, paths []string) (string, error) { + return FindInPath(plugin, paths) +} diff --git a/vendor/github.com/containernetworking/cni/pkg/types/020/types.go b/vendor/github.com/containernetworking/cni/pkg/types/020/types.go new file mode 100644 index 00000000..53256167 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/types/020/types.go @@ -0,0 +1,140 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
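+
+// Illustrative sketch for the 0.1.0/0.2.0 compatibility types below: a
+// caller holding any types.Result can downconvert it for an old plugin
+// (the result value is an assumption):
+//
+//	old, err := types020.GetResult(result)
+//	// fails unless result.GetAsVersion can produce a 0.1.0/0.2.0 result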
+
+package types020
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"net"
+	"os"
+
+	"github.com/containernetworking/cni/pkg/types"
+)
+
+const ImplementedSpecVersion string = "0.2.0"
+
+var SupportedVersions = []string{"", "0.1.0", ImplementedSpecVersion}
+
+// Compatibility types for CNI version 0.1.0 and 0.2.0
+
+func NewResult(data []byte) (types.Result, error) {
+	result := &Result{}
+	if err := json.Unmarshal(data, result); err != nil {
+		return nil, err
+	}
+	return result, nil
+}
+
+func GetResult(r types.Result) (*Result, error) {
+	// We expect version 0.1.0/0.2.0 results
+	result020, err := r.GetAsVersion(ImplementedSpecVersion)
+	if err != nil {
+		return nil, err
+	}
+	result, ok := result020.(*Result)
+	if !ok {
+		return nil, fmt.Errorf("failed to convert result")
+	}
+	return result, nil
+}
+
+// Result is what gets returned from the plugin (via stdout) to the caller
+type Result struct {
+	CNIVersion string    `json:"cniVersion,omitempty"`
+	IP4        *IPConfig `json:"ip4,omitempty"`
+	IP6        *IPConfig `json:"ip6,omitempty"`
+	DNS        types.DNS `json:"dns,omitempty"`
+}
+
+func (r *Result) Version() string {
+	return ImplementedSpecVersion
+}
+
+func (r *Result) GetAsVersion(version string) (types.Result, error) {
+	for _, supportedVersion := range SupportedVersions {
+		if version == supportedVersion {
+			r.CNIVersion = version
+			return r, nil
+		}
+	}
+	return nil, fmt.Errorf("cannot convert to version %q; supported versions are %v", version, SupportedVersions)
+}
+
+func (r *Result) Print() error {
+	return r.PrintTo(os.Stdout)
+}
+
+func (r *Result) PrintTo(writer io.Writer) error {
+	data, err := json.MarshalIndent(r, "", "    ")
+	if err != nil {
+		return err
+	}
+	_, err = writer.Write(data)
+	return err
+}
+
+// String returns a formatted string in the form of "[IP4: $1,][ IP6: $2,] DNS: $3" where
+// $1 represents the receiver's IPv4, $2 represents the receiver's IPv6 and $3 the
+// receiver's DNS. If $1 or $2 are nil, they won't be present in the returned string.
+func (r *Result) String() string {
+	var str string
+	if r.IP4 != nil {
+		str = fmt.Sprintf("IP4:%+v, ", *r.IP4)
+	}
+	if r.IP6 != nil {
+		str += fmt.Sprintf("IP6:%+v, ", *r.IP6)
+	}
+	return fmt.Sprintf("%sDNS:%+v", str, r.DNS)
+}
+
+// IPConfig contains values necessary to configure an interface
+type IPConfig struct {
+	IP      net.IPNet
+	Gateway net.IP
+	Routes  []types.Route
+}
+
+// net.IPNet is not JSON (un)marshallable so this duality is needed
+// for our custom IPNet type
+
+// JSON (un)marshallable types
+type ipConfig struct {
+	IP      types.IPNet   `json:"ip"`
+	Gateway net.IP        `json:"gateway,omitempty"`
+	Routes  []types.Route `json:"routes,omitempty"`
+}
+
+func (c *IPConfig) MarshalJSON() ([]byte, error) {
+	ipc := ipConfig{
+		IP:      types.IPNet(c.IP),
+		Gateway: c.Gateway,
+		Routes:  c.Routes,
+	}
+
+	return json.Marshal(ipc)
+}
+
+func (c *IPConfig) UnmarshalJSON(data []byte) error {
+	ipc := ipConfig{}
+	if err := json.Unmarshal(data, &ipc); err != nil {
+		return err
+	}
+
+	c.IP = net.IPNet(ipc.IP)
+	c.Gateway = ipc.Gateway
+	c.Routes = ipc.Routes
+	return nil
+}
diff --git a/vendor/github.com/containernetworking/cni/pkg/types/args.go b/vendor/github.com/containernetworking/cni/pkg/types/args.go
new file mode 100644
index 00000000..bd8640fc
--- /dev/null
+++ b/vendor/github.com/containernetworking/cni/pkg/types/args.go
@@ -0,0 +1,112 @@
+// Copyright 2015 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+	"encoding"
+	"fmt"
+	"reflect"
+	"strings"
+)
+
+// UnmarshallableBool typedef for builtin bool
+// because builtin type's methods can't be declared
+type UnmarshallableBool bool
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+// Returns boolean true if the string is "1" or "[Tt]rue"
+// Returns boolean false if the string is "0" or "[Ff]alse"
+func (b *UnmarshallableBool) UnmarshalText(data []byte) error {
+	s := strings.ToLower(string(data))
+	switch s {
+	case "1", "true":
+		*b = true
+	case "0", "false":
+		*b = false
+	default:
+		return fmt.Errorf("Boolean unmarshal error: invalid input %s", s)
+	}
+	return nil
+}
+
+// UnmarshallableString typedef for builtin string
+type UnmarshallableString string
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+// Returns the string
+func (s *UnmarshallableString) UnmarshalText(data []byte) error {
+	*s = UnmarshallableString(data)
+	return nil
+}
+
+// CommonArgs contains the IgnoreUnknown argument
+// and must be embedded by all Arg structs
+type CommonArgs struct {
+	IgnoreUnknown UnmarshallableBool `json:"ignoreunknown,omitempty"`
+}
+
+// GetKeyField is a helper function to receive Values that represent
+// a pointer to a struct
+func GetKeyField(keyString string, v reflect.Value) reflect.Value {
+	return v.Elem().FieldByName(keyString)
+}
+
+// UnmarshalableArgsError is used to indicate error unmarshalling args
+// from the args-string in the form "K=V;K2=V2;..."
+type UnmarshalableArgsError struct {
+	error
+}
+
+// LoadArgs parses args from a string in the form "K=V;K2=V2;..."
+func LoadArgs(args string, container interface{}) error {
+	if args == "" {
+		return nil
+	}
+
+	containerValue := reflect.ValueOf(container)
+
+	pairs := strings.Split(args, ";")
+	unknownArgs := []string{}
+	for _, pair := range pairs {
+		kv := strings.Split(pair, "=")
+		if len(kv) != 2 {
+			return fmt.Errorf("ARGS: invalid pair %q", pair)
+		}
+		keyString := kv[0]
+		valueString := kv[1]
+		keyField := GetKeyField(keyString, containerValue)
+		if !keyField.IsValid() {
+			unknownArgs = append(unknownArgs, pair)
+			continue
+		}
+		keyFieldIface := keyField.Addr().Interface()
+		u, ok := keyFieldIface.(encoding.TextUnmarshaler)
+		if !ok {
+			return UnmarshalableArgsError{fmt.Errorf(
+				"ARGS: cannot unmarshal into field '%s' - type '%s' does not implement encoding.TextUnmarshaler",
+				keyString, reflect.TypeOf(keyFieldIface))}
+		}
+		err := u.UnmarshalText([]byte(valueString))
+		if err != nil {
+			return fmt.Errorf("ARGS: error parsing value of pair %q: %v", pair, err)
+		}
+	}
+
+	isIgnoreUnknown := GetKeyField("IgnoreUnknown", containerValue).Bool()
+	if len(unknownArgs) > 0 && !isIgnoreUnknown {
+		return fmt.Errorf("ARGS: unknown args %q", unknownArgs)
+	}
+	return nil
+}
diff --git a/vendor/github.com/containernetworking/cni/pkg/types/current/types.go b/vendor/github.com/containernetworking/cni/pkg/types/current/types.go
new file mode 100644
index 00000000..7267a2e6
--- /dev/null
+++ b/vendor/github.com/containernetworking/cni/pkg/types/current/types.go
@@ -0,0 +1,293 @@
+// Copyright 2016 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
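+
+// Editorial aside (not part of the original source): the package below holds
+// the "current" (spec 0.4.0) result types. A runtime holding a *current.Result
+// can downconvert for an older consumer, e.g.
+//
+//	legacy, err := res.GetAsVersion("0.2.0")
+//	if err == nil {
+//		_ = legacy.Print() // emits the 0.2.0 JSON shape
+//	}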
+
+package current
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"net"
+	"os"
+
+	"github.com/containernetworking/cni/pkg/types"
+	"github.com/containernetworking/cni/pkg/types/020"
+)
+
+const ImplementedSpecVersion string = "0.4.0"
+
+var SupportedVersions = []string{"0.3.0", "0.3.1", ImplementedSpecVersion}
+
+func NewResult(data []byte) (types.Result, error) {
+	result := &Result{}
+	if err := json.Unmarshal(data, result); err != nil {
+		return nil, err
+	}
+	return result, nil
+}
+
+func GetResult(r types.Result) (*Result, error) {
+	resultCurrent, err := r.GetAsVersion(ImplementedSpecVersion)
+	if err != nil {
+		return nil, err
+	}
+	result, ok := resultCurrent.(*Result)
+	if !ok {
+		return nil, fmt.Errorf("failed to convert result")
+	}
+	return result, nil
+}
+
+var resultConverters = []struct {
+	versions []string
+	convert  func(types.Result) (*Result, error)
+}{
+	{types020.SupportedVersions, convertFrom020},
+	{SupportedVersions, convertFrom030},
+}
+
+func convertFrom020(result types.Result) (*Result, error) {
+	oldResult, err := types020.GetResult(result)
+	if err != nil {
+		return nil, err
+	}
+
+	newResult := &Result{
+		CNIVersion: ImplementedSpecVersion,
+		DNS:        oldResult.DNS,
+		Routes:     []*types.Route{},
+	}
+
+	if oldResult.IP4 != nil {
+		newResult.IPs = append(newResult.IPs, &IPConfig{
+			Version: "4",
+			Address: oldResult.IP4.IP,
+			Gateway: oldResult.IP4.Gateway,
+		})
+		for _, route := range oldResult.IP4.Routes {
+			newResult.Routes = append(newResult.Routes, &types.Route{
+				Dst: route.Dst,
+				GW:  route.GW,
+			})
+		}
+	}
+
+	if oldResult.IP6 != nil {
+		newResult.IPs = append(newResult.IPs, &IPConfig{
+			Version: "6",
+			Address: oldResult.IP6.IP,
+			Gateway: oldResult.IP6.Gateway,
+		})
+		for _, route := range oldResult.IP6.Routes {
+			newResult.Routes = append(newResult.Routes, &types.Route{
+				Dst: route.Dst,
+				GW:  route.GW,
+			})
+		}
+	}
+
+	return newResult, nil
+}
+
+func convertFrom030(result types.Result) (*Result, error) {
+	newResult, ok := result.(*Result)
+	if !ok {
+		return nil, fmt.Errorf("failed to convert result")
+	}
+	newResult.CNIVersion = ImplementedSpecVersion
+	return newResult, nil
+}
+
+func NewResultFromResult(result types.Result) (*Result, error) {
+	version := result.Version()
+	for _, converter := range resultConverters {
+		for _, supportedVersion := range converter.versions {
+			if version == supportedVersion {
+				return converter.convert(result)
+			}
+		}
+	}
+	return nil, fmt.Errorf("unsupported CNI result version %q", version)
+}
+
+// Result is what gets returned from the plugin (via stdout) to the caller
+type Result struct {
+	CNIVersion string         `json:"cniVersion,omitempty"`
+	Interfaces []*Interface   `json:"interfaces,omitempty"`
+	IPs        []*IPConfig    `json:"ips,omitempty"`
+	Routes     []*types.Route `json:"routes,omitempty"`
+	DNS        types.DNS      `json:"dns,omitempty"`
+}
+
+// Convert to the older 0.2.0 CNI spec Result type
+func (r *Result) convertTo020() (*types020.Result, error) {
+	oldResult := &types020.Result{
+		CNIVersion: types020.ImplementedSpecVersion,
+		DNS:        r.DNS,
+	}
+
+	for _, ip := range r.IPs {
+		// Only convert the first IP address of each version as 0.2.0
+		// and earlier cannot handle multiple IP addresses
+		if ip.Version == "4" && oldResult.IP4 == nil {
+			oldResult.IP4 = &types020.IPConfig{
+				IP:      ip.Address,
+				Gateway: ip.Gateway,
+			}
+		} else if ip.Version == "6" && oldResult.IP6 == nil {
+			oldResult.IP6 = &types020.IPConfig{
+				IP:      ip.Address,
+				Gateway: ip.Gateway,
+			}
+		}
+
+		if oldResult.IP4 != nil && oldResult.IP6 != nil {
+			break
+		}
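+		// (editorial) A 0.2.0 result can hold at most one IPv4 and one IPv6
+		// config, so once both slots are filled there is nothing left to copy.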
} + + for _, route := range r.Routes { + is4 := route.Dst.IP.To4() != nil + if is4 && oldResult.IP4 != nil { + oldResult.IP4.Routes = append(oldResult.IP4.Routes, types.Route{ + Dst: route.Dst, + GW: route.GW, + }) + } else if !is4 && oldResult.IP6 != nil { + oldResult.IP6.Routes = append(oldResult.IP6.Routes, types.Route{ + Dst: route.Dst, + GW: route.GW, + }) + } + } + + if oldResult.IP4 == nil && oldResult.IP6 == nil { + return nil, fmt.Errorf("cannot convert: no valid IP addresses") + } + + return oldResult, nil +} + +func (r *Result) Version() string { + return ImplementedSpecVersion +} + +func (r *Result) GetAsVersion(version string) (types.Result, error) { + switch version { + case "0.3.0", "0.3.1", ImplementedSpecVersion: + r.CNIVersion = version + return r, nil + case types020.SupportedVersions[0], types020.SupportedVersions[1], types020.SupportedVersions[2]: + return r.convertTo020() + } + return nil, fmt.Errorf("cannot convert version 0.3.x to %q", version) +} + +func (r *Result) Print() error { + return r.PrintTo(os.Stdout) +} + +func (r *Result) PrintTo(writer io.Writer) error { + data, err := json.MarshalIndent(r, "", " ") + if err != nil { + return err + } + _, err = writer.Write(data) + return err +} + +// String returns a formatted string in the form of "[Interfaces: $1,][ IP: $2,] DNS: $3" where +// $1 represents the receiver's Interfaces, $2 represents the receiver's IP addresses and $3 the +// receiver's DNS. If $1 or $2 are nil, they won't be present in the returned string. +func (r *Result) String() string { + var str string + if len(r.Interfaces) > 0 { + str += fmt.Sprintf("Interfaces:%+v, ", r.Interfaces) + } + if len(r.IPs) > 0 { + str += fmt.Sprintf("IP:%+v, ", r.IPs) + } + if len(r.Routes) > 0 { + str += fmt.Sprintf("Routes:%+v, ", r.Routes) + } + return fmt.Sprintf("%sDNS:%+v", str, r.DNS) +} + +// Convert this old version result to the current CNI version result +func (r *Result) Convert() (*Result, error) { + return r, nil +} + +// Interface contains values about the created interfaces +type Interface struct { + Name string `json:"name"` + Mac string `json:"mac,omitempty"` + Sandbox string `json:"sandbox,omitempty"` +} + +func (i *Interface) String() string { + return fmt.Sprintf("%+v", *i) +} + +// Int returns a pointer to the int value passed in. Used to +// set the IPConfig.Interface field. 
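+//
+// Illustrative use (editorial):
+//
+//	ipc := &IPConfig{Version: "4", Interface: Int(0)} // refers to Interfaces[0]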
+func Int(v int) *int { + return &v +} + +// IPConfig contains values necessary to configure an IP address on an interface +type IPConfig struct { + // IP version, either "4" or "6" + Version string + // Index into Result structs Interfaces list + Interface *int + Address net.IPNet + Gateway net.IP +} + +func (i *IPConfig) String() string { + return fmt.Sprintf("%+v", *i) +} + +// JSON (un)marshallable types +type ipConfig struct { + Version string `json:"version"` + Interface *int `json:"interface,omitempty"` + Address types.IPNet `json:"address"` + Gateway net.IP `json:"gateway,omitempty"` +} + +func (c *IPConfig) MarshalJSON() ([]byte, error) { + ipc := ipConfig{ + Version: c.Version, + Interface: c.Interface, + Address: types.IPNet(c.Address), + Gateway: c.Gateway, + } + + return json.Marshal(ipc) +} + +func (c *IPConfig) UnmarshalJSON(data []byte) error { + ipc := ipConfig{} + if err := json.Unmarshal(data, &ipc); err != nil { + return err + } + + c.Version = ipc.Version + c.Interface = ipc.Interface + c.Address = net.IPNet(ipc.Address) + c.Gateway = ipc.Gateway + return nil +} diff --git a/vendor/github.com/containernetworking/cni/pkg/types/types.go b/vendor/github.com/containernetworking/cni/pkg/types/types.go new file mode 100644 index 00000000..d0d11006 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/types/types.go @@ -0,0 +1,199 @@ +// Copyright 2015 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "net" + "os" +) + +// like net.IPNet but adds JSON marshalling and unmarshalling +type IPNet net.IPNet + +// ParseCIDR takes a string like "10.2.3.1/24" and +// return IPNet with "10.2.3.1" and /24 mask +func ParseCIDR(s string) (*net.IPNet, error) { + ip, ipn, err := net.ParseCIDR(s) + if err != nil { + return nil, err + } + + ipn.IP = ip + return ipn, nil +} + +func (n IPNet) MarshalJSON() ([]byte, error) { + return json.Marshal((*net.IPNet)(&n).String()) +} + +func (n *IPNet) UnmarshalJSON(data []byte) error { + var s string + if err := json.Unmarshal(data, &s); err != nil { + return err + } + + tmp, err := ParseCIDR(s) + if err != nil { + return err + } + + *n = IPNet(*tmp) + return nil +} + +// NetConf describes a network. +type NetConf struct { + CNIVersion string `json:"cniVersion,omitempty"` + + Name string `json:"name,omitempty"` + Type string `json:"type,omitempty"` + Capabilities map[string]bool `json:"capabilities,omitempty"` + IPAM IPAM `json:"ipam,omitempty"` + DNS DNS `json:"dns"` + + RawPrevResult map[string]interface{} `json:"prevResult,omitempty"` + PrevResult Result `json:"-"` +} + +type IPAM struct { + Type string `json:"type,omitempty"` +} + +// NetConfList describes an ordered list of networks. 
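+//
+// A minimal conflist sketch (editorial; the name and plugin types are
+// illustrative only):
+//
+//	{
+//	    "cniVersion": "0.4.0",
+//	    "name": "example-net",
+//	    "plugins": [{"type": "bridge"}, {"type": "portmap"}]
+//	}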
+type NetConfList struct { + CNIVersion string `json:"cniVersion,omitempty"` + + Name string `json:"name,omitempty"` + DisableCheck bool `json:"disableCheck,omitempty"` + Plugins []*NetConf `json:"plugins,omitempty"` +} + +type ResultFactoryFunc func([]byte) (Result, error) + +// Result is an interface that provides the result of plugin execution +type Result interface { + // The highest CNI specification result version the result supports + // without having to convert + Version() string + + // Returns the result converted into the requested CNI specification + // result version, or an error if conversion failed + GetAsVersion(version string) (Result, error) + + // Prints the result in JSON format to stdout + Print() error + + // Prints the result in JSON format to provided writer + PrintTo(writer io.Writer) error + + // Returns a JSON string representation of the result + String() string +} + +func PrintResult(result Result, version string) error { + newResult, err := result.GetAsVersion(version) + if err != nil { + return err + } + return newResult.Print() +} + +// DNS contains values interesting for DNS resolvers +type DNS struct { + Nameservers []string `json:"nameservers,omitempty"` + Domain string `json:"domain,omitempty"` + Search []string `json:"search,omitempty"` + Options []string `json:"options,omitempty"` +} + +type Route struct { + Dst net.IPNet + GW net.IP +} + +func (r *Route) String() string { + return fmt.Sprintf("%+v", *r) +} + +// Well known error codes +// see https://github.com/containernetworking/cni/blob/master/SPEC.md#well-known-error-codes +const ( + ErrUnknown uint = iota // 0 + ErrIncompatibleCNIVersion // 1 + ErrUnsupportedField // 2 +) + +type Error struct { + Code uint `json:"code"` + Msg string `json:"msg"` + Details string `json:"details,omitempty"` +} + +func (e *Error) Error() string { + details := "" + if e.Details != "" { + details = fmt.Sprintf("; %v", e.Details) + } + return fmt.Sprintf("%v%v", e.Msg, details) +} + +func (e *Error) Print() error { + return prettyPrint(e) +} + +// net.IPNet is not JSON (un)marshallable so this duality is needed +// for our custom IPNet type + +// JSON (un)marshallable types +type route struct { + Dst IPNet `json:"dst"` + GW net.IP `json:"gw,omitempty"` +} + +func (r *Route) UnmarshalJSON(data []byte) error { + rt := route{} + if err := json.Unmarshal(data, &rt); err != nil { + return err + } + + r.Dst = net.IPNet(rt.Dst) + r.GW = rt.GW + return nil +} + +func (r Route) MarshalJSON() ([]byte, error) { + rt := route{ + Dst: IPNet(r.Dst), + GW: r.GW, + } + + return json.Marshal(rt) +} + +func prettyPrint(obj interface{}) error { + data, err := json.MarshalIndent(obj, "", " ") + if err != nil { + return err + } + _, err = os.Stdout.Write(data) + return err +} + +// NotImplementedError is used to indicate that a method is not implemented for the given platform +var NotImplementedError = errors.New("Not Implemented") diff --git a/vendor/github.com/containernetworking/cni/pkg/version/conf.go b/vendor/github.com/containernetworking/cni/pkg/version/conf.go new file mode 100644 index 00000000..3cca58bb --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/version/conf.go @@ -0,0 +1,37 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package version + +import ( + "encoding/json" + "fmt" +) + +// ConfigDecoder can decode the CNI version available in network config data +type ConfigDecoder struct{} + +func (*ConfigDecoder) Decode(jsonBytes []byte) (string, error) { + var conf struct { + CNIVersion string `json:"cniVersion"` + } + err := json.Unmarshal(jsonBytes, &conf) + if err != nil { + return "", fmt.Errorf("decoding version from network config: %s", err) + } + if conf.CNIVersion == "" { + return "0.1.0", nil + } + return conf.CNIVersion, nil +} diff --git a/vendor/github.com/containernetworking/cni/pkg/version/plugin.go b/vendor/github.com/containernetworking/cni/pkg/version/plugin.go new file mode 100644 index 00000000..1df42724 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/version/plugin.go @@ -0,0 +1,144 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package version + +import ( + "encoding/json" + "fmt" + "io" + "strconv" + "strings" +) + +// PluginInfo reports information about CNI versioning +type PluginInfo interface { + // SupportedVersions returns one or more CNI spec versions that the plugin + // supports. 
If input is provided in one of these versions, then the plugin + // promises to use the same CNI version in its response + SupportedVersions() []string + + // Encode writes this CNI version information as JSON to the given Writer + Encode(io.Writer) error +} + +type pluginInfo struct { + CNIVersion_ string `json:"cniVersion"` + SupportedVersions_ []string `json:"supportedVersions,omitempty"` +} + +// pluginInfo implements the PluginInfo interface +var _ PluginInfo = &pluginInfo{} + +func (p *pluginInfo) Encode(w io.Writer) error { + return json.NewEncoder(w).Encode(p) +} + +func (p *pluginInfo) SupportedVersions() []string { + return p.SupportedVersions_ +} + +// PluginSupports returns a new PluginInfo that will report the given versions +// as supported +func PluginSupports(supportedVersions ...string) PluginInfo { + if len(supportedVersions) < 1 { + panic("programmer error: you must support at least one version") + } + return &pluginInfo{ + CNIVersion_: Current(), + SupportedVersions_: supportedVersions, + } +} + +// PluginDecoder can decode the response returned by a plugin's VERSION command +type PluginDecoder struct{} + +func (*PluginDecoder) Decode(jsonBytes []byte) (PluginInfo, error) { + var info pluginInfo + err := json.Unmarshal(jsonBytes, &info) + if err != nil { + return nil, fmt.Errorf("decoding version info: %s", err) + } + if info.CNIVersion_ == "" { + return nil, fmt.Errorf("decoding version info: missing field cniVersion") + } + if len(info.SupportedVersions_) == 0 { + if info.CNIVersion_ == "0.2.0" { + return PluginSupports("0.1.0", "0.2.0"), nil + } + return nil, fmt.Errorf("decoding version info: missing field supportedVersions") + } + return &info, nil +} + +// ParseVersion parses a version string like "3.0.1" or "0.4.5" into major, +// minor, and micro numbers or returns an error +func ParseVersion(version string) (int, int, int, error) { + var major, minor, micro int + if version == "" { + return -1, -1, -1, fmt.Errorf("invalid version %q: the version is empty", version) + } + + parts := strings.Split(version, ".") + if len(parts) >= 4 { + return -1, -1, -1, fmt.Errorf("invalid version %q: too many parts", version) + } + + major, err := strconv.Atoi(parts[0]) + if err != nil { + return -1, -1, -1, fmt.Errorf("failed to convert major version part %q: %v", parts[0], err) + } + + if len(parts) >= 2 { + minor, err = strconv.Atoi(parts[1]) + if err != nil { + return -1, -1, -1, fmt.Errorf("failed to convert minor version part %q: %v", parts[1], err) + } + } + + if len(parts) >= 3 { + micro, err = strconv.Atoi(parts[2]) + if err != nil { + return -1, -1, -1, fmt.Errorf("failed to convert micro version part %q: %v", parts[2], err) + } + } + + return major, minor, micro, nil +} + +// GreaterThanOrEqualTo takes two string versions, parses them into major/minor/micro +// numbers, and compares them to determine whether the first version is greater +// than or equal to the second +func GreaterThanOrEqualTo(version, otherVersion string) (bool, error) { + firstMajor, firstMinor, firstMicro, err := ParseVersion(version) + if err != nil { + return false, err + } + + secondMajor, secondMinor, secondMicro, err := ParseVersion(otherVersion) + if err != nil { + return false, err + } + + if firstMajor > secondMajor { + return true, nil + } else if firstMajor == secondMajor { + if firstMinor > secondMinor { + return true, nil + } else if firstMinor == secondMinor && firstMicro >= secondMicro { + return true, nil + } + } + return false, nil +} diff --git 
a/vendor/github.com/containernetworking/cni/pkg/version/reconcile.go b/vendor/github.com/containernetworking/cni/pkg/version/reconcile.go new file mode 100644 index 00000000..25c3810b --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/version/reconcile.go @@ -0,0 +1,49 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package version + +import "fmt" + +type ErrorIncompatible struct { + Config string + Supported []string +} + +func (e *ErrorIncompatible) Details() string { + return fmt.Sprintf("config is %q, plugin supports %q", e.Config, e.Supported) +} + +func (e *ErrorIncompatible) Error() string { + return fmt.Sprintf("incompatible CNI versions: %s", e.Details()) +} + +type Reconciler struct{} + +func (r *Reconciler) Check(configVersion string, pluginInfo PluginInfo) *ErrorIncompatible { + return r.CheckRaw(configVersion, pluginInfo.SupportedVersions()) +} + +func (*Reconciler) CheckRaw(configVersion string, supportedVersions []string) *ErrorIncompatible { + for _, supportedVersion := range supportedVersions { + if configVersion == supportedVersion { + return nil + } + } + + return &ErrorIncompatible{ + Config: configVersion, + Supported: supportedVersions, + } +} diff --git a/vendor/github.com/containernetworking/cni/pkg/version/version.go b/vendor/github.com/containernetworking/cni/pkg/version/version.go new file mode 100644 index 00000000..8f3508e6 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/version/version.go @@ -0,0 +1,83 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package version + +import ( + "encoding/json" + "fmt" + + "github.com/containernetworking/cni/pkg/types" + "github.com/containernetworking/cni/pkg/types/020" + "github.com/containernetworking/cni/pkg/types/current" +) + +// Current reports the version of the CNI spec implemented by this library +func Current() string { + return "0.4.0" +} + +// Legacy PluginInfo describes a plugin that is backwards compatible with the +// CNI spec version 0.1.0. In particular, a runtime compiled against the 0.1.0 +// library ought to work correctly with a plugin that reports support for +// Legacy versions. +// +// Any future CNI spec versions which meet this definition should be added to +// this list. 
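+//
+// Illustrative use (editorial): a plugin answering the VERSION command could
+// reply with
+//
+//	version.PluginSupports("0.1.0", "0.2.0").Encode(os.Stdout)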
+var Legacy = PluginSupports("0.1.0", "0.2.0") +var All = PluginSupports("0.1.0", "0.2.0", "0.3.0", "0.3.1", "0.4.0") + +var resultFactories = []struct { + supportedVersions []string + newResult types.ResultFactoryFunc +}{ + {current.SupportedVersions, current.NewResult}, + {types020.SupportedVersions, types020.NewResult}, +} + +// Finds a Result object matching the requested version (if any) and asks +// that object to parse the plugin result, returning an error if parsing failed. +func NewResult(version string, resultBytes []byte) (types.Result, error) { + reconciler := &Reconciler{} + for _, resultFactory := range resultFactories { + err := reconciler.CheckRaw(version, resultFactory.supportedVersions) + if err == nil { + // Result supports this version + return resultFactory.newResult(resultBytes) + } + } + + return nil, fmt.Errorf("unsupported CNI result version %q", version) +} + +// ParsePrevResult parses a prevResult in a NetConf structure and sets +// the NetConf's PrevResult member to the parsed Result object. +func ParsePrevResult(conf *types.NetConf) error { + if conf.RawPrevResult == nil { + return nil + } + + resultBytes, err := json.Marshal(conf.RawPrevResult) + if err != nil { + return fmt.Errorf("could not serialize prevResult: %v", err) + } + + conf.RawPrevResult = nil + conf.PrevResult, err = NewResult(conf.CNIVersion, resultBytes) + if err != nil { + return fmt.Errorf("could not parse prevResult: %v", err) + } + + return nil +} diff --git a/vendor/github.com/docker/docker/builder/dockerignore/dockerignore.go b/vendor/github.com/docker/docker/builder/dockerignore/dockerignore.go new file mode 100644 index 00000000..57f224af --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerignore/dockerignore.go @@ -0,0 +1,64 @@ +package dockerignore // import "github.com/docker/docker/builder/dockerignore" + +import ( + "bufio" + "bytes" + "fmt" + "io" + "path/filepath" + "strings" +) + +// ReadAll reads a .dockerignore file and returns the list of file patterns +// to ignore. Note this will trim whitespace from each line as well +// as use GO's "clean" func to get the shortest/cleanest path for each. +func ReadAll(reader io.Reader) ([]string, error) { + if reader == nil { + return nil, nil + } + + scanner := bufio.NewScanner(reader) + var excludes []string + currentLine := 0 + + utf8bom := []byte{0xEF, 0xBB, 0xBF} + for scanner.Scan() { + scannedBytes := scanner.Bytes() + // We trim UTF8 BOM + if currentLine == 0 { + scannedBytes = bytes.TrimPrefix(scannedBytes, utf8bom) + } + pattern := string(scannedBytes) + currentLine++ + // Lines starting with # (comments) are ignored before processing + if strings.HasPrefix(pattern, "#") { + continue + } + pattern = strings.TrimSpace(pattern) + if pattern == "" { + continue + } + // normalize absolute paths to paths relative to the context + // (taking care of '!' prefix) + invert := pattern[0] == '!' + if invert { + pattern = strings.TrimSpace(pattern[1:]) + } + if len(pattern) > 0 { + pattern = filepath.Clean(pattern) + pattern = filepath.ToSlash(pattern) + if len(pattern) > 1 && pattern[0] == '/' { + pattern = pattern[1:] + } + } + if invert { + pattern = "!" 
+ pattern
+		}
+
+		excludes = append(excludes, pattern)
+	}
+	if err := scanner.Err(); err != nil {
+		return nil, fmt.Errorf("Error reading .dockerignore: %v", err)
+	}
+	return excludes, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/archive.go b/vendor/github.com/docker/docker/pkg/chrootarchive/archive.go
new file mode 100644
index 00000000..83ed0c6b
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/chrootarchive/archive.go
@@ -0,0 +1,106 @@
+package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive"
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net"
+	"os"
+	"os/user"
+	"path/filepath"
+
+	"github.com/docker/docker/pkg/archive"
+	"github.com/docker/docker/pkg/idtools"
+)
+
+func init() {
+	// initialize nss libraries in Glibc so that the dynamic libraries are loaded in the host
+	// environment not in the chroot from untrusted files.
+	_, _ = user.Lookup("docker")
+	_, _ = net.LookupHost("localhost")
+}
+
+// NewArchiver returns a new Archiver which uses chrootarchive.Untar
+func NewArchiver(idMapping *idtools.IdentityMapping) *archive.Archiver {
+	if idMapping == nil {
+		idMapping = &idtools.IdentityMapping{}
+	}
+	return &archive.Archiver{
+		Untar:     Untar,
+		IDMapping: idMapping,
+	}
+}
+
+// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
+// and unpacks it into the directory at `dest`.
+// The archive may be compressed with one of the following algorithms:
+// identity (uncompressed), gzip, bzip2, xz.
+func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
+	return untarHandler(tarArchive, dest, options, true, dest)
+}
+
+// UntarWithRoot is the same as `Untar`, but allows you to pass in a root directory.
+// The root directory is the directory that will be chrooted to.
+// `dest` must be a path within `root`; if it is not, an error will be returned.
+//
+// `root` should be set to a directory which is not controlled by any potentially
+// malicious process.
+//
+// This should be used to prevent a potential attacker from manipulating `dest`
+// such that it would provide access to files outside of `dest` through things
+// like symlinks. Normally `ResolveSymlinksInScope` would handle this, however
+// sanitizing symlinks in this manner is inherently racy:
+// ref: CVE-2018-15664
+func UntarWithRoot(tarArchive io.Reader, dest string, options *archive.TarOptions, root string) error {
+	return untarHandler(tarArchive, dest, options, true, root)
+}
+
+// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive,
+// and unpacks it into the directory at `dest`.
+// The archive must be an uncompressed stream.
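+//
+// Illustrative call (editorial sketch; the paths are assumptions):
+//
+//	f, _ := os.Open("/tmp/layer.tar")
+//	defer f.Close()
+//	err := chrootarchive.UntarUncompressed(f, "/var/lib/docker/tmp/extract", nil)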
+func UntarUncompressed(tarArchive io.Reader, dest string, options *archive.TarOptions) error { + return untarHandler(tarArchive, dest, options, false, dest) +} + +// Handler for teasing out the automatic decompression +func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions, decompress bool, root string) error { + if tarArchive == nil { + return fmt.Errorf("Empty archive") + } + if options == nil { + options = &archive.TarOptions{} + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + + idMapping := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) + rootIDs := idMapping.RootPair() + + dest = filepath.Clean(dest) + if _, err := os.Stat(dest); os.IsNotExist(err) { + if err := idtools.MkdirAllAndChownNew(dest, 0755, rootIDs); err != nil { + return err + } + } + + r := ioutil.NopCloser(tarArchive) + if decompress { + decompressedArchive, err := archive.DecompressStream(tarArchive) + if err != nil { + return err + } + defer decompressedArchive.Close() + r = decompressedArchive + } + + return invokeUnpack(r, dest, options, root) +} + +// Tar tars the requested path while chrooted to the specified root. +func Tar(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) { + if options == nil { + options = &archive.TarOptions{} + } + return invokePack(srcPath, options, root) +} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/archive_unix.go b/vendor/github.com/docker/docker/pkg/chrootarchive/archive_unix.go new file mode 100644 index 00000000..864c3ac6 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/archive_unix.go @@ -0,0 +1,208 @@ +// +build !windows + +package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive" + +import ( + "bytes" + "encoding/json" + "flag" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/reexec" + "github.com/pkg/errors" +) + +// untar is the entry-point for docker-untar on re-exec. This is not used on +// Windows as it does not support chroot, hence no point sandboxing through +// chroot and rexec. +func untar() { + runtime.LockOSThread() + flag.Parse() + + var options archive.TarOptions + + // read the options from the pipe "ExtraFiles" + if err := json.NewDecoder(os.NewFile(3, "options")).Decode(&options); err != nil { + fatal(err) + } + + dst := flag.Arg(0) + var root string + if len(flag.Args()) > 1 { + root = flag.Arg(1) + } + + if root == "" { + root = dst + } + + if err := chroot(root); err != nil { + fatal(err) + } + + if err := archive.Unpack(os.Stdin, dst, &options); err != nil { + fatal(err) + } + // fully consume stdin in case it is zero padded + if _, err := flush(os.Stdin); err != nil { + fatal(err) + } + + os.Exit(0) +} + +func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.TarOptions, root string) error { + if root == "" { + return errors.New("must specify a root to chroot to") + } + + // We can't pass a potentially large exclude list directly via cmd line + // because we easily overrun the kernel's max argument/environment size + // when the full image list is passed (e.g. when this is used by + // `docker load`). 
We will marshall the options via a pipe to the + // child + r, w, err := os.Pipe() + if err != nil { + return fmt.Errorf("Untar pipe failure: %v", err) + } + + if root != "" { + relDest, err := filepath.Rel(root, dest) + if err != nil { + return err + } + if relDest == "." { + relDest = "/" + } + if relDest[0] != '/' { + relDest = "/" + relDest + } + dest = relDest + } + + cmd := reexec.Command("docker-untar", dest, root) + cmd.Stdin = decompressedArchive + + cmd.ExtraFiles = append(cmd.ExtraFiles, r) + output := bytes.NewBuffer(nil) + cmd.Stdout = output + cmd.Stderr = output + + if err := cmd.Start(); err != nil { + w.Close() + return fmt.Errorf("Untar error on re-exec cmd: %v", err) + } + + // write the options to the pipe for the untar exec to read + if err := json.NewEncoder(w).Encode(options); err != nil { + w.Close() + return fmt.Errorf("Untar json encode to pipe failed: %v", err) + } + w.Close() + + if err := cmd.Wait(); err != nil { + // when `xz -d -c -q | docker-untar ...` failed on docker-untar side, + // we need to exhaust `xz`'s output, otherwise the `xz` side will be + // pending on write pipe forever + io.Copy(ioutil.Discard, decompressedArchive) + + return fmt.Errorf("Error processing tar file(%v): %s", err, output) + } + return nil +} + +func tar() { + runtime.LockOSThread() + flag.Parse() + + src := flag.Arg(0) + var root string + if len(flag.Args()) > 1 { + root = flag.Arg(1) + } + + if root == "" { + root = src + } + + if err := realChroot(root); err != nil { + fatal(err) + } + + var options archive.TarOptions + if err := json.NewDecoder(os.Stdin).Decode(&options); err != nil { + fatal(err) + } + + rdr, err := archive.TarWithOptions(src, &options) + if err != nil { + fatal(err) + } + defer rdr.Close() + + if _, err := io.Copy(os.Stdout, rdr); err != nil { + fatal(err) + } + + os.Exit(0) +} + +func invokePack(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) { + if root == "" { + return nil, errors.New("root path must not be empty") + } + + relSrc, err := filepath.Rel(root, srcPath) + if err != nil { + return nil, err + } + if relSrc == "." 
{ + relSrc = "/" + } + if relSrc[0] != '/' { + relSrc = "/" + relSrc + } + + // make sure we didn't trim a trailing slash with the call to `Rel` + if strings.HasSuffix(srcPath, "/") && !strings.HasSuffix(relSrc, "/") { + relSrc += "/" + } + + cmd := reexec.Command("docker-tar", relSrc, root) + + errBuff := bytes.NewBuffer(nil) + cmd.Stderr = errBuff + + tarR, tarW := io.Pipe() + cmd.Stdout = tarW + + stdin, err := cmd.StdinPipe() + if err != nil { + return nil, errors.Wrap(err, "error getting options pipe for tar process") + } + + if err := cmd.Start(); err != nil { + return nil, errors.Wrap(err, "tar error on re-exec cmd") + } + + go func() { + err := cmd.Wait() + err = errors.Wrapf(err, "error processing tar file: %s", errBuff) + tarW.CloseWithError(err) + }() + + if err := json.NewEncoder(stdin).Encode(options); err != nil { + stdin.Close() + return nil, errors.Wrap(err, "tar json encode to pipe failed") + } + stdin.Close() + + return tarR, nil +} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/archive_windows.go b/vendor/github.com/docker/docker/pkg/chrootarchive/archive_windows.go new file mode 100644 index 00000000..de87113e --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/archive_windows.go @@ -0,0 +1,29 @@ +package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive" + +import ( + "io" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/longpath" +) + +// chroot is not supported by Windows +func chroot(path string) error { + return nil +} + +func invokeUnpack(decompressedArchive io.ReadCloser, + dest string, + options *archive.TarOptions, root string) error { + // Windows is different to Linux here because Windows does not support + // chroot. Hence there is no point sandboxing a chrooted process to + // do the unpack. We call inline instead within the daemon process. + return archive.Unpack(decompressedArchive, longpath.AddPrefix(dest), options) +} + +func invokePack(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) { + // Windows is different to Linux here because Windows does not support + // chroot. Hence there is no point sandboxing a chrooted process to + // do the pack. We call inline instead within the daemon process. + return archive.TarWithOptions(srcPath, options) +} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_linux.go b/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_linux.go new file mode 100644 index 00000000..e0f4ebbb --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_linux.go @@ -0,0 +1,114 @@ +package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive" + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + + "github.com/moby/sys/mount" + "github.com/moby/sys/mountinfo" + rsystem "github.com/opencontainers/runc/libcontainer/system" + "golang.org/x/sys/unix" +) + +// chroot on linux uses pivot_root instead of chroot +// pivot_root takes a new root and an old root. +// Old root must be a sub-dir of new root, it is where the current rootfs will reside after the call to pivot_root. +// New root is where the new rootfs is set to. +// Old root is removed after the call to pivot_root so it is no longer available under the new root. 
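+//
+// (editorial) Rough sequence of the implementation below:
+//
+//	unshare(CLONE_NEWNS)              // enter a private mount namespace
+//	mount --make-rslave /             // stop mount propagation to the host
+//	pivot_root(path, path/.pivot_rootNNN)
+//	umount2(.pivot_rootNNN, MNT_DETACH)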
+// This is similar to how libcontainer sets up a container's rootfs +func chroot(path string) (err error) { + // if the engine is running in a user namespace we need to use actual chroot + if rsystem.RunningInUserNS() { + return realChroot(path) + } + if err := unix.Unshare(unix.CLONE_NEWNS); err != nil { + return fmt.Errorf("Error creating mount namespace before pivot: %v", err) + } + + // Make everything in new ns slave. + // Don't use `private` here as this could race where the mountns gets a + // reference to a mount and an unmount from the host does not propagate, + // which could potentially cause transient errors for other operations, + // even though this should be relatively small window here `slave` should + // not cause any problems. + if err := mount.MakeRSlave("/"); err != nil { + return err + } + + if mounted, _ := mountinfo.Mounted(path); !mounted { + if err := mount.Mount(path, path, "bind", "rbind,rw"); err != nil { + return realChroot(path) + } + } + + // setup oldRoot for pivot_root + pivotDir, err := ioutil.TempDir(path, ".pivot_root") + if err != nil { + return fmt.Errorf("Error setting up pivot dir: %v", err) + } + + var mounted bool + defer func() { + if mounted { + // make sure pivotDir is not mounted before we try to remove it + if errCleanup := unix.Unmount(pivotDir, unix.MNT_DETACH); errCleanup != nil { + if err == nil { + err = errCleanup + } + return + } + } + + errCleanup := os.Remove(pivotDir) + // pivotDir doesn't exist if pivot_root failed and chroot+chdir was successful + // because we already cleaned it up on failed pivot_root + if errCleanup != nil && !os.IsNotExist(errCleanup) { + errCleanup = fmt.Errorf("Error cleaning up after pivot: %v", errCleanup) + if err == nil { + err = errCleanup + } + } + }() + + if err := unix.PivotRoot(path, pivotDir); err != nil { + // If pivot fails, fall back to the normal chroot after cleaning up temp dir + if err := os.Remove(pivotDir); err != nil { + return fmt.Errorf("Error cleaning up after failed pivot: %v", err) + } + return realChroot(path) + } + mounted = true + + // This is the new path for where the old root (prior to the pivot) has been moved to + // This dir contains the rootfs of the caller, which we need to remove so it is not visible during extraction + pivotDir = filepath.Join("/", filepath.Base(pivotDir)) + + if err := unix.Chdir("/"); err != nil { + return fmt.Errorf("Error changing to new root: %v", err) + } + + // Make the pivotDir (where the old root lives) private so it can be unmounted without propagating to the host + if err := unix.Mount("", pivotDir, "", unix.MS_PRIVATE|unix.MS_REC, ""); err != nil { + return fmt.Errorf("Error making old root private after pivot: %v", err) + } + + // Now unmount the old root so it's no longer visible from the new root + if err := unix.Unmount(pivotDir, unix.MNT_DETACH); err != nil { + return fmt.Errorf("Error while unmounting old root after pivot: %v", err) + } + mounted = false + + return nil +} + +func realChroot(path string) error { + if err := unix.Chroot(path); err != nil { + return fmt.Errorf("Error after fallback to chroot: %v", err) + } + if err := unix.Chdir("/"); err != nil { + return fmt.Errorf("Error changing to new root after chroot: %v", err) + } + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_unix.go b/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_unix.go new file mode 100644 index 00000000..8003136f --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_unix.go @@ -0,0 +1,16 
@@ +// +build !windows,!linux + +package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive" + +import "golang.org/x/sys/unix" + +func chroot(path string) error { + if err := unix.Chroot(path); err != nil { + return err + } + return unix.Chdir("/") +} + +func realChroot(path string) error { + return chroot(path) +} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/diff.go b/vendor/github.com/docker/docker/pkg/chrootarchive/diff.go new file mode 100644 index 00000000..7712cc17 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/diff.go @@ -0,0 +1,23 @@ +package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive" + +import ( + "io" + + "github.com/docker/docker/pkg/archive" +) + +// ApplyLayer parses a diff in the standard layer format from `layer`, +// and applies it to the directory `dest`. The stream `layer` can only be +// uncompressed. +// Returns the size in bytes of the contents of the layer. +func ApplyLayer(dest string, layer io.Reader) (size int64, err error) { + return applyLayerHandler(dest, layer, &archive.TarOptions{}, true) +} + +// ApplyUncompressedLayer parses a diff in the standard layer format from +// `layer`, and applies it to the directory `dest`. The stream `layer` +// can only be uncompressed. +// Returns the size in bytes of the contents of the layer. +func ApplyUncompressedLayer(dest string, layer io.Reader, options *archive.TarOptions) (int64, error) { + return applyLayerHandler(dest, layer, options, false) +} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/diff_unix.go b/vendor/github.com/docker/docker/pkg/chrootarchive/diff_unix.go new file mode 100644 index 00000000..d96a09f8 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/diff_unix.go @@ -0,0 +1,130 @@ +//+build !windows + +package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive" + +import ( + "bytes" + "encoding/json" + "flag" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "runtime" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/reexec" + "github.com/docker/docker/pkg/system" + rsystem "github.com/opencontainers/runc/libcontainer/system" +) + +type applyLayerResponse struct { + LayerSize int64 `json:"layerSize"` +} + +// applyLayer is the entry-point for docker-applylayer on re-exec. This is not +// used on Windows as it does not support chroot, hence no point sandboxing +// through chroot and rexec. 
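+//
+// Editorial note on the protocol, as read from the code below: tar options
+// arrive JSON-encoded in the OPT environment variable, the layer streams in
+// on stdin, and the resulting layer size is written to stdout as JSON.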
+func applyLayer() { + + var ( + tmpDir string + err error + options *archive.TarOptions + ) + runtime.LockOSThread() + flag.Parse() + + inUserns := rsystem.RunningInUserNS() + if err := chroot(flag.Arg(0)); err != nil { + fatal(err) + } + + // We need to be able to set any perms + oldmask, err := system.Umask(0) + defer system.Umask(oldmask) + if err != nil { + fatal(err) + } + + if err := json.Unmarshal([]byte(os.Getenv("OPT")), &options); err != nil { + fatal(err) + } + + if inUserns { + options.InUserNS = true + } + + if tmpDir, err = ioutil.TempDir("/", "temp-docker-extract"); err != nil { + fatal(err) + } + + os.Setenv("TMPDIR", tmpDir) + size, err := archive.UnpackLayer("/", os.Stdin, options) + os.RemoveAll(tmpDir) + if err != nil { + fatal(err) + } + + encoder := json.NewEncoder(os.Stdout) + if err := encoder.Encode(applyLayerResponse{size}); err != nil { + fatal(fmt.Errorf("unable to encode layerSize JSON: %s", err)) + } + + if _, err := flush(os.Stdin); err != nil { + fatal(err) + } + + os.Exit(0) +} + +// applyLayerHandler parses a diff in the standard layer format from `layer`, and +// applies it to the directory `dest`. Returns the size in bytes of the +// contents of the layer. +func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) { + dest = filepath.Clean(dest) + if decompress { + decompressed, err := archive.DecompressStream(layer) + if err != nil { + return 0, err + } + defer decompressed.Close() + + layer = decompressed + } + if options == nil { + options = &archive.TarOptions{} + if rsystem.RunningInUserNS() { + options.InUserNS = true + } + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + + data, err := json.Marshal(options) + if err != nil { + return 0, fmt.Errorf("ApplyLayer json encode: %v", err) + } + + cmd := reexec.Command("docker-applyLayer", dest) + cmd.Stdin = layer + cmd.Env = append(cmd.Env, fmt.Sprintf("OPT=%s", data)) + + outBuf, errBuf := new(bytes.Buffer), new(bytes.Buffer) + cmd.Stdout, cmd.Stderr = outBuf, errBuf + + if err = cmd.Run(); err != nil { + return 0, fmt.Errorf("ApplyLayer %s stdout: %s stderr: %s", err, outBuf, errBuf) + } + + // Stdout should be a valid JSON struct representing an applyLayerResponse. + response := applyLayerResponse{} + decoder := json.NewDecoder(outBuf) + if err = decoder.Decode(&response); err != nil { + return 0, fmt.Errorf("unable to decode ApplyLayer JSON response: %s", err) + } + + return response.LayerSize, nil +} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/diff_windows.go b/vendor/github.com/docker/docker/pkg/chrootarchive/diff_windows.go new file mode 100644 index 00000000..8f3f3a4a --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/diff_windows.go @@ -0,0 +1,45 @@ +package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive" + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/longpath" +) + +// applyLayerHandler parses a diff in the standard layer format from `layer`, and +// applies it to the directory `dest`. Returns the size in bytes of the +// contents of the layer. 
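+//
+// (editorial) Windows has no chroot, so, as with invokeUnpack above, the
+// unpack below runs inline in the daemon process rather than in a re-exec'd
+// child.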
+func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) { + dest = filepath.Clean(dest) + + // Ensure it is a Windows-style volume path + dest = longpath.AddPrefix(dest) + + if decompress { + decompressed, err := archive.DecompressStream(layer) + if err != nil { + return 0, err + } + defer decompressed.Close() + + layer = decompressed + } + + tmpDir, err := ioutil.TempDir(os.Getenv("temp"), "temp-docker-extract") + if err != nil { + return 0, fmt.Errorf("ApplyLayer failed to create temp-docker-extract under %s. %s", dest, err) + } + + s, err := archive.UnpackLayer(dest, layer, nil) + os.RemoveAll(tmpDir) + if err != nil { + return 0, fmt.Errorf("ApplyLayer %s failed UnpackLayer to %s: %s", layer, dest, err) + } + + return s, nil +} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/init_unix.go b/vendor/github.com/docker/docker/pkg/chrootarchive/init_unix.go new file mode 100644 index 00000000..c24fea7d --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/init_unix.go @@ -0,0 +1,29 @@ +// +build !windows + +package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive" + +import ( + "fmt" + "io" + "io/ioutil" + "os" + + "github.com/docker/docker/pkg/reexec" +) + +func init() { + reexec.Register("docker-applyLayer", applyLayer) + reexec.Register("docker-untar", untar) + reexec.Register("docker-tar", tar) +} + +func fatal(err error) { + fmt.Fprint(os.Stderr, err) + os.Exit(1) +} + +// flush consumes all the bytes from the reader discarding +// any errors +func flush(r io.Reader) (bytes int64, err error) { + return io.Copy(ioutil.Discard, r) +} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/init_windows.go b/vendor/github.com/docker/docker/pkg/chrootarchive/init_windows.go new file mode 100644 index 00000000..15ed874e --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/init_windows.go @@ -0,0 +1,4 @@ +package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive" + +func init() { +} diff --git a/vendor/github.com/docker/docker/pkg/locker/README.md b/vendor/github.com/docker/docker/pkg/locker/README.md new file mode 100644 index 00000000..ce787aef --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/locker/README.md @@ -0,0 +1,65 @@ +Locker +===== + +locker provides a mechanism for creating finer-grained locking to help +free up more global locks to handle other tasks. + +The implementation looks close to a sync.Mutex, however, the user must provide a +reference to use to refer to the underlying lock when locking and unlocking, +and unlock may generate an error. + +If a lock with a given name does not exist when `Lock` is called, one is +created. +Lock references are automatically cleaned up on `Unlock` if nothing else is +waiting for the lock. 
+ + +## Usage + +```go +package important + +import ( + "sync" + "time" + + "github.com/docker/docker/pkg/locker" +) + +type important struct { + locks *locker.Locker + data map[string]interface{} + mu sync.Mutex +} + +func (i *important) Get(name string) interface{} { + i.locks.Lock(name) + defer i.locks.Unlock(name) + return i.data[name] +} + +func (i *important) Create(name string, data interface{}) { + i.locks.Lock(name) + defer i.locks.Unlock(name) + + i.createImportant(data) + + i.mu.Lock() + i.data[name] = data + i.mu.Unlock() +} + +func (i *important) createImportant(data interface{}) { + time.Sleep(10 * time.Second) +} +``` + +For functions dealing with a given name, always lock at the beginning of the +function (or before doing anything with the underlying state), this ensures any +other function that is dealing with the same name will block. + +When needing to modify the underlying data, use the global lock to ensure nothing +else is modifying it at the same time. +Since name lock is already in place, no reads will occur while the modification +is being performed. + diff --git a/vendor/github.com/docker/docker/pkg/locker/locker.go b/vendor/github.com/docker/docker/pkg/locker/locker.go new file mode 100644 index 00000000..dbd47fc4 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/locker/locker.go @@ -0,0 +1,112 @@ +/* +Package locker provides a mechanism for creating finer-grained locking to help +free up more global locks to handle other tasks. + +The implementation looks close to a sync.Mutex, however the user must provide a +reference to use to refer to the underlying lock when locking and unlocking, +and unlock may generate an error. + +If a lock with a given name does not exist when `Lock` is called, one is +created. +Lock references are automatically cleaned up on `Unlock` if nothing else is +waiting for the lock. +*/ +package locker // import "github.com/docker/docker/pkg/locker" + +import ( + "errors" + "sync" + "sync/atomic" +) + +// ErrNoSuchLock is returned when the requested lock does not exist +var ErrNoSuchLock = errors.New("no such lock") + +// Locker provides a locking mechanism based on the passed in reference name +type Locker struct { + mu sync.Mutex + locks map[string]*lockCtr +} + +// lockCtr is used by Locker to represent a lock with a given name. +type lockCtr struct { + mu sync.Mutex + // waiters is the number of waiters waiting to acquire the lock + // this is int32 instead of uint32 so we can add `-1` in `dec()` + waiters int32 +} + +// inc increments the number of waiters waiting for the lock +func (l *lockCtr) inc() { + atomic.AddInt32(&l.waiters, 1) +} + +// dec decrements the number of waiters waiting on the lock +func (l *lockCtr) dec() { + atomic.AddInt32(&l.waiters, -1) +} + +// count gets the current number of waiters +func (l *lockCtr) count() int32 { + return atomic.LoadInt32(&l.waiters) +} + +// Lock locks the mutex +func (l *lockCtr) Lock() { + l.mu.Lock() +} + +// Unlock unlocks the mutex +func (l *lockCtr) Unlock() { + l.mu.Unlock() +} + +// New creates a new Locker +func New() *Locker { + return &Locker{ + locks: make(map[string]*lockCtr), + } +} + +// Lock locks a mutex with the given name. 
If it doesn't exist, one is created +func (l *Locker) Lock(name string) { + l.mu.Lock() + if l.locks == nil { + l.locks = make(map[string]*lockCtr) + } + + nameLock, exists := l.locks[name] + if !exists { + nameLock = &lockCtr{} + l.locks[name] = nameLock + } + + // increment the nameLock waiters while inside the main mutex + // this makes sure that the lock isn't deleted if `Lock` and `Unlock` are called concurrently + nameLock.inc() + l.mu.Unlock() + + // Lock the nameLock outside the main mutex so we don't block other operations + // once locked then we can decrement the number of waiters for this lock + nameLock.Lock() + nameLock.dec() +} + +// Unlock unlocks the mutex with the given name +// If the given lock is not being waited on by any other callers, it is deleted +func (l *Locker) Unlock(name string) error { + l.mu.Lock() + nameLock, exists := l.locks[name] + if !exists { + l.mu.Unlock() + return ErrNoSuchLock + } + + if nameLock.count() == 0 { + delete(l.locks, name) + } + nameLock.Unlock() + + l.mu.Unlock() + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/reexec/README.md b/vendor/github.com/docker/docker/pkg/reexec/README.md new file mode 100644 index 00000000..6658f69b --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/reexec/README.md @@ -0,0 +1,5 @@ +# reexec + +The `reexec` package facilitates the busybox style reexec of the docker binary that we require because +of the forking limitations of using Go. Handlers can be registered with a name and the argv 0 of +the exec of the binary will be used to find and execute custom init paths. diff --git a/vendor/github.com/docker/docker/pkg/reexec/command_linux.go b/vendor/github.com/docker/docker/pkg/reexec/command_linux.go new file mode 100644 index 00000000..efea7179 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/reexec/command_linux.go @@ -0,0 +1,28 @@ +package reexec // import "github.com/docker/docker/pkg/reexec" + +import ( + "os/exec" + "syscall" + + "golang.org/x/sys/unix" +) + +// Self returns the path to the current process's binary. +// Returns "/proc/self/exe". +func Self() string { + return "/proc/self/exe" +} + +// Command returns *exec.Cmd which has Path as current binary. Also it setting +// SysProcAttr.Pdeathsig to SIGTERM. +// This will use the in-memory version (/proc/self/exe) of the current binary, +// it is thus safe to delete or replace the on-disk binary (os.Args[0]). +func Command(args ...string) *exec.Cmd { + return &exec.Cmd{ + Path: Self(), + Args: args, + SysProcAttr: &syscall.SysProcAttr{ + Pdeathsig: unix.SIGTERM, + }, + } +} diff --git a/vendor/github.com/docker/docker/pkg/reexec/command_unix.go b/vendor/github.com/docker/docker/pkg/reexec/command_unix.go new file mode 100644 index 00000000..ceaabbde --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/reexec/command_unix.go @@ -0,0 +1,23 @@ +// +build freebsd darwin + +package reexec // import "github.com/docker/docker/pkg/reexec" + +import ( + "os/exec" +) + +// Self returns the path to the current process's binary. +// Uses os.Args[0]. +func Self() string { + return naiveSelf() +} + +// Command returns *exec.Cmd which has Path as current binary. +// For example if current binary is "docker" at "/usr/bin/", then cmd.Path will +// be set to "/usr/bin/docker". 
+func Command(args ...string) *exec.Cmd {
+	return &exec.Cmd{
+		Path: Self(),
+		Args: args,
+	}
+}
diff --git a/vendor/github.com/docker/docker/pkg/reexec/command_unsupported.go b/vendor/github.com/docker/docker/pkg/reexec/command_unsupported.go
new file mode 100644
index 00000000..e7eed242
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/reexec/command_unsupported.go
@@ -0,0 +1,16 @@
+// +build !linux,!windows,!freebsd,!darwin
+
+package reexec // import "github.com/docker/docker/pkg/reexec"
+
+import (
+	"os/exec"
+)
+
+// Self returns an empty string on unsupported platforms.
+func Self() string {
+	return ""
+}
+
+// Command is unsupported on operating systems apart from Linux, Windows,
+// FreeBSD, and Darwin (the platforms excluded by the build tag above); it
+// returns nil.
+func Command(args ...string) *exec.Cmd {
+	return nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/reexec/command_windows.go b/vendor/github.com/docker/docker/pkg/reexec/command_windows.go
new file mode 100644
index 00000000..43822689
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/reexec/command_windows.go
@@ -0,0 +1,21 @@
+package reexec // import "github.com/docker/docker/pkg/reexec"
+
+import (
+	"os/exec"
+)
+
+// Self returns the path to the current process's binary.
+// Uses os.Args[0].
+func Self() string {
+	return naiveSelf()
+}
+
+// Command returns *exec.Cmd which has Path as current binary.
+// For example if current binary is "docker.exe" at "C:\", then cmd.Path will
+// be set to "C:\docker.exe".
+func Command(args ...string) *exec.Cmd {
+	return &exec.Cmd{
+		Path: Self(),
+		Args: args,
+	}
+}
diff --git a/vendor/github.com/docker/docker/pkg/reexec/reexec.go b/vendor/github.com/docker/docker/pkg/reexec/reexec.go
new file mode 100644
index 00000000..f8ccddd5
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/reexec/reexec.go
@@ -0,0 +1,47 @@
+package reexec // import "github.com/docker/docker/pkg/reexec"
+
+import (
+	"fmt"
+	"os"
+	"os/exec"
+	"path/filepath"
+)
+
+var registeredInitializers = make(map[string]func())
+
+// Register adds an initialization func under the specified name
+func Register(name string, initializer func()) {
+	if _, exists := registeredInitializers[name]; exists {
+		panic(fmt.Sprintf("reexec func already registered under name %q", name))
+	}
+
+	registeredInitializers[name] = initializer
+}
+
+// Init is called as the first part of the exec process and returns true if an
+// initialization function was called.
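+//
+// Typical wiring (editorial sketch; "my-helper" is a made-up name):
+//
+//	func init() { reexec.Register("my-helper", helperMain) }
+//
+//	func main() {
+//		if reexec.Init() {
+//			return // this process was re-exec'd as "my-helper"
+//		}
+//		// normal startup path
+//	}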
+func Init() bool { + initializer, exists := registeredInitializers[os.Args[0]] + if exists { + initializer() + + return true + } + return false +} + +func naiveSelf() string { + name := os.Args[0] + if filepath.Base(name) == name { + if lp, err := exec.LookPath(name); err == nil { + return lp + } + } + // handle conversion of relative paths to absolute + if absName, err := filepath.Abs(name); err == nil { + return absName + } + // if we couldn't get absolute name, return original + // (NOTE: Go only errors on Abs() if os.Getwd fails) + return name +} diff --git a/vendor/github.com/docker/docker/pkg/signal/README.md b/vendor/github.com/docker/docker/pkg/signal/README.md new file mode 100644 index 00000000..2b237a59 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/signal/README.md @@ -0,0 +1 @@ +This package provides helper functions for dealing with signals across various operating systems \ No newline at end of file diff --git a/vendor/github.com/docker/docker/pkg/signal/signal.go b/vendor/github.com/docker/docker/pkg/signal/signal.go new file mode 100644 index 00000000..88ef7b5e --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/signal/signal.go @@ -0,0 +1,54 @@ +// Package signal provides helper functions for dealing with signals across +// various operating systems. +package signal // import "github.com/docker/docker/pkg/signal" + +import ( + "fmt" + "os" + "os/signal" + "strconv" + "strings" + "syscall" +) + +// CatchAll catches all signals and relays them to the specified channel. +func CatchAll(sigc chan os.Signal) { + var handledSigs []os.Signal + for _, s := range SignalMap { + handledSigs = append(handledSigs, s) + } + signal.Notify(sigc, handledSigs...) +} + +// StopCatch stops catching the signals and closes the specified channel. +func StopCatch(sigc chan os.Signal) { + signal.Stop(sigc) + close(sigc) +} + +// ParseSignal translates a string to a valid syscall signal. +// It returns an error if the signal map doesn't include the given signal. +func ParseSignal(rawSignal string) (syscall.Signal, error) { + s, err := strconv.Atoi(rawSignal) + if err == nil { + if s == 0 { + return -1, fmt.Errorf("Invalid signal: %s", rawSignal) + } + return syscall.Signal(s), nil + } + signal, ok := SignalMap[strings.TrimPrefix(strings.ToUpper(rawSignal), "SIG")] + if !ok { + return -1, fmt.Errorf("Invalid signal: %s", rawSignal) + } + return signal, nil +} + +// ValidSignalForPlatform returns true if a signal is valid on the platform +func ValidSignalForPlatform(sig syscall.Signal) bool { + for _, v := range SignalMap { + if v == sig { + return true + } + } + return false +} diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_darwin.go b/vendor/github.com/docker/docker/pkg/signal/signal_darwin.go new file mode 100644 index 00000000..ee5501e3 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/signal/signal_darwin.go @@ -0,0 +1,41 @@ +package signal // import "github.com/docker/docker/pkg/signal" + +import ( + "syscall" +) + +// SignalMap is a map of Darwin signals. 
+var SignalMap = map[string]syscall.Signal{
+	"ABRT":   syscall.SIGABRT,
+	"ALRM":   syscall.SIGALRM,
+	"BUS":    syscall.SIGBUS,
+	"CHLD":   syscall.SIGCHLD,
+	"CONT":   syscall.SIGCONT,
+	"EMT":    syscall.SIGEMT,
+	"FPE":    syscall.SIGFPE,
+	"HUP":    syscall.SIGHUP,
+	"ILL":    syscall.SIGILL,
+	"INFO":   syscall.SIGINFO,
+	"INT":    syscall.SIGINT,
+	"IO":     syscall.SIGIO,
+	"IOT":    syscall.SIGIOT,
+	"KILL":   syscall.SIGKILL,
+	"PIPE":   syscall.SIGPIPE,
+	"PROF":   syscall.SIGPROF,
+	"QUIT":   syscall.SIGQUIT,
+	"SEGV":   syscall.SIGSEGV,
+	"STOP":   syscall.SIGSTOP,
+	"SYS":    syscall.SIGSYS,
+	"TERM":   syscall.SIGTERM,
+	"TRAP":   syscall.SIGTRAP,
+	"TSTP":   syscall.SIGTSTP,
+	"TTIN":   syscall.SIGTTIN,
+	"TTOU":   syscall.SIGTTOU,
+	"URG":    syscall.SIGURG,
+	"USR1":   syscall.SIGUSR1,
+	"USR2":   syscall.SIGUSR2,
+	"VTALRM": syscall.SIGVTALRM,
+	"WINCH":  syscall.SIGWINCH,
+	"XCPU":   syscall.SIGXCPU,
+	"XFSZ":   syscall.SIGXFSZ,
+}
diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_freebsd.go b/vendor/github.com/docker/docker/pkg/signal/signal_freebsd.go
new file mode 100644
index 00000000..764f90e2
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/signal/signal_freebsd.go
@@ -0,0 +1,43 @@
+package signal // import "github.com/docker/docker/pkg/signal"
+
+import (
+	"syscall"
+)
+
+// SignalMap is a map of FreeBSD signals.
+var SignalMap = map[string]syscall.Signal{
+	"ABRT":   syscall.SIGABRT,
+	"ALRM":   syscall.SIGALRM,
+	"BUS":    syscall.SIGBUS,
+	"CHLD":   syscall.SIGCHLD,
+	"CONT":   syscall.SIGCONT,
+	"EMT":    syscall.SIGEMT,
+	"FPE":    syscall.SIGFPE,
+	"HUP":    syscall.SIGHUP,
+	"ILL":    syscall.SIGILL,
+	"INFO":   syscall.SIGINFO,
+	"INT":    syscall.SIGINT,
+	"IO":     syscall.SIGIO,
+	"IOT":    syscall.SIGIOT,
+	"KILL":   syscall.SIGKILL,
+	"LWP":    syscall.SIGLWP,
+	"PIPE":   syscall.SIGPIPE,
+	"PROF":   syscall.SIGPROF,
+	"QUIT":   syscall.SIGQUIT,
+	"SEGV":   syscall.SIGSEGV,
+	"STOP":   syscall.SIGSTOP,
+	"SYS":    syscall.SIGSYS,
+	"TERM":   syscall.SIGTERM,
+	"THR":    syscall.SIGTHR,
+	"TRAP":   syscall.SIGTRAP,
+	"TSTP":   syscall.SIGTSTP,
+	"TTIN":   syscall.SIGTTIN,
+	"TTOU":   syscall.SIGTTOU,
+	"URG":    syscall.SIGURG,
+	"USR1":   syscall.SIGUSR1,
+	"USR2":   syscall.SIGUSR2,
+	"VTALRM": syscall.SIGVTALRM,
+	"WINCH":  syscall.SIGWINCH,
+	"XCPU":   syscall.SIGXCPU,
+	"XFSZ":   syscall.SIGXFSZ,
+}
diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_linux.go b/vendor/github.com/docker/docker/pkg/signal/signal_linux.go
new file mode 100644
index 00000000..4013bded
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/signal/signal_linux.go
@@ -0,0 +1,83 @@
+// +build !mips,!mipsle,!mips64,!mips64le
+
+package signal // import "github.com/docker/docker/pkg/signal"
+
+import (
+	"syscall"
+
+	"golang.org/x/sys/unix"
+)
+
+const (
+	sigrtmin = 34
+	sigrtmax = 64
+)
+
+// SignalMap is a map of Linux signals.
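+//
+// Real-time signal names are computed from the sigrtmin/sigrtmax
+// constants above; with sigrtmin = 34, "RTMIN+3" resolves to signal 37
+// (illustrative arithmetic, not an additional entry).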
+var SignalMap = map[string]syscall.Signal{ + "ABRT": unix.SIGABRT, + "ALRM": unix.SIGALRM, + "BUS": unix.SIGBUS, + "CHLD": unix.SIGCHLD, + "CLD": unix.SIGCLD, + "CONT": unix.SIGCONT, + "FPE": unix.SIGFPE, + "HUP": unix.SIGHUP, + "ILL": unix.SIGILL, + "INT": unix.SIGINT, + "IO": unix.SIGIO, + "IOT": unix.SIGIOT, + "KILL": unix.SIGKILL, + "PIPE": unix.SIGPIPE, + "POLL": unix.SIGPOLL, + "PROF": unix.SIGPROF, + "PWR": unix.SIGPWR, + "QUIT": unix.SIGQUIT, + "SEGV": unix.SIGSEGV, + "STKFLT": unix.SIGSTKFLT, + "STOP": unix.SIGSTOP, + "SYS": unix.SIGSYS, + "TERM": unix.SIGTERM, + "TRAP": unix.SIGTRAP, + "TSTP": unix.SIGTSTP, + "TTIN": unix.SIGTTIN, + "TTOU": unix.SIGTTOU, + "URG": unix.SIGURG, + "USR1": unix.SIGUSR1, + "USR2": unix.SIGUSR2, + "VTALRM": unix.SIGVTALRM, + "WINCH": unix.SIGWINCH, + "XCPU": unix.SIGXCPU, + "XFSZ": unix.SIGXFSZ, + "RTMIN": sigrtmin, + "RTMIN+1": sigrtmin + 1, + "RTMIN+2": sigrtmin + 2, + "RTMIN+3": sigrtmin + 3, + "RTMIN+4": sigrtmin + 4, + "RTMIN+5": sigrtmin + 5, + "RTMIN+6": sigrtmin + 6, + "RTMIN+7": sigrtmin + 7, + "RTMIN+8": sigrtmin + 8, + "RTMIN+9": sigrtmin + 9, + "RTMIN+10": sigrtmin + 10, + "RTMIN+11": sigrtmin + 11, + "RTMIN+12": sigrtmin + 12, + "RTMIN+13": sigrtmin + 13, + "RTMIN+14": sigrtmin + 14, + "RTMIN+15": sigrtmin + 15, + "RTMAX-14": sigrtmax - 14, + "RTMAX-13": sigrtmax - 13, + "RTMAX-12": sigrtmax - 12, + "RTMAX-11": sigrtmax - 11, + "RTMAX-10": sigrtmax - 10, + "RTMAX-9": sigrtmax - 9, + "RTMAX-8": sigrtmax - 8, + "RTMAX-7": sigrtmax - 7, + "RTMAX-6": sigrtmax - 6, + "RTMAX-5": sigrtmax - 5, + "RTMAX-4": sigrtmax - 4, + "RTMAX-3": sigrtmax - 3, + "RTMAX-2": sigrtmax - 2, + "RTMAX-1": sigrtmax - 1, + "RTMAX": sigrtmax, +} diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_linux_mipsx.go b/vendor/github.com/docker/docker/pkg/signal/signal_linux_mipsx.go new file mode 100644 index 00000000..c78c887a --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/signal/signal_linux_mipsx.go @@ -0,0 +1,84 @@ +// +build linux +// +build mips mipsle mips64 mips64le + +package signal // import "github.com/docker/docker/pkg/signal" + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +const ( + sigrtmin = 34 + sigrtmax = 127 +) + +// SignalMap is a map of Linux signals. 
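+//
+// Note the MIPS-specific bound above: sigrtmax is 127 rather than 64,
+// so "RTMAX" resolves to signal 127 on these architectures.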
+var SignalMap = map[string]syscall.Signal{ + "ABRT": unix.SIGABRT, + "ALRM": unix.SIGALRM, + "BUS": unix.SIGBUS, + "CHLD": unix.SIGCHLD, + "CLD": unix.SIGCLD, + "CONT": unix.SIGCONT, + "FPE": unix.SIGFPE, + "HUP": unix.SIGHUP, + "ILL": unix.SIGILL, + "INT": unix.SIGINT, + "IO": unix.SIGIO, + "IOT": unix.SIGIOT, + "KILL": unix.SIGKILL, + "PIPE": unix.SIGPIPE, + "POLL": unix.SIGPOLL, + "PROF": unix.SIGPROF, + "PWR": unix.SIGPWR, + "QUIT": unix.SIGQUIT, + "SEGV": unix.SIGSEGV, + "EMT": unix.SIGEMT, + "STOP": unix.SIGSTOP, + "SYS": unix.SIGSYS, + "TERM": unix.SIGTERM, + "TRAP": unix.SIGTRAP, + "TSTP": unix.SIGTSTP, + "TTIN": unix.SIGTTIN, + "TTOU": unix.SIGTTOU, + "URG": unix.SIGURG, + "USR1": unix.SIGUSR1, + "USR2": unix.SIGUSR2, + "VTALRM": unix.SIGVTALRM, + "WINCH": unix.SIGWINCH, + "XCPU": unix.SIGXCPU, + "XFSZ": unix.SIGXFSZ, + "RTMIN": sigrtmin, + "RTMIN+1": sigrtmin + 1, + "RTMIN+2": sigrtmin + 2, + "RTMIN+3": sigrtmin + 3, + "RTMIN+4": sigrtmin + 4, + "RTMIN+5": sigrtmin + 5, + "RTMIN+6": sigrtmin + 6, + "RTMIN+7": sigrtmin + 7, + "RTMIN+8": sigrtmin + 8, + "RTMIN+9": sigrtmin + 9, + "RTMIN+10": sigrtmin + 10, + "RTMIN+11": sigrtmin + 11, + "RTMIN+12": sigrtmin + 12, + "RTMIN+13": sigrtmin + 13, + "RTMIN+14": sigrtmin + 14, + "RTMIN+15": sigrtmin + 15, + "RTMAX-14": sigrtmax - 14, + "RTMAX-13": sigrtmax - 13, + "RTMAX-12": sigrtmax - 12, + "RTMAX-11": sigrtmax - 11, + "RTMAX-10": sigrtmax - 10, + "RTMAX-9": sigrtmax - 9, + "RTMAX-8": sigrtmax - 8, + "RTMAX-7": sigrtmax - 7, + "RTMAX-6": sigrtmax - 6, + "RTMAX-5": sigrtmax - 5, + "RTMAX-4": sigrtmax - 4, + "RTMAX-3": sigrtmax - 3, + "RTMAX-2": sigrtmax - 2, + "RTMAX-1": sigrtmax - 1, + "RTMAX": sigrtmax, +} diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_unix.go b/vendor/github.com/docker/docker/pkg/signal/signal_unix.go new file mode 100644 index 00000000..a2aa4248 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/signal/signal_unix.go @@ -0,0 +1,21 @@ +// +build !windows + +package signal // import "github.com/docker/docker/pkg/signal" + +import ( + "syscall" +) + +// Signals used in cli/command (no windows equivalent, use +// invalid signals so they don't get handled) + +const ( + // SIGCHLD is a signal sent to a process when a child process terminates, is interrupted, or resumes after being interrupted. + SIGCHLD = syscall.SIGCHLD + // SIGWINCH is a signal sent to a process when its controlling terminal changes its size + SIGWINCH = syscall.SIGWINCH + // SIGPIPE is a signal sent to a process when a pipe is written to before the other end is open for reading + SIGPIPE = syscall.SIGPIPE + // DefaultStopSignal is the syscall signal used to stop a container in unix systems. + DefaultStopSignal = "SIGTERM" +) diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_unsupported.go b/vendor/github.com/docker/docker/pkg/signal/signal_unsupported.go new file mode 100644 index 00000000..1fd25a83 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/signal/signal_unsupported.go @@ -0,0 +1,10 @@ +// +build !linux,!darwin,!freebsd,!windows + +package signal // import "github.com/docker/docker/pkg/signal" + +import ( + "syscall" +) + +// SignalMap is an empty map of signals for unsupported platform. 
+var SignalMap = map[string]syscall.Signal{} diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_windows.go b/vendor/github.com/docker/docker/pkg/signal/signal_windows.go new file mode 100644 index 00000000..65752f24 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/signal/signal_windows.go @@ -0,0 +1,26 @@ +package signal // import "github.com/docker/docker/pkg/signal" + +import ( + "syscall" +) + +// Signals used in cli/command (no windows equivalent, use +// invalid signals so they don't get handled) +const ( + SIGCHLD = syscall.Signal(0xff) + SIGWINCH = syscall.Signal(0xff) + SIGPIPE = syscall.Signal(0xff) + // DefaultStopSignal is the syscall signal used to stop a container in windows systems. + DefaultStopSignal = "15" +) + +// SignalMap is a map of "supported" signals. As per the comment in GOLang's +// ztypes_windows.go: "More invented values for signals". Windows doesn't +// really support signals in any way, shape or form that Unix does. +// +// We have these so that docker kill can be used to gracefully (TERM) and +// forcibly (KILL) terminate a container on Windows. +var SignalMap = map[string]syscall.Signal{ + "KILL": syscall.SIGKILL, + "TERM": syscall.SIGTERM, +} diff --git a/vendor/github.com/docker/docker/pkg/signal/trap.go b/vendor/github.com/docker/docker/pkg/signal/trap.go new file mode 100644 index 00000000..a277b956 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/signal/trap.go @@ -0,0 +1,104 @@ +package signal // import "github.com/docker/docker/pkg/signal" + +import ( + "fmt" + "os" + gosignal "os/signal" + "path/filepath" + "runtime" + "strings" + "sync/atomic" + "syscall" + "time" + + "github.com/pkg/errors" +) + +// Trap sets up a simplified signal "trap", appropriate for common +// behavior expected from a vanilla unix command-line tool in general +// (and the Docker engine in particular). +// +// * If SIGINT or SIGTERM are received, `cleanup` is called, then the process is terminated. +// * If SIGINT or SIGTERM are received 3 times before cleanup is complete, then cleanup is +// skipped and the process is terminated immediately (allows force quit of stuck daemon) +// * A SIGQUIT always causes an exit without cleanup, with a goroutine dump preceding exit. +// * Ignore SIGPIPE events. These are generated by systemd when journald is restarted while +// the docker daemon is not restarted and also running under systemd. +// Fixes https://github.com/docker/docker/issues/19728 +// +func Trap(cleanup func(), logger interface { + Info(args ...interface{}) +}) { + c := make(chan os.Signal, 1) + // we will handle INT, TERM, QUIT, SIGPIPE here + signals := []os.Signal{os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT, syscall.SIGPIPE} + gosignal.Notify(c, signals...) 
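+	// Process notifications on a separate goroutine so that Trap itself
+	// returns immediately; each received signal is then handled in its
+	// own goroutine below.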
+ go func() { + interruptCount := uint32(0) + for sig := range c { + if sig == syscall.SIGPIPE { + continue + } + + go func(sig os.Signal) { + logger.Info(fmt.Sprintf("Processing signal '%v'", sig)) + switch sig { + case os.Interrupt, syscall.SIGTERM: + if atomic.LoadUint32(&interruptCount) < 3 { + // Initiate the cleanup only once + if atomic.AddUint32(&interruptCount, 1) == 1 { + // Call the provided cleanup handler + cleanup() + os.Exit(0) + } else { + return + } + } else { + // 3 SIGTERM/INT signals received; force exit without cleanup + logger.Info("Forcing docker daemon shutdown without cleanup; 3 interrupts received") + } + case syscall.SIGQUIT: + DumpStacks("") + logger.Info("Forcing docker daemon shutdown without cleanup on SIGQUIT") + } + // for the SIGINT/TERM, and SIGQUIT non-clean shutdown case, exit with 128 + signal # + os.Exit(128 + int(sig.(syscall.Signal))) + }(sig) + } + }() +} + +const stacksLogNameTemplate = "goroutine-stacks-%s.log" + +// DumpStacks appends the runtime stack into file in dir and returns full path +// to that file. +func DumpStacks(dir string) (string, error) { + var ( + buf []byte + stackSize int + ) + bufferLen := 16384 + for stackSize == len(buf) { + buf = make([]byte, bufferLen) + stackSize = runtime.Stack(buf, true) + bufferLen *= 2 + } + buf = buf[:stackSize] + var f *os.File + if dir != "" { + path := filepath.Join(dir, fmt.Sprintf(stacksLogNameTemplate, strings.Replace(time.Now().Format(time.RFC3339), ":", "", -1))) + var err error + f, err = os.OpenFile(path, os.O_CREATE|os.O_WRONLY, 0666) + if err != nil { + return "", errors.Wrap(err, "failed to open file to write the goroutine stacks") + } + defer f.Close() + defer f.Sync() + } else { + f = os.Stderr + } + if _, err := f.Write(buf); err != nil { + return "", errors.Wrap(err, "failed to write goroutine stacks") + } + return f.Name(), nil +} diff --git a/vendor/github.com/docker/go-events/.gitignore b/vendor/github.com/docker/go-events/.gitignore new file mode 100644 index 00000000..daf913b1 --- /dev/null +++ b/vendor/github.com/docker/go-events/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/docker/go-events/CONTRIBUTING.md b/vendor/github.com/docker/go-events/CONTRIBUTING.md new file mode 100644 index 00000000..d813af77 --- /dev/null +++ b/vendor/github.com/docker/go-events/CONTRIBUTING.md @@ -0,0 +1,70 @@ +# Contributing to Docker open source projects + +Want to hack on go-events? Awesome! Here are instructions to get you started. + +go-events is part of the [Docker](https://www.docker.com) project, and +follows the same rules and principles. If you're already familiar with the way +Docker does things, you'll feel right at home. + +Otherwise, go read Docker's +[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md), +[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md), +[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and +[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md). 
+ +For an in-depth description of our contribution process, visit the +contributors guide: [Understand how to contribute](https://docs.docker.com/opensource/workflow/make-a-contribution/) + +### Sign your work + +The sign-off is a simple line at the end of the explanation for the patch. Your +signature certifies that you wrote the patch or otherwise have the right to pass +it on as an open-source patch. The rules are pretty simple: if you can certify +the below (from [developercertificate.org](http://developercertificate.org/)): + +``` +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +660 York Street, Suite 102, +San Francisco, CA 94110 USA + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. +``` + +Then you just add a line to every git commit message: + + Signed-off-by: Joe Smith + +Use your real name (sorry, no pseudonyms or anonymous contributions.) + +If you set your `user.name` and `user.email` git configs, you can sign your +commit automatically with `git commit -s`. diff --git a/vendor/github.com/docker/go-events/LICENSE b/vendor/github.com/docker/go-events/LICENSE new file mode 100644 index 00000000..6d630cf5 --- /dev/null +++ b/vendor/github.com/docker/go-events/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/docker/go-events/MAINTAINERS b/vendor/github.com/docker/go-events/MAINTAINERS new file mode 100644 index 00000000..e414d82e --- /dev/null +++ b/vendor/github.com/docker/go-events/MAINTAINERS @@ -0,0 +1,46 @@ +# go-events maintainers file +# +# This file describes who runs the docker/go-events project and how. 
+# This is a living document - if you see something out of date or missing, speak up!
+#
+# It is structured to be consumable by both humans and programs.
+# To extract its contents programmatically, use any TOML-compliant parser.
+#
+# This file is compiled into the MAINTAINERS file in docker/opensource.
+#
+[Org]
+	[Org."Core maintainers"]
+		people = [
+			"aaronlehmann",
+			"aluzzardi",
+			"lk4d4",
+			"stevvooe",
+		]
+
+[people]
+
+# A reference list of all people associated with the project.
+# All other sections should refer to people by their canonical key
+# in the people section.
+
+	# ADD YOURSELF HERE IN ALPHABETICAL ORDER
+
+	[people.aaronlehmann]
+	Name = "Aaron Lehmann"
+	Email = "aaron.lehmann@docker.com"
+	GitHub = "aaronlehmann"
+
+	[people.aluzzardi]
+	Name = "Andrea Luzzardi"
+	Email = "al@docker.com"
+	GitHub = "aluzzardi"
+
+	[people.lk4d4]
+	Name = "Alexander Morozov"
+	Email = "lk4d4@docker.com"
+	GitHub = "lk4d4"
+
+	[people.stevvooe]
+	Name = "Stephen Day"
+	Email = "stephen.day@docker.com"
+	GitHub = "stevvooe"
diff --git a/vendor/github.com/docker/go-events/README.md b/vendor/github.com/docker/go-events/README.md
new file mode 100644
index 00000000..0acafc27
--- /dev/null
+++ b/vendor/github.com/docker/go-events/README.md
@@ -0,0 +1,117 @@
+# Docker Events Package
+
+[![GoDoc](https://godoc.org/github.com/docker/go-events?status.svg)](https://godoc.org/github.com/docker/go-events)
+[![Circle CI](https://circleci.com/gh/docker/go-events.svg?style=shield)](https://circleci.com/gh/docker/go-events)
+
+The Docker `events` package implements a composable event distribution package
+for Go.
+
+Originally created to implement the [notifications in Docker Registry
+2](https://github.com/docker/distribution/blob/master/docs/notifications.md),
+we've found the pattern to be useful in other applications. This package is
+most of the same code with slightly updated interfaces. Many of the internals
+have been made available.
+
+## Usage
+
+The `events` package centers around a `Sink` type. Events are written with
+calls to `Sink.Write(event Event)`. Sinks can be wired up in various
+configurations to achieve interesting behavior.
+
+The canonical example is that employed by the
+[docker/distribution/notifications](https://godoc.org/github.com/docker/distribution/notifications)
+package. Let's say we have a type `httpSink` where we'd like to queue
+notifications. As a rule, it should send a single http request and return an
+error if it fails:
+
+```go
+func (h *httpSink) Write(event Event) error {
+	p, err := json.Marshal(event)
+	if err != nil {
+		return err
+	}
+	body := bytes.NewReader(p)
+	resp, err := h.client.Post(h.url, "application/json", body)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode != 200 {
+		return errors.New("unexpected status")
+	}
+
+	return nil
+}
+
+// implement (*httpSink).Close()
+```
+
+With just that, we can start using components from this package. One can call
+`(*httpSink).Write` to send events as the body of a post request to a
+configured URL.
+
+### Retries
+
+HTTP can be unreliable. The first feature we'd like is to have some retry:
+
+```go
+hs := newHTTPSink(/*...*/)
+retry := NewRetryingSink(hs, NewBreaker(5, time.Second))
+```
+
+We now have a sink that will retry events against the `httpSink` until they
+succeed. The retry will back off for one second after 5 consecutive failures
+using the breaker strategy.
+
+### Queues
+
+This isn't quite enough.
We want a sink that doesn't block while we are
+waiting for events to be sent. Let's add a `Queue`:
+
+```go
+queue := NewQueue(retry)
+```
+
+Now, we have an unbounded queue that will work through all events sent with
+`(*Queue).Write`. Events can be added asynchronously to the queue without
+blocking the current execution path. This is ideal for use in an http request.
+
+### Broadcast
+
+It usually turns out that you want to send to more than one listener. We can
+use `Broadcaster` to support this:
+
+```go
+var broadcast = NewBroadcaster() // make it available somewhere in your application.
+broadcast.Add(queue) // add your queue!
+broadcast.Add(queue2) // and another!
+```
+
+With the above, we can now call `broadcast.Write` in our http handlers and have
+all the events distributed to each queue. Because the events are queued, no
+listener blocks another.
+
+### Extending
+
+For the most part, the above is sufficient for a lot of applications. However,
+extending the above functionality can be done by implementing your own `Sink`.
+The behavior and semantics of the sink can be completely dependent on the
+application requirements. The interface is provided below for reference:
+
+```go
+type Sink interface {
+	Write(Event) error
+	Close() error
+}
+```
+
+Application behavior can be controlled by how `Write` behaves. The examples
+above are designed to queue the message and return as quickly as possible.
+Other implementations may block until the event is committed to durable
+storage.
+
+## Copyright and license
+
+Copyright © 2016 Docker, Inc. go-events is licensed under the Apache License,
+Version 2.0. See [LICENSE](LICENSE) for the full license text.
diff --git a/vendor/github.com/docker/go-events/broadcast.go b/vendor/github.com/docker/go-events/broadcast.go
new file mode 100644
index 00000000..5120078d
--- /dev/null
+++ b/vendor/github.com/docker/go-events/broadcast.go
@@ -0,0 +1,178 @@
+package events
+
+import (
+	"fmt"
+	"sync"
+
+	"github.com/sirupsen/logrus"
+)
+
+// Broadcaster sends events to multiple, reliable Sinks. The goal of this
+// component is to dispatch events to configured endpoints. Reliability can be
+// provided by wrapping incoming sinks.
+type Broadcaster struct {
+	sinks   []Sink
+	events  chan Event
+	adds    chan configureRequest
+	removes chan configureRequest
+
+	shutdown chan struct{}
+	closed   chan struct{}
+	once     sync.Once
+}
+
+// NewBroadcaster appends one or more sinks to the list of sinks. The
+// broadcaster behavior will be affected by the properties of the sink.
+// Generally, the sink should accept all messages and deal with reliability on
+// its own. Wrapping sinks with Queue and RetryingSink is useful here.
+func NewBroadcaster(sinks ...Sink) *Broadcaster {
+	b := Broadcaster{
+		sinks:    sinks,
+		events:   make(chan Event),
+		adds:     make(chan configureRequest),
+		removes:  make(chan configureRequest),
+		shutdown: make(chan struct{}),
+		closed:   make(chan struct{}),
+	}
+
+	// Start the broadcaster
+	go b.run()
+
+	return &b
+}
+
+// Write accepts an event to be dispatched to all sinks. This method will never
+// fail and should never block (hopefully!). The caller cedes the memory to the
+// broadcaster and should not modify it after calling write.
+func (b *Broadcaster) Write(event Event) error {
+	select {
+	case b.events <- event:
+	case <-b.closed:
+		return ErrSinkClosed
+	}
+	return nil
+}
+
+// Add the sink to the broadcaster.
+//
+// The provided sink must be comparable with equality. Typically, this just
+// works with a regular pointer type.
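+//
+// Illustrative wiring (a sketch; mySink is a hypothetical Sink):
+//
+//	b := NewBroadcaster()
+//	q := NewQueue(mySink)
+//	_ = b.Add(q) // events written to b are now forwarded to q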
+func (b *Broadcaster) Add(sink Sink) error { + return b.configure(b.adds, sink) +} + +// Remove the provided sink. +func (b *Broadcaster) Remove(sink Sink) error { + return b.configure(b.removes, sink) +} + +type configureRequest struct { + sink Sink + response chan error +} + +func (b *Broadcaster) configure(ch chan configureRequest, sink Sink) error { + response := make(chan error, 1) + + for { + select { + case ch <- configureRequest{ + sink: sink, + response: response}: + ch = nil + case err := <-response: + return err + case <-b.closed: + return ErrSinkClosed + } + } +} + +// Close the broadcaster, ensuring that all messages are flushed to the +// underlying sink before returning. +func (b *Broadcaster) Close() error { + b.once.Do(func() { + close(b.shutdown) + }) + + <-b.closed + return nil +} + +// run is the main broadcast loop, started when the broadcaster is created. +// Under normal conditions, it waits for events on the event channel. After +// Close is called, this goroutine will exit. +func (b *Broadcaster) run() { + defer close(b.closed) + remove := func(target Sink) { + for i, sink := range b.sinks { + if sink == target { + b.sinks = append(b.sinks[:i], b.sinks[i+1:]...) + break + } + } + } + + for { + select { + case event := <-b.events: + for _, sink := range b.sinks { + if err := sink.Write(event); err != nil { + if err == ErrSinkClosed { + // remove closed sinks + remove(sink) + continue + } + logrus.WithField("event", event).WithField("events.sink", sink).WithError(err). + Errorf("broadcaster: dropping event") + } + } + case request := <-b.adds: + // while we have to iterate for add/remove, common iteration for + // send is faster against slice. + + var found bool + for _, sink := range b.sinks { + if request.sink == sink { + found = true + break + } + } + + if !found { + b.sinks = append(b.sinks, request.sink) + } + // b.sinks[request.sink] = struct{}{} + request.response <- nil + case request := <-b.removes: + remove(request.sink) + request.response <- nil + case <-b.shutdown: + // close all the underlying sinks + for _, sink := range b.sinks { + if err := sink.Close(); err != nil && err != ErrSinkClosed { + logrus.WithField("events.sink", sink).WithError(err). + Errorf("broadcaster: closing sink failed") + } + } + return + } + } +} + +func (b *Broadcaster) String() string { + // Serialize copy of this broadcaster without the sync.Once, to avoid + // a data race. + + b2 := map[string]interface{}{ + "sinks": b.sinks, + "events": b.events, + "adds": b.adds, + "removes": b.removes, + + "shutdown": b.shutdown, + "closed": b.closed, + } + + return fmt.Sprint(b2) +} diff --git a/vendor/github.com/docker/go-events/channel.go b/vendor/github.com/docker/go-events/channel.go new file mode 100644 index 00000000..802cf51f --- /dev/null +++ b/vendor/github.com/docker/go-events/channel.go @@ -0,0 +1,61 @@ +package events + +import ( + "fmt" + "sync" +) + +// Channel provides a sink that can be listened on. The writer and channel +// listener must operate in separate goroutines. +// +// Consumers should listen on Channel.C until Closed is closed. +type Channel struct { + C chan Event + + closed chan struct{} + once sync.Once +} + +// NewChannel returns a channel. If buffer is zero, the channel is +// unbuffered. +func NewChannel(buffer int) *Channel { + return &Channel{ + C: make(chan Event, buffer), + closed: make(chan struct{}), + } +} + +// Done returns a channel that will always proceed once the sink is closed. 
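+//
+// A typical consumer loop (illustrative sketch; handle is hypothetical):
+//
+//	for {
+//		select {
+//		case ev := <-ch.C:
+//			handle(ev)
+//		case <-ch.Done():
+//			return
+//		}
+//	}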
+func (ch *Channel) Done() chan struct{} { + return ch.closed +} + +// Write the event to the channel. Must be called in a separate goroutine from +// the listener. +func (ch *Channel) Write(event Event) error { + select { + case ch.C <- event: + return nil + case <-ch.closed: + return ErrSinkClosed + } +} + +// Close the channel sink. +func (ch *Channel) Close() error { + ch.once.Do(func() { + close(ch.closed) + }) + + return nil +} + +func (ch *Channel) String() string { + // Serialize a copy of the Channel that doesn't contain the sync.Once, + // to avoid a data race. + ch2 := map[string]interface{}{ + "C": ch.C, + "closed": ch.closed, + } + return fmt.Sprint(ch2) +} diff --git a/vendor/github.com/docker/go-events/errors.go b/vendor/github.com/docker/go-events/errors.go new file mode 100644 index 00000000..56db7c25 --- /dev/null +++ b/vendor/github.com/docker/go-events/errors.go @@ -0,0 +1,10 @@ +package events + +import "fmt" + +var ( + // ErrSinkClosed is returned if a write is issued to a sink that has been + // closed. If encountered, the error should be considered terminal and + // retries will not be successful. + ErrSinkClosed = fmt.Errorf("events: sink closed") +) diff --git a/vendor/github.com/docker/go-events/event.go b/vendor/github.com/docker/go-events/event.go new file mode 100644 index 00000000..f0f1d9ea --- /dev/null +++ b/vendor/github.com/docker/go-events/event.go @@ -0,0 +1,15 @@ +package events + +// Event marks items that can be sent as events. +type Event interface{} + +// Sink accepts and sends events. +type Sink interface { + // Write an event to the Sink. If no error is returned, the caller will + // assume that all events have been committed to the sink. If an error is + // received, the caller may retry sending the event. + Write(event Event) error + + // Close the sink, possibly waiting for pending events to flush. + Close() error +} diff --git a/vendor/github.com/docker/go-events/filter.go b/vendor/github.com/docker/go-events/filter.go new file mode 100644 index 00000000..e6c0eb69 --- /dev/null +++ b/vendor/github.com/docker/go-events/filter.go @@ -0,0 +1,52 @@ +package events + +// Matcher matches events. +type Matcher interface { + Match(event Event) bool +} + +// MatcherFunc implements matcher with just a function. +type MatcherFunc func(event Event) bool + +// Match calls the wrapped function. +func (fn MatcherFunc) Match(event Event) bool { + return fn(event) +} + +// Filter provides an event sink that sends only events that are accepted by a +// Matcher. No methods on filter are goroutine safe. +type Filter struct { + dst Sink + matcher Matcher + closed bool +} + +// NewFilter returns a new filter that will send to events to dst that return +// true for Matcher. +func NewFilter(dst Sink, matcher Matcher) Sink { + return &Filter{dst: dst, matcher: matcher} +} + +// Write an event to the filter. +func (f *Filter) Write(event Event) error { + if f.closed { + return ErrSinkClosed + } + + if f.matcher.Match(event) { + return f.dst.Write(event) + } + + return nil +} + +// Close the filter and allow no more events to pass through. +func (f *Filter) Close() error { + // TODO(stevvooe): Not all sinks should have Close. 
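+	// Close is idempotent: once the flag is set, subsequent calls return
+	// nil without calling dst.Close again.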
+ if f.closed { + return nil + } + + f.closed = true + return f.dst.Close() +} diff --git a/vendor/github.com/docker/go-events/queue.go b/vendor/github.com/docker/go-events/queue.go new file mode 100644 index 00000000..4bb770af --- /dev/null +++ b/vendor/github.com/docker/go-events/queue.go @@ -0,0 +1,111 @@ +package events + +import ( + "container/list" + "sync" + + "github.com/sirupsen/logrus" +) + +// Queue accepts all messages into a queue for asynchronous consumption +// by a sink. It is unbounded and thread safe but the sink must be reliable or +// events will be dropped. +type Queue struct { + dst Sink + events *list.List + cond *sync.Cond + mu sync.Mutex + closed bool +} + +// NewQueue returns a queue to the provided Sink dst. +func NewQueue(dst Sink) *Queue { + eq := Queue{ + dst: dst, + events: list.New(), + } + + eq.cond = sync.NewCond(&eq.mu) + go eq.run() + return &eq +} + +// Write accepts the events into the queue, only failing if the queue has +// been closed. +func (eq *Queue) Write(event Event) error { + eq.mu.Lock() + defer eq.mu.Unlock() + + if eq.closed { + return ErrSinkClosed + } + + eq.events.PushBack(event) + eq.cond.Signal() // signal waiters + + return nil +} + +// Close shutsdown the event queue, flushing +func (eq *Queue) Close() error { + eq.mu.Lock() + defer eq.mu.Unlock() + + if eq.closed { + return nil + } + + // set closed flag + eq.closed = true + eq.cond.Signal() // signal flushes queue + eq.cond.Wait() // wait for signal from last flush + return eq.dst.Close() +} + +// run is the main goroutine to flush events to the target sink. +func (eq *Queue) run() { + for { + event := eq.next() + + if event == nil { + return // nil block means event queue is closed. + } + + if err := eq.dst.Write(event); err != nil { + // TODO(aaronl): Dropping events could be bad depending + // on the application. We should have a way of + // communicating this condition. However, logging + // at a log level above debug may not be appropriate. + // Eventually, go-events should not use logrus at all, + // and should bubble up conditions like this through + // error values. + logrus.WithFields(logrus.Fields{ + "event": event, + "sink": eq.dst, + }).WithError(err).Debug("eventqueue: dropped event") + } + } +} + +// next encompasses the critical section of the run loop. When the queue is +// empty, it will block on the condition. If new data arrives, it will wake +// and return a block. When closed, a nil slice will be returned. +func (eq *Queue) next() Event { + eq.mu.Lock() + defer eq.mu.Unlock() + + for eq.events.Len() < 1 { + if eq.closed { + eq.cond.Broadcast() + return nil + } + + eq.cond.Wait() + } + + front := eq.events.Front() + block := front.Value.(Event) + eq.events.Remove(front) + + return block +} diff --git a/vendor/github.com/docker/go-events/retry.go b/vendor/github.com/docker/go-events/retry.go new file mode 100644 index 00000000..b7f0a542 --- /dev/null +++ b/vendor/github.com/docker/go-events/retry.go @@ -0,0 +1,260 @@ +package events + +import ( + "fmt" + "math/rand" + "sync" + "sync/atomic" + "time" + + "github.com/sirupsen/logrus" +) + +// RetryingSink retries the write until success or an ErrSinkClosed is +// returned. Underlying sink must have p > 0 of succeeding or the sink will +// block. Retry is configured with a RetryStrategy. Concurrent calls to a +// retrying sink are serialized through the sink, meaning that if one is +// in-flight, another will not proceed. 
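+//
+// Illustrative construction, mirroring the README (mySink is a
+// hypothetical Sink):
+//
+//	rs := NewRetryingSink(mySink, NewBreaker(5, time.Second))
+//	_ = rs.Write(event) // retries per the strategy until written or closed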
+type RetryingSink struct { + sink Sink + strategy RetryStrategy + closed chan struct{} + once sync.Once +} + +// NewRetryingSink returns a sink that will retry writes to a sink, backing +// off on failure. Parameters threshold and backoff adjust the behavior of the +// circuit breaker. +func NewRetryingSink(sink Sink, strategy RetryStrategy) *RetryingSink { + rs := &RetryingSink{ + sink: sink, + strategy: strategy, + closed: make(chan struct{}), + } + + return rs +} + +// Write attempts to flush the events to the downstream sink until it succeeds +// or the sink is closed. +func (rs *RetryingSink) Write(event Event) error { + logger := logrus.WithField("event", event) + +retry: + select { + case <-rs.closed: + return ErrSinkClosed + default: + } + + if backoff := rs.strategy.Proceed(event); backoff > 0 { + select { + case <-time.After(backoff): + // TODO(stevvooe): This branch holds up the next try. Before, we + // would simply break to the "retry" label and then possibly wait + // again. However, this requires all retry strategies to have a + // large probability of probing the sync for success, rather than + // just backing off and sending the request. + case <-rs.closed: + return ErrSinkClosed + } + } + + if err := rs.sink.Write(event); err != nil { + if err == ErrSinkClosed { + // terminal! + return err + } + + logger := logger.WithError(err) // shadow!! + + if rs.strategy.Failure(event, err) { + logger.Errorf("retryingsink: dropped event") + return nil + } + + logger.Errorf("retryingsink: error writing event, retrying") + goto retry + } + + rs.strategy.Success(event) + return nil +} + +// Close closes the sink and the underlying sink. +func (rs *RetryingSink) Close() error { + rs.once.Do(func() { + close(rs.closed) + }) + + return nil +} + +func (rs *RetryingSink) String() string { + // Serialize a copy of the RetryingSink without the sync.Once, to avoid + // a data race. + rs2 := map[string]interface{}{ + "sink": rs.sink, + "strategy": rs.strategy, + "closed": rs.closed, + } + return fmt.Sprint(rs2) +} + +// RetryStrategy defines a strategy for retrying event sink writes. +// +// All methods should be goroutine safe. +type RetryStrategy interface { + // Proceed is called before every event send. If proceed returns a + // positive, non-zero integer, the retryer will back off by the provided + // duration. + // + // An event is provided, by may be ignored. + Proceed(event Event) time.Duration + + // Failure reports a failure to the strategy. If this method returns true, + // the event should be dropped. + Failure(event Event, err error) bool + + // Success should be called when an event is sent successfully. + Success(event Event) +} + +// Breaker implements a circuit breaker retry strategy. +// +// The current implementation never drops events. +type Breaker struct { + threshold int + recent int + last time.Time + backoff time.Duration // time after which we retry after failure. + mu sync.Mutex +} + +var _ RetryStrategy = &Breaker{} + +// NewBreaker returns a breaker that will backoff after the threshold has been +// tripped. A Breaker is thread safe and may be shared by many goroutines. +func NewBreaker(threshold int, backoff time.Duration) *Breaker { + return &Breaker{ + threshold: threshold, + backoff: backoff, + } +} + +// Proceed checks the failures against the threshold. 
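+// Below the threshold it returns 0 (send immediately); at or above the
+// threshold it returns the time remaining until last failure + backoff,
+// which may be non-positive, and RetryingSink.Write only sleeps when the
+// returned duration is > 0.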
+func (b *Breaker) Proceed(event Event) time.Duration { + b.mu.Lock() + defer b.mu.Unlock() + + if b.recent < b.threshold { + return 0 + } + + return b.last.Add(b.backoff).Sub(time.Now()) +} + +// Success resets the breaker. +func (b *Breaker) Success(event Event) { + b.mu.Lock() + defer b.mu.Unlock() + + b.recent = 0 + b.last = time.Time{} +} + +// Failure records the failure and latest failure time. +func (b *Breaker) Failure(event Event, err error) bool { + b.mu.Lock() + defer b.mu.Unlock() + + b.recent++ + b.last = time.Now().UTC() + return false // never drop events. +} + +var ( + // DefaultExponentialBackoffConfig provides a default configuration for + // exponential backoff. + DefaultExponentialBackoffConfig = ExponentialBackoffConfig{ + Base: time.Second, + Factor: time.Second, + Max: 20 * time.Second, + } +) + +// ExponentialBackoffConfig configures backoff parameters. +// +// Note that these parameters operate on the upper bound for choosing a random +// value. For example, at Base=1s, a random value in [0,1s) will be chosen for +// the backoff value. +type ExponentialBackoffConfig struct { + // Base is the minimum bound for backing off after failure. + Base time.Duration + + // Factor sets the amount of time by which the backoff grows with each + // failure. + Factor time.Duration + + // Max is the absolute maxiumum bound for a single backoff. + Max time.Duration +} + +// ExponentialBackoff implements random backoff with exponentially increasing +// bounds as the number consecutive failures increase. +type ExponentialBackoff struct { + failures uint64 // consecutive failure counter (needs to be 64-bit aligned) + config ExponentialBackoffConfig +} + +// NewExponentialBackoff returns an exponential backoff strategy with the +// desired config. If config is nil, the default is returned. +func NewExponentialBackoff(config ExponentialBackoffConfig) *ExponentialBackoff { + return &ExponentialBackoff{ + config: config, + } +} + +// Proceed returns the next randomly bound exponential backoff time. +func (b *ExponentialBackoff) Proceed(event Event) time.Duration { + return b.backoff(atomic.LoadUint64(&b.failures)) +} + +// Success resets the failures counter. +func (b *ExponentialBackoff) Success(event Event) { + atomic.StoreUint64(&b.failures, 0) +} + +// Failure increments the failure counter. +func (b *ExponentialBackoff) Failure(event Event, err error) bool { + atomic.AddUint64(&b.failures, 1) + return false +} + +// backoff calculates the amount of time to wait based on the number of +// consecutive failures. +func (b *ExponentialBackoff) backoff(failures uint64) time.Duration { + if failures <= 0 { + // proceed normally when there are no failures. + return 0 + } + + factor := b.config.Factor + if factor <= 0 { + factor = DefaultExponentialBackoffConfig.Factor + } + + backoff := b.config.Base + factor*time.Duration(1<<(failures-1)) + + max := b.config.Max + if max <= 0 { + max = DefaultExponentialBackoffConfig.Max + } + + if backoff > max || backoff < 0 { + backoff = max + } + + // Choose a uniformly distributed value from [0, backoff). + return time.Duration(rand.Int63n(int64(backoff))) +} diff --git a/vendor/github.com/docker/libnetwork/LICENSE b/vendor/github.com/docker/libnetwork/LICENSE new file mode 100644 index 00000000..e06d2081 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/LICENSE @@ -0,0 +1,202 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
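As a side note on the retry strategies vendored earlier in this patch: the
following is a minimal, self-contained sketch (not part of the patch) of the
bound computation ExponentialBackoff performs, i.e. Base + Factor*2^(n-1)
capped at Max, with the final wait drawn uniformly from [0, bound). All names
here are illustrative only.

    package main

    import (
        "fmt"
        "math/rand"
        "time"
    )

    // bound mirrors the backoff calculation in the vendored code under the
    // default config values.
    func bound(base, factor, max time.Duration, failures uint64) time.Duration {
        if failures == 0 {
            return 0
        }
        b := base + factor*time.Duration(1<<(failures-1))
        if b > max || b < 0 { // the overflow case also clamps to max
            b = max
        }
        return b
    }

    func main() {
        for n := uint64(1); n <= 6; n++ {
            upper := bound(time.Second, time.Second, 20*time.Second, n)
            wait := time.Duration(rand.Int63n(int64(upper)))
            fmt.Printf("failure %d: bound %v, wait %v\n", n, upper, wait)
        }
    }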
+ diff --git a/vendor/github.com/docker/libnetwork/resolvconf/README.md b/vendor/github.com/docker/libnetwork/resolvconf/README.md new file mode 100644 index 00000000..cdda554b --- /dev/null +++ b/vendor/github.com/docker/libnetwork/resolvconf/README.md @@ -0,0 +1 @@ +Package resolvconf provides utility code to query and update DNS configuration in /etc/resolv.conf diff --git a/vendor/github.com/docker/libnetwork/resolvconf/dns/resolvconf.go b/vendor/github.com/docker/libnetwork/resolvconf/dns/resolvconf.go new file mode 100644 index 00000000..e348bc57 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/resolvconf/dns/resolvconf.go @@ -0,0 +1,26 @@ +package dns + +import ( + "regexp" +) + +// IPLocalhost is a regex pattern for IPv4 or IPv6 loopback range. +const IPLocalhost = `((127\.([0-9]{1,3}\.){2}[0-9]{1,3})|(::1)$)` + +// IPv4Localhost is a regex pattern for IPv4 localhost address range. +const IPv4Localhost = `(127\.([0-9]{1,3}\.){2}[0-9]{1,3})` + +var localhostIPRegexp = regexp.MustCompile(IPLocalhost) +var localhostIPv4Regexp = regexp.MustCompile(IPv4Localhost) + +// IsLocalhost returns true if ip matches the localhost IP regular expression. +// Used for determining if nameserver settings are being passed which are +// localhost addresses +func IsLocalhost(ip string) bool { + return localhostIPRegexp.MatchString(ip) +} + +// IsIPv4Localhost returns true if ip matches the IPv4 localhost regular expression. +func IsIPv4Localhost(ip string) bool { + return localhostIPv4Regexp.MatchString(ip) +} diff --git a/vendor/github.com/docker/libnetwork/resolvconf/resolvconf.go b/vendor/github.com/docker/libnetwork/resolvconf/resolvconf.go new file mode 100644 index 00000000..946bb871 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/resolvconf/resolvconf.go @@ -0,0 +1,285 @@ +// Package resolvconf provides utility code to query and update DNS configuration in /etc/resolv.conf +package resolvconf + +import ( + "bytes" + "io/ioutil" + "regexp" + "strings" + "sync" + + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/libnetwork/resolvconf/dns" + "github.com/docker/libnetwork/types" + "github.com/sirupsen/logrus" +) + +const ( + // defaultPath is the default path to the resolv.conf that contains information to resolve DNS. See Path(). + defaultPath = "/etc/resolv.conf" + // alternatePath is a path different from defaultPath, that may be used to resolve DNS. See Path(). + alternatePath = "/run/systemd/resolve/resolv.conf" +) + +var ( + detectSystemdResolvConfOnce sync.Once + pathAfterSystemdDetection = defaultPath +) + +// Path returns the path to the resolv.conf file that libnetwork should use. +// +// When /etc/resolv.conf contains 127.0.0.53 as the only nameserver, then +// it is assumed systemd-resolved manages DNS. Because inside the container 127.0.0.53 +// is not a valid DNS server, Path() returns /run/systemd/resolve/resolv.conf +// which is the resolv.conf that systemd-resolved generates and manages. +// Otherwise Path() returns /etc/resolv.conf. +// +// Errors are silenced as they will inevitably resurface at future open/read calls. 
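A quick sketch of how this package is typically driven (hypothetical code,
not part of the vendored file): read whatever Path() points at and list the
nameservers, using the GetNameservers helper defined later in this file.

    package main

    import (
        "fmt"
        "io/ioutil"

        "github.com/docker/libnetwork/resolvconf"
        "github.com/docker/libnetwork/types"
    )

    func main() {
        // Path() transparently redirects to the systemd-resolved copy when
        // /etc/resolv.conf only lists 127.0.0.53.
        p := resolvconf.Path()
        data, err := ioutil.ReadFile(p)
        if err != nil {
            panic(err)
        }
        fmt.Println(p, resolvconf.GetNameservers(data, types.IP))
    }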
+// +// More information at https://www.freedesktop.org/software/systemd/man/systemd-resolved.service.html#/etc/resolv.conf +func Path() string { + detectSystemdResolvConfOnce.Do(func() { + candidateResolvConf, err := ioutil.ReadFile(defaultPath) + if err != nil { + // silencing error as it will resurface at next calls trying to read defaultPath + return + } + ns := GetNameservers(candidateResolvConf, types.IP) + if len(ns) == 1 && ns[0] == "127.0.0.53" { + pathAfterSystemdDetection = alternatePath + logrus.Infof("detected 127.0.0.53 nameserver, assuming systemd-resolved, so using resolv.conf: %s", alternatePath) + } + }) + return pathAfterSystemdDetection +} + +var ( + // Note: the default IPv4 & IPv6 resolvers are set to Google's Public DNS + defaultIPv4Dns = []string{"nameserver 8.8.8.8", "nameserver 8.8.4.4"} + defaultIPv6Dns = []string{"nameserver 2001:4860:4860::8888", "nameserver 2001:4860:4860::8844"} + ipv4NumBlock = `(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)` + ipv4Address = `(` + ipv4NumBlock + `\.){3}` + ipv4NumBlock + // This is not an IPv6 address verifier as it will accept a super-set of IPv6, and also + // will *not match* IPv4-Embedded IPv6 Addresses (RFC6052), but that and other variants + // -- e.g. other link-local types -- either won't work in containers or are unnecessary. + // For readability and sufficiency for Docker purposes this seemed more reasonable than a + // 1000+ character regexp with exact and complete IPv6 validation + ipv6Address = `([0-9A-Fa-f]{0,4}:){2,7}([0-9A-Fa-f]{0,4})(%\w+)?` + + localhostNSRegexp = regexp.MustCompile(`(?m)^nameserver\s+` + dns.IPLocalhost + `\s*\n*`) + nsIPv6Regexp = regexp.MustCompile(`(?m)^nameserver\s+` + ipv6Address + `\s*\n*`) + nsRegexp = regexp.MustCompile(`^\s*nameserver\s*((` + ipv4Address + `)|(` + ipv6Address + `))\s*$`) + nsIPv6Regexpmatch = regexp.MustCompile(`^\s*nameserver\s*((` + ipv6Address + `))\s*$`) + nsIPv4Regexpmatch = regexp.MustCompile(`^\s*nameserver\s*((` + ipv4Address + `))\s*$`) + searchRegexp = regexp.MustCompile(`^\s*search\s*(([^\s]+\s*)*)$`) + optionsRegexp = regexp.MustCompile(`^\s*options\s*(([^\s]+\s*)*)$`) +) + +var lastModified struct { + sync.Mutex + sha256 string + contents []byte +} + +// File contains the resolv.conf content and its hash +type File struct { + Content []byte + Hash string +} + +// Get returns the contents of /etc/resolv.conf and its hash +func Get() (*File, error) { + return GetSpecific(Path()) +} + +// GetSpecific returns the contents of the user specified resolv.conf file and its hash +func GetSpecific(path string) (*File, error) { + resolv, err := ioutil.ReadFile(path) + if err != nil { + return nil, err + } + hash, err := ioutils.HashData(bytes.NewReader(resolv)) + if err != nil { + return nil, err + } + return &File{Content: resolv, Hash: hash}, nil +} + +// GetIfChanged retrieves the host /etc/resolv.conf file, checks against the last hash +// and, if modified since last check, returns the bytes and new hash. 
+// This feature is used by the resolv.conf updater for containers +func GetIfChanged() (*File, error) { + lastModified.Lock() + defer lastModified.Unlock() + + resolv, err := ioutil.ReadFile(Path()) + if err != nil { + return nil, err + } + newHash, err := ioutils.HashData(bytes.NewReader(resolv)) + if err != nil { + return nil, err + } + if lastModified.sha256 != newHash { + lastModified.sha256 = newHash + lastModified.contents = resolv + return &File{Content: resolv, Hash: newHash}, nil + } + // nothing changed, so return no data + return nil, nil +} + +// GetLastModified retrieves the last used contents and hash of the host resolv.conf. +// Used by containers updating on restart +func GetLastModified() *File { + lastModified.Lock() + defer lastModified.Unlock() + + return &File{Content: lastModified.contents, Hash: lastModified.sha256} +} + +// FilterResolvDNS cleans up the config in resolvConf. It has two main jobs: +// 1. It looks for localhost (127.*|::1) entries in the provided +// resolv.conf, removing local nameserver entries, and, if the resulting +// cleaned config has no defined nameservers left, adds default DNS entries +// 2. Given the caller provides the enable/disable state of IPv6, the filter +// code will remove all IPv6 nameservers if it is not enabled for containers +// +func FilterResolvDNS(resolvConf []byte, ipv6Enabled bool) (*File, error) { + cleanedResolvConf := localhostNSRegexp.ReplaceAll(resolvConf, []byte{}) + // if IPv6 is not enabled, also clean out any IPv6 address nameserver + if !ipv6Enabled { + cleanedResolvConf = nsIPv6Regexp.ReplaceAll(cleanedResolvConf, []byte{}) + } + // if the resulting resolvConf has no more nameservers defined, add appropriate + // default DNS servers for IPv4 and (optionally) IPv6 + if len(GetNameservers(cleanedResolvConf, types.IP)) == 0 { + logrus.Infof("No non-localhost DNS nameservers are left in resolv.conf. Using default external servers: %v", defaultIPv4Dns) + dns := defaultIPv4Dns + if ipv6Enabled { + logrus.Infof("IPv6 enabled; Adding default IPv6 external servers: %v", defaultIPv6Dns) + dns = append(dns, defaultIPv6Dns...) + } + cleanedResolvConf = append(cleanedResolvConf, []byte("\n"+strings.Join(dns, "\n"))...) + } + hash, err := ioutils.HashData(bytes.NewReader(cleanedResolvConf)) + if err != nil { + return nil, err + } + return &File{Content: cleanedResolvConf, Hash: hash}, nil +} + +// getLines parses input into lines and strips away comments. 
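To make FilterResolvDNS above concrete, here is a hypothetical run (a sketch,
not vendored code): a config whose only nameserver is localhost gets that
entry stripped and the default external servers appended.

    package main

    import (
        "fmt"

        "github.com/docker/libnetwork/resolvconf"
    )

    func main() {
        in := []byte("nameserver 127.0.0.53\nsearch example.com\n")
        out, err := resolvconf.FilterResolvDNS(in, false) // IPv6 disabled
        if err != nil {
            panic(err)
        }
        // Expect the localhost nameserver to be gone and the IPv4 defaults
        // (8.8.8.8, 8.8.4.4) appended.
        fmt.Printf("%s\n", out.Content)
    }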
+func getLines(input []byte, commentMarker []byte) [][]byte { + lines := bytes.Split(input, []byte("\n")) + var output [][]byte + for _, currentLine := range lines { + var commentIndex = bytes.Index(currentLine, commentMarker) + if commentIndex == -1 { + output = append(output, currentLine) + } else { + output = append(output, currentLine[:commentIndex]) + } + } + return output +} + +// GetNameservers returns nameservers (if any) listed in /etc/resolv.conf +func GetNameservers(resolvConf []byte, kind int) []string { + nameservers := []string{} + for _, line := range getLines(resolvConf, []byte("#")) { + var ns [][]byte + if kind == types.IP { + ns = nsRegexp.FindSubmatch(line) + } else if kind == types.IPv4 { + ns = nsIPv4Regexpmatch.FindSubmatch(line) + } else if kind == types.IPv6 { + ns = nsIPv6Regexpmatch.FindSubmatch(line) + } + if len(ns) > 0 { + nameservers = append(nameservers, string(ns[1])) + } + } + return nameservers +} + +// GetNameserversAsCIDR returns nameservers (if any) listed in +// /etc/resolv.conf as CIDR blocks (e.g., "1.2.3.4/32") +// This function's output is intended for net.ParseCIDR +func GetNameserversAsCIDR(resolvConf []byte) []string { + nameservers := []string{} + for _, nameserver := range GetNameservers(resolvConf, types.IP) { + var address string + // If IPv6, strip zone if present + if strings.Contains(nameserver, ":") { + address = strings.Split(nameserver, "%")[0] + "/128" + } else { + address = nameserver + "/32" + } + nameservers = append(nameservers, address) + } + return nameservers +} + +// GetSearchDomains returns search domains (if any) listed in /etc/resolv.conf +// If more than one search line is encountered, only the contents of the last +// one is returned. +func GetSearchDomains(resolvConf []byte) []string { + domains := []string{} + for _, line := range getLines(resolvConf, []byte("#")) { + match := searchRegexp.FindSubmatch(line) + if match == nil { + continue + } + domains = strings.Fields(string(match[1])) + } + return domains +} + +// GetOptions returns options (if any) listed in /etc/resolv.conf +// If more than one options line is encountered, only the contents of the last +// one is returned. +func GetOptions(resolvConf []byte) []string { + options := []string{} + for _, line := range getLines(resolvConf, []byte("#")) { + match := optionsRegexp.FindSubmatch(line) + if match == nil { + continue + } + options = strings.Fields(string(match[1])) + } + return options +} + +// Build writes a configuration file to path containing a "nameserver" entry +// for every element in dns, a "search" entry for every element in +// dnsSearch, and an "options" entry for every element in dnsOptions. +func Build(path string, dns, dnsSearch, dnsOptions []string) (*File, error) { + content := bytes.NewBuffer(nil) + if len(dnsSearch) > 0 { + if searchString := strings.Join(dnsSearch, " "); strings.Trim(searchString, " ") != "." 
{
+ if _, err := content.WriteString("search " + searchString + "\n"); err != nil {
+ return nil, err
+ }
+ }
+ for _, dns := range dns {
+ if _, err := content.WriteString("nameserver " + dns + "\n"); err != nil {
+ return nil, err
+ }
+ }
+ if len(dnsOptions) > 0 {
+ if optsString := strings.Join(dnsOptions, " "); strings.Trim(optsString, " ") != "" {
+ if _, err := content.WriteString("options " + optsString + "\n"); err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ hash, err := ioutils.HashData(bytes.NewReader(content.Bytes()))
+ if err != nil {
+ return nil, err
+ }
+
+ return &File{Content: content.Bytes(), Hash: hash}, ioutil.WriteFile(path, content.Bytes(), 0644)
+}
diff --git a/vendor/github.com/docker/libnetwork/types/types.go b/vendor/github.com/docker/libnetwork/types/types.go
new file mode 100644
index 00000000..db1960c1
--- /dev/null
+++ b/vendor/github.com/docker/libnetwork/types/types.go
@@ -0,0 +1,653 @@
+// Package types contains types that are common across libnetwork project
+package types
+
+import (
+ "bytes"
+ "fmt"
+ "net"
+ "strconv"
+ "strings"
+
+ "github.com/ishidawataru/sctp"
+)
+
+// constants for the IP address type
+const (
+ IP = iota // IPv4 and IPv6
+ IPv4
+ IPv6
+)
+
+// EncryptionKey is the libnetwork representation of the key distributed by the lead
+// manager.
+type EncryptionKey struct {
+ Subsystem string
+ Algorithm int32
+ Key []byte
+ LamportTime uint64
+}
+
+// UUID represents a globally unique ID of various resources like network and endpoint
+type UUID string
+
+// QosPolicy represents a quality of service policy on an endpoint
+type QosPolicy struct {
+ MaxEgressBandwidth uint64
+}
+
+// TransportPort represents a local Layer 4 endpoint
+type TransportPort struct {
+ Proto Protocol
+ Port uint16
+}
+
+// Equal checks if this instance of TransportPort is equal to the passed one
+func (t *TransportPort) Equal(o *TransportPort) bool {
+ if t == o {
+ return true
+ }
+
+ if o == nil {
+ return false
+ }
+
+ if t.Proto != o.Proto || t.Port != o.Port {
+ return false
+ }
+
+ return true
+}
+
+// GetCopy returns a copy of this TransportPort structure instance
+func (t *TransportPort) GetCopy() TransportPort {
+ return TransportPort{Proto: t.Proto, Port: t.Port}
+}
+
+// String returns the TransportPort structure in string form
+func (t *TransportPort) String() string {
+ return fmt.Sprintf("%s/%d", t.Proto.String(), t.Port)
+}
+
+// FromString reads the TransportPort structure from string
+func (t *TransportPort) FromString(s string) error {
+ ps := strings.Split(s, "/")
+ if len(ps) == 2 {
+ t.Proto = ParseProtocol(ps[0])
+ if p, err := strconv.ParseUint(ps[1], 10, 16); err == nil {
+ t.Port = uint16(p)
+ return nil
+ }
+ }
+ return BadRequestErrorf("invalid format for transport port: %s", s)
+}
+
+// PortBinding represents a port binding between the container and the host
+type PortBinding struct {
+ Proto Protocol
+ IP net.IP
+ Port uint16
+ HostIP net.IP
+ HostPort uint16
+ HostPortEnd uint16
+}
+
+// HostAddr returns the host side transport address
+func (p PortBinding) HostAddr() (net.Addr, error) {
+ switch p.Proto {
+ case UDP:
+ return &net.UDPAddr{IP: p.HostIP, Port: int(p.HostPort)}, nil
+ case TCP:
+ return &net.TCPAddr{IP: p.HostIP, Port: int(p.HostPort)}, nil
+ case SCTP:
+ return &sctp.SCTPAddr{IPAddrs: []net.IPAddr{{IP: p.HostIP}}, Port: int(p.HostPort)}, nil
+ default:
+ return nil, ErrInvalidProtocolBinding(p.Proto.String())
+ }
+}
+
+// ContainerAddr returns the container side transport address
+func (p PortBinding)
ContainerAddr() (net.Addr, error) { + switch p.Proto { + case UDP: + return &net.UDPAddr{IP: p.IP, Port: int(p.Port)}, nil + case TCP: + return &net.TCPAddr{IP: p.IP, Port: int(p.Port)}, nil + case SCTP: + return &sctp.SCTPAddr{IPAddrs: []net.IPAddr{{IP: p.IP}}, Port: int(p.Port)}, nil + default: + return nil, ErrInvalidProtocolBinding(p.Proto.String()) + } +} + +// GetCopy returns a copy of this PortBinding structure instance +func (p *PortBinding) GetCopy() PortBinding { + return PortBinding{ + Proto: p.Proto, + IP: GetIPCopy(p.IP), + Port: p.Port, + HostIP: GetIPCopy(p.HostIP), + HostPort: p.HostPort, + HostPortEnd: p.HostPortEnd, + } +} + +// String returns the PortBinding structure in string form +func (p *PortBinding) String() string { + ret := fmt.Sprintf("%s/", p.Proto) + if p.IP != nil { + ret += p.IP.String() + } + ret = fmt.Sprintf("%s:%d/", ret, p.Port) + if p.HostIP != nil { + ret += p.HostIP.String() + } + ret = fmt.Sprintf("%s:%d", ret, p.HostPort) + return ret +} + +// FromString reads the PortBinding structure from string s. +// String s is a triple of "protocol/containerIP:port/hostIP:port" +// containerIP and hostIP can be in dotted decimal ("192.0.2.1") or IPv6 ("2001:db8::68") form. +// Zoned addresses ("169.254.0.23%eth0" or "fe80::1ff:fe23:4567:890a%eth0") are not supported. +// If string s is incorrectly formatted or the IP addresses or ports cannot be parsed, FromString +// returns an error. +func (p *PortBinding) FromString(s string) error { + ps := strings.Split(s, "/") + if len(ps) != 3 { + return BadRequestErrorf("invalid format for port binding: %s", s) + } + + p.Proto = ParseProtocol(ps[0]) + + var err error + if p.IP, p.Port, err = parseIPPort(ps[1]); err != nil { + return BadRequestErrorf("failed to parse Container IP/Port in port binding: %s", err.Error()) + } + + if p.HostIP, p.HostPort, err = parseIPPort(ps[2]); err != nil { + return BadRequestErrorf("failed to parse Host IP/Port in port binding: %s", err.Error()) + } + + return nil +} + +func parseIPPort(s string) (net.IP, uint16, error) { + hoststr, portstr, err := net.SplitHostPort(s) + if err != nil { + return nil, 0, err + } + + ip := net.ParseIP(hoststr) + if ip == nil { + return nil, 0, BadRequestErrorf("invalid ip: %s", hoststr) + } + + port, err := strconv.ParseUint(portstr, 10, 16) + if err != nil { + return nil, 0, BadRequestErrorf("invalid port: %s", portstr) + } + + return ip, uint16(port), nil +} + +// Equal checks if this instance of PortBinding is equal to the passed one +func (p *PortBinding) Equal(o *PortBinding) bool { + if p == o { + return true + } + + if o == nil { + return false + } + + if p.Proto != o.Proto || p.Port != o.Port || + p.HostPort != o.HostPort || p.HostPortEnd != o.HostPortEnd { + return false + } + + if p.IP != nil { + if !p.IP.Equal(o.IP) { + return false + } + } else { + if o.IP != nil { + return false + } + } + + if p.HostIP != nil { + if !p.HostIP.Equal(o.HostIP) { + return false + } + } else { + if o.HostIP != nil { + return false + } + } + + return true +} + +// ErrInvalidProtocolBinding is returned when the port binding protocol is not valid. 
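The PortBinding string format documented above, "protocol/containerIP:port/hostIP:port",
round-trips through FromString and String. A small sketch (hypothetical, not
vendored code):

    package main

    import (
        "fmt"

        "github.com/docker/libnetwork/types"
    )

    func main() {
        var pb types.PortBinding
        if err := pb.FromString("tcp/172.17.0.2:80/0.0.0.0:8080"); err != nil {
            panic(err)
        }
        fmt.Println(pb.String()) // tcp/172.17.0.2:80/0.0.0.0:8080

        host, _ := pb.HostAddr() // *net.TCPAddr for the host side
        fmt.Println(host.Network(), host.String())
    }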
+type ErrInvalidProtocolBinding string + +func (ipb ErrInvalidProtocolBinding) Error() string { + return fmt.Sprintf("invalid transport protocol: %s", string(ipb)) +} + +const ( + // ICMP is for the ICMP ip protocol + ICMP = 1 + // TCP is for the TCP ip protocol + TCP = 6 + // UDP is for the UDP ip protocol + UDP = 17 + // SCTP is for the SCTP ip protocol + SCTP = 132 +) + +// Protocol represents an IP protocol number +type Protocol uint8 + +func (p Protocol) String() string { + switch p { + case ICMP: + return "icmp" + case TCP: + return "tcp" + case UDP: + return "udp" + case SCTP: + return "sctp" + default: + return fmt.Sprintf("%d", p) + } +} + +// ParseProtocol returns the respective Protocol type for the passed string +func ParseProtocol(s string) Protocol { + switch strings.ToLower(s) { + case "icmp": + return ICMP + case "udp": + return UDP + case "tcp": + return TCP + case "sctp": + return SCTP + default: + return 0 + } +} + +// GetMacCopy returns a copy of the passed MAC address +func GetMacCopy(from net.HardwareAddr) net.HardwareAddr { + if from == nil { + return nil + } + to := make(net.HardwareAddr, len(from)) + copy(to, from) + return to +} + +// GetIPCopy returns a copy of the passed IP address +func GetIPCopy(from net.IP) net.IP { + if from == nil { + return nil + } + to := make(net.IP, len(from)) + copy(to, from) + return to +} + +// GetIPNetCopy returns a copy of the passed IP Network +func GetIPNetCopy(from *net.IPNet) *net.IPNet { + if from == nil { + return nil + } + bm := make(net.IPMask, len(from.Mask)) + copy(bm, from.Mask) + return &net.IPNet{IP: GetIPCopy(from.IP), Mask: bm} +} + +// GetIPNetCanonical returns the canonical form for the passed network +func GetIPNetCanonical(nw *net.IPNet) *net.IPNet { + if nw == nil { + return nil + } + c := GetIPNetCopy(nw) + c.IP = c.IP.Mask(nw.Mask) + return c +} + +// CompareIPNet returns equal if the two IP Networks are equal +func CompareIPNet(a, b *net.IPNet) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.IP.Equal(b.IP) && bytes.Equal(a.Mask, b.Mask) +} + +// GetMinimalIP returns the address in its shortest form +// If ip contains an IPv4-mapped IPv6 address, the 4-octet form of the IPv4 address will be returned. +// Otherwise ip is returned unchanged. +func GetMinimalIP(ip net.IP) net.IP { + if ip != nil && ip.To4() != nil { + return ip.To4() + } + return ip +} + +// GetMinimalIPNet returns a copy of the passed IP Network with congruent ip and mask notation +func GetMinimalIPNet(nw *net.IPNet) *net.IPNet { + if nw == nil { + return nil + } + if len(nw.IP) == 16 && nw.IP.To4() != nil { + m := nw.Mask + if len(m) == 16 { + m = m[12:16] + } + return &net.IPNet{IP: nw.IP.To4(), Mask: m} + } + return nw +} + +// IsIPNetValid returns true if the ipnet is a valid network/mask +// combination. Otherwise returns false. +func IsIPNetValid(nw *net.IPNet) bool { + return nw.String() != "0.0.0.0/0" +} + +var v4inV6MaskPrefix = []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} + +// compareIPMask checks if the passed ip and mask are semantically compatible. +// It returns the byte indexes for the address and mask so that caller can +// do bitwise operations without modifying address representation. 
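The two helpers built on this check, GetHostPartIP and GetBroadcastIP below,
simply AND or OR the address with the inverted mask. A hypothetical sketch of
what they yield (not vendored code):

    package main

    import (
        "fmt"
        "net"

        "github.com/docker/libnetwork/types"
    )

    func main() {
        ip := net.ParseIP("192.168.1.42").To4()
        mask := net.CIDRMask(24, 32)

        host, err := types.GetHostPartIP(ip, mask)
        if err != nil {
            panic(err)
        }
        bcast, err := types.GetBroadcastIP(ip, mask)
        if err != nil {
            panic(err)
        }
        fmt.Println(host, bcast) // 0.0.0.42 192.168.1.255
    }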
+func compareIPMask(ip net.IP, mask net.IPMask) (is int, ms int, err error) { + // Find the effective starting of address and mask + if len(ip) == net.IPv6len && ip.To4() != nil { + is = 12 + } + if len(ip[is:]) == net.IPv4len && len(mask) == net.IPv6len && bytes.Equal(mask[:12], v4inV6MaskPrefix) { + ms = 12 + } + // Check if address and mask are semantically compatible + if len(ip[is:]) != len(mask[ms:]) { + err = fmt.Errorf("ip and mask are not compatible: (%#v, %#v)", ip, mask) + } + return +} + +// GetHostPartIP returns the host portion of the ip address identified by the mask. +// IP address representation is not modified. If address and mask are not compatible +// an error is returned. +func GetHostPartIP(ip net.IP, mask net.IPMask) (net.IP, error) { + // Find the effective starting of address and mask + is, ms, err := compareIPMask(ip, mask) + if err != nil { + return nil, fmt.Errorf("cannot compute host portion ip address because %s", err) + } + + // Compute host portion + out := GetIPCopy(ip) + for i := 0; i < len(mask[ms:]); i++ { + out[is+i] &= ^mask[ms+i] + } + + return out, nil +} + +// GetBroadcastIP returns the broadcast ip address for the passed network (ip and mask). +// IP address representation is not modified. If address and mask are not compatible +// an error is returned. +func GetBroadcastIP(ip net.IP, mask net.IPMask) (net.IP, error) { + // Find the effective starting of address and mask + is, ms, err := compareIPMask(ip, mask) + if err != nil { + return nil, fmt.Errorf("cannot compute broadcast ip address because %s", err) + } + + // Compute broadcast address + out := GetIPCopy(ip) + for i := 0; i < len(mask[ms:]); i++ { + out[is+i] |= ^mask[ms+i] + } + + return out, nil +} + +// ParseCIDR returns the *net.IPNet represented by the passed CIDR notation +func ParseCIDR(cidr string) (n *net.IPNet, e error) { + var i net.IP + if i, n, e = net.ParseCIDR(cidr); e == nil { + n.IP = i + } + return +} + +const ( + // NEXTHOP indicates a StaticRoute with an IP next hop. + NEXTHOP = iota + + // CONNECTED indicates a StaticRoute with an interface for directly connected peers. + CONNECTED +) + +// StaticRoute is a statically-provisioned IP route. +type StaticRoute struct { + Destination *net.IPNet + + RouteType int // NEXT_HOP or CONNECTED + + // NextHop will be resolved by the kernel (i.e. as a loose hop). 
+ NextHop net.IP
+}
+
+// GetCopy returns a copy of this StaticRoute structure
+func (r *StaticRoute) GetCopy() *StaticRoute {
+ d := GetIPNetCopy(r.Destination)
+ nh := GetIPCopy(r.NextHop)
+ return &StaticRoute{Destination: d,
+ RouteType: r.RouteType,
+ NextHop: nh,
+ }
+}
+
+// InterfaceStatistics represents the interface's statistics
+type InterfaceStatistics struct {
+ RxBytes uint64
+ RxPackets uint64
+ RxErrors uint64
+ RxDropped uint64
+ TxBytes uint64
+ TxPackets uint64
+ TxErrors uint64
+ TxDropped uint64
+}
+
+func (is *InterfaceStatistics) String() string {
+ return fmt.Sprintf("\nRxBytes: %d, RxPackets: %d, RxErrors: %d, RxDropped: %d, TxBytes: %d, TxPackets: %d, TxErrors: %d, TxDropped: %d",
+ is.RxBytes, is.RxPackets, is.RxErrors, is.RxDropped, is.TxBytes, is.TxPackets, is.TxErrors, is.TxDropped)
+}
+
+/******************************
+ * Well-known Error Interfaces
+ ******************************/
+
+// MaskableError is an interface for errors which can be ignored by the caller
+type MaskableError interface {
+ // Maskable makes implementer into MaskableError type
+ Maskable()
+}
+
+// RetryError is an interface for errors which might get resolved through retry
+type RetryError interface {
+ // Retry makes implementer into RetryError type
+ Retry()
+}
+
+// BadRequestError is an interface for errors originated by a bad request
+type BadRequestError interface {
+ // BadRequest makes implementer into BadRequestError type
+ BadRequest()
+}
+
+// NotFoundError is an interface for errors raised because a needed resource is not available
+type NotFoundError interface {
+ // NotFound makes implementer into NotFoundError type
+ NotFound()
+}
+
+// ForbiddenError is an interface for errors which denote a valid request that cannot be honored
+type ForbiddenError interface {
+ // Forbidden makes implementer into ForbiddenError type
+ Forbidden()
+}
+
+// NoServiceError is an interface for errors returned when the required service is not available
+type NoServiceError interface {
+ // NoService makes implementer into NoServiceError type
+ NoService()
+}
+
+// TimeoutError is an interface for errors raised because of timeout
+type TimeoutError interface {
+ // Timeout makes implementer into TimeoutError type
+ Timeout()
+}
+
+// NotImplementedError is an interface for errors raised because requested functionality is not yet implemented
+type NotImplementedError interface {
+ // NotImplemented makes implementer into NotImplementedError type
+ NotImplemented()
+}
+
+// InternalError is an interface for errors raised because of an internal error
+type InternalError interface {
+ // Internal makes implementer into InternalError type
+ Internal()
+}
+
+/******************************
+ * Well-known Error Formatters
+ ******************************/
+
+// BadRequestErrorf creates an instance of BadRequestError
+func BadRequestErrorf(format string, params ...interface{}) error {
+ return badRequest(fmt.Sprintf(format, params...))
+}
+
+// NotFoundErrorf creates an instance of NotFoundError
+func NotFoundErrorf(format string, params ...interface{}) error {
+ return notFound(fmt.Sprintf(format, params...))
+}
+
+// ForbiddenErrorf creates an instance of ForbiddenError
+func ForbiddenErrorf(format string, params ...interface{}) error {
+ return forbidden(fmt.Sprintf(format, params...))
+}
+
+// NoServiceErrorf creates an instance of NoServiceError
+func NoServiceErrorf(format string, params ...interface{}) error {
+ return noService(fmt.Sprintf(format, params...))
+}
+
+// NotImplementedErrorf
creates an instance of NotImplementedError +func NotImplementedErrorf(format string, params ...interface{}) error { + return notImpl(fmt.Sprintf(format, params...)) +} + +// TimeoutErrorf creates an instance of TimeoutError +func TimeoutErrorf(format string, params ...interface{}) error { + return timeout(fmt.Sprintf(format, params...)) +} + +// InternalErrorf creates an instance of InternalError +func InternalErrorf(format string, params ...interface{}) error { + return internal(fmt.Sprintf(format, params...)) +} + +// InternalMaskableErrorf creates an instance of InternalError and MaskableError +func InternalMaskableErrorf(format string, params ...interface{}) error { + return maskInternal(fmt.Sprintf(format, params...)) +} + +// RetryErrorf creates an instance of RetryError +func RetryErrorf(format string, params ...interface{}) error { + return retry(fmt.Sprintf(format, params...)) +} + +/*********************** + * Internal Error Types + ***********************/ +type badRequest string + +func (br badRequest) Error() string { + return string(br) +} +func (br badRequest) BadRequest() {} + +type maskBadRequest string + +type notFound string + +func (nf notFound) Error() string { + return string(nf) +} +func (nf notFound) NotFound() {} + +type forbidden string + +func (frb forbidden) Error() string { + return string(frb) +} +func (frb forbidden) Forbidden() {} + +type noService string + +func (ns noService) Error() string { + return string(ns) +} +func (ns noService) NoService() {} + +type maskNoService string + +type timeout string + +func (to timeout) Error() string { + return string(to) +} +func (to timeout) Timeout() {} + +type notImpl string + +func (ni notImpl) Error() string { + return string(ni) +} +func (ni notImpl) NotImplemented() {} + +type internal string + +func (nt internal) Error() string { + return string(nt) +} +func (nt internal) Internal() {} + +type maskInternal string + +func (mnt maskInternal) Error() string { + return string(mnt) +} +func (mnt maskInternal) Internal() {} +func (mnt maskInternal) Maskable() {} + +type retry string + +func (r retry) Error() string { + return string(r) +} +func (r retry) Retry() {} diff --git a/vendor/github.com/genuinetools/img/AUTHORS b/vendor/github.com/genuinetools/img/AUTHORS new file mode 100644 index 00000000..25ec3fdd --- /dev/null +++ b/vendor/github.com/genuinetools/img/AUTHORS @@ -0,0 +1,16 @@ +# This file lists all individuals having contributed content to the repository. +# For how it is generated, see `make AUTHORS`. 
+ +Akihiro Suda +Anand Patil +Daniel Schep +David Medberry +Derek McGowan +Eric Hotinger +hansmi +Jessica Frazelle +Matthew Fisher +nogoegst +Paul Tagliamonte +Ryan Cox +Ryan Dunckel diff --git a/vendor/github.com/genuinetools/img/LICENSE b/vendor/github.com/genuinetools/img/LICENSE new file mode 100644 index 00000000..fd05738d --- /dev/null +++ b/vendor/github.com/genuinetools/img/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2018 The Genuinetools Authors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/genuinetools/img/client/client.go b/vendor/github.com/genuinetools/img/client/client.go new file mode 100644 index 00000000..7518c75c --- /dev/null +++ b/vendor/github.com/genuinetools/img/client/client.go @@ -0,0 +1,57 @@ +package client + +import ( + "os" + "path/filepath" + + "github.com/containerd/containerd/snapshots/overlay" + "github.com/genuinetools/img/types" + "github.com/moby/buildkit/control" + "github.com/moby/buildkit/session" + "github.com/sirupsen/logrus" +) + +// Client holds the information for the client we will use for communicating +// with the buildkit controller. +type Client struct { + backend string + localDirs map[string]string + root string + + sessionManager *session.Manager + controller *control.Controller +} + +// New returns a new client for communicating with the buildkit controller. +func New(root, backend string, localDirs map[string]string) (*Client, error) { + // Set the name for the directory executor. + name := "runc" + + switch backend { + case types.AutoBackend: + if overlay.Supported(root) == nil { + backend = types.OverlayFSBackend + } else { + backend = types.NativeBackend + } + logrus.Debugf("using backend: %s", backend) + } + + // Create the root/ + root = filepath.Join(root, name, backend) + if err := os.MkdirAll(root, 0700); err != nil { + return nil, err + } + + // Create the start of the client. + return &Client{ + backend: backend, + root: root, + localDirs: localDirs, + }, nil +} + +// Close safely closes the client. +// This used to shut down the FUSE server but since that was removed +// it is basically a no-op now. 
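Construction of this client, for reference, looks roughly like the following
(a hypothetical sketch with a made-up state directory; not part of the
vendored code):

    package main

    import (
        "log"

        "github.com/genuinetools/img/client"
        "github.com/genuinetools/img/types"
    )

    func main() {
        // "auto" resolves to overlayfs when supported, native otherwise.
        c, err := client.New("/tmp/img-state", types.AutoBackend, nil)
        if err != nil {
            log.Fatal(err)
        }
        defer c.Close()
        log.Println("buildkit client ready")
    }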
+func (c *Client) Close() {} diff --git a/vendor/github.com/genuinetools/img/client/controller.go b/vendor/github.com/genuinetools/img/client/controller.go new file mode 100644 index 00000000..eb2498b5 --- /dev/null +++ b/vendor/github.com/genuinetools/img/client/controller.go @@ -0,0 +1,83 @@ +package client + +import ( + "fmt" + "path/filepath" + + "github.com/containerd/containerd/remotes/docker" + "github.com/moby/buildkit/cache/remotecache" + inlineremotecache "github.com/moby/buildkit/cache/remotecache/inline" + localremotecache "github.com/moby/buildkit/cache/remotecache/local" + registryremotecache "github.com/moby/buildkit/cache/remotecache/registry" + "github.com/moby/buildkit/control" + "github.com/moby/buildkit/frontend" + "github.com/moby/buildkit/frontend/dockerfile/builder" + "github.com/moby/buildkit/frontend/gateway" + "github.com/moby/buildkit/frontend/gateway/forwarder" + "github.com/moby/buildkit/solver/bboltcachestorage" + "github.com/moby/buildkit/worker" + "github.com/moby/buildkit/worker/base" +) + +func (c *Client) createController() error { + sm, err := c.getSessionManager() + if err != nil { + return fmt.Errorf("creating session manager failed: %v", err) + } + // Create the worker opts. + opt, err := c.createWorkerOpt(true) + if err != nil { + return fmt.Errorf("creating worker opt failed: %v", err) + } + + // Create the new worker. + w, err := base.NewWorker(opt) + if err != nil { + return fmt.Errorf("creating worker failed: %v", err) + } + + // Create the worker controller. + wc := &worker.Controller{} + if err := wc.Add(w); err != nil { + return fmt.Errorf("adding worker to worker controller failed: %v", err) + } + + // Add the frontends. + frontends := map[string]frontend.Frontend{} + frontends["dockerfile.v0"] = forwarder.NewGatewayForwarder(wc, builder.Build) + frontends["gateway.v0"] = gateway.NewGatewayFrontend(wc) + + // Create the cache storage + cacheStorage, err := bboltcachestorage.NewStore(filepath.Join(c.root, "cache.db")) + if err != nil { + return err + } + + remoteCacheExporterFuncs := map[string]remotecache.ResolveCacheExporterFunc{ + "inline": inlineremotecache.ResolveCacheExporterFunc(), + "local": localremotecache.ResolveCacheExporterFunc(sm), + "registry": registryremotecache.ResolveCacheExporterFunc(sm, docker.ConfigureDefaultRegistries()), + } + remoteCacheImporterFuncs := map[string]remotecache.ResolveCacheImporterFunc{ + "local": localremotecache.ResolveCacheImporterFunc(sm), + "registry": registryremotecache.ResolveCacheImporterFunc(sm, opt.ContentStore, docker.ConfigureDefaultRegistries()), + } + + // Create the controller. + controller, err := control.NewController(control.Opt{ + SessionManager: sm, + WorkerController: wc, + Frontends: frontends, + ResolveCacheExporterFuncs: remoteCacheExporterFuncs, + ResolveCacheImporterFuncs: remoteCacheImporterFuncs, + CacheKeyStorage: cacheStorage, + }) + if err != nil { + return fmt.Errorf("creating new controller failed: %v", err) + } + + // Set the controller for the client. + c.controller = controller + + return nil +} diff --git a/vendor/github.com/genuinetools/img/client/diskusage.go b/vendor/github.com/genuinetools/img/client/diskusage.go new file mode 100644 index 00000000..e33e2a63 --- /dev/null +++ b/vendor/github.com/genuinetools/img/client/diskusage.go @@ -0,0 +1,26 @@ +package client + +import ( + "context" + "fmt" + + controlapi "github.com/moby/buildkit/api/services/control" +) + +// DiskUsage returns the disk usage being consumed by the buildkit controller. 
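A hypothetical call site (a sketch only, with an assumed state directory, and
assuming the buildkit control API exposes the usage records as Record on
DiskUsageResponse), printing one line per cache record:

    package main

    import (
        "context"
        "log"

        controlapi "github.com/moby/buildkit/api/services/control"
        "github.com/genuinetools/img/client"
        "github.com/genuinetools/img/types"
    )

    func main() {
        c, err := client.New("/tmp/img-state", types.AutoBackend, nil)
        if err != nil {
            log.Fatal(err)
        }
        defer c.Close()

        resp, err := c.DiskUsage(context.Background(), &controlapi.DiskUsageRequest{})
        if err != nil {
            log.Fatal(err)
        }
        for _, r := range resp.Record {
            log.Printf("%s\t%d bytes", r.ID, r.Size_)
        }
    }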
+func (c *Client) DiskUsage(ctx context.Context, req *controlapi.DiskUsageRequest) (*controlapi.DiskUsageResponse, error) {
+ if c.controller == nil {
+ // Create the controller.
+ if err := c.createController(); err != nil {
+ return nil, err
+ }
+ }
+
+ // Call diskusage.
+ resp, err := c.controller.DiskUsage(ctx, req)
+ if err != nil {
+ return nil, fmt.Errorf("getting disk usage failed: %v", err)
+ }
+
+ return resp, nil
+}
diff --git a/vendor/github.com/genuinetools/img/client/list.go b/vendor/github.com/genuinetools/img/client/list.go
new file mode 100644
index 00000000..311cfe2c
--- /dev/null
+++ b/vendor/github.com/genuinetools/img/client/list.go
@@ -0,0 +1,67 @@
+package client
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/containerd/containerd/content/local"
+ "github.com/containerd/containerd/images"
+ ctdmetadata "github.com/containerd/containerd/metadata"
+ "github.com/containerd/containerd/platforms"
+ bolt "go.etcd.io/bbolt"
+)
+
+// ListedImage represents an image structure returned from ListImages.
+// It extends containerd/images.Image with extra fields.
+type ListedImage struct {
+ images.Image
+ ContentSize int64
+}
+
+// ListImages returns the images from the image store.
+func (c *Client) ListImages(ctx context.Context, filters ...string) ([]ListedImage, error) {
+ dbPath := filepath.Join(c.root, "containerdmeta.db")
+ if _, err := os.Stat(dbPath); os.IsNotExist(err) {
+ // The metadata database does not exist so we should just return as if there
+ // were no results.
+ return nil, nil
+ }
+
+ // Open the bolt database for metadata.
+ // Since we are only listing we can open it as read-only.
+ db, err := bolt.Open(dbPath, 0644, &bolt.Options{ReadOnly: true})
+ if err != nil {
+ return nil, fmt.Errorf("opening boltdb failed: %v", err)
+ }
+
+ // Create the content store locally.
+ contentStore, err := local.NewStore(filepath.Join(c.root, "content"))
+ if err != nil {
+ return nil, fmt.Errorf("creating content store failed: %v", err)
+ }
+
+ // Create the database for metadata.
+ mdb := ctdmetadata.NewDB(db, contentStore, nil)
+
+ // Create the image store.
+ imageStore := ctdmetadata.NewImageStore(mdb)
+
+ // List the images in the image store.
+ i, err := imageStore.List(ctx, filters...)
+ if err != nil {
+ return nil, fmt.Errorf("listing images with filters (%s) failed: %v", strings.Join(filters, ", "), err)
+ }
+
+ listedImages := []ListedImage{}
+ for _, image := range i {
+ size, err := image.Size(ctx, contentStore, platforms.Default())
+ if err != nil {
+ return nil, fmt.Errorf("calculating size of image %s failed: %v", image.Name, err)
+ }
+ listedImages = append(listedImages, ListedImage{Image: image, ContentSize: size})
+ }
+ return listedImages, nil
+}
diff --git a/vendor/github.com/genuinetools/img/client/prune.go b/vendor/github.com/genuinetools/img/client/prune.go
new file mode 100644
index 00000000..7f968f32
--- /dev/null
+++ b/vendor/github.com/genuinetools/img/client/prune.go
@@ -0,0 +1,61 @@
+package client
+
+import (
+ "context"
+ "fmt"
+
+ controlapi "github.com/moby/buildkit/api/services/control"
+ "github.com/moby/buildkit/client"
+ "github.com/moby/buildkit/worker/base"
+ "golang.org/x/sync/errgroup"
+)
+
+// Prune calls Prune on the worker.
+func (c *Client) Prune(ctx context.Context) ([]*controlapi.UsageRecord, error) {
+ ch := make(chan client.UsageInfo)
+
+ // Create the worker opts.
+ opt, err := c.createWorkerOpt(false)
+ if err != nil {
+ return nil, fmt.Errorf("creating worker opt failed: %v", err)
+ }
+
+ // Create the new worker.
+ w, err := base.NewWorker(opt)
+ if err != nil {
+ return nil, fmt.Errorf("creating worker failed: %v", err)
+ }
+
+ eg, ctx := errgroup.WithContext(ctx)
+ eg.Go(func() error {
+ // Call prune on the worker.
+ return w.Prune(ctx, ch)
+ })
+
+ eg2, ctx := errgroup.WithContext(ctx)
+ eg2.Go(func() error {
+ defer close(ch)
+ return eg.Wait()
+ })
+
+ usage := []*controlapi.UsageRecord{}
+ eg2.Go(func() error {
+ for r := range ch {
+ usage = append(usage, &controlapi.UsageRecord{
+ ID: r.ID,
+ Mutable: r.Mutable,
+ InUse: r.InUse,
+ Size_: r.Size,
+ Parent: r.Parent,
+ UsageCount: int64(r.UsageCount),
+ Description: r.Description,
+ CreatedAt: r.CreatedAt,
+ LastUsedAt: r.LastUsedAt,
+ })
+ }
+
+ return nil
+ })
+
+ return usage, eg2.Wait()
+}
diff --git a/vendor/github.com/genuinetools/img/client/pull.go b/vendor/github.com/genuinetools/img/client/pull.go
new file mode 100644
index 00000000..49d1ffe1
--- /dev/null
+++ b/vendor/github.com/genuinetools/img/client/pull.go
@@ -0,0 +1,117 @@
+package client
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/containerd/containerd/platforms"
+ "github.com/docker/distribution/reference"
+ "github.com/moby/buildkit/cache"
+ "github.com/moby/buildkit/exporter"
+ imageexporter "github.com/moby/buildkit/exporter/containerimage"
+ "github.com/moby/buildkit/source"
+ "github.com/moby/buildkit/source/containerimage"
+)
+
+// Pull retrieves an image from a remote registry.
+func (c *Client) Pull(ctx context.Context, image string) (*ListedImage, error) {
+ sm, err := c.getSessionManager()
+ if err != nil {
+ return nil, err
+ }
+ // Parse the image name and tag.
+ named, err := reference.ParseNormalizedNamed(image)
+ if err != nil {
+ return nil, fmt.Errorf("parsing image name %q failed: %v", image, err)
+ }
+ // Add the latest tag if they did not provide one.
+ named = reference.TagNameOnly(named)
+ image = named.String()
+
+ // Get the identifier for the image.
+ identifier, err := source.NewImageIdentifier(image)
+ if err != nil {
+ return nil, err
+ }
+
+ // Create the worker opts.
+ opt, err := c.createWorkerOpt(false)
+ if err != nil {
+ return nil, fmt.Errorf("creating worker opt failed: %v", err)
+ }
+
+ cm, err := cache.NewManager(cache.ManagerOpt{
+ Snapshotter: opt.Snapshotter,
+ MetadataStore: opt.MetadataStore,
+ ContentStore: opt.ContentStore,
+ LeaseManager: opt.LeaseManager,
+ GarbageCollect: opt.GarbageCollect,
+ Applier: opt.Applier,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Create the source for the pull.
+ srcOpt := containerimage.SourceOpt{
+ Snapshotter: opt.Snapshotter,
+ ContentStore: opt.ContentStore,
+ Applier: opt.Applier,
+ CacheAccessor: cm,
+ ImageStore: opt.ImageStore,
+ RegistryHosts: opt.RegistryHosts,
+ LeaseManager: opt.LeaseManager,
+ }
+ src, err := containerimage.NewSource(srcOpt)
+ if err != nil {
+ return nil, err
+ }
+ s, err := src.Resolve(ctx, identifier, sm)
+ if err != nil {
+ return nil, err
+ }
+ ref, err := s.Snapshot(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ // Create the exporter for the pull.
+ iw, err := imageexporter.NewImageWriter(imageexporter.WriterOpt{
+ Snapshotter: opt.Snapshotter,
+ ContentStore: opt.ContentStore,
+ Differ: opt.Differ,
+ })
+ if err != nil {
+ return nil, err
+ }
+ expOpt := imageexporter.Opt{
+ SessionManager: sm,
+ ImageWriter: iw,
+ Images: opt.ImageStore,
+ RegistryHosts: opt.RegistryHosts,
+ LeaseManager: opt.LeaseManager,
+ }
+ exp, err := imageexporter.New(expOpt)
+ if err != nil {
+ return nil, err
+ }
+ e, err := exp.Resolve(ctx, map[string]string{"name": image})
+ if err != nil {
+ return nil, err
+ }
+ if _, err := e.Export(ctx, exporter.Source{Ref: ref}); err != nil {
+ return nil, err
+ }
+
+ // Get the image.
+ img, err := opt.ImageStore.Get(ctx, image)
+ if err != nil {
+ return nil, fmt.Errorf("getting image %s from image store failed: %v", image, err)
+ }
+ size, err := img.Size(ctx, opt.ContentStore, platforms.Default())
+ if err != nil {
+ return nil, fmt.Errorf("calculating size of image %s failed: %v", img.Name, err)
+ }
+
+ return &ListedImage{Image: img, ContentSize: size}, nil
+}
diff --git a/vendor/github.com/genuinetools/img/client/push.go b/vendor/github.com/genuinetools/img/client/push.go
new file mode 100644
index 00000000..fd0cc1a4
--- /dev/null
+++ b/vendor/github.com/genuinetools/img/client/push.go
@@ -0,0 +1,38 @@
+package client
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/docker/distribution/reference"
+ "github.com/moby/buildkit/util/push"
+)
+
+// Push sends an image to a remote registry.
+func (c *Client) Push(ctx context.Context, image string, insecure bool) error {
+ // Parse the image name and tag.
+ named, err := reference.ParseNormalizedNamed(image)
+ if err != nil {
+ return fmt.Errorf("parsing image name %q failed: %v", image, err)
+ }
+ // Add the latest tag if they did not provide one.
+ named = reference.TagNameOnly(named)
+ image = named.String()
+
+ // Create the worker opts.
+ opt, err := c.createWorkerOpt(false)
+ if err != nil {
+ return fmt.Errorf("creating worker opt failed: %v", err)
+ }
+
+ imgObj, err := opt.ImageStore.Get(ctx, image)
+ if err != nil {
+ return fmt.Errorf("getting image %q failed: %v", image, err)
+ }
+
+ sm, err := c.getSessionManager()
+ if err != nil {
+ return err
+ }
+ return push.Push(ctx, sm, opt.ContentStore, imgObj.Target.Digest, image, insecure, opt.RegistryHosts, false)
+}
diff --git a/vendor/github.com/genuinetools/img/client/remove.go b/vendor/github.com/genuinetools/img/client/remove.go
new file mode 100644
index 00000000..b94d7931
--- /dev/null
+++ b/vendor/github.com/genuinetools/img/client/remove.go
@@ -0,0 +1,34 @@
+package client
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/containerd/containerd/images"
+ "github.com/docker/distribution/reference"
+)
+
+// RemoveImage removes an image from the image store.
+func (c *Client) RemoveImage(ctx context.Context, image string) error {
+ named, err := reference.ParseNormalizedNamed(image)
+ if err != nil {
+ return fmt.Errorf("parsing image name %q failed: %v", image, err)
+ }
+ // Add the latest tag if they did not provide one.
+ named = reference.TagNameOnly(named)
+ image = named.String()
+
+ // Create the worker opts.
+ opt, err := c.createWorkerOpt(false)
+ if err != nil {
+ return fmt.Errorf("creating worker opt failed: %v", err)
+ }
+
+ // Remove the image from the image store.
+ err = opt.ImageStore.Delete(ctx, image, images.SynchronousDelete())
+ if err != nil {
+ return fmt.Errorf("removing image failed: %v", err)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/genuinetools/img/client/save.go b/vendor/github.com/genuinetools/img/client/save.go
new file mode 100644
index 00000000..24c9eca6
--- /dev/null
+++ b/vendor/github.com/genuinetools/img/client/save.go
@@ -0,0 +1,53 @@
+package client
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+
+ "github.com/containerd/containerd/images/archive"
+ "github.com/docker/distribution/reference"
+)
+
+// SaveImage exports an image as a tarball which can then be imported by docker.
+func (c *Client) SaveImage(ctx context.Context, image, format string, writer io.WriteCloser) error {
+ // Parse the image name and tag.
+ named, err := reference.ParseNormalizedNamed(image)
+ if err != nil {
+ return fmt.Errorf("parsing image name %q failed: %v", image, err)
+ }
+ // Add the latest tag if they did not provide one.
+ named = reference.TagNameOnly(named)
+ image = named.String()
+
+ // Create the worker opts.
+ opt, err := c.createWorkerOpt(false)
+ if err != nil {
+ return fmt.Errorf("creating worker opt failed: %v", err)
+ }
+
+ if opt.ImageStore == nil {
+ return errors.New("image store is nil")
+ }
+
+ exportOpts := []archive.ExportOpt{
+ archive.WithImage(opt.ImageStore, image),
+ }
+
+ switch format {
+ case "docker":
+
+ case "oci":
+ exportOpts = append(exportOpts, archive.WithSkipDockerManifest())
+
+ default:
+ return fmt.Errorf("%q is not a valid format", format)
+ }
+
+ if err := archive.Export(ctx, opt.ContentStore, writer, exportOpts...); err != nil {
+ return fmt.Errorf("exporting image %s failed: %v", image, err)
+ }
+
+ return writer.Close()
+}
diff --git a/vendor/github.com/genuinetools/img/client/session.go b/vendor/github.com/genuinetools/img/client/session.go
new file mode 100644
index 00000000..01b3a5d5
--- /dev/null
+++ b/vendor/github.com/genuinetools/img/client/session.go
@@ -0,0 +1,49 @@
+package client
+
+import (
+ "context"
+ "os"
+
+ "github.com/moby/buildkit/session"
+ "github.com/moby/buildkit/session/auth/authprovider"
+ "github.com/moby/buildkit/session/filesync"
+ "github.com/moby/buildkit/session/testutil"
+ "github.com/pkg/errors"
+)
+
+func (c *Client) getSessionManager() (*session.Manager, error) {
+ if c.sessionManager == nil {
+ var err error
+ c.sessionManager, err = session.NewManager()
+ if err != nil {
+ return nil, err
+ }
+ }
+ return c.sessionManager, nil
+}
+
+// Session creates the session manager and returns the session and its
+// dialer.
+func (c *Client) Session(ctx context.Context) (*session.Session, session.Dialer, error) {
+ m, err := c.getSessionManager()
+ if err != nil {
+ return nil, nil, errors.Wrap(err, "failed to create session manager")
+ }
+ sessionName := "img"
+ s, err := session.NewSession(ctx, sessionName, "")
+ if err != nil {
+ return nil, nil, errors.Wrap(err, "failed to create session")
+ }
+ syncedDirs := make([]filesync.SyncedDir, 0, len(c.localDirs))
+ for name, d := range c.localDirs {
+ syncedDirs = append(syncedDirs, filesync.SyncedDir{Name: name, Dir: d})
+ }
+ s.Allow(filesync.NewFSSyncProvider(syncedDirs))
+ s.Allow(authprovider.NewDockerAuthProvider(os.Stderr))
+ return s, sessionDialer(s, m), err
+}
+
+func sessionDialer(s *session.Session, m *session.Manager) session.Dialer {
+ // FIXME: rename testutil
+ return session.Dialer(testutil.TestStream(testutil.Handler(m.HandleConn)))
+}
diff --git a/vendor/github.com/genuinetools/img/client/solve.go b/vendor/github.com/genuinetools/img/client/solve.go
new file mode 100644
index 00000000..4ec4a0b2
--- /dev/null
+++ b/vendor/github.com/genuinetools/img/client/solve.go
@@ -0,0 +1,68 @@
+package client
+
+import (
+ "context"
+ "time"
+
+ controlapi "github.com/moby/buildkit/api/services/control"
+ "github.com/pkg/errors"
+ "golang.org/x/sync/errgroup"
+ "google.golang.org/grpc"
+)
+
+// Solve calls Solve on the controller.
+func (c *Client) Solve(ctx context.Context, req *controlapi.SolveRequest, ch chan *controlapi.StatusResponse) error {
+ defer close(ch)
+ if c.controller == nil {
+ // Create the controller.
+ if err := c.createController(); err != nil {
+ return err
+ }
+ }
+
+ statusCtx, cancelStatus := context.WithCancel(context.Background())
+ eg, ctx := errgroup.WithContext(ctx)
+ eg.Go(func() error {
+ defer func() { // make sure the Status ends cleanly on build errors
+ go func() {
+ <-time.After(3 * time.Second)
+ cancelStatus()
+ }()
+ }()
+ _, err := c.controller.Solve(ctx, req)
+ if err != nil {
+ return errors.Wrap(err, "failed to solve")
+ }
+ return nil
+ })
+
+ eg.Go(func() error {
+ srv := &controlStatusServer{
+ ctx: statusCtx,
+ ch: ch,
+ }
+ return c.controller.Status(&controlapi.StatusRequest{
+ Ref: req.Ref,
+ }, srv)
+ })
+ return eg.Wait()
+}
+
+type controlStatusServer struct {
+ ctx context.Context
+ ch chan *controlapi.StatusResponse
+ grpc.ServerStream // dummy
+}
+
+func (x *controlStatusServer) SendMsg(m interface{}) error {
+ return x.Send(m.(*controlapi.StatusResponse))
+}
+
+func (x *controlStatusServer) Send(m *controlapi.StatusResponse) error {
+ x.ch <- m
+ return nil
+}
+
+func (x *controlStatusServer) Context() context.Context {
+ return x.ctx
+}
diff --git a/vendor/github.com/genuinetools/img/client/tag.go b/vendor/github.com/genuinetools/img/client/tag.go
new file mode 100644
index 00000000..cdfe1164
--- /dev/null
+++ b/vendor/github.com/genuinetools/img/client/tag.go
@@ -0,0 +1,68 @@
+package client
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "time"
+
+ "github.com/containerd/containerd/errdefs"
+ "github.com/containerd/containerd/images"
+ "github.com/docker/distribution/reference"
+)
+
+// TagImage creates a reference to an image with a specific name in the image store.
+func (c *Client) TagImage(ctx context.Context, src, dest string) error {
+ // Parse the image name and tag for the src image.
+ named, err := reference.ParseNormalizedNamed(src)
+ if err != nil {
+ return fmt.Errorf("parsing image name %q failed: %v", src, err)
+ }
+ // Add the latest tag if they did not provide one.
+ named = reference.TagNameOnly(named) + src = named.String() + + // Parse the image name and tag for the dest image. + named, err = reference.ParseNormalizedNamed(dest) + if err != nil { + return fmt.Errorf("parsing image name %q failed: %v", dest, err) + } + // Add the latest lag if they did not provide one. + named = reference.TagNameOnly(named) + dest = named.String() + + // Create the worker opts. + opt, err := c.createWorkerOpt(false) + if err != nil { + return fmt.Errorf("creating worker opt failed: %v", err) + } + + if opt.ImageStore == nil { + return errors.New("image store is nil") + } + + // Get the source image. + image, err := opt.ImageStore.Get(ctx, src) + if err != nil { + return fmt.Errorf("getting image %s from image store failed: %v", src, err) + } + + // Update the target image. Create it if it does not exist. + img := images.Image{ + Name: dest, + Target: image.Target, + CreatedAt: time.Now(), + } + if _, err := opt.ImageStore.Update(ctx, img); err != nil { + if !errdefs.IsNotFound(err) { + return fmt.Errorf("updating image store for %s failed: %v", dest, err) + } + + // Create it if we didn't find it. + if _, err := opt.ImageStore.Create(ctx, img); err != nil { + return fmt.Errorf("creating image in image store for %s failed: %v", dest, err) + } + } + + return nil +} diff --git a/vendor/github.com/genuinetools/img/client/unpack.go b/vendor/github.com/genuinetools/img/client/unpack.go new file mode 100644 index 00000000..8eb5baf9 --- /dev/null +++ b/vendor/github.com/genuinetools/img/client/unpack.go @@ -0,0 +1,75 @@ +package client + +import ( + "context" + "errors" + "fmt" + "os" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/platforms" + "github.com/docker/distribution/reference" + "github.com/docker/docker/pkg/archive" + "github.com/sirupsen/logrus" +) + +// Unpack exports an image to a rootfs destination directory. +func (c *Client) Unpack(ctx context.Context, image, dest string) error { + if len(dest) < 1 { + return errors.New("destination directory for rootfs cannot be empty") + } + + if _, err := os.Stat(dest); err == nil { + return fmt.Errorf("destination directory already exists: %s", dest) + } + + // Parse the image name and tag. + named, err := reference.ParseNormalizedNamed(image) + if err != nil { + return fmt.Errorf("parsing image name %q failed: %v", image, err) + } + // Add the latest lag if they did not provide one. + named = reference.TagNameOnly(named) + image = named.String() + + // Create the worker opts. + opt, err := c.createWorkerOpt(true) + if err != nil { + return fmt.Errorf("creating worker opt failed: %v", err) + } + + if opt.ImageStore == nil { + return errors.New("image store is nil") + } + + img, err := opt.ImageStore.Get(ctx, image) + if err != nil { + return fmt.Errorf("getting image %s from image store failed: %v", image, err) + } + + manifest, err := images.Manifest(ctx, opt.ContentStore, img.Target, platforms.Default()) + if err != nil { + return fmt.Errorf("getting image manifest failed: %v", err) + } + + for _, desc := range manifest.Layers { + logrus.Debugf("Unpacking layer %s", desc.Digest.String()) + + // Read the blob from the content store. + layer, err := opt.ContentStore.ReaderAt(ctx, desc) + if err != nil { + return fmt.Errorf("getting reader for digest %s failed: %v", desc.Digest.String(), err) + } + + // Unpack the tarfile to the rootfs path. 
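+		// Setting NoLchown below makes Untar skip chown syscalls while
+		// extracting, so unpacking also works when img runs without root.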
+ // FROM: https://godoc.org/github.com/moby/moby/pkg/archive#TarOptions + if err := archive.Untar(content.NewReader(layer), dest, &archive.TarOptions{ + NoLchown: true, + }); err != nil { + return fmt.Errorf("extracting tar for %s to directory %s failed: %v", desc.Digest.String(), dest, err) + } + } + + return nil +} diff --git a/vendor/github.com/genuinetools/img/client/workeropt.go b/vendor/github.com/genuinetools/img/client/workeropt.go new file mode 100644 index 00000000..58640c0c --- /dev/null +++ b/vendor/github.com/genuinetools/img/client/workeropt.go @@ -0,0 +1,156 @@ +package client + +import ( + "context" + "fmt" + "github.com/containerd/containerd/remotes/docker" + "github.com/moby/buildkit/util/leaseutil" + "os/exec" + "path/filepath" + "syscall" + + "github.com/containerd/containerd/content/local" + "github.com/containerd/containerd/diff/apply" + "github.com/containerd/containerd/diff/walking" + ctdmetadata "github.com/containerd/containerd/metadata" + "github.com/containerd/containerd/platforms" + ctdsnapshot "github.com/containerd/containerd/snapshots" + "github.com/containerd/containerd/snapshots/native" + "github.com/containerd/containerd/snapshots/overlay" + "github.com/genuinetools/img/types" + "github.com/moby/buildkit/cache/metadata" + "github.com/moby/buildkit/executor" + executoroci "github.com/moby/buildkit/executor/oci" + "github.com/moby/buildkit/executor/runcexecutor" + containerdsnapshot "github.com/moby/buildkit/snapshot/containerd" + "github.com/moby/buildkit/util/binfmt_misc" + "github.com/moby/buildkit/util/network/netproviders" + "github.com/moby/buildkit/worker/base" + specs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/opencontainers/runc/libcontainer/system" + "github.com/sirupsen/logrus" + bolt "go.etcd.io/bbolt" +) + +// createWorkerOpt creates a base.WorkerOpt to be used for a new worker. +func (c *Client) createWorkerOpt(withExecutor bool) (opt base.WorkerOpt, err error) { + // Create the metadata store. + md, err := metadata.NewStore(filepath.Join(c.root, "metadata.db")) + if err != nil { + return opt, err + } + + snapshotRoot := filepath.Join(c.root, "snapshots") + unprivileged := system.GetParentNSeuid() != 0 + + // Create the snapshotter. + var ( + s ctdsnapshot.Snapshotter + ) + switch c.backend { + case types.NativeBackend: + s, err = native.NewSnapshotter(snapshotRoot) + case types.OverlayFSBackend: + // On some distros such as Ubuntu overlayfs can be mounted without privileges + s, err = overlay.NewSnapshotter(snapshotRoot) + default: + // "auto" backend needs to be already resolved on Client instantiation + return opt, fmt.Errorf("%s is not a valid snapshots backend", c.backend) + } + if err != nil { + return opt, fmt.Errorf("creating %s snapshotter failed: %v", c.backend, err) + } + + var exe executor.Executor + if withExecutor { + exeOpt := runcexecutor.Opt{ + Root: filepath.Join(c.root, "executor"), + Rootless: unprivileged, + ProcessMode: processMode(), + } + + np, err := netproviders.Providers(netproviders.Opt{Mode: "auto"}) + if err != nil { + return base.WorkerOpt{}, err + } + + exe, err = runcexecutor.New(exeOpt, np) + if err != nil { + return opt, err + } + } + + // Create the content store locally. + contentStore, err := local.NewStore(filepath.Join(c.root, "content")) + if err != nil { + return opt, err + } + + // Open the bolt database for metadata. + db, err := bolt.Open(filepath.Join(c.root, "containerdmeta.db"), 0644, nil) + if err != nil { + return opt, err + } + + // Create the new database for metadata. 
+ mdb := ctdmetadata.NewDB(db, contentStore, map[string]ctdsnapshot.Snapshotter{ + c.backend: s, + }) + if err := mdb.Init(context.TODO()); err != nil { + return opt, err + } + + // Create the image store. + imageStore := ctdmetadata.NewImageStore(mdb) + + contentStore = containerdsnapshot.NewContentStore(mdb.ContentStore(), "buildkit") + + id, err := base.ID(c.root) + if err != nil { + return opt, err + } + + xlabels := base.Labels("oci", c.backend) + + var supportedPlatforms []specs.Platform + for _, p := range binfmt_misc.SupportedPlatforms(false) { + parsed, err := platforms.Parse(p) + if err != nil { + return opt, err + } + supportedPlatforms = append(supportedPlatforms, platforms.Normalize(parsed)) + } + + opt = base.WorkerOpt{ + ID: id, + Labels: xlabels, + MetadataStore: md, + Executor: exe, + Snapshotter: containerdsnapshot.NewSnapshotter(c.backend, mdb.Snapshotter(c.backend), "buildkit", nil), + ContentStore: contentStore, + Applier: apply.NewFileSystemApplier(contentStore), + Differ: walking.NewWalkingDiff(contentStore), + ImageStore: imageStore, + Platforms: supportedPlatforms, + RegistryHosts: docker.ConfigureDefaultRegistries(), + LeaseManager: leaseutil.WithNamespace(ctdmetadata.NewLeaseManager(mdb), "buildkit"), + GarbageCollect: mdb.GarbageCollect, + } + + return opt, err +} + +func processMode() executoroci.ProcessMode { + mountArgs := []string{"-t", "proc", "none", "/proc"} + cmd := exec.Command("mount", mountArgs...) + cmd.SysProcAttr = &syscall.SysProcAttr{ + Pdeathsig: syscall.SIGKILL, + Cloneflags: syscall.CLONE_NEWPID, + Unshareflags: syscall.CLONE_NEWNS, + } + if b, err := cmd.CombinedOutput(); err != nil { + logrus.Warnf("Process sandbox is not available, consider unmasking procfs: %v", string(b)) + return executoroci.NoProcessSandbox + } + return executoroci.ProcessSandbox +} diff --git a/vendor/github.com/genuinetools/img/types/types.go b/vendor/github.com/genuinetools/img/types/types.go new file mode 100644 index 00000000..c73d7363 --- /dev/null +++ b/vendor/github.com/genuinetools/img/types/types.go @@ -0,0 +1,10 @@ +package types + +const ( + // AutoBackend is automatically resolved into either overlayfs or native. + AutoBackend = "auto" + // NativeBackend defines the native backend. + NativeBackend = "native" + // OverlayFSBackend defines the overlayfs backend. + OverlayFSBackend = "overlayfs" +) diff --git a/vendor/github.com/gofrs/flock/.gitignore b/vendor/github.com/gofrs/flock/.gitignore new file mode 100644 index 00000000..daf913b1 --- /dev/null +++ b/vendor/github.com/gofrs/flock/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/gofrs/flock/.travis.yml b/vendor/github.com/gofrs/flock/.travis.yml new file mode 100644 index 00000000..b791a742 --- /dev/null +++ b/vendor/github.com/gofrs/flock/.travis.yml @@ -0,0 +1,10 @@ +language: go +go: + - 1.10.x + - 1.11.x +script: go test -v -check.vv -race ./... +sudo: false +notifications: + email: + on_success: never + on_failure: always diff --git a/vendor/github.com/gofrs/flock/LICENSE b/vendor/github.com/gofrs/flock/LICENSE new file mode 100644 index 00000000..aff7d358 --- /dev/null +++ b/vendor/github.com/gofrs/flock/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2015, Tim Heckman +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of linode-netint nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/gofrs/flock/README.md b/vendor/github.com/gofrs/flock/README.md new file mode 100644 index 00000000..7375e72e --- /dev/null +++ b/vendor/github.com/gofrs/flock/README.md @@ -0,0 +1,41 @@ +# flock +[![TravisCI Build Status](https://img.shields.io/travis/gofrs/flock/master.svg?style=flat)](https://travis-ci.org/gofrs/flock) +[![GoDoc](https://img.shields.io/badge/godoc-go--flock-blue.svg?style=flat)](https://godoc.org/github.com/gofrs/flock) +[![License](https://img.shields.io/badge/license-BSD_3--Clause-brightgreen.svg?style=flat)](https://github.com/gofrs/flock/blob/master/LICENSE) +[![Go Report Card](https://goreportcard.com/badge/github.com/gofrs/flock)](https://goreportcard.com/report/github.com/gofrs/flock) + +`flock` implements a thread-safe sync.Locker interface for file locking. It also +includes a non-blocking TryLock() function to allow locking without blocking execution. + +## License +`flock` is released under the BSD 3-Clause License. See the `LICENSE` file for more details. + +## Go Compatibility +This package makes use of the `context` package that was introduced in Go 1.7. As such, this +package has an implicit dependency on Go 1.7+. + +## Installation +``` +go get -u github.com/gofrs/flock +``` + +## Usage +```Go +import "github.com/gofrs/flock" + +fileLock := flock.New("/var/lock/go-lock.lock") + +locked, err := fileLock.TryLock() + +if err != nil { + // handle locking error +} + +if locked { + // do work + fileLock.Unlock() +} +``` + +For more detailed usage information take a look at the package API docs on +[GoDoc](https://godoc.org/github.com/gofrs/flock). 
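
The `flock.go` file vendored below also defines context-aware helpers, `TryLockContext` and `TryRLockContext`, which retry the non-blocking calls on a fixed delay until the lock is acquired, an error occurs, or the context is done. A minimal sketch of how they compose (the lock path and delays here are illustrative assumptions, not from the vendored code):

```Go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/gofrs/flock"
)

func main() {
	fileLock := flock.New("/var/lock/go-lock.lock")

	// Stop retrying after five seconds instead of blocking forever.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// TryLockContext retries TryLock every 100ms until it succeeds,
	// fails with an error, or the context is done.
	locked, err := fileLock.TryLockContext(ctx, 100*time.Millisecond)
	if err != nil {
		// err is context.DeadlineExceeded if the timeout won the race.
		fmt.Println("could not obtain lock:", err)
		return
	}
	if locked {
		defer fileLock.Unlock()
		// do work while holding the exclusive lock
	}
}
```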
diff --git a/vendor/github.com/gofrs/flock/appveyor.yml b/vendor/github.com/gofrs/flock/appveyor.yml new file mode 100644 index 00000000..6848e94b --- /dev/null +++ b/vendor/github.com/gofrs/flock/appveyor.yml @@ -0,0 +1,25 @@ +version: '{build}' + +build: false +deploy: false + +clone_folder: 'c:\gopath\src\github.com\gofrs\flock' + +environment: + GOPATH: 'c:\gopath' + GOVERSION: '1.11' + +init: + - git config --global core.autocrlf input + +install: + - rmdir c:\go /s /q + - appveyor DownloadFile https://storage.googleapis.com/golang/go%GOVERSION%.windows-amd64.msi + - msiexec /i go%GOVERSION%.windows-amd64.msi /q + - set Path=c:\go\bin;c:\gopath\bin;%Path% + - go version + - go env + +test_script: + - go get -t ./... + - go test -race -v ./... diff --git a/vendor/github.com/gofrs/flock/flock.go b/vendor/github.com/gofrs/flock/flock.go new file mode 100644 index 00000000..8f109b8a --- /dev/null +++ b/vendor/github.com/gofrs/flock/flock.go @@ -0,0 +1,127 @@ +// Copyright 2015 Tim Heckman. All rights reserved. +// Use of this source code is governed by the BSD 3-Clause +// license that can be found in the LICENSE file. + +// Package flock implements a thread-safe interface for file locking. +// It also includes a non-blocking TryLock() function to allow locking +// without blocking execution. +// +// Package flock is released under the BSD 3-Clause License. See the LICENSE file +// for more details. +// +// While using this library, remember that the locking behaviors are not +// guaranteed to be the same on each platform. For example, some UNIX-like +// operating systems will transparently convert a shared lock to an exclusive +// lock. If you Unlock() the flock from a location where you believe that you +// have the shared lock, you may accidentally drop the exclusive lock. +package flock + +import ( + "context" + "os" + "sync" + "time" +) + +// Flock is the struct type to handle file locking. All fields are unexported, +// with access to some of the fields provided by getter methods (Path() and Locked()). +type Flock struct { + path string + m sync.RWMutex + fh *os.File + l bool + r bool +} + +// New returns a new instance of *Flock. The only parameter +// it takes is the path to the desired lockfile. +func New(path string) *Flock { + return &Flock{path: path} +} + +// NewFlock returns a new instance of *Flock. The only parameter +// it takes is the path to the desired lockfile. +// +// Deprecated: Use New instead. +func NewFlock(path string) *Flock { + return New(path) +} + +// Close is equivalent to calling Unlock. +// +// This will release the lock and close the underlying file descriptor. +// It will not remove the file from disk, that's up to your application. +func (f *Flock) Close() error { + return f.Unlock() +} + +// Path returns the path as provided in NewFlock(). +func (f *Flock) Path() string { + return f.path +} + +// Locked returns the lock state (locked: true, unlocked: false). +// +// Warning: by the time you use the returned value, the state may have changed. +func (f *Flock) Locked() bool { + f.m.RLock() + defer f.m.RUnlock() + return f.l +} + +// RLocked returns the read lock state (locked: true, unlocked: false). +// +// Warning: by the time you use the returned value, the state may have changed. 
+func (f *Flock) RLocked() bool { + f.m.RLock() + defer f.m.RUnlock() + return f.r +} + +func (f *Flock) String() string { + return f.path +} + +// TryLockContext repeatedly tries to take an exclusive lock until one of the +// conditions is met: TryLock succeeds, TryLock fails with error, or Context +// Done channel is closed. +func (f *Flock) TryLockContext(ctx context.Context, retryDelay time.Duration) (bool, error) { + return tryCtx(ctx, f.TryLock, retryDelay) +} + +// TryRLockContext repeatedly tries to take a shared lock until one of the +// conditions is met: TryRLock succeeds, TryRLock fails with error, or Context +// Done channel is closed. +func (f *Flock) TryRLockContext(ctx context.Context, retryDelay time.Duration) (bool, error) { + return tryCtx(ctx, f.TryRLock, retryDelay) +} + +func tryCtx(ctx context.Context, fn func() (bool, error), retryDelay time.Duration) (bool, error) { + if ctx.Err() != nil { + return false, ctx.Err() + } + for { + if ok, err := fn(); ok || err != nil { + return ok, err + } + select { + case <-ctx.Done(): + return false, ctx.Err() + case <-time.After(retryDelay): + // try again + } + } +} + +func (f *Flock) setFh() error { + // open a new os.File instance + // create it if it doesn't exist, and open the file read-only. + fh, err := os.OpenFile(f.path, os.O_CREATE|os.O_RDONLY, os.FileMode(0600)) + if err != nil { + return err + } + + // set the filehandle on the struct + f.fh = fh + return nil +} diff --git a/vendor/github.com/gofrs/flock/flock_unix.go b/vendor/github.com/gofrs/flock/flock_unix.go new file mode 100644 index 00000000..45f71a70 --- /dev/null +++ b/vendor/github.com/gofrs/flock/flock_unix.go @@ -0,0 +1,195 @@ +// Copyright 2015 Tim Heckman. All rights reserved. +// Use of this source code is governed by the BSD 3-Clause +// license that can be found in the LICENSE file. + +// +build !windows + +package flock + +import ( + "os" + "syscall" +) + +// Lock is a blocking call to try and take an exclusive file lock. It will wait +// until it is able to obtain the exclusive file lock. It's recommended that +// TryLock() be used over this function. This function may block the ability to +// query the current Locked() or RLocked() status due to a RW-mutex lock. +// +// If we are already exclusive-locked, this function short-circuits and returns +// immediately assuming it can take the mutex lock. +// +// If the *Flock has a shared lock (RLock), this may transparently replace the +// shared lock with an exclusive lock on some UNIX-like operating systems. Be +// careful when using exclusive locks in conjunction with shared locks +// (RLock()), because calling Unlock() may accidentally release the exclusive +// lock that was once a shared lock. +func (f *Flock) Lock() error { + return f.lock(&f.l, syscall.LOCK_EX) +} + +// RLock is a blocking call to try and take a shared file lock. It will wait +// until it is able to obtain the shared file lock. It's recommended that +// TryRLock() be used over this function. This function may block the ability to +// query the current Locked() or RLocked() status due to a RW-mutex lock. +// +// If we are already shared-locked, this function short-circuits and returns +// immediately assuming it can take the mutex lock. 
+func (f *Flock) RLock() error {
+	return f.lock(&f.r, syscall.LOCK_SH)
+}
+
+func (f *Flock) lock(locked *bool, flag int) error {
+	f.m.Lock()
+	defer f.m.Unlock()
+
+	if *locked {
+		return nil
+	}
+
+	if f.fh == nil {
+		if err := f.setFh(); err != nil {
+			return err
+		}
+	}
+
+	if err := syscall.Flock(int(f.fh.Fd()), flag); err != nil {
+		shouldRetry, reopenErr := f.reopenFDOnError(err)
+		if reopenErr != nil {
+			return reopenErr
+		}
+
+		if !shouldRetry {
+			return err
+		}
+
+		if err = syscall.Flock(int(f.fh.Fd()), flag); err != nil {
+			return err
+		}
+	}
+
+	*locked = true
+	return nil
+}
+
+// Unlock is a function to unlock the file. This function takes a RW-mutex lock,
+// so while it is running the Locked() and RLocked() functions will be blocked.
+//
+// This function short-circuits if we are unlocked already. If not, it calls
+// syscall.LOCK_UN on the file and closes the file descriptor. It does not
+// remove the file from disk. It's up to your application to do so.
+//
+// Please note, if your shared lock became an exclusive lock this may
+// unintentionally drop the exclusive lock if called by the consumer that
+// believes they have a shared lock. Please see Lock() for more details.
+func (f *Flock) Unlock() error {
+	f.m.Lock()
+	defer f.m.Unlock()
+
+	// if we aren't locked or if the lockfile instance is nil
+	// just return a nil error because we are unlocked
+	if (!f.l && !f.r) || f.fh == nil {
+		return nil
+	}
+
+	// mark the file as unlocked
+	if err := syscall.Flock(int(f.fh.Fd()), syscall.LOCK_UN); err != nil {
+		return err
+	}
+
+	f.fh.Close()
+
+	f.l = false
+	f.r = false
+	f.fh = nil
+
+	return nil
+}
+
+// TryLock is the preferred function for taking an exclusive file lock. This
+// function takes an RW-mutex lock before it tries to lock the file, so there is
+// the possibility that this function may block for a short time if another
+// goroutine is trying to take any action.
+//
+// The actual file lock is non-blocking. If we are unable to get the exclusive
+// file lock, the function will return false instead of waiting for the lock. If
+// we get the lock, we also set the *Flock instance as being exclusive-locked.
+func (f *Flock) TryLock() (bool, error) {
+	return f.try(&f.l, syscall.LOCK_EX)
+}
+
+// TryRLock is the preferred function for taking a shared file lock. This
+// function takes an RW-mutex lock before it tries to lock the file, so there is
+// the possibility that this function may block for a short time if another
+// goroutine is trying to take any action.
+//
+// The actual file lock is non-blocking. If we are unable to get the shared file
+// lock, the function will return false instead of waiting for the lock. If we
+// get the lock, we also set the *Flock instance as being shared-locked.
+func (f *Flock) TryRLock() (bool, error) {
+	return f.try(&f.r, syscall.LOCK_SH)
+}
+
+func (f *Flock) try(locked *bool, flag int) (bool, error) {
+	f.m.Lock()
+	defer f.m.Unlock()
+
+	if *locked {
+		return true, nil
+	}
+
+	if f.fh == nil {
+		if err := f.setFh(); err != nil {
+			return false, err
+		}
+	}
+
+	var retried bool
+retry:
+	err := syscall.Flock(int(f.fh.Fd()), flag|syscall.LOCK_NB)
+
+	switch err {
+	case syscall.EWOULDBLOCK:
+		return false, nil
+	case nil:
+		*locked = true
+		return true, nil
+	}
+	if !retried {
+		if shouldRetry, reopenErr := f.reopenFDOnError(err); reopenErr != nil {
+			return false, reopenErr
+		} else if shouldRetry {
+			retried = true
+			goto retry
+		}
+	}
+
+	return false, err
+}
+
+// reopenFDOnError determines whether we should reopen the file handle
+// in read-write mode and try again. This comes from util-linux/sys-utils/flock.c:
+// Since Linux 3.4 (commit 55725513)
+// Probably NFSv4 where flock() is emulated by fcntl().
+func (f *Flock) reopenFDOnError(err error) (bool, error) {
+	if err != syscall.EIO && err != syscall.EBADF {
+		return false, nil
+	}
+	if st, err := f.fh.Stat(); err == nil {
+		// if the file is able to be read and written
+		if st.Mode()&0600 == 0600 {
+			f.fh.Close()
+			f.fh = nil
+
+			// reopen in read-write mode and set the filehandle
+			fh, err := os.OpenFile(f.path, os.O_CREATE|os.O_RDWR, os.FileMode(0600))
+			if err != nil {
+				return false, err
+			}
+			f.fh = fh
+			return true, nil
+		}
+	}
+
+	return false, nil
+}
diff --git a/vendor/github.com/gofrs/flock/flock_winapi.go b/vendor/github.com/gofrs/flock/flock_winapi.go
new file mode 100644
index 00000000..fe405a25
--- /dev/null
+++ b/vendor/github.com/gofrs/flock/flock_winapi.go
@@ -0,0 +1,76 @@
+// Copyright 2015 Tim Heckman. All rights reserved.
+// Use of this source code is governed by the BSD 3-Clause
+// license that can be found in the LICENSE file.
+
+// +build windows
+
+package flock
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+var (
+	kernel32, _         = syscall.LoadLibrary("kernel32.dll")
+	procLockFileEx, _   = syscall.GetProcAddress(kernel32, "LockFileEx")
+	procUnlockFileEx, _ = syscall.GetProcAddress(kernel32, "UnlockFileEx")
+)
+
+const (
+	winLockfileFailImmediately = 0x00000001
+	winLockfileExclusiveLock   = 0x00000002
+	winLockfileSharedLock      = 0x00000000
+)
+
+// Use of 0x00000000 for the shared lock is a guess based on some of the MS
+// Windows `LockFileEx` docs, which document the `LOCKFILE_EXCLUSIVE_LOCK` flag as:
+//
+// > The function requests an exclusive lock. Otherwise, it requests a shared
+// > lock.
+//
+// https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx
+
+func lockFileEx(handle syscall.Handle, flags uint32, reserved uint32, numberOfBytesToLockLow uint32, numberOfBytesToLockHigh uint32, offset *syscall.Overlapped) (bool, syscall.Errno) {
+	r1, _, errNo := syscall.Syscall6(
+		uintptr(procLockFileEx),
+		6,
+		uintptr(handle),
+		uintptr(flags),
+		uintptr(reserved),
+		uintptr(numberOfBytesToLockLow),
+		uintptr(numberOfBytesToLockHigh),
+		uintptr(unsafe.Pointer(offset)))
+
+	if r1 != 1 {
+		if errNo == 0 {
+			return false, syscall.EINVAL
+		}
+
+		return false, errNo
+	}
+
+	return true, 0
+}
+
+func unlockFileEx(handle syscall.Handle, reserved uint32, numberOfBytesToLockLow uint32, numberOfBytesToLockHigh uint32, offset *syscall.Overlapped) (bool, syscall.Errno) {
+	r1, _, errNo := syscall.Syscall6(
+		uintptr(procUnlockFileEx),
+		5,
+		uintptr(handle),
+		uintptr(reserved),
+		uintptr(numberOfBytesToLockLow),
+		uintptr(numberOfBytesToLockHigh),
+		uintptr(unsafe.Pointer(offset)),
+		0)
+
+	if r1 != 1 {
+		if errNo == 0 {
+			return false, syscall.EINVAL
+		}
+
+		return false, errNo
+	}
+
+	return true, 0
+}
diff --git a/vendor/github.com/gofrs/flock/flock_windows.go b/vendor/github.com/gofrs/flock/flock_windows.go
new file mode 100644
index 00000000..9f4a5f10
--- /dev/null
+++ b/vendor/github.com/gofrs/flock/flock_windows.go
@@ -0,0 +1,140 @@
+// Copyright 2015 Tim Heckman. All rights reserved.
+// Use of this source code is governed by the BSD 3-Clause
+// license that can be found in the LICENSE file.
+
+package flock
+
+import (
+	"syscall"
+)
+
+// ErrorLockViolation is the error code returned from the Windows syscall when a
+// lock would block and you ask to fail immediately.
+const ErrorLockViolation syscall.Errno = 0x21 // 33
+
+// Lock is a blocking call to try and take an exclusive file lock. It will wait
+// until it is able to obtain the exclusive file lock. It's recommended that
+// TryLock() be used over this function. This function may block the ability to
+// query the current Locked() or RLocked() status due to a RW-mutex lock.
+//
+// If we are already locked, this function short-circuits and returns
+// immediately assuming it can take the mutex lock.
+func (f *Flock) Lock() error {
+	return f.lock(&f.l, winLockfileExclusiveLock)
+}
+
+// RLock is a blocking call to try and take a shared file lock. It will wait
+// until it is able to obtain the shared file lock. It's recommended that
+// TryRLock() be used over this function. This function may block the ability to
+// query the current Locked() or RLocked() status due to a RW-mutex lock.
+//
+// If we are already locked, this function short-circuits and returns
+// immediately assuming it can take the mutex lock.
+func (f *Flock) RLock() error {
+	return f.lock(&f.r, winLockfileSharedLock)
+}
+
+func (f *Flock) lock(locked *bool, flag uint32) error {
+	f.m.Lock()
+	defer f.m.Unlock()
+
+	if *locked {
+		return nil
+	}
+
+	if f.fh == nil {
+		if err := f.setFh(); err != nil {
+			return err
+		}
+	}
+
+	if _, errNo := lockFileEx(syscall.Handle(f.fh.Fd()), flag, 0, 1, 0, &syscall.Overlapped{}); errNo > 0 {
+		return errNo
+	}
+
+	*locked = true
+	return nil
+}
+
+// Unlock is a function to unlock the file. This function takes a RW-mutex lock,
+// so while it is running the Locked() and RLocked() functions will be blocked.
+//
+// This function short-circuits if we are unlocked already. If not, it calls
+// UnlockFileEx() on the file and closes the file descriptor. It does not remove
+// the file from disk. It's up to your application to do so.
+func (f *Flock) Unlock() error {
+	f.m.Lock()
+	defer f.m.Unlock()
+
+	// if we aren't locked or if the lockfile instance is nil
+	// just return a nil error because we are unlocked
+	if (!f.l && !f.r) || f.fh == nil {
+		return nil
+	}
+
+	// mark the file as unlocked
+	if _, errNo := unlockFileEx(syscall.Handle(f.fh.Fd()), 0, 1, 0, &syscall.Overlapped{}); errNo > 0 {
+		return errNo
+	}
+
+	f.fh.Close()
+
+	f.l = false
+	f.r = false
+	f.fh = nil
+
+	return nil
+}
+
+// TryLock is the preferred function for taking an exclusive file lock. This
+// function does take a RW-mutex lock before it tries to lock the file, so there
+// is the possibility that this function may block for a short time if another
+// goroutine is trying to take any action.
+//
+// The actual file lock is non-blocking. If we are unable to get the exclusive
+// file lock, the function will return false instead of waiting for the lock. If
+// we get the lock, we also set the *Flock instance as being exclusive-locked.
+func (f *Flock) TryLock() (bool, error) {
+	return f.try(&f.l, winLockfileExclusiveLock)
+}
+
+// TryRLock is the preferred function for taking a shared file lock. This
+// function does take a RW-mutex lock before it tries to lock the file, so there
+// is the possibility that this function may block for a short time if another
+// goroutine is trying to take any action.
+//
+// The actual file lock is non-blocking. If we are unable to get the shared file
+// lock, the function will return false instead of waiting for the lock. If we
+// get the lock, we also set the *Flock instance as being shared-locked.
+func (f *Flock) TryRLock() (bool, error) {
+	return f.try(&f.r, winLockfileSharedLock)
+}
+
+func (f *Flock) try(locked *bool, flag uint32) (bool, error) {
+	f.m.Lock()
+	defer f.m.Unlock()
+
+	if *locked {
+		return true, nil
+	}
+
+	if f.fh == nil {
+		if err := f.setFh(); err != nil {
+			return false, err
+		}
+	}
+
+	_, errNo := lockFileEx(syscall.Handle(f.fh.Fd()), flag|winLockfileFailImmediately, 0, 1, 0, &syscall.Overlapped{})
+
+	if errNo > 0 {
+		if errNo == ErrorLockViolation || errNo == syscall.ERROR_IO_PENDING {
+			return false, nil
+		}
+
+		return false, errNo
+	}
+
+	*locked = true
+
+	return true, nil
+}
diff --git a/vendor/github.com/gogo/googleapis/LICENSE b/vendor/github.com/gogo/googleapis/LICENSE
new file mode 100644
index 00000000..d6f85b18
--- /dev/null
+++ b/vendor/github.com/gogo/googleapis/LICENSE
@@ -0,0 +1,203 @@
+Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2015, Google Inc + Copyright 2018, GoGo Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/vendor/github.com/gogo/googleapis/google/rpc/code.pb.go b/vendor/github.com/gogo/googleapis/google/rpc/code.pb.go new file mode 100644 index 00000000..1f4d4d43 --- /dev/null +++ b/vendor/github.com/gogo/googleapis/google/rpc/code.pb.go @@ -0,0 +1,257 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: google/rpc/code.proto + +package rpc + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + math "math" + strconv "strconv" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// The canonical error codes for Google APIs. +// +// +// Sometimes multiple error codes may apply. Services should return +// the most specific error code that applies. For example, prefer +// `OUT_OF_RANGE` over `FAILED_PRECONDITION` if both codes apply. +// Similarly prefer `NOT_FOUND` or `ALREADY_EXISTS` over `FAILED_PRECONDITION`. +type Code int32 + +const ( + // Not an error; returned on success + // + // HTTP Mapping: 200 OK + OK Code = 0 + // The operation was cancelled, typically by the caller. + // + // HTTP Mapping: 499 Client Closed Request + CANCELLED Code = 1 + // Unknown error. For example, this error may be returned when + // a `Status` value received from another address space belongs to + // an error space that is not known in this address space. Also + // errors raised by APIs that do not return enough error information + // may be converted to this error. + // + // HTTP Mapping: 500 Internal Server Error + UNKNOWN Code = 2 + // The client specified an invalid argument. Note that this differs + // from `FAILED_PRECONDITION`. `INVALID_ARGUMENT` indicates arguments + // that are problematic regardless of the state of the system + // (e.g., a malformed file name). + // + // HTTP Mapping: 400 Bad Request + INVALID_ARGUMENT Code = 3 + // The deadline expired before the operation could complete. For operations + // that change the state of the system, this error may be returned + // even if the operation has completed successfully. For example, a + // successful response from a server could have been delayed long + // enough for the deadline to expire. + // + // HTTP Mapping: 504 Gateway Timeout + DEADLINE_EXCEEDED Code = 4 + // Some requested entity (e.g., file or directory) was not found. + // + // Note to server developers: if a request is denied for an entire class + // of users, such as gradual feature rollout or undocumented whitelist, + // `NOT_FOUND` may be used. If a request is denied for some users within + // a class of users, such as user-based access control, `PERMISSION_DENIED` + // must be used. + // + // HTTP Mapping: 404 Not Found + NOT_FOUND Code = 5 + // The entity that a client attempted to create (e.g., file or directory) + // already exists. + // + // HTTP Mapping: 409 Conflict + ALREADY_EXISTS Code = 6 + // The caller does not have permission to execute the specified + // operation. `PERMISSION_DENIED` must not be used for rejections + // caused by exhausting some resource (use `RESOURCE_EXHAUSTED` + // instead for those errors). `PERMISSION_DENIED` must not be + // used if the caller can not be identified (use `UNAUTHENTICATED` + // instead for those errors). This error code does not imply the + // request is valid or the requested entity exists or satisfies + // other pre-conditions. + // + // HTTP Mapping: 403 Forbidden + PERMISSION_DENIED Code = 7 + // The request does not have valid authentication credentials for the + // operation. 
+ // + // HTTP Mapping: 401 Unauthorized + UNAUTHENTICATED Code = 16 + // Some resource has been exhausted, perhaps a per-user quota, or + // perhaps the entire file system is out of space. + // + // HTTP Mapping: 429 Too Many Requests + RESOURCE_EXHAUSTED Code = 8 + // The operation was rejected because the system is not in a state + // required for the operation's execution. For example, the directory + // to be deleted is non-empty, an rmdir operation is applied to + // a non-directory, etc. + // + // Service implementors can use the following guidelines to decide + // between `FAILED_PRECONDITION`, `ABORTED`, and `UNAVAILABLE`: + // (a) Use `UNAVAILABLE` if the client can retry just the failing call. + // (b) Use `ABORTED` if the client should retry at a higher level + // (e.g., when a client-specified test-and-set fails, indicating the + // client should restart a read-modify-write sequence). + // (c) Use `FAILED_PRECONDITION` if the client should not retry until + // the system state has been explicitly fixed. E.g., if an "rmdir" + // fails because the directory is non-empty, `FAILED_PRECONDITION` + // should be returned since the client should not retry unless + // the files are deleted from the directory. + // + // HTTP Mapping: 400 Bad Request + FAILED_PRECONDITION Code = 9 + // The operation was aborted, typically due to a concurrency issue such as + // a sequencer check failure or transaction abort. + // + // See the guidelines above for deciding between `FAILED_PRECONDITION`, + // `ABORTED`, and `UNAVAILABLE`. + // + // HTTP Mapping: 409 Conflict + ABORTED Code = 10 + // The operation was attempted past the valid range. E.g., seeking or + // reading past end-of-file. + // + // Unlike `INVALID_ARGUMENT`, this error indicates a problem that may + // be fixed if the system state changes. For example, a 32-bit file + // system will generate `INVALID_ARGUMENT` if asked to read at an + // offset that is not in the range [0,2^32-1], but it will generate + // `OUT_OF_RANGE` if asked to read from an offset past the current + // file size. + // + // There is a fair bit of overlap between `FAILED_PRECONDITION` and + // `OUT_OF_RANGE`. We recommend using `OUT_OF_RANGE` (the more specific + // error) when it applies so that callers who are iterating through + // a space can easily look for an `OUT_OF_RANGE` error to detect when + // they are done. + // + // HTTP Mapping: 400 Bad Request + OUT_OF_RANGE Code = 11 + // The operation is not implemented or is not supported/enabled in this + // service. + // + // HTTP Mapping: 501 Not Implemented + UNIMPLEMENTED Code = 12 + // Internal errors. This means that some invariants expected by the + // underlying system have been broken. This error code is reserved + // for serious errors. + // + // HTTP Mapping: 500 Internal Server Error + INTERNAL Code = 13 + // The service is currently unavailable. This is most likely a + // transient condition, which can be corrected by retrying with + // a backoff. + // + // See the guidelines above for deciding between `FAILED_PRECONDITION`, + // `ABORTED`, and `UNAVAILABLE`. + // + // HTTP Mapping: 503 Service Unavailable + UNAVAILABLE Code = 14 + // Unrecoverable data loss or corruption. 
+ // + // HTTP Mapping: 500 Internal Server Error + DATA_LOSS Code = 15 +) + +var Code_name = map[int32]string{ + 0: "OK", + 1: "CANCELLED", + 2: "UNKNOWN", + 3: "INVALID_ARGUMENT", + 4: "DEADLINE_EXCEEDED", + 5: "NOT_FOUND", + 6: "ALREADY_EXISTS", + 7: "PERMISSION_DENIED", + 16: "UNAUTHENTICATED", + 8: "RESOURCE_EXHAUSTED", + 9: "FAILED_PRECONDITION", + 10: "ABORTED", + 11: "OUT_OF_RANGE", + 12: "UNIMPLEMENTED", + 13: "INTERNAL", + 14: "UNAVAILABLE", + 15: "DATA_LOSS", +} + +var Code_value = map[string]int32{ + "OK": 0, + "CANCELLED": 1, + "UNKNOWN": 2, + "INVALID_ARGUMENT": 3, + "DEADLINE_EXCEEDED": 4, + "NOT_FOUND": 5, + "ALREADY_EXISTS": 6, + "PERMISSION_DENIED": 7, + "UNAUTHENTICATED": 16, + "RESOURCE_EXHAUSTED": 8, + "FAILED_PRECONDITION": 9, + "ABORTED": 10, + "OUT_OF_RANGE": 11, + "UNIMPLEMENTED": 12, + "INTERNAL": 13, + "UNAVAILABLE": 14, + "DATA_LOSS": 15, +} + +func (Code) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_fe593a732623ccf0, []int{0} +} + +func init() { + proto.RegisterEnum("google.rpc.Code", Code_name, Code_value) +} + +func init() { proto.RegisterFile("google/rpc/code.proto", fileDescriptor_fe593a732623ccf0) } + +var fileDescriptor_fe593a732623ccf0 = []byte{ + // 393 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x44, 0x91, 0x3d, 0x6e, 0x13, 0x41, + 0x14, 0xc7, 0x3d, 0x76, 0x70, 0xe2, 0xf1, 0xd7, 0xcb, 0x84, 0x40, 0x37, 0x07, 0xa0, 0x70, 0x0a, + 0x4e, 0xf0, 0xbc, 0xf3, 0x9c, 0x8c, 0x32, 0x7e, 0xb3, 0x9a, 0x9d, 0x09, 0x01, 0x21, 0xad, 0xc4, + 0xc6, 0x4a, 0x03, 0x5a, 0xcb, 0xe2, 0x00, 0x9c, 0x85, 0x8a, 0x1b, 0x70, 0x85, 0x94, 0x29, 0x29, + 0xf1, 0xa6, 0xa1, 0x74, 0x49, 0x89, 0x06, 0x0a, 0xda, 0x9f, 0xde, 0xc7, 0xff, 0x43, 0x9e, 0xdf, + 0xb7, 0xed, 0xfd, 0xc7, 0xcd, 0xc5, 0x6e, 0xdb, 0x5c, 0x34, 0xed, 0xdd, 0x66, 0xb1, 0xdd, 0xb5, + 0x9f, 0x5b, 0x25, 0xff, 0xe1, 0xc5, 0x6e, 0xdb, 0xbc, 0xfa, 0xde, 0x97, 0x47, 0x45, 0x7b, 0xb7, + 0x51, 0x43, 0xd9, 0xf7, 0xd7, 0xd0, 0x53, 0x53, 0x39, 0x2a, 0x90, 0x0b, 0x72, 0x8e, 0x0c, 0x08, + 0x35, 0x96, 0xc7, 0x89, 0xaf, 0xd9, 0xbf, 0x61, 0xe8, 0xab, 0xe7, 0x12, 0x2c, 0xdf, 0xa0, 0xb3, + 0xa6, 0xc6, 0x70, 0x99, 0xd6, 0xc4, 0x11, 0x06, 0xea, 0x5c, 0x9e, 0x1a, 0x42, 0xe3, 0x2c, 0x53, + 0x4d, 0xb7, 0x05, 0x91, 0x21, 0x03, 0x47, 0xf9, 0x10, 0xfb, 0x58, 0xaf, 0x7c, 0x62, 0x03, 0xcf, + 0x94, 0x92, 0x33, 0x74, 0x81, 0xd0, 0xbc, 0xad, 0xe9, 0xd6, 0x56, 0xb1, 0x82, 0x61, 0xde, 0x2c, + 0x29, 0xac, 0x6d, 0x55, 0x59, 0xcf, 0xb5, 0x21, 0xb6, 0x64, 0xe0, 0x58, 0x9d, 0xc9, 0x79, 0x62, + 0x4c, 0xf1, 0x8a, 0x38, 0xda, 0x02, 0x23, 0x19, 0x00, 0xf5, 0x42, 0xaa, 0x40, 0x95, 0x4f, 0xa1, + 0xc8, 0x5f, 0xae, 0x30, 0x55, 0x99, 0x9f, 0xa8, 0x97, 0xf2, 0x6c, 0x85, 0xd6, 0x91, 0xa9, 0xcb, + 0x40, 0x85, 0x67, 0x63, 0xa3, 0xf5, 0x0c, 0xa3, 0xac, 0x1c, 0x97, 0x3e, 0xe4, 0x29, 0xa9, 0x40, + 0x4e, 0x7c, 0x8a, 0xb5, 0x5f, 0xd5, 0x01, 0xf9, 0x92, 0x60, 0xac, 0x4e, 0xe5, 0x34, 0xb1, 0x5d, + 0x97, 0x8e, 0xb2, 0x0d, 0x32, 0x30, 0x51, 0x13, 0x79, 0x62, 0x39, 0x52, 0x60, 0x74, 0x30, 0x55, + 0x73, 0x39, 0x4e, 0x8c, 0x37, 0x68, 0x1d, 0x2e, 0x1d, 0xc1, 0x2c, 0x1b, 0x32, 0x18, 0xb1, 0x76, + 0xbe, 0xaa, 0x60, 0xbe, 0x7c, 0xff, 0xb8, 0xd7, 0xbd, 0x1f, 0x7b, 0xdd, 0x3b, 0xec, 0xb5, 0xf8, + 0xbd, 0xd7, 0xe2, 0x4b, 0xa7, 0xc5, 0xb7, 0x4e, 0x8b, 0x87, 0x4e, 0x8b, 0xc7, 0x4e, 0x8b, 0x9f, + 0x9d, 0x16, 0xbf, 0x3a, 0xdd, 0x3b, 0x64, 0xfe, 0xa4, 0xc5, 0xc3, 0x93, 0x16, 0x72, 0xd6, 0xb4, + 0x9f, 0x16, 0xff, 0xf3, 0x5f, 0x8e, 0x72, 0xf8, 0x65, 0xae, 0xa5, 0x14, 0xef, 0x06, 0xbb, 0x6d, + 0xf3, 0xb5, 0x3f, 0x08, 0x65, 0xf1, 
0x61, 0xf8, 0xb7, 0xaa, 0xd7, 0x7f, 0x02, 0x00, 0x00, 0xff, + 0xff, 0x03, 0xd4, 0x27, 0xff, 0xc3, 0x01, 0x00, 0x00, +} + +func (x Code) String() string { + s, ok := Code_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} diff --git a/vendor/github.com/gogo/googleapis/google/rpc/code.proto b/vendor/github.com/gogo/googleapis/google/rpc/code.proto new file mode 100644 index 00000000..0540a4f6 --- /dev/null +++ b/vendor/github.com/gogo/googleapis/google/rpc/code.proto @@ -0,0 +1,185 @@ +// Copyright 2017 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.rpc; + +option go_package = "rpc"; +option java_multiple_files = true; +option java_outer_classname = "CodeProto"; +option java_package = "com.google.rpc"; +option objc_class_prefix = "RPC"; + +// The canonical error codes for Google APIs. +// +// +// Sometimes multiple error codes may apply. Services should return +// the most specific error code that applies. For example, prefer +// `OUT_OF_RANGE` over `FAILED_PRECONDITION` if both codes apply. +// Similarly prefer `NOT_FOUND` or `ALREADY_EXISTS` over `FAILED_PRECONDITION`. +enum Code { + // Not an error; returned on success + // + // HTTP Mapping: 200 OK + OK = 0; + + // The operation was cancelled, typically by the caller. + // + // HTTP Mapping: 499 Client Closed Request + CANCELLED = 1; + + // Unknown error. For example, this error may be returned when + // a `Status` value received from another address space belongs to + // an error space that is not known in this address space. Also + // errors raised by APIs that do not return enough error information + // may be converted to this error. + // + // HTTP Mapping: 500 Internal Server Error + UNKNOWN = 2; + + // The client specified an invalid argument. Note that this differs + // from `FAILED_PRECONDITION`. `INVALID_ARGUMENT` indicates arguments + // that are problematic regardless of the state of the system + // (e.g., a malformed file name). + // + // HTTP Mapping: 400 Bad Request + INVALID_ARGUMENT = 3; + + // The deadline expired before the operation could complete. For operations + // that change the state of the system, this error may be returned + // even if the operation has completed successfully. For example, a + // successful response from a server could have been delayed long + // enough for the deadline to expire. + // + // HTTP Mapping: 504 Gateway Timeout + DEADLINE_EXCEEDED = 4; + + // Some requested entity (e.g., file or directory) was not found. + // + // Note to server developers: if a request is denied for an entire class + // of users, such as gradual feature rollout or undocumented whitelist, + // `NOT_FOUND` may be used. If a request is denied for some users within + // a class of users, such as user-based access control, `PERMISSION_DENIED` + // must be used. + // + // HTTP Mapping: 404 Not Found + NOT_FOUND = 5; + + // The entity that a client attempted to create (e.g., file or directory) + // already exists. 
+ // + // HTTP Mapping: 409 Conflict + ALREADY_EXISTS = 6; + + // The caller does not have permission to execute the specified + // operation. `PERMISSION_DENIED` must not be used for rejections + // caused by exhausting some resource (use `RESOURCE_EXHAUSTED` + // instead for those errors). `PERMISSION_DENIED` must not be + // used if the caller can not be identified (use `UNAUTHENTICATED` + // instead for those errors). This error code does not imply the + // request is valid or the requested entity exists or satisfies + // other pre-conditions. + // + // HTTP Mapping: 403 Forbidden + PERMISSION_DENIED = 7; + + // The request does not have valid authentication credentials for the + // operation. + // + // HTTP Mapping: 401 Unauthorized + UNAUTHENTICATED = 16; + + // Some resource has been exhausted, perhaps a per-user quota, or + // perhaps the entire file system is out of space. + // + // HTTP Mapping: 429 Too Many Requests + RESOURCE_EXHAUSTED = 8; + + // The operation was rejected because the system is not in a state + // required for the operation's execution. For example, the directory + // to be deleted is non-empty, an rmdir operation is applied to + // a non-directory, etc. + // + // Service implementors can use the following guidelines to decide + // between `FAILED_PRECONDITION`, `ABORTED`, and `UNAVAILABLE`: + // (a) Use `UNAVAILABLE` if the client can retry just the failing call. + // (b) Use `ABORTED` if the client should retry at a higher level + // (e.g., when a client-specified test-and-set fails, indicating the + // client should restart a read-modify-write sequence). + // (c) Use `FAILED_PRECONDITION` if the client should not retry until + // the system state has been explicitly fixed. E.g., if an "rmdir" + // fails because the directory is non-empty, `FAILED_PRECONDITION` + // should be returned since the client should not retry unless + // the files are deleted from the directory. + // + // HTTP Mapping: 400 Bad Request + FAILED_PRECONDITION = 9; + + // The operation was aborted, typically due to a concurrency issue such as + // a sequencer check failure or transaction abort. + // + // See the guidelines above for deciding between `FAILED_PRECONDITION`, + // `ABORTED`, and `UNAVAILABLE`. + // + // HTTP Mapping: 409 Conflict + ABORTED = 10; + + // The operation was attempted past the valid range. E.g., seeking or + // reading past end-of-file. + // + // Unlike `INVALID_ARGUMENT`, this error indicates a problem that may + // be fixed if the system state changes. For example, a 32-bit file + // system will generate `INVALID_ARGUMENT` if asked to read at an + // offset that is not in the range [0,2^32-1], but it will generate + // `OUT_OF_RANGE` if asked to read from an offset past the current + // file size. + // + // There is a fair bit of overlap between `FAILED_PRECONDITION` and + // `OUT_OF_RANGE`. We recommend using `OUT_OF_RANGE` (the more specific + // error) when it applies so that callers who are iterating through + // a space can easily look for an `OUT_OF_RANGE` error to detect when + // they are done. + // + // HTTP Mapping: 400 Bad Request + OUT_OF_RANGE = 11; + + // The operation is not implemented or is not supported/enabled in this + // service. + // + // HTTP Mapping: 501 Not Implemented + UNIMPLEMENTED = 12; + + // Internal errors. This means that some invariants expected by the + // underlying system have been broken. This error code is reserved + // for serious errors. 
+ // + // HTTP Mapping: 500 Internal Server Error + INTERNAL = 13; + + // The service is currently unavailable. This is most likely a + // transient condition, which can be corrected by retrying with + // a backoff. + // + // See the guidelines above for deciding between `FAILED_PRECONDITION`, + // `ABORTED`, and `UNAVAILABLE`. + // + // HTTP Mapping: 503 Service Unavailable + UNAVAILABLE = 14; + + // Unrecoverable data loss or corruption. + // + // HTTP Mapping: 500 Internal Server Error + DATA_LOSS = 15; +} diff --git a/vendor/github.com/gogo/googleapis/google/rpc/error_details.pb.go b/vendor/github.com/gogo/googleapis/google/rpc/error_details.pb.go new file mode 100644 index 00000000..07d251d4 --- /dev/null +++ b/vendor/github.com/gogo/googleapis/google/rpc/error_details.pb.go @@ -0,0 +1,4904 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: google/rpc/error_details.proto + +package rpc + +import ( + bytes "bytes" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + types "github.com/gogo/protobuf/types" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Describes when the clients can retry a failed request. Clients could ignore +// the recommendation here or retry when this information is missing from error +// responses. +// +// It's always recommended that clients should use exponential backoff when +// retrying. +// +// Clients should wait until `retry_delay` amount of time has passed since +// receiving the error response before retrying. If retrying requests also +// fail, clients should use an exponential backoff scheme to gradually increase +// the delay between retries based on `retry_delay`, until either a maximum +// number of retries have been reached or a maximum retry delay cap has been +// reached. +type RetryInfo struct { + // Clients should wait at least this long between retrying the same request.
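+ //
+ // As a non-authoritative sketch, a client honoring this field might back
+ // off like the following (`info` is an assumed *RetryInfo, and `call` and
+ // `maxAttempts` are illustrative names; DurationFromProto is from
+ // github.com/gogo/protobuf/types):
+ //
+ //	delay, convErr := types.DurationFromProto(info.GetRetryDelay())
+ //	if convErr != nil {
+ //		delay = time.Second // assumed fallback when no usable delay is given
+ //	}
+ //	for attempt := 0; attempt < maxAttempts; attempt++ {
+ //		time.Sleep(delay)
+ //		if err := call(); err == nil {
+ //			break
+ //		}
+ //		delay *= 2 // grow the delay between retries based on retry_delay
+ //	}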
+ RetryDelay *types.Duration `protobuf:"bytes,1,opt,name=retry_delay,json=retryDelay,proto3" json:"retry_delay,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RetryInfo) Reset() { *m = RetryInfo{} } +func (*RetryInfo) ProtoMessage() {} +func (*RetryInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_851816e4d6b6361a, []int{0} +} +func (m *RetryInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RetryInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RetryInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RetryInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_RetryInfo.Merge(m, src) +} +func (m *RetryInfo) XXX_Size() int { + return m.Size() +} +func (m *RetryInfo) XXX_DiscardUnknown() { + xxx_messageInfo_RetryInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_RetryInfo proto.InternalMessageInfo + +func (m *RetryInfo) GetRetryDelay() *types.Duration { + if m != nil { + return m.RetryDelay + } + return nil +} + +func (*RetryInfo) XXX_MessageName() string { + return "google.rpc.RetryInfo" +} + +// Describes additional debugging info. +type DebugInfo struct { + // The stack trace entries indicating where the error occurred. + StackEntries []string `protobuf:"bytes,1,rep,name=stack_entries,json=stackEntries,proto3" json:"stack_entries,omitempty"` + // Additional debugging information provided by the server. + Detail string `protobuf:"bytes,2,opt,name=detail,proto3" json:"detail,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DebugInfo) Reset() { *m = DebugInfo{} } +func (*DebugInfo) ProtoMessage() {} +func (*DebugInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_851816e4d6b6361a, []int{1} +} +func (m *DebugInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DebugInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DebugInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DebugInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_DebugInfo.Merge(m, src) +} +func (m *DebugInfo) XXX_Size() int { + return m.Size() +} +func (m *DebugInfo) XXX_DiscardUnknown() { + xxx_messageInfo_DebugInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_DebugInfo proto.InternalMessageInfo + +func (m *DebugInfo) GetStackEntries() []string { + if m != nil { + return m.StackEntries + } + return nil +} + +func (m *DebugInfo) GetDetail() string { + if m != nil { + return m.Detail + } + return "" +} + +func (*DebugInfo) XXX_MessageName() string { + return "google.rpc.DebugInfo" +} + +// Describes how a quota check failed. +// +// For example if a daily limit was exceeded for the calling project, +// a service could respond with a QuotaFailure detail containing the project +// id and the description of the quota limit that was exceeded. If the +// calling project hasn't enabled the service in the developer console, then +// a service could respond with the project id and set `service_disabled` +// to true. +// +// Also see RetryDetail and Help types for other details about handling a +// quota failure. 
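+//
+// For illustration only, a client might dig this detail out of a gRPC error
+// roughly as follows (a sketch that assumes google.golang.org/grpc/status,
+// an `err` returned from a gRPC call, and that the decoded details are these
+// gogo-generated types):
+//
+//	st := status.Convert(err)
+//	for _, d := range st.Details() {
+//		if qf, ok := d.(*QuotaFailure); ok {
+//			for _, v := range qf.GetViolations() {
+//				log.Printf("quota violation on %q: %s", v.GetSubject(), v.GetDescription())
+//			}
+//		}
+//	}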
+type QuotaFailure struct { + // Describes all quota violations. + Violations []*QuotaFailure_Violation `protobuf:"bytes,1,rep,name=violations,proto3" json:"violations,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *QuotaFailure) Reset() { *m = QuotaFailure{} } +func (*QuotaFailure) ProtoMessage() {} +func (*QuotaFailure) Descriptor() ([]byte, []int) { + return fileDescriptor_851816e4d6b6361a, []int{2} +} +func (m *QuotaFailure) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QuotaFailure) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QuotaFailure.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QuotaFailure) XXX_Merge(src proto.Message) { + xxx_messageInfo_QuotaFailure.Merge(m, src) +} +func (m *QuotaFailure) XXX_Size() int { + return m.Size() +} +func (m *QuotaFailure) XXX_DiscardUnknown() { + xxx_messageInfo_QuotaFailure.DiscardUnknown(m) +} + +var xxx_messageInfo_QuotaFailure proto.InternalMessageInfo + +func (m *QuotaFailure) GetViolations() []*QuotaFailure_Violation { + if m != nil { + return m.Violations + } + return nil +} + +func (*QuotaFailure) XXX_MessageName() string { + return "google.rpc.QuotaFailure" +} + +// A message type used to describe a single quota violation. For example, a +// daily quota or a custom quota that was exceeded. +type QuotaFailure_Violation struct { + // The subject on which the quota check failed. + // For example, "clientip:<ip address of client>" or "project:<Google developer project id>". + Subject string `protobuf:"bytes,1,opt,name=subject,proto3" json:"subject,omitempty"` + // A description of how the quota check failed. Clients can use this + // description to find more about the quota configuration in the service's + // public documentation, or find the relevant quota limit to adjust through + // developer console. + // + // For example: "Service disabled" or "Daily Limit for read operations + // exceeded".
+ Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *QuotaFailure_Violation) Reset() { *m = QuotaFailure_Violation{} } +func (*QuotaFailure_Violation) ProtoMessage() {} +func (*QuotaFailure_Violation) Descriptor() ([]byte, []int) { + return fileDescriptor_851816e4d6b6361a, []int{2, 0} +} +func (m *QuotaFailure_Violation) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QuotaFailure_Violation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QuotaFailure_Violation.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QuotaFailure_Violation) XXX_Merge(src proto.Message) { + xxx_messageInfo_QuotaFailure_Violation.Merge(m, src) +} +func (m *QuotaFailure_Violation) XXX_Size() int { + return m.Size() +} +func (m *QuotaFailure_Violation) XXX_DiscardUnknown() { + xxx_messageInfo_QuotaFailure_Violation.DiscardUnknown(m) +} + +var xxx_messageInfo_QuotaFailure_Violation proto.InternalMessageInfo + +func (m *QuotaFailure_Violation) GetSubject() string { + if m != nil { + return m.Subject + } + return "" +} + +func (m *QuotaFailure_Violation) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (*QuotaFailure_Violation) XXX_MessageName() string { + return "google.rpc.QuotaFailure.Violation" +} + +// Describes what preconditions have failed. +// +// For example, if an RPC failed because it required the Terms of Service to be +// acknowledged, it could list the terms of service violation in the +// PreconditionFailure message. +type PreconditionFailure struct { + // Describes all precondition violations. + Violations []*PreconditionFailure_Violation `protobuf:"bytes,1,rep,name=violations,proto3" json:"violations,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PreconditionFailure) Reset() { *m = PreconditionFailure{} } +func (*PreconditionFailure) ProtoMessage() {} +func (*PreconditionFailure) Descriptor() ([]byte, []int) { + return fileDescriptor_851816e4d6b6361a, []int{3} +} +func (m *PreconditionFailure) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PreconditionFailure) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PreconditionFailure.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PreconditionFailure) XXX_Merge(src proto.Message) { + xxx_messageInfo_PreconditionFailure.Merge(m, src) +} +func (m *PreconditionFailure) XXX_Size() int { + return m.Size() +} +func (m *PreconditionFailure) XXX_DiscardUnknown() { + xxx_messageInfo_PreconditionFailure.DiscardUnknown(m) +} + +var xxx_messageInfo_PreconditionFailure proto.InternalMessageInfo + +func (m *PreconditionFailure) GetViolations() []*PreconditionFailure_Violation { + if m != nil { + return m.Violations + } + return nil +} + +func (*PreconditionFailure) XXX_MessageName() string { + return "google.rpc.PreconditionFailure" +} + +// A message type used to describe a single precondition failure. 
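+//
+// For example, a Terms-of-Service check might be reported with a violation
+// like this sketch (the values are taken from the field comments below):
+//
+//	&PreconditionFailure_Violation{
+//		Type:        "TOS",
+//		Subject:     "google.com/cloud",
+//		Description: "Terms of service not accepted",
+//	}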
+type PreconditionFailure_Violation struct { + // The type of PreconditionFailure. We recommend using a service-specific + // enum type to define the supported precondition violation types. For + // example, "TOS" for "Terms of Service violation". + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // The subject, relative to the type, that failed. + // For example, "google.com/cloud" relative to the "TOS" type would + // indicate which terms of service is being referenced. + Subject string `protobuf:"bytes,2,opt,name=subject,proto3" json:"subject,omitempty"` + // A description of how the precondition failed. Developers can use this + // description to understand how to fix the failure. + // + // For example: "Terms of service not accepted". + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PreconditionFailure_Violation) Reset() { *m = PreconditionFailure_Violation{} } +func (*PreconditionFailure_Violation) ProtoMessage() {} +func (*PreconditionFailure_Violation) Descriptor() ([]byte, []int) { + return fileDescriptor_851816e4d6b6361a, []int{3, 0} +} +func (m *PreconditionFailure_Violation) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PreconditionFailure_Violation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PreconditionFailure_Violation.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PreconditionFailure_Violation) XXX_Merge(src proto.Message) { + xxx_messageInfo_PreconditionFailure_Violation.Merge(m, src) +} +func (m *PreconditionFailure_Violation) XXX_Size() int { + return m.Size() +} +func (m *PreconditionFailure_Violation) XXX_DiscardUnknown() { + xxx_messageInfo_PreconditionFailure_Violation.DiscardUnknown(m) +} + +var xxx_messageInfo_PreconditionFailure_Violation proto.InternalMessageInfo + +func (m *PreconditionFailure_Violation) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *PreconditionFailure_Violation) GetSubject() string { + if m != nil { + return m.Subject + } + return "" +} + +func (m *PreconditionFailure_Violation) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (*PreconditionFailure_Violation) XXX_MessageName() string { + return "google.rpc.PreconditionFailure.Violation" +} + +// Describes violations in a client request. This error type focuses on the +// syntactic aspects of the request. +type BadRequest struct { + // Describes all violations in a client request. 
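+ //
+ // As a small sketch, a caller holding a decoded *BadRequest (here `br`, an
+ // assumed variable) could surface these like so:
+ //
+ //	for _, fv := range br.GetFieldViolations() {
+ //		fmt.Printf("invalid field %s: %s\n", fv.GetField(), fv.GetDescription())
+ //	}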
+ FieldViolations []*BadRequest_FieldViolation `protobuf:"bytes,1,rep,name=field_violations,json=fieldViolations,proto3" json:"field_violations,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BadRequest) Reset() { *m = BadRequest{} } +func (*BadRequest) ProtoMessage() {} +func (*BadRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_851816e4d6b6361a, []int{4} +} +func (m *BadRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BadRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BadRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BadRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_BadRequest.Merge(m, src) +} +func (m *BadRequest) XXX_Size() int { + return m.Size() +} +func (m *BadRequest) XXX_DiscardUnknown() { + xxx_messageInfo_BadRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_BadRequest proto.InternalMessageInfo + +func (m *BadRequest) GetFieldViolations() []*BadRequest_FieldViolation { + if m != nil { + return m.FieldViolations + } + return nil +} + +func (*BadRequest) XXX_MessageName() string { + return "google.rpc.BadRequest" +} + +// A message type used to describe a single bad request field. +type BadRequest_FieldViolation struct { + // A path leading to a field in the request body. The value will be a + // sequence of dot-separated identifiers that identify a protocol buffer + // field. E.g., "field_violations.field" would identify this field. + Field string `protobuf:"bytes,1,opt,name=field,proto3" json:"field,omitempty"` + // A description of why the request element is bad. 
+ Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BadRequest_FieldViolation) Reset() { *m = BadRequest_FieldViolation{} } +func (*BadRequest_FieldViolation) ProtoMessage() {} +func (*BadRequest_FieldViolation) Descriptor() ([]byte, []int) { + return fileDescriptor_851816e4d6b6361a, []int{4, 0} +} +func (m *BadRequest_FieldViolation) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BadRequest_FieldViolation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BadRequest_FieldViolation.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BadRequest_FieldViolation) XXX_Merge(src proto.Message) { + xxx_messageInfo_BadRequest_FieldViolation.Merge(m, src) +} +func (m *BadRequest_FieldViolation) XXX_Size() int { + return m.Size() +} +func (m *BadRequest_FieldViolation) XXX_DiscardUnknown() { + xxx_messageInfo_BadRequest_FieldViolation.DiscardUnknown(m) +} + +var xxx_messageInfo_BadRequest_FieldViolation proto.InternalMessageInfo + +func (m *BadRequest_FieldViolation) GetField() string { + if m != nil { + return m.Field + } + return "" +} + +func (m *BadRequest_FieldViolation) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (*BadRequest_FieldViolation) XXX_MessageName() string { + return "google.rpc.BadRequest.FieldViolation" +} + +// Contains metadata about the request that clients can attach when filing a bug +// or providing other forms of feedback. +type RequestInfo struct { + // An opaque string that should only be interpreted by the service generating + // it. For example, it can be used to identify requests in the service's logs. + RequestId string `protobuf:"bytes,1,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + // Any data that was used to serve this request. For example, an encrypted + // stack trace that can be sent back to the service provider for debugging. 
+ ServingData string `protobuf:"bytes,2,opt,name=serving_data,json=servingData,proto3" json:"serving_data,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RequestInfo) Reset() { *m = RequestInfo{} } +func (*RequestInfo) ProtoMessage() {} +func (*RequestInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_851816e4d6b6361a, []int{5} +} +func (m *RequestInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RequestInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RequestInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RequestInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestInfo.Merge(m, src) +} +func (m *RequestInfo) XXX_Size() int { + return m.Size() +} +func (m *RequestInfo) XXX_DiscardUnknown() { + xxx_messageInfo_RequestInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_RequestInfo proto.InternalMessageInfo + +func (m *RequestInfo) GetRequestId() string { + if m != nil { + return m.RequestId + } + return "" +} + +func (m *RequestInfo) GetServingData() string { + if m != nil { + return m.ServingData + } + return "" +} + +func (*RequestInfo) XXX_MessageName() string { + return "google.rpc.RequestInfo" +} + +// Describes the resource that is being accessed. +type ResourceInfo struct { + // A name for the type of resource being accessed, e.g. "sql table", + // "cloud storage bucket", "file", "Google calendar"; or the type URL + // of the resource: e.g. "type.googleapis.com/google.pubsub.v1.Topic". + ResourceType string `protobuf:"bytes,1,opt,name=resource_type,json=resourceType,proto3" json:"resource_type,omitempty"` + // The name of the resource being accessed. For example, a shared calendar + // name: "example.com_4fghdhgsrgh@group.calendar.google.com", if the current + // error is + // [google.rpc.Code.PERMISSION_DENIED][google.rpc.Code.PERMISSION_DENIED]. + ResourceName string `protobuf:"bytes,2,opt,name=resource_name,json=resourceName,proto3" json:"resource_name,omitempty"` + // The owner of the resource (optional). + // For example, "user:<owner>" or "project:<Google developer project id>". + Owner string `protobuf:"bytes,3,opt,name=owner,proto3" json:"owner,omitempty"` + // Describes what error is encountered when accessing this resource. + // For example, updating a cloud project may require the `writer` permission + // on the developer console project.
+ Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResourceInfo) Reset() { *m = ResourceInfo{} } +func (*ResourceInfo) ProtoMessage() {} +func (*ResourceInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_851816e4d6b6361a, []int{6} +} +func (m *ResourceInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResourceInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResourceInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceInfo.Merge(m, src) +} +func (m *ResourceInfo) XXX_Size() int { + return m.Size() +} +func (m *ResourceInfo) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceInfo proto.InternalMessageInfo + +func (m *ResourceInfo) GetResourceType() string { + if m != nil { + return m.ResourceType + } + return "" +} + +func (m *ResourceInfo) GetResourceName() string { + if m != nil { + return m.ResourceName + } + return "" +} + +func (m *ResourceInfo) GetOwner() string { + if m != nil { + return m.Owner + } + return "" +} + +func (m *ResourceInfo) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (*ResourceInfo) XXX_MessageName() string { + return "google.rpc.ResourceInfo" +} + +// Provides links to documentation or for performing an out of band action. +// +// For example, if a quota check failed with an error indicating the calling +// project hasn't enabled the accessed service, this can contain a URL pointing +// directly to the right place in the developer console to flip the bit. +type Help struct { + // URL(s) pointing to additional information on handling the current error. + Links []*Help_Link `protobuf:"bytes,1,rep,name=links,proto3" json:"links,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Help) Reset() { *m = Help{} } +func (*Help) ProtoMessage() {} +func (*Help) Descriptor() ([]byte, []int) { + return fileDescriptor_851816e4d6b6361a, []int{7} +} +func (m *Help) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Help) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Help.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Help) XXX_Merge(src proto.Message) { + xxx_messageInfo_Help.Merge(m, src) +} +func (m *Help) XXX_Size() int { + return m.Size() +} +func (m *Help) XXX_DiscardUnknown() { + xxx_messageInfo_Help.DiscardUnknown(m) +} + +var xxx_messageInfo_Help proto.InternalMessageInfo + +func (m *Help) GetLinks() []*Help_Link { + if m != nil { + return m.Links + } + return nil +} + +func (*Help) XXX_MessageName() string { + return "google.rpc.Help" +} + +// Describes a URL link. +type Help_Link struct { + // Describes what the link offers. + Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` + // The URL of the link. 
+ Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Help_Link) Reset() { *m = Help_Link{} } +func (*Help_Link) ProtoMessage() {} +func (*Help_Link) Descriptor() ([]byte, []int) { + return fileDescriptor_851816e4d6b6361a, []int{7, 0} +} +func (m *Help_Link) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Help_Link) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Help_Link.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Help_Link) XXX_Merge(src proto.Message) { + xxx_messageInfo_Help_Link.Merge(m, src) +} +func (m *Help_Link) XXX_Size() int { + return m.Size() +} +func (m *Help_Link) XXX_DiscardUnknown() { + xxx_messageInfo_Help_Link.DiscardUnknown(m) +} + +var xxx_messageInfo_Help_Link proto.InternalMessageInfo + +func (m *Help_Link) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Help_Link) GetUrl() string { + if m != nil { + return m.Url + } + return "" +} + +func (*Help_Link) XXX_MessageName() string { + return "google.rpc.Help.Link" +} + +// Provides a localized error message that is safe to return to the user +// which can be attached to an RPC error. +type LocalizedMessage struct { + // The locale used following the specification defined at + // http://www.rfc-editor.org/rfc/bcp/bcp47.txt. + // Examples are: "en-US", "fr-CH", "es-MX" + Locale string `protobuf:"bytes,1,opt,name=locale,proto3" json:"locale,omitempty"` + // The localized error message in the above locale. 
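+ //
+ // A minimal illustrative value (the message text here is an assumption):
+ //
+ //	&LocalizedMessage{Locale: "en-US", Message: "Resource not found"}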
+ Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LocalizedMessage) Reset() { *m = LocalizedMessage{} } +func (*LocalizedMessage) ProtoMessage() {} +func (*LocalizedMessage) Descriptor() ([]byte, []int) { + return fileDescriptor_851816e4d6b6361a, []int{8} +} +func (m *LocalizedMessage) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LocalizedMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LocalizedMessage.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LocalizedMessage) XXX_Merge(src proto.Message) { + xxx_messageInfo_LocalizedMessage.Merge(m, src) +} +func (m *LocalizedMessage) XXX_Size() int { + return m.Size() +} +func (m *LocalizedMessage) XXX_DiscardUnknown() { + xxx_messageInfo_LocalizedMessage.DiscardUnknown(m) +} + +var xxx_messageInfo_LocalizedMessage proto.InternalMessageInfo + +func (m *LocalizedMessage) GetLocale() string { + if m != nil { + return m.Locale + } + return "" +} + +func (m *LocalizedMessage) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +func (*LocalizedMessage) XXX_MessageName() string { + return "google.rpc.LocalizedMessage" +} +func init() { + proto.RegisterType((*RetryInfo)(nil), "google.rpc.RetryInfo") + proto.RegisterType((*DebugInfo)(nil), "google.rpc.DebugInfo") + proto.RegisterType((*QuotaFailure)(nil), "google.rpc.QuotaFailure") + proto.RegisterType((*QuotaFailure_Violation)(nil), "google.rpc.QuotaFailure.Violation") + proto.RegisterType((*PreconditionFailure)(nil), "google.rpc.PreconditionFailure") + proto.RegisterType((*PreconditionFailure_Violation)(nil), "google.rpc.PreconditionFailure.Violation") + proto.RegisterType((*BadRequest)(nil), "google.rpc.BadRequest") + proto.RegisterType((*BadRequest_FieldViolation)(nil), "google.rpc.BadRequest.FieldViolation") + proto.RegisterType((*RequestInfo)(nil), "google.rpc.RequestInfo") + proto.RegisterType((*ResourceInfo)(nil), "google.rpc.ResourceInfo") + proto.RegisterType((*Help)(nil), "google.rpc.Help") + proto.RegisterType((*Help_Link)(nil), "google.rpc.Help.Link") + proto.RegisterType((*LocalizedMessage)(nil), "google.rpc.LocalizedMessage") +} + +func init() { proto.RegisterFile("google/rpc/error_details.proto", fileDescriptor_851816e4d6b6361a) } + +var fileDescriptor_851816e4d6b6361a = []byte{ + // 624 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0xbf, 0x6f, 0xd3, 0x40, + 0x18, 0xed, 0x35, 0x69, 0x91, 0xbf, 0x84, 0x52, 0xcc, 0x0f, 0x85, 0x48, 0x9c, 0x82, 0x11, 0x52, + 0x11, 0x92, 0x2b, 0x95, 0xad, 0x63, 0x48, 0x7f, 0x49, 0x05, 0x82, 0x85, 0x18, 0x60, 0xb0, 0x2e, + 0xf6, 0x97, 0xe8, 0xa8, 0xe3, 0x33, 0x67, 0xbb, 0xa8, 0x4c, 0xfc, 0x09, 0xec, 0x6c, 0x4c, 0xfd, + 0x27, 0xd8, 0x3b, 0x76, 0x64, 0x24, 0xe9, 0xc2, 0xd8, 0x91, 0x11, 0x9d, 0x7d, 0xd7, 0xba, 0x4d, + 0x41, 0x6c, 0x7e, 0xef, 0xde, 0x3d, 0xbf, 0xf7, 0xe9, 0xee, 0x80, 0x8e, 0x84, 0x18, 0x45, 0xb8, + 0x2a, 0x93, 0x60, 0x15, 0xa5, 0x14, 0xd2, 0x0f, 0x31, 0x63, 0x3c, 0x4a, 0xdd, 0x44, 0x8a, 0x4c, + 0xd8, 0x50, 0xae, 0xbb, 0x32, 0x09, 0xda, 0x46, 0x5b, 0xac, 0x0c, 0xf2, 0xe1, 0x6a, 0x98, 0x4b, + 0x96, 0x71, 0x11, 0x97, 0x5a, 0x67, 0x0b, 0x2c, 0x0f, 0x33, 0x79, 0xb0, 0x13, 0x0f, 0x85, 0xbd, 
+ 0x0e, 0x0d, 0xa9, 0x80, 0x1f, 0x62, 0xc4, 0x0e, 0x5a, 0xa4, 0x43, 0x56, 0x1a, 0x6b, 0xf7, 0x5c, + 0x6d, 0x67, 0x2c, 0xdc, 0x9e, 0xb6, 0xf0, 0xa0, 0x50, 0xf7, 0x94, 0xd8, 0xd9, 0x06, 0xab, 0x87, + 0x83, 0x7c, 0x54, 0x18, 0x3d, 0x84, 0xeb, 0x69, 0xc6, 0x82, 0x3d, 0x1f, 0xe3, 0x4c, 0x72, 0x4c, + 0x5b, 0xa4, 0x53, 0x5b, 0xb1, 0xbc, 0x66, 0x41, 0x6e, 0x94, 0x9c, 0x7d, 0x17, 0x16, 0xcb, 0xdc, + 0xad, 0xf9, 0x0e, 0x59, 0xb1, 0x3c, 0x8d, 0x9c, 0xaf, 0x04, 0x9a, 0xaf, 0x72, 0x91, 0xb1, 0x4d, + 0xc6, 0xa3, 0x5c, 0xa2, 0xdd, 0x05, 0xd8, 0xe7, 0x22, 0x2a, 0xfe, 0x59, 0x5a, 0x35, 0xd6, 0x1c, + 0xf7, 0xbc, 0xa4, 0x5b, 0x55, 0xbb, 0x6f, 0x8c, 0xd4, 0xab, 0xec, 0x6a, 0x6f, 0x81, 0x75, 0xb6, + 0x60, 0xb7, 0xe0, 0x5a, 0x9a, 0x0f, 0xde, 0x63, 0x90, 0x15, 0x1d, 0x2d, 0xcf, 0x40, 0xbb, 0x03, + 0x8d, 0x10, 0xd3, 0x40, 0xf2, 0x44, 0x09, 0x75, 0xb0, 0x2a, 0xe5, 0x7c, 0x27, 0x70, 0xab, 0x2f, + 0x31, 0x10, 0x71, 0xc8, 0x15, 0x61, 0x42, 0xee, 0x5c, 0x11, 0xf2, 0x71, 0x35, 0xe4, 0x15, 0x9b, + 0xfe, 0x92, 0xf5, 0x5d, 0x35, 0xab, 0x0d, 0xf5, 0xec, 0x20, 0x41, 0x1d, 0xb4, 0xf8, 0xae, 0xe6, + 0x9f, 0xff, 0x67, 0xfe, 0xda, 0x6c, 0xfe, 0x43, 0x02, 0xd0, 0x65, 0xa1, 0x87, 0x1f, 0x72, 0x4c, + 0x33, 0xbb, 0x0f, 0xcb, 0x43, 0x8e, 0x51, 0xe8, 0xcf, 0x84, 0x7f, 0x54, 0x0d, 0x7f, 0xbe, 0xc3, + 0xdd, 0x54, 0xf2, 0xf3, 0xe0, 0x37, 0x86, 0x17, 0x70, 0xda, 0xde, 0x86, 0xa5, 0x8b, 0x12, 0xfb, + 0x36, 0x2c, 0x14, 0x22, 0xdd, 0xa1, 0x04, 0xff, 0x31, 0xea, 0x97, 0xd0, 0xd0, 0x3f, 0x2d, 0x0e, + 0xd5, 0x7d, 0x00, 0x59, 0x42, 0x9f, 0x1b, 0x2f, 0x4b, 0x33, 0x3b, 0xa1, 0xfd, 0x00, 0x9a, 0x29, + 0xca, 0x7d, 0x1e, 0x8f, 0xfc, 0x90, 0x65, 0xcc, 0x18, 0x6a, 0xae, 0xc7, 0x32, 0xe6, 0x7c, 0x21, + 0xd0, 0xf4, 0x30, 0x15, 0xb9, 0x0c, 0xd0, 0x9c, 0x53, 0xa9, 0xb1, 0x5f, 0x99, 0x72, 0xd3, 0x90, + 0xaf, 0xd5, 0xb4, 0xab, 0xa2, 0x98, 0x8d, 0x51, 0x3b, 0x9f, 0x89, 0x5e, 0xb0, 0x31, 0xaa, 0x8e, + 0xe2, 0x63, 0x8c, 0x52, 0x8f, 0xbc, 0x04, 0x97, 0x3b, 0xd6, 0x67, 0x3b, 0x0a, 0xa8, 0x6f, 0x63, + 0x94, 0xd8, 0x4f, 0x60, 0x21, 0xe2, 0xf1, 0x9e, 0x19, 0xfe, 0x9d, 0xea, 0xf0, 0x95, 0xc0, 0xdd, + 0xe5, 0xf1, 0x9e, 0x57, 0x6a, 0xda, 0xeb, 0x50, 0x57, 0xf0, 0xb2, 0x3d, 0x99, 0xb1, 0xb7, 0x97, + 0xa1, 0x96, 0x4b, 0x73, 0xc1, 0xd4, 0xa7, 0xd3, 0x83, 0xe5, 0x5d, 0x11, 0xb0, 0x88, 0x7f, 0xc2, + 0xf0, 0x39, 0xa6, 0x29, 0x1b, 0xa1, 0xba, 0x89, 0x91, 0xe2, 0x4c, 0x7f, 0x8d, 0xd4, 0x39, 0x1b, + 0x97, 0x12, 0x73, 0xce, 0x34, 0xec, 0x86, 0xc7, 0x13, 0x3a, 0xf7, 0x63, 0x42, 0xe7, 0x4e, 0x27, + 0x94, 0xfc, 0x9e, 0x50, 0xf2, 0x79, 0x4a, 0xc9, 0xe1, 0x94, 0x92, 0xa3, 0x29, 0x25, 0xc7, 0x53, + 0x4a, 0x7e, 0x4e, 0x29, 0xf9, 0x35, 0xa5, 0x73, 0xa7, 0x8a, 0x3f, 0xa1, 0xe4, 0xe8, 0x84, 0x12, + 0x58, 0x0a, 0xc4, 0xb8, 0x52, 0xac, 0x7b, 0x73, 0x43, 0xbd, 0x5e, 0xbd, 0xf2, 0xf1, 0xea, 0xab, + 0xe7, 0xa5, 0x4f, 0xde, 0xd6, 0x64, 0x12, 0x7c, 0x9b, 0xaf, 0x79, 0xfd, 0x67, 0x83, 0xc5, 0xe2, + 0xc9, 0x79, 0xfa, 0x27, 0x00, 0x00, 0xff, 0xff, 0x63, 0xe4, 0x76, 0x26, 0xf1, 0x04, 0x00, 0x00, +} + +func (this *RetryInfo) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*RetryInfo) + if !ok { + that2, ok := that.(RetryInfo) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if c := this.RetryDelay.Compare(that1.RetryDelay); c != 0 { + return c + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *DebugInfo) Compare(that interface{}) int { 
+ if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*DebugInfo) + if !ok { + that2, ok := that.(DebugInfo) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if len(this.StackEntries) != len(that1.StackEntries) { + if len(this.StackEntries) < len(that1.StackEntries) { + return -1 + } + return 1 + } + for i := range this.StackEntries { + if this.StackEntries[i] != that1.StackEntries[i] { + if this.StackEntries[i] < that1.StackEntries[i] { + return -1 + } + return 1 + } + } + if this.Detail != that1.Detail { + if this.Detail < that1.Detail { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *QuotaFailure) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*QuotaFailure) + if !ok { + that2, ok := that.(QuotaFailure) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if len(this.Violations) != len(that1.Violations) { + if len(this.Violations) < len(that1.Violations) { + return -1 + } + return 1 + } + for i := range this.Violations { + if c := this.Violations[i].Compare(that1.Violations[i]); c != 0 { + return c + } + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *QuotaFailure_Violation) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*QuotaFailure_Violation) + if !ok { + that2, ok := that.(QuotaFailure_Violation) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Subject != that1.Subject { + if this.Subject < that1.Subject { + return -1 + } + return 1 + } + if this.Description != that1.Description { + if this.Description < that1.Description { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *PreconditionFailure) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*PreconditionFailure) + if !ok { + that2, ok := that.(PreconditionFailure) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if len(this.Violations) != len(that1.Violations) { + if len(this.Violations) < len(that1.Violations) { + return -1 + } + return 1 + } + for i := range this.Violations { + if c := this.Violations[i].Compare(that1.Violations[i]); c != 0 { + return c + } + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *PreconditionFailure_Violation) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*PreconditionFailure_Violation) + if !ok { + that2, ok := that.(PreconditionFailure_Violation) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Type != that1.Type { + if 
this.Type < that1.Type { + return -1 + } + return 1 + } + if this.Subject != that1.Subject { + if this.Subject < that1.Subject { + return -1 + } + return 1 + } + if this.Description != that1.Description { + if this.Description < that1.Description { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *BadRequest) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*BadRequest) + if !ok { + that2, ok := that.(BadRequest) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if len(this.FieldViolations) != len(that1.FieldViolations) { + if len(this.FieldViolations) < len(that1.FieldViolations) { + return -1 + } + return 1 + } + for i := range this.FieldViolations { + if c := this.FieldViolations[i].Compare(that1.FieldViolations[i]); c != 0 { + return c + } + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *BadRequest_FieldViolation) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*BadRequest_FieldViolation) + if !ok { + that2, ok := that.(BadRequest_FieldViolation) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Field != that1.Field { + if this.Field < that1.Field { + return -1 + } + return 1 + } + if this.Description != that1.Description { + if this.Description < that1.Description { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *RequestInfo) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*RequestInfo) + if !ok { + that2, ok := that.(RequestInfo) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.RequestId != that1.RequestId { + if this.RequestId < that1.RequestId { + return -1 + } + return 1 + } + if this.ServingData != that1.ServingData { + if this.ServingData < that1.ServingData { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *ResourceInfo) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*ResourceInfo) + if !ok { + that2, ok := that.(ResourceInfo) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.ResourceType != that1.ResourceType { + if this.ResourceType < that1.ResourceType { + return -1 + } + return 1 + } + if this.ResourceName != that1.ResourceName { + if this.ResourceName < that1.ResourceName { + return -1 + } + return 1 + } + if this.Owner != that1.Owner { + if this.Owner < that1.Owner { + return -1 + } + return 1 + } + if this.Description != that1.Description { + if this.Description < that1.Description { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + 
return 0 +} +func (this *Help) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Help) + if !ok { + that2, ok := that.(Help) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if len(this.Links) != len(that1.Links) { + if len(this.Links) < len(that1.Links) { + return -1 + } + return 1 + } + for i := range this.Links { + if c := this.Links[i].Compare(that1.Links[i]); c != 0 { + return c + } + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Help_Link) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Help_Link) + if !ok { + that2, ok := that.(Help_Link) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Description != that1.Description { + if this.Description < that1.Description { + return -1 + } + return 1 + } + if this.Url != that1.Url { + if this.Url < that1.Url { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *LocalizedMessage) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*LocalizedMessage) + if !ok { + that2, ok := that.(LocalizedMessage) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Locale != that1.Locale { + if this.Locale < that1.Locale { + return -1 + } + return 1 + } + if this.Message != that1.Message { + if this.Message < that1.Message { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *RetryInfo) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*RetryInfo) + if !ok { + that2, ok := that.(RetryInfo) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.RetryDelay.Equal(that1.RetryDelay) { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *DebugInfo) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*DebugInfo) + if !ok { + that2, ok := that.(DebugInfo) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.StackEntries) != len(that1.StackEntries) { + return false + } + for i := range this.StackEntries { + if this.StackEntries[i] != that1.StackEntries[i] { + return false + } + } + if this.Detail != that1.Detail { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *QuotaFailure) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*QuotaFailure) + if !ok { + that2, ok := that.(QuotaFailure) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == 
nil { + return false + } + if len(this.Violations) != len(that1.Violations) { + return false + } + for i := range this.Violations { + if !this.Violations[i].Equal(that1.Violations[i]) { + return false + } + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *QuotaFailure_Violation) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*QuotaFailure_Violation) + if !ok { + that2, ok := that.(QuotaFailure_Violation) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Subject != that1.Subject { + return false + } + if this.Description != that1.Description { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *PreconditionFailure) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*PreconditionFailure) + if !ok { + that2, ok := that.(PreconditionFailure) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Violations) != len(that1.Violations) { + return false + } + for i := range this.Violations { + if !this.Violations[i].Equal(that1.Violations[i]) { + return false + } + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *PreconditionFailure_Violation) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*PreconditionFailure_Violation) + if !ok { + that2, ok := that.(PreconditionFailure_Violation) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Type != that1.Type { + return false + } + if this.Subject != that1.Subject { + return false + } + if this.Description != that1.Description { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *BadRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*BadRequest) + if !ok { + that2, ok := that.(BadRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.FieldViolations) != len(that1.FieldViolations) { + return false + } + for i := range this.FieldViolations { + if !this.FieldViolations[i].Equal(that1.FieldViolations[i]) { + return false + } + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *BadRequest_FieldViolation) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*BadRequest_FieldViolation) + if !ok { + that2, ok := that.(BadRequest_FieldViolation) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Field != that1.Field { + return false + } + if this.Description != that1.Description { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *RequestInfo) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := 
that.(*RequestInfo) + if !ok { + that2, ok := that.(RequestInfo) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.RequestId != that1.RequestId { + return false + } + if this.ServingData != that1.ServingData { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *ResourceInfo) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ResourceInfo) + if !ok { + that2, ok := that.(ResourceInfo) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.ResourceType != that1.ResourceType { + return false + } + if this.ResourceName != that1.ResourceName { + return false + } + if this.Owner != that1.Owner { + return false + } + if this.Description != that1.Description { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Help) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Help) + if !ok { + that2, ok := that.(Help) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Links) != len(that1.Links) { + return false + } + for i := range this.Links { + if !this.Links[i].Equal(that1.Links[i]) { + return false + } + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Help_Link) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Help_Link) + if !ok { + that2, ok := that.(Help_Link) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Description != that1.Description { + return false + } + if this.Url != that1.Url { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *LocalizedMessage) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*LocalizedMessage) + if !ok { + that2, ok := that.(LocalizedMessage) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Locale != that1.Locale { + return false + } + if this.Message != that1.Message { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *RetryInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&rpc.RetryInfo{") + if this.RetryDelay != nil { + s = append(s, "RetryDelay: "+fmt.Sprintf("%#v", this.RetryDelay)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DebugInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&rpc.DebugInfo{") + s = append(s, "StackEntries: "+fmt.Sprintf("%#v", this.StackEntries)+",\n") + s = append(s, "Detail: "+fmt.Sprintf("%#v", this.Detail)+",\n") + if this.XXX_unrecognized != nil { + s = 
append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *QuotaFailure) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&rpc.QuotaFailure{") + if this.Violations != nil { + s = append(s, "Violations: "+fmt.Sprintf("%#v", this.Violations)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *QuotaFailure_Violation) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&rpc.QuotaFailure_Violation{") + s = append(s, "Subject: "+fmt.Sprintf("%#v", this.Subject)+",\n") + s = append(s, "Description: "+fmt.Sprintf("%#v", this.Description)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *PreconditionFailure) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&rpc.PreconditionFailure{") + if this.Violations != nil { + s = append(s, "Violations: "+fmt.Sprintf("%#v", this.Violations)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *PreconditionFailure_Violation) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&rpc.PreconditionFailure_Violation{") + s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n") + s = append(s, "Subject: "+fmt.Sprintf("%#v", this.Subject)+",\n") + s = append(s, "Description: "+fmt.Sprintf("%#v", this.Description)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *BadRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&rpc.BadRequest{") + if this.FieldViolations != nil { + s = append(s, "FieldViolations: "+fmt.Sprintf("%#v", this.FieldViolations)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *BadRequest_FieldViolation) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&rpc.BadRequest_FieldViolation{") + s = append(s, "Field: "+fmt.Sprintf("%#v", this.Field)+",\n") + s = append(s, "Description: "+fmt.Sprintf("%#v", this.Description)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *RequestInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&rpc.RequestInfo{") + s = append(s, "RequestId: "+fmt.Sprintf("%#v", this.RequestId)+",\n") + s = append(s, "ServingData: "+fmt.Sprintf("%#v", this.ServingData)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ResourceInfo) GoString() string { + 
if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&rpc.ResourceInfo{") + s = append(s, "ResourceType: "+fmt.Sprintf("%#v", this.ResourceType)+",\n") + s = append(s, "ResourceName: "+fmt.Sprintf("%#v", this.ResourceName)+",\n") + s = append(s, "Owner: "+fmt.Sprintf("%#v", this.Owner)+",\n") + s = append(s, "Description: "+fmt.Sprintf("%#v", this.Description)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Help) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&rpc.Help{") + if this.Links != nil { + s = append(s, "Links: "+fmt.Sprintf("%#v", this.Links)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Help_Link) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&rpc.Help_Link{") + s = append(s, "Description: "+fmt.Sprintf("%#v", this.Description)+",\n") + s = append(s, "Url: "+fmt.Sprintf("%#v", this.Url)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *LocalizedMessage) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&rpc.LocalizedMessage{") + s = append(s, "Locale: "+fmt.Sprintf("%#v", this.Locale)+",\n") + s = append(s, "Message: "+fmt.Sprintf("%#v", this.Message)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringErrorDetails(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *RetryInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RetryInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RetryInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.RetryDelay != nil { + { + size, err := m.RetryDelay.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintErrorDetails(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DebugInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DebugInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DebugInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], 
m.XXX_unrecognized) + } + if len(m.Detail) > 0 { + i -= len(m.Detail) + copy(dAtA[i:], m.Detail) + i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.Detail))) + i-- + dAtA[i] = 0x12 + } + if len(m.StackEntries) > 0 { + for iNdEx := len(m.StackEntries) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.StackEntries[iNdEx]) + copy(dAtA[i:], m.StackEntries[iNdEx]) + i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.StackEntries[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QuotaFailure) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QuotaFailure) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QuotaFailure) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Violations) > 0 { + for iNdEx := len(m.Violations) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Violations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintErrorDetails(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QuotaFailure_Violation) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QuotaFailure_Violation) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QuotaFailure_Violation) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Description) > 0 { + i -= len(m.Description) + copy(dAtA[i:], m.Description) + i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.Description))) + i-- + dAtA[i] = 0x12 + } + if len(m.Subject) > 0 { + i -= len(m.Subject) + copy(dAtA[i:], m.Subject) + i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.Subject))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *PreconditionFailure) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PreconditionFailure) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PreconditionFailure) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Violations) > 0 { + for iNdEx := len(m.Violations) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Violations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintErrorDetails(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *PreconditionFailure_Violation) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], 
nil +} + +func (m *PreconditionFailure_Violation) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PreconditionFailure_Violation) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Description) > 0 { + i -= len(m.Description) + copy(dAtA[i:], m.Description) + i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.Description))) + i-- + dAtA[i] = 0x1a + } + if len(m.Subject) > 0 { + i -= len(m.Subject) + copy(dAtA[i:], m.Subject) + i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.Subject))) + i-- + dAtA[i] = 0x12 + } + if len(m.Type) > 0 { + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *BadRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BadRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BadRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.FieldViolations) > 0 { + for iNdEx := len(m.FieldViolations) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.FieldViolations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintErrorDetails(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *BadRequest_FieldViolation) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BadRequest_FieldViolation) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BadRequest_FieldViolation) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Description) > 0 { + i -= len(m.Description) + copy(dAtA[i:], m.Description) + i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.Description))) + i-- + dAtA[i] = 0x12 + } + if len(m.Field) > 0 { + i -= len(m.Field) + copy(dAtA[i:], m.Field) + i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.Field))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RequestInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RequestInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RequestInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.ServingData) > 0 { + i -= len(m.ServingData) + copy(dAtA[i:], m.ServingData) + i = encodeVarintErrorDetails(dAtA, i, 
uint64(len(m.ServingData))) + i-- + dAtA[i] = 0x12 + } + if len(m.RequestId) > 0 { + i -= len(m.RequestId) + copy(dAtA[i:], m.RequestId) + i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.RequestId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ResourceInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Description) > 0 { + i -= len(m.Description) + copy(dAtA[i:], m.Description) + i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.Description))) + i-- + dAtA[i] = 0x22 + } + if len(m.Owner) > 0 { + i -= len(m.Owner) + copy(dAtA[i:], m.Owner) + i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.Owner))) + i-- + dAtA[i] = 0x1a + } + if len(m.ResourceName) > 0 { + i -= len(m.ResourceName) + copy(dAtA[i:], m.ResourceName) + i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.ResourceName))) + i-- + dAtA[i] = 0x12 + } + if len(m.ResourceType) > 0 { + i -= len(m.ResourceType) + copy(dAtA[i:], m.ResourceType) + i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.ResourceType))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Help) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Help) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Help) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Links) > 0 { + for iNdEx := len(m.Links) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Links[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintErrorDetails(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Help_Link) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Help_Link) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Help_Link) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Url) > 0 { + i -= len(m.Url) + copy(dAtA[i:], m.Url) + i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.Url))) + i-- + dAtA[i] = 0x12 + } + if len(m.Description) > 0 { + i -= len(m.Description) + copy(dAtA[i:], m.Description) + i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.Description))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *LocalizedMessage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + 
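+ // Editor's note, not upstream code: the generated marshalers compute Size()
+ // exactly once up front, then MarshalToSizedBuffer fills the buffer back-to-front
+ // (trailing fields first), so each nested message's length prefix is already
+ // known without a second sizing pass.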
if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LocalizedMessage) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LocalizedMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Message) > 0 { + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x12 + } + if len(m.Locale) > 0 { + i -= len(m.Locale) + copy(dAtA[i:], m.Locale) + i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.Locale))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintErrorDetails(dAtA []byte, offset int, v uint64) int { + offset -= sovErrorDetails(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func NewPopulatedRetryInfo(r randyErrorDetails, easy bool) *RetryInfo { + this := &RetryInfo{} + if r.Intn(5) != 0 { + this.RetryDelay = types.NewPopulatedDuration(r, easy) + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedErrorDetails(r, 2) + } + return this +} + +func NewPopulatedDebugInfo(r randyErrorDetails, easy bool) *DebugInfo { + this := &DebugInfo{} + v1 := r.Intn(10) + this.StackEntries = make([]string, v1) + for i := 0; i < v1; i++ { + this.StackEntries[i] = string(randStringErrorDetails(r)) + } + this.Detail = string(randStringErrorDetails(r)) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedErrorDetails(r, 3) + } + return this +} + +func NewPopulatedQuotaFailure(r randyErrorDetails, easy bool) *QuotaFailure { + this := &QuotaFailure{} + if r.Intn(5) != 0 { + v2 := r.Intn(5) + this.Violations = make([]*QuotaFailure_Violation, v2) + for i := 0; i < v2; i++ { + this.Violations[i] = NewPopulatedQuotaFailure_Violation(r, easy) + } + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedErrorDetails(r, 2) + } + return this +} + +func NewPopulatedQuotaFailure_Violation(r randyErrorDetails, easy bool) *QuotaFailure_Violation { + this := &QuotaFailure_Violation{} + this.Subject = string(randStringErrorDetails(r)) + this.Description = string(randStringErrorDetails(r)) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedErrorDetails(r, 3) + } + return this +} + +func NewPopulatedPreconditionFailure(r randyErrorDetails, easy bool) *PreconditionFailure { + this := &PreconditionFailure{} + if r.Intn(5) != 0 { + v3 := r.Intn(5) + this.Violations = make([]*PreconditionFailure_Violation, v3) + for i := 0; i < v3; i++ { + this.Violations[i] = NewPopulatedPreconditionFailure_Violation(r, easy) + } + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedErrorDetails(r, 2) + } + return this +} + +func NewPopulatedPreconditionFailure_Violation(r randyErrorDetails, easy bool) *PreconditionFailure_Violation { + this := &PreconditionFailure_Violation{} + this.Type = string(randStringErrorDetails(r)) + this.Subject = string(randStringErrorDetails(r)) + this.Description = string(randStringErrorDetails(r)) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedErrorDetails(r, 4) + } + return this +} + +func NewPopulatedBadRequest(r randyErrorDetails, easy bool) *BadRequest { + this := &BadRequest{} + if r.Intn(5) != 0 { + v4 := r.Intn(5) + 
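+ // Editor's note, not upstream code: the NewPopulated* helpers come from gogo's
+ // populate plugin and exist for tests and fuzzing — fields are filled from the
+ // caller-supplied rand source, and unless easy is set, random unknown fields are
+ // appended to XXX_unrecognized as well.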
this.FieldViolations = make([]*BadRequest_FieldViolation, v4) + for i := 0; i < v4; i++ { + this.FieldViolations[i] = NewPopulatedBadRequest_FieldViolation(r, easy) + } + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedErrorDetails(r, 2) + } + return this +} + +func NewPopulatedBadRequest_FieldViolation(r randyErrorDetails, easy bool) *BadRequest_FieldViolation { + this := &BadRequest_FieldViolation{} + this.Field = string(randStringErrorDetails(r)) + this.Description = string(randStringErrorDetails(r)) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedErrorDetails(r, 3) + } + return this +} + +func NewPopulatedRequestInfo(r randyErrorDetails, easy bool) *RequestInfo { + this := &RequestInfo{} + this.RequestId = string(randStringErrorDetails(r)) + this.ServingData = string(randStringErrorDetails(r)) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedErrorDetails(r, 3) + } + return this +} + +func NewPopulatedResourceInfo(r randyErrorDetails, easy bool) *ResourceInfo { + this := &ResourceInfo{} + this.ResourceType = string(randStringErrorDetails(r)) + this.ResourceName = string(randStringErrorDetails(r)) + this.Owner = string(randStringErrorDetails(r)) + this.Description = string(randStringErrorDetails(r)) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedErrorDetails(r, 5) + } + return this +} + +func NewPopulatedHelp(r randyErrorDetails, easy bool) *Help { + this := &Help{} + if r.Intn(5) != 0 { + v5 := r.Intn(5) + this.Links = make([]*Help_Link, v5) + for i := 0; i < v5; i++ { + this.Links[i] = NewPopulatedHelp_Link(r, easy) + } + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedErrorDetails(r, 2) + } + return this +} + +func NewPopulatedHelp_Link(r randyErrorDetails, easy bool) *Help_Link { + this := &Help_Link{} + this.Description = string(randStringErrorDetails(r)) + this.Url = string(randStringErrorDetails(r)) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedErrorDetails(r, 3) + } + return this +} + +func NewPopulatedLocalizedMessage(r randyErrorDetails, easy bool) *LocalizedMessage { + this := &LocalizedMessage{} + this.Locale = string(randStringErrorDetails(r)) + this.Message = string(randStringErrorDetails(r)) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedErrorDetails(r, 3) + } + return this +} + +type randyErrorDetails interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneErrorDetails(r randyErrorDetails) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringErrorDetails(r randyErrorDetails) string { + v6 := r.Intn(100) + tmps := make([]rune, v6) + for i := 0; i < v6; i++ { + tmps[i] = randUTF8RuneErrorDetails(r) + } + return string(tmps) +} +func randUnrecognizedErrorDetails(r randyErrorDetails, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldErrorDetails(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldErrorDetails(dAtA []byte, r randyErrorDetails, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = encodeVarintPopulateErrorDetails(dAtA, uint64(key)) + v7 := r.Int63() + if r.Intn(2) == 0 { + 
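+ // Editor's note, not upstream code: half of the sampled values are negated so
+ // the fuzzed varints also exercise the 10-byte encoding that negative int64
+ // values produce on the wire.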
v7 *= -1 + } + dAtA = encodeVarintPopulateErrorDetails(dAtA, uint64(v7)) + case 1: + dAtA = encodeVarintPopulateErrorDetails(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateErrorDetails(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateErrorDetails(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateErrorDetails(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateErrorDetails(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *RetryInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RetryDelay != nil { + l = m.RetryDelay.Size() + n += 1 + l + sovErrorDetails(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *DebugInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.StackEntries) > 0 { + for _, s := range m.StackEntries { + l = len(s) + n += 1 + l + sovErrorDetails(uint64(l)) + } + } + l = len(m.Detail) + if l > 0 { + n += 1 + l + sovErrorDetails(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *QuotaFailure) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Violations) > 0 { + for _, e := range m.Violations { + l = e.Size() + n += 1 + l + sovErrorDetails(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *QuotaFailure_Violation) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Subject) + if l > 0 { + n += 1 + l + sovErrorDetails(uint64(l)) + } + l = len(m.Description) + if l > 0 { + n += 1 + l + sovErrorDetails(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *PreconditionFailure) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Violations) > 0 { + for _, e := range m.Violations { + l = e.Size() + n += 1 + l + sovErrorDetails(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *PreconditionFailure_Violation) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + if l > 0 { + n += 1 + l + sovErrorDetails(uint64(l)) + } + l = len(m.Subject) + if l > 0 { + n += 1 + l + sovErrorDetails(uint64(l)) + } + l = len(m.Description) + if l > 0 { + n += 1 + l + sovErrorDetails(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *BadRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.FieldViolations) > 0 { + for _, e := range m.FieldViolations { + l = e.Size() + n += 1 + l + sovErrorDetails(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *BadRequest_FieldViolation) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Field) + if l > 0 { + n += 1 + l + sovErrorDetails(uint64(l)) + } + l = len(m.Description) + if l > 0 { + n += 1 + l + 
sovErrorDetails(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *RequestInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.RequestId) + if l > 0 { + n += 1 + l + sovErrorDetails(uint64(l)) + } + l = len(m.ServingData) + if l > 0 { + n += 1 + l + sovErrorDetails(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ResourceInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ResourceType) + if l > 0 { + n += 1 + l + sovErrorDetails(uint64(l)) + } + l = len(m.ResourceName) + if l > 0 { + n += 1 + l + sovErrorDetails(uint64(l)) + } + l = len(m.Owner) + if l > 0 { + n += 1 + l + sovErrorDetails(uint64(l)) + } + l = len(m.Description) + if l > 0 { + n += 1 + l + sovErrorDetails(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Help) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Links) > 0 { + for _, e := range m.Links { + l = e.Size() + n += 1 + l + sovErrorDetails(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Help_Link) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Description) + if l > 0 { + n += 1 + l + sovErrorDetails(uint64(l)) + } + l = len(m.Url) + if l > 0 { + n += 1 + l + sovErrorDetails(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *LocalizedMessage) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Locale) + if l > 0 { + n += 1 + l + sovErrorDetails(uint64(l)) + } + l = len(m.Message) + if l > 0 { + n += 1 + l + sovErrorDetails(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovErrorDetails(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozErrorDetails(x uint64) (n int) { + return sovErrorDetails(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *RetryInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RetryInfo{`, + `RetryDelay:` + strings.Replace(fmt.Sprintf("%v", this.RetryDelay), "Duration", "types.Duration", 1) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *DebugInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DebugInfo{`, + `StackEntries:` + fmt.Sprintf("%v", this.StackEntries) + `,`, + `Detail:` + fmt.Sprintf("%v", this.Detail) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *QuotaFailure) String() string { + if this == nil { + return "nil" + } + repeatedStringForViolations := "[]*QuotaFailure_Violation{" + for _, f := range this.Violations { + repeatedStringForViolations += strings.Replace(fmt.Sprintf("%v", f), "QuotaFailure_Violation", "QuotaFailure_Violation", 1) + "," + } + repeatedStringForViolations += "}" + s := strings.Join([]string{`&QuotaFailure{`, + `Violations:` + repeatedStringForViolations + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *QuotaFailure_Violation) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&QuotaFailure_Violation{`, + `Subject:` + fmt.Sprintf("%v", 
this.Subject) + `,`, + `Description:` + fmt.Sprintf("%v", this.Description) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *PreconditionFailure) String() string { + if this == nil { + return "nil" + } + repeatedStringForViolations := "[]*PreconditionFailure_Violation{" + for _, f := range this.Violations { + repeatedStringForViolations += strings.Replace(fmt.Sprintf("%v", f), "PreconditionFailure_Violation", "PreconditionFailure_Violation", 1) + "," + } + repeatedStringForViolations += "}" + s := strings.Join([]string{`&PreconditionFailure{`, + `Violations:` + repeatedStringForViolations + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *PreconditionFailure_Violation) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PreconditionFailure_Violation{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Subject:` + fmt.Sprintf("%v", this.Subject) + `,`, + `Description:` + fmt.Sprintf("%v", this.Description) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *BadRequest) String() string { + if this == nil { + return "nil" + } + repeatedStringForFieldViolations := "[]*BadRequest_FieldViolation{" + for _, f := range this.FieldViolations { + repeatedStringForFieldViolations += strings.Replace(fmt.Sprintf("%v", f), "BadRequest_FieldViolation", "BadRequest_FieldViolation", 1) + "," + } + repeatedStringForFieldViolations += "}" + s := strings.Join([]string{`&BadRequest{`, + `FieldViolations:` + repeatedStringForFieldViolations + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *BadRequest_FieldViolation) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BadRequest_FieldViolation{`, + `Field:` + fmt.Sprintf("%v", this.Field) + `,`, + `Description:` + fmt.Sprintf("%v", this.Description) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *RequestInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RequestInfo{`, + `RequestId:` + fmt.Sprintf("%v", this.RequestId) + `,`, + `ServingData:` + fmt.Sprintf("%v", this.ServingData) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceInfo{`, + `ResourceType:` + fmt.Sprintf("%v", this.ResourceType) + `,`, + `ResourceName:` + fmt.Sprintf("%v", this.ResourceName) + `,`, + `Owner:` + fmt.Sprintf("%v", this.Owner) + `,`, + `Description:` + fmt.Sprintf("%v", this.Description) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *Help) String() string { + if this == nil { + return "nil" + } + repeatedStringForLinks := "[]*Help_Link{" + for _, f := range this.Links { + repeatedStringForLinks += strings.Replace(fmt.Sprintf("%v", f), "Help_Link", "Help_Link", 1) + "," + } + repeatedStringForLinks += "}" + s := strings.Join([]string{`&Help{`, + `Links:` + repeatedStringForLinks + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *Help_Link) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&Help_Link{`, + `Description:` + fmt.Sprintf("%v", this.Description) + `,`, + `Url:` + fmt.Sprintf("%v", this.Url) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *LocalizedMessage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LocalizedMessage{`, + `Locale:` + fmt.Sprintf("%v", this.Locale) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func valueToStringErrorDetails(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *RetryInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowErrorDetails + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RetryInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RetryInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RetryDelay", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowErrorDetails + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthErrorDetails + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthErrorDetails + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RetryDelay == nil { + m.RetryDelay = &types.Duration{} + } + if err := m.RetryDelay.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipErrorDetails(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthErrorDetails + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthErrorDetails + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
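+ // Editor's note, not upstream code: unknown fields are preserved rather than
+ // dropped — skipErrorDetails reports how many bytes the unrecognized field
+ // occupies, and the raw bytes are kept in XXX_unrecognized so they survive an
+ // unmarshal/marshal round trip.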
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DebugInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowErrorDetails + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DebugInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DebugInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StackEntries", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowErrorDetails + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthErrorDetails + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthErrorDetails + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StackEntries = append(m.StackEntries, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Detail", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowErrorDetails + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthErrorDetails + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthErrorDetails + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Detail = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipErrorDetails(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthErrorDetails + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthErrorDetails + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QuotaFailure) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowErrorDetails + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QuotaFailure: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QuotaFailure: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Violations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowErrorDetails + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthErrorDetails + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthErrorDetails + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Violations = append(m.Violations, &QuotaFailure_Violation{}) + if err := m.Violations[len(m.Violations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipErrorDetails(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthErrorDetails + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthErrorDetails + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QuotaFailure_Violation) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowErrorDetails + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Violation: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Violation: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subject", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowErrorDetails + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthErrorDetails + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthErrorDetails + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subject = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowErrorDetails + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthErrorDetails + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthErrorDetails + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Description = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipErrorDetails(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthErrorDetails + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthErrorDetails + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PreconditionFailure) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowErrorDetails + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PreconditionFailure: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PreconditionFailure: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Violations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowErrorDetails + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthErrorDetails + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthErrorDetails + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Violations = append(m.Violations, &PreconditionFailure_Violation{}) + if err := m.Violations[len(m.Violations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipErrorDetails(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthErrorDetails + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthErrorDetails + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PreconditionFailure_Violation) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowErrorDetails + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Violation: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Violation: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowErrorDetails + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthErrorDetails + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthErrorDetails + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subject", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowErrorDetails + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthErrorDetails + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthErrorDetails + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subject = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowErrorDetails + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthErrorDetails + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthErrorDetails + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Description = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipErrorDetails(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthErrorDetails + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthErrorDetails + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BadRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowErrorDetails + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BadRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BadRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldViolations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowErrorDetails + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthErrorDetails + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthErrorDetails + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FieldViolations = append(m.FieldViolations, &BadRequest_FieldViolation{}) + if err := m.FieldViolations[len(m.FieldViolations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipErrorDetails(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthErrorDetails + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthErrorDetails + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BadRequest_FieldViolation) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowErrorDetails + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FieldViolation: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FieldViolation: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Field", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowErrorDetails + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthErrorDetails + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthErrorDetails + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Field = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowErrorDetails + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthErrorDetails + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthErrorDetails + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Description = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipErrorDetails(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthErrorDetails + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthErrorDetails + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RequestInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowErrorDetails + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowErrorDetails + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthErrorDetails + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthErrorDetails + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RequestId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServingData", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowErrorDetails + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthErrorDetails + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthErrorDetails + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServingData = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipErrorDetails(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthErrorDetails + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthErrorDetails + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowErrorDetails + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowErrorDetails + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthErrorDetails + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthErrorDetails + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowErrorDetails + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthErrorDetails + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthErrorDetails + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowErrorDetails + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthErrorDetails + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthErrorDetails + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Owner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowErrorDetails + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthErrorDetails + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthErrorDetails + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Description = 
string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipErrorDetails(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthErrorDetails + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthErrorDetails + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Help) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowErrorDetails + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Help: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Help: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Links", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowErrorDetails + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthErrorDetails + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthErrorDetails + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Links = append(m.Links, &Help_Link{}) + if err := m.Links[len(m.Links)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipErrorDetails(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthErrorDetails + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthErrorDetails + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Help_Link) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowErrorDetails + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Link: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Link: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowErrorDetails + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthErrorDetails + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthErrorDetails + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Description = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Url", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowErrorDetails + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthErrorDetails + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthErrorDetails + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Url = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipErrorDetails(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthErrorDetails + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthErrorDetails + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LocalizedMessage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowErrorDetails + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LocalizedMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LocalizedMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Locale", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowErrorDetails + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthErrorDetails + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthErrorDetails + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Locale = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowErrorDetails + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthErrorDetails + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthErrorDetails + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipErrorDetails(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthErrorDetails + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthErrorDetails + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipErrorDetails(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowErrorDetails + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowErrorDetails + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowErrorDetails + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthErrorDetails + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupErrorDetails + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthErrorDetails + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthErrorDetails = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowErrorDetails = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupErrorDetails = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/gogo/googleapis/google/rpc/error_details.proto b/vendor/github.com/gogo/googleapis/google/rpc/error_details.proto new file mode 100644 index 00000000..0682cc97 --- /dev/null +++ b/vendor/github.com/gogo/googleapis/google/rpc/error_details.proto @@ -0,0 +1,200 @@ +// Copyright 2017 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.rpc; + +import "google/protobuf/duration.proto"; + +option go_package = "rpc"; +option java_multiple_files = true; +option java_outer_classname = "ErrorDetailsProto"; +option java_package = "com.google.rpc"; +option objc_class_prefix = "RPC"; + +// Describes when the clients can retry a failed request. Clients could ignore +// the recommendation here or retry when this information is missing from error +// responses. +// +// It's always recommended that clients should use exponential backoff when +// retrying. +// +// Clients should wait until `retry_delay` amount of time has passed since +// receiving the error response before retrying. 
If retrying requests also
+// fail, clients should use an exponential backoff scheme to gradually increase
+// the delay between retries based on `retry_delay`, until either a maximum
+// number of retries has been reached or a maximum retry delay cap has been
+// reached.
+message RetryInfo {
+  // Clients should wait at least this long between retrying the same request.
+  google.protobuf.Duration retry_delay = 1;
+}
+
+// Describes additional debugging info.
+message DebugInfo {
+  // The stack trace entries indicating where the error occurred.
+  repeated string stack_entries = 1;
+
+  // Additional debugging information provided by the server.
+  string detail = 2;
+}
+
+// Describes how a quota check failed.
+//
+// For example, if a daily limit was exceeded for the calling project,
+// a service could respond with a QuotaFailure detail containing the project
+// id and the description of the quota limit that was exceeded. If the
+// calling project hasn't enabled the service in the developer console, then
+// a service could respond with the project id and set `service_disabled`
+// to true.
+//
+// Also see RetryInfo and Help types for other details about handling a
+// quota failure.
+message QuotaFailure {
+  // A message type used to describe a single quota violation. For example, a
+  // daily quota or a custom quota that was exceeded.
+  message Violation {
+    // The subject on which the quota check failed.
+    // For example, "clientip:<ip address of client>" or
+    // "project:<Google developer project id>".
+    string subject = 1;
+
+    // A description of how the quota check failed. Clients can use this
+    // description to find out more about the quota configuration in the
+    // service's public documentation, or find the relevant quota limit to
+    // adjust through the developer console.
+    //
+    // For example: "Service disabled" or "Daily Limit for read operations
+    // exceeded".
+    string description = 2;
+  }
+
+  // Describes all quota violations.
+  repeated Violation violations = 1;
+}
+
+// Describes what preconditions have failed.
+//
+// For example, if an RPC failed because it required the Terms of Service to be
+// acknowledged, it could list the terms of service violation in the
+// PreconditionFailure message.
+message PreconditionFailure {
+  // A message type used to describe a single precondition failure.
+  message Violation {
+    // The type of PreconditionFailure. We recommend using a service-specific
+    // enum type to define the supported precondition violation types. For
+    // example, "TOS" for "Terms of Service violation".
+    string type = 1;
+
+    // The subject, relative to the type, that failed.
+    // For example, "google.com/cloud" relative to the "TOS" type would
+    // indicate which terms of service is being referenced.
+    string subject = 2;
+
+    // A description of how the precondition failed. Developers can use this
+    // description to understand how to fix the failure.
+    //
+    // For example: "Terms of service not accepted".
+    string description = 3;
+  }
+
+  // Describes all precondition violations.
+  repeated Violation violations = 1;
+}
+
+// Describes violations in a client request. This error type focuses on the
+// syntactic aspects of the request.
+message BadRequest {
+  // A message type used to describe a single bad request field.
+  message FieldViolation {
+    // A path leading to a field in the request body. The value will be a
+    // sequence of dot-separated identifiers that identify a protocol buffer
+    // field. E.g., "field_violations.field" would identify this field.
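
For orientation, a minimal sketch of the retry guidance in the `RetryInfo` comment above, assuming the caller has already extracted `retry_delay` from a RetryInfo detail as a `time.Duration`; the helper name and the delay cap are illustrative, not part of this package:

    package main

    import (
        "fmt"
        "time"
    )

    // nextDelay grows the server-suggested retry_delay exponentially per
    // attempt, capped at maxDelay, as the RetryInfo comment recommends.
    func nextDelay(retryDelay time.Duration, attempt int, maxDelay time.Duration) time.Duration {
        d := retryDelay << uint(attempt) // retryDelay * 2^attempt
        if d <= 0 || d > maxDelay {      // d <= 0 guards against shift overflow
            return maxDelay
        }
        return d
    }

    func main() {
        for attempt := 0; attempt < 5; attempt++ {
            fmt.Println(nextDelay(100*time.Millisecond, attempt, 2*time.Second))
        }
    }

This prints 100ms, 200ms, 400ms, 800ms, 1.6s: each retry doubles the server-suggested base delay until the cap takes over.
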
+    string field = 1;
+
+    // A description of why the request element is bad.
+    string description = 2;
+  }
+
+  // Describes all violations in a client request.
+  repeated FieldViolation field_violations = 1;
+}
+
+// Contains metadata about the request that clients can attach when filing a bug
+// or providing other forms of feedback.
+message RequestInfo {
+  // An opaque string that should only be interpreted by the service generating
+  // it. For example, it can be used to identify requests in the service's logs.
+  string request_id = 1;
+
+  // Any data that was used to serve this request. For example, an encrypted
+  // stack trace that can be sent back to the service provider for debugging.
+  string serving_data = 2;
+}
+
+// Describes the resource that is being accessed.
+message ResourceInfo {
+  // A name for the type of resource being accessed, e.g. "sql table",
+  // "cloud storage bucket", "file", "Google calendar"; or the type URL
+  // of the resource: e.g. "type.googleapis.com/google.pubsub.v1.Topic".
+  string resource_type = 1;
+
+  // The name of the resource being accessed. For example, a shared calendar
+  // name: "example.com_4fghdhgsrgh@group.calendar.google.com", if the current
+  // error is
+  // [google.rpc.Code.PERMISSION_DENIED][google.rpc.Code.PERMISSION_DENIED].
+  string resource_name = 2;
+
+  // The owner of the resource (optional).
+  // For example, "user:<owner email>" or
+  // "project:<Google developer project id>".
+  string owner = 3;
+
+  // Describes what error is encountered when accessing this resource.
+  // For example, updating a cloud project may require the `writer` permission
+  // on the developer console project.
+  string description = 4;
+}
+
+// Provides links to documentation or for performing an out of band action.
+//
+// For example, if a quota check failed with an error indicating the calling
+// project hasn't enabled the accessed service, this can contain a URL pointing
+// directly to the right place in the developer console to flip the bit.
+message Help {
+  // Describes a URL link.
+  message Link {
+    // Describes what the link offers.
+    string description = 1;
+
+    // The URL of the link.
+    string url = 2;
+  }
+
+  // URL(s) pointing to additional information on handling the current error.
+  repeated Link links = 1;
+}
+
+// Provides a localized error message that is safe to return to the user,
+// which can be attached to an RPC error.
+message LocalizedMessage {
+  // The locale used following the specification defined at
+  // http://www.rfc-editor.org/rfc/bcp/bcp47.txt.
+  // Examples are: "en-US", "fr-CH", "es-MX"
+  string locale = 1;
+
+  // The localized error message in the above locale.
+  string message = 2;
+}
diff --git a/vendor/github.com/gogo/googleapis/google/rpc/status.pb.go b/vendor/github.com/gogo/googleapis/google/rpc/status.pb.go
new file mode 100644
index 00000000..59793ba3
--- /dev/null
+++ b/vendor/github.com/gogo/googleapis/google/rpc/status.pb.go
@@ -0,0 +1,731 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: google/rpc/status.proto
+
+package rpc
+
+import (
+	bytes "bytes"
+	fmt "fmt"
+	proto "github.com/gogo/protobuf/proto"
+	types "github.com/gogo/protobuf/types"
+	io "io"
+	math "math"
+	math_bits "math/bits"
+	reflect "reflect"
+	strings "strings"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// The `Status` type defines a logical error model that is suitable for +// different programming environments, including REST APIs and RPC APIs. It is +// used by [gRPC](https://github.com/grpc). The error model is designed to be: +// +// - Simple to use and understand for most users +// - Flexible enough to meet unexpected needs +// +// # Overview +// +// The `Status` message contains three pieces of data: error code, error +// message, and error details. The error code should be an enum value of +// [google.rpc.Code][google.rpc.Code], but it may accept additional error codes +// if needed. The error message should be a developer-facing English message +// that helps developers *understand* and *resolve* the error. If a localized +// user-facing error message is needed, put the localized message in the error +// details or localize it in the client. The optional error details may contain +// arbitrary information about the error. There is a predefined set of error +// detail types in the package `google.rpc` that can be used for common error +// conditions. +// +// # Language mapping +// +// The `Status` message is the logical representation of the error model, but it +// is not necessarily the actual wire format. When the `Status` message is +// exposed in different client libraries and different wire protocols, it can be +// mapped differently. For example, it will likely be mapped to some exceptions +// in Java, but more likely mapped to some error codes in C. +// +// # Other uses +// +// The error model and the `Status` message can be used in a variety of +// environments, either with or without APIs, to provide a +// consistent developer experience across different environments. +// +// Example uses of this error model include: +// +// - Partial errors. If a service needs to return partial errors to the client, +// it may embed the `Status` in the normal response to indicate the partial +// errors. +// +// - Workflow errors. A typical workflow has multiple steps. Each step may +// have a `Status` message for error reporting. +// +// - Batch operations. If a client uses batch request and batch response, the +// `Status` message should be used directly inside batch response, one for +// each error sub-response. +// +// - Asynchronous operations. If an API call embeds asynchronous operation +// results in its response, the status of those operations should be +// represented directly using the `Status` message. +// +// - Logging. If some API errors are stored in logs, the message `Status` could +// be used directly after any stripping needed for security/privacy reasons. +type Status struct { + // The status code, which should be an enum value of + // [google.rpc.Code][google.rpc.Code]. + Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + // A developer-facing error message, which should be in English. Any + // user-facing error message should be localized and sent in the + // [google.rpc.Status.details][google.rpc.Status.details] field, or localized + // by the client. + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + // A list of messages that carry the error details. There is a common set of + // message types for APIs to use. 
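
As a rough usage sketch of the `Details` field documented above, assuming the vendored `rpc` and `types` packages introduced in this patch; the code value and messages are illustrative:

    package main

    import (
        "fmt"

        "github.com/gogo/googleapis/google/rpc"
        "github.com/gogo/protobuf/types"
    )

    func main() {
        // Pack a LocalizedMessage error detail into a google.protobuf.Any.
        detail, err := types.MarshalAny(&rpc.LocalizedMessage{
            Locale:  "en-US",
            Message: "The resource quota was exceeded.",
        })
        if err != nil {
            panic(err)
        }
        st := &rpc.Status{
            Code:    8, // google.rpc.Code value for RESOURCE_EXHAUSTED
            Message: "quota exceeded",
            Details: []*types.Any{detail},
        }
        fmt.Println(st.String())
    }

Any of the detail messages from error_details.proto can be packed the same way; receivers unpack by inspecting each Any's type URL.
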
+ Details []*types.Any `protobuf:"bytes,3,rep,name=details,proto3" json:"details,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Status) Reset() { *m = Status{} } +func (*Status) ProtoMessage() {} +func (*Status) Descriptor() ([]byte, []int) { + return fileDescriptor_24d244abaf643bfe, []int{0} +} +func (m *Status) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Status.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Status) XXX_Merge(src proto.Message) { + xxx_messageInfo_Status.Merge(m, src) +} +func (m *Status) XXX_Size() int { + return m.Size() +} +func (m *Status) XXX_DiscardUnknown() { + xxx_messageInfo_Status.DiscardUnknown(m) +} + +var xxx_messageInfo_Status proto.InternalMessageInfo + +func (m *Status) GetCode() int32 { + if m != nil { + return m.Code + } + return 0 +} + +func (m *Status) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +func (m *Status) GetDetails() []*types.Any { + if m != nil { + return m.Details + } + return nil +} + +func (*Status) XXX_MessageName() string { + return "google.rpc.Status" +} +func init() { + proto.RegisterType((*Status)(nil), "google.rpc.Status") +} + +func init() { proto.RegisterFile("google/rpc/status.proto", fileDescriptor_24d244abaf643bfe) } + +var fileDescriptor_24d244abaf643bfe = []byte{ + // 235 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4f, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x2a, 0x48, 0xd6, 0x2f, 0x2e, 0x49, 0x2c, 0x29, 0x2d, 0xd6, 0x2b, 0x28, + 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x82, 0x48, 0xe8, 0x15, 0x15, 0x24, 0x4b, 0x49, 0x42, 0x15, 0x81, + 0x65, 0x92, 0x4a, 0xd3, 0xf4, 0x13, 0xf3, 0x2a, 0x21, 0xca, 0x94, 0xd2, 0xb8, 0xd8, 0x82, 0xc1, + 0xda, 0x84, 0x84, 0xb8, 0x58, 0x92, 0xf3, 0x53, 0x52, 0x25, 0x18, 0x15, 0x18, 0x35, 0x58, 0x83, + 0xc0, 0x6c, 0x21, 0x09, 0x2e, 0xf6, 0xdc, 0xd4, 0xe2, 0xe2, 0xc4, 0xf4, 0x54, 0x09, 0x26, 0x05, + 0x46, 0x0d, 0xce, 0x20, 0x18, 0x57, 0x48, 0x8f, 0x8b, 0x3d, 0x25, 0xb5, 0x24, 0x31, 0x33, 0xa7, + 0x58, 0x82, 0x59, 0x81, 0x59, 0x83, 0xdb, 0x48, 0x44, 0x0f, 0x6a, 0x21, 0xcc, 0x12, 0x3d, 0xc7, + 0xbc, 0xca, 0x20, 0x98, 0x22, 0xa7, 0xb8, 0x0b, 0x0f, 0xe5, 0x18, 0x6e, 0x3c, 0x94, 0x63, 0xf8, + 0xf0, 0x50, 0x8e, 0xf1, 0xc7, 0x43, 0x39, 0xc6, 0x86, 0x47, 0x72, 0x8c, 0x2b, 0x1e, 0xc9, 0x31, + 0x9e, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x2f, 0x1e, 0xc9, + 0x31, 0x7c, 0x00, 0x89, 0x3f, 0x96, 0x63, 0x3c, 0xf1, 0x58, 0x8e, 0x91, 0x8b, 0x2f, 0x39, 0x3f, + 0x57, 0x0f, 0xe1, 0x11, 0x27, 0x6e, 0x88, 0x5b, 0x03, 0x40, 0x56, 0x04, 0x30, 0x46, 0x31, 0x17, + 0x15, 0x24, 0x2f, 0x62, 0x62, 0x0e, 0x0a, 0x70, 0x4e, 0x62, 0x03, 0x5b, 0x6b, 0x0c, 0x08, 0x00, + 0x00, 0xff, 0xff, 0xaa, 0x06, 0xa1, 0xaa, 0x10, 0x01, 0x00, 0x00, +} + +func (this *Status) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Status) + if !ok { + that2, ok := that.(Status) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Code != that1.Code { + if this.Code < that1.Code { + 
return -1 + } + return 1 + } + if this.Message != that1.Message { + if this.Message < that1.Message { + return -1 + } + return 1 + } + if len(this.Details) != len(that1.Details) { + if len(this.Details) < len(that1.Details) { + return -1 + } + return 1 + } + for i := range this.Details { + if c := this.Details[i].Compare(that1.Details[i]); c != 0 { + return c + } + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Status) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Status) + if !ok { + that2, ok := that.(Status) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Code != that1.Code { + return false + } + if this.Message != that1.Message { + return false + } + if len(this.Details) != len(that1.Details) { + return false + } + for i := range this.Details { + if !this.Details[i].Equal(that1.Details[i]) { + return false + } + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Status) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&rpc.Status{") + s = append(s, "Code: "+fmt.Sprintf("%#v", this.Code)+",\n") + s = append(s, "Message: "+fmt.Sprintf("%#v", this.Message)+",\n") + if this.Details != nil { + s = append(s, "Details: "+fmt.Sprintf("%#v", this.Details)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringStatus(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Status) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Status) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Status) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Details) > 0 { + for iNdEx := len(m.Details) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Details[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintStatus(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Message) > 0 { + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintStatus(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x12 + } + if m.Code != 0 { + i = encodeVarintStatus(dAtA, i, uint64(m.Code)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintStatus(dAtA []byte, offset int, v uint64) int { + offset -= sovStatus(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func NewPopulatedStatus(r randyStatus, easy bool) *Status { + this := &Status{} + this.Code = int32(r.Int31()) + if r.Intn(2) == 0 { + this.Code *= -1 + } + this.Message = string(randStringStatus(r)) + if r.Intn(5) != 0 { + 
v1 := r.Intn(5) + this.Details = make([]*types.Any, v1) + for i := 0; i < v1; i++ { + this.Details[i] = types.NewPopulatedAny(r, easy) + } + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedStatus(r, 4) + } + return this +} + +type randyStatus interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneStatus(r randyStatus) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringStatus(r randyStatus) string { + v2 := r.Intn(100) + tmps := make([]rune, v2) + for i := 0; i < v2; i++ { + tmps[i] = randUTF8RuneStatus(r) + } + return string(tmps) +} +func randUnrecognizedStatus(r randyStatus, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldStatus(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldStatus(dAtA []byte, r randyStatus, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = encodeVarintPopulateStatus(dAtA, uint64(key)) + v3 := r.Int63() + if r.Intn(2) == 0 { + v3 *= -1 + } + dAtA = encodeVarintPopulateStatus(dAtA, uint64(v3)) + case 1: + dAtA = encodeVarintPopulateStatus(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateStatus(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateStatus(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateStatus(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateStatus(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *Status) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Code != 0 { + n += 1 + sovStatus(uint64(m.Code)) + } + l = len(m.Message) + if l > 0 { + n += 1 + l + sovStatus(uint64(l)) + } + if len(m.Details) > 0 { + for _, e := range m.Details { + l = e.Size() + n += 1 + l + sovStatus(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovStatus(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozStatus(x uint64) (n int) { + return sovStatus(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Status) String() string { + if this == nil { + return "nil" + } + repeatedStringForDetails := "[]*Any{" + for _, f := range this.Details { + repeatedStringForDetails += strings.Replace(fmt.Sprintf("%v", f), "Any", "types.Any", 1) + "," + } + repeatedStringForDetails += "}" + s := strings.Join([]string{`&Status{`, + `Code:` + fmt.Sprintf("%v", this.Code) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Details:` + repeatedStringForDetails + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func valueToStringStatus(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + 
return fmt.Sprintf("*%v", pv) +} +func (m *Status) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStatus + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Status: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Status: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Code", wireType) + } + m.Code = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStatus + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Code |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStatus + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthStatus + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthStatus + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Details", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStatus + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStatus + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthStatus + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Details = append(m.Details, &types.Any{}) + if err := m.Details[len(m.Details)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipStatus(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthStatus + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthStatus + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipStatus(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowStatus + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowStatus + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowStatus + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthStatus + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupStatus + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthStatus + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthStatus = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowStatus = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupStatus = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/gogo/googleapis/google/rpc/status.proto b/vendor/github.com/gogo/googleapis/google/rpc/status.proto new file mode 100644 index 00000000..abcd4534 --- /dev/null +++ b/vendor/github.com/gogo/googleapis/google/rpc/status.proto @@ -0,0 +1,94 @@ +// Copyright 2017 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.rpc; + +import "google/protobuf/any.proto"; + +option go_package = "rpc"; +option java_multiple_files = true; +option java_outer_classname = "StatusProto"; +option java_package = "com.google.rpc"; +option objc_class_prefix = "RPC"; + +// The `Status` type defines a logical error model that is suitable for +// different programming environments, including REST APIs and RPC APIs. It is +// used by [gRPC](https://github.com/grpc). The error model is designed to be: +// +// - Simple to use and understand for most users +// - Flexible enough to meet unexpected needs +// +// # Overview +// +// The `Status` message contains three pieces of data: error code, error +// message, and error details. The error code should be an enum value of +// [google.rpc.Code][google.rpc.Code], but it may accept additional error codes +// if needed. 
The error message should be a developer-facing English message +// that helps developers *understand* and *resolve* the error. If a localized +// user-facing error message is needed, put the localized message in the error +// details or localize it in the client. The optional error details may contain +// arbitrary information about the error. There is a predefined set of error +// detail types in the package `google.rpc` that can be used for common error +// conditions. +// +// # Language mapping +// +// The `Status` message is the logical representation of the error model, but it +// is not necessarily the actual wire format. When the `Status` message is +// exposed in different client libraries and different wire protocols, it can be +// mapped differently. For example, it will likely be mapped to some exceptions +// in Java, but more likely mapped to some error codes in C. +// +// # Other uses +// +// The error model and the `Status` message can be used in a variety of +// environments, either with or without APIs, to provide a +// consistent developer experience across different environments. +// +// Example uses of this error model include: +// +// - Partial errors. If a service needs to return partial errors to the client, +// it may embed the `Status` in the normal response to indicate the partial +// errors. +// +// - Workflow errors. A typical workflow has multiple steps. Each step may +// have a `Status` message for error reporting. +// +// - Batch operations. If a client uses batch request and batch response, the +// `Status` message should be used directly inside batch response, one for +// each error sub-response. +// +// - Asynchronous operations. If an API call embeds asynchronous operation +// results in its response, the status of those operations should be +// represented directly using the `Status` message. +// +// - Logging. If some API errors are stored in logs, the message `Status` could +// be used directly after any stripping needed for security/privacy reasons. +message Status { + // The status code, which should be an enum value of + // [google.rpc.Code][google.rpc.Code]. + int32 code = 1; + + // A developer-facing error message, which should be in English. Any + // user-facing error message should be localized and sent in the + // [google.rpc.Status.details][google.rpc.Status.details] field, or localized + // by the client. + string message = 2; + + // A list of messages that carry the error details. There is a common set of + // message types for APIs to use. + repeated google.protobuf.Any details = 3; +} diff --git a/vendor/github.com/gogo/protobuf/types/any.go b/vendor/github.com/gogo/protobuf/types/any.go new file mode 100644 index 00000000..df4787de --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/any.go @@ -0,0 +1,140 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package types
+
+// This file implements functions to marshal proto.Message to/from
+// google.protobuf.Any message.
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+
+	"github.com/gogo/protobuf/proto"
+)
+
+const googleApis = "type.googleapis.com/"
+
+// AnyMessageName returns the name of the message contained in a google.protobuf.Any message.
+//
+// Note that regular type assertions should be done using the Is
+// function. AnyMessageName is provided for less common use cases like filtering a
+// sequence of Any messages based on a set of allowed message type names.
+func AnyMessageName(any *Any) (string, error) {
+	if any == nil {
+		return "", fmt.Errorf("message is nil")
+	}
+	slash := strings.LastIndex(any.TypeUrl, "/")
+	if slash < 0 {
+		return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl)
+	}
+	return any.TypeUrl[slash+1:], nil
+}
+
+// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any.
+func MarshalAny(pb proto.Message) (*Any, error) {
+	value, err := proto.Marshal(pb)
+	if err != nil {
+		return nil, err
+	}
+	return &Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil
+}
+
+// DynamicAny is a value that can be passed to UnmarshalAny to automatically
+// allocate a proto.Message for the type specified in a google.protobuf.Any
+// message. The allocated message is stored in the embedded proto.Message.
+//
+// Example:
+//
+//   var x ptypes.DynamicAny
+//   if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
+//   fmt.Printf("unmarshaled message: %v", x.Message)
+type DynamicAny struct {
+	proto.Message
+}
+
+// EmptyAny returns a new proto.Message of the type specified in a
+// google.protobuf.Any message. It returns an error if the corresponding
+// message type isn't linked in.
+func EmptyAny(any *Any) (proto.Message, error) {
+	aname, err := AnyMessageName(any)
+	if err != nil {
+		return nil, err
+	}
+
+	t := proto.MessageType(aname)
+	if t == nil {
+		return nil, fmt.Errorf("any: message type %q isn't linked in", aname)
+	}
+	return reflect.New(t.Elem()).Interface().(proto.Message), nil
+}
+
+// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any
+// message and places the decoded result in pb. It returns an error if the type of
+// the contents of the Any message does not match the type of the pb message.
+//
+// pb can be a proto.Message, or a *DynamicAny.
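
A small round-trip sketch of `MarshalAny`/`UnmarshalAny` and `DynamicAny` as documented above, assuming the vendored `types` package; the `Duration` message is just a convenient linked-in type:

    package main

    import (
        "fmt"

        "github.com/gogo/protobuf/types"
    )

    func main() {
        // Pack: the TypeUrl becomes "type.googleapis.com/google.protobuf.Duration".
        any, err := types.MarshalAny(&types.Duration{Seconds: 2})
        if err != nil {
            panic(err)
        }

        // Unpack into a typed message when the expected type is known up front.
        var d types.Duration
        if err := types.UnmarshalAny(any, &d); err != nil {
            panic(err)
        }
        fmt.Println(d.Seconds) // 2

        // Or let DynamicAny allocate the right type from the type URL.
        var dyn types.DynamicAny
        if err := types.UnmarshalAny(any, &dyn); err != nil {
            panic(err)
        }
        fmt.Printf("%T\n", dyn.Message) // *types.Duration
    }
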
+func UnmarshalAny(any *Any, pb proto.Message) error { + if d, ok := pb.(*DynamicAny); ok { + if d.Message == nil { + var err error + d.Message, err = EmptyAny(any) + if err != nil { + return err + } + } + return UnmarshalAny(any, d.Message) + } + + aname, err := AnyMessageName(any) + if err != nil { + return err + } + + mname := proto.MessageName(pb) + if aname != mname { + return fmt.Errorf("mismatched message type: got %q want %q", aname, mname) + } + return proto.Unmarshal(any.Value, pb) +} + +// Is returns true if any value contains a given message type. +func Is(any *Any, pb proto.Message) bool { + // The following is equivalent to AnyMessageName(any) == proto.MessageName(pb), + // but it avoids scanning TypeUrl for the slash. + if any == nil { + return false + } + name := proto.MessageName(pb) + prefix := len(any.TypeUrl) - len(name) + return prefix >= 1 && any.TypeUrl[prefix-1] == '/' && any.TypeUrl[prefix:] == name +} diff --git a/vendor/github.com/gogo/protobuf/types/any.pb.go b/vendor/github.com/gogo/protobuf/types/any.pb.go new file mode 100644 index 00000000..98e269d5 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/any.pb.go @@ -0,0 +1,697 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: google/protobuf/any.proto + +package types + +import ( + bytes "bytes" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// `Any` contains an arbitrary serialized protocol buffer message along with a +// URL that describes the type of the serialized message. +// +// Protobuf library provides support to pack/unpack Any values in the form +// of utility functions or additional generated methods of the Any type. +// +// Example 1: Pack and unpack a message in C++. +// +// Foo foo = ...; +// Any any; +// any.PackFrom(foo); +// ... +// if (any.UnpackTo(&foo)) { +// ... +// } +// +// Example 2: Pack and unpack a message in Java. +// +// Foo foo = ...; +// Any any = Any.pack(foo); +// ... +// if (any.is(Foo.class)) { +// foo = any.unpack(Foo.class); +// } +// +// Example 3: Pack and unpack a message in Python. +// +// foo = Foo(...) +// any = Any() +// any.Pack(foo) +// ... +// if any.Is(Foo.DESCRIPTOR): +// any.Unpack(foo) +// ... +// +// Example 4: Pack and unpack a message in Go +// +// foo := &pb.Foo{...} +// any, err := ptypes.MarshalAny(foo) +// ... +// foo := &pb.Foo{} +// if err := ptypes.UnmarshalAny(any, foo); err != nil { +// ... +// } +// +// The pack methods provided by protobuf library will by default use +// 'type.googleapis.com/full.type.name' as the type URL and the unpack +// methods only use the fully qualified type name after the last '/' +// in the type URL, for example "foo.bar.com/x/y.z" will yield type +// name "y.z". +// +// +// JSON +// ==== +// The JSON representation of an `Any` value uses the regular +// representation of the deserialized, embedded message, with an +// additional field `@type` which contains the type URL. 
Example:
+//
+//     package google.profile;
+//     message Person {
+//       string first_name = 1;
+//       string last_name = 2;
+//     }
+//
+//     {
+//       "@type": "type.googleapis.com/google.profile.Person",
+//       "firstName": <string>,
+//       "lastName": <string>
+//     }
+//
+// If the embedded message type is well-known and has a custom JSON
+// representation, that representation will be embedded adding a field
+// `value` which holds the custom JSON in addition to the `@type`
+// field. Example (for message [google.protobuf.Duration][]):
+//
+//     {
+//       "@type": "type.googleapis.com/google.protobuf.Duration",
+//       "value": "1.212s"
+//     }
+//
+type Any struct {
+	// A URL/resource name that uniquely identifies the type of the serialized
+	// protocol buffer message. This string must contain at least
+	// one "/" character. The last segment of the URL's path must represent
+	// the fully qualified name of the type (as in
+	// `path/google.protobuf.Duration`). The name should be in a canonical form
+	// (e.g., leading "." is not accepted).
+	//
+	// In practice, teams usually precompile into the binary all types that they
+	// expect it to use in the context of Any. However, for URLs which use the
+	// scheme `http`, `https`, or no scheme, one can optionally set up a type
+	// server that maps type URLs to message definitions as follows:
+	//
+	// * If no scheme is provided, `https` is assumed.
+	// * An HTTP GET on the URL must yield a [google.protobuf.Type][]
+	//   value in binary format, or produce an error.
+	// * Applications are allowed to cache lookup results based on the
+	//   URL, or have them precompiled into a binary to avoid any
+	//   lookup. Therefore, binary compatibility needs to be preserved
+	//   on changes to types. (Use versioned type names to manage
+	//   breaking changes.)
+	//
+	// Note: this functionality is not currently available in the official
+	// protobuf release, and it is not used for type URLs beginning with
+	// type.googleapis.com.
+	//
+	// Schemes other than `http`, `https` (or the empty scheme) might be
+	// used with implementation specific semantics.
+	//
+	TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"`
+	// Must be a valid serialized protocol buffer of the above specified type.
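
A brief sketch of the type-URL convention described in the `TypeUrl` comment above, using the `Is` helper defined earlier in this vendored package; `types.Empty` is assumed to be linked in, as it is elsewhere in this package:

    package main

    import (
        "fmt"

        "github.com/gogo/protobuf/types"
    )

    func main() {
        any, err := types.MarshalAny(&types.Empty{})
        if err != nil {
            panic(err)
        }

        // The last path segment of the URL is the fully qualified message name.
        fmt.Println(any.TypeUrl) // type.googleapis.com/google.protobuf.Empty

        // Is matches on that suffix without unmarshaling the payload.
        fmt.Println(types.Is(any, &types.Empty{}))    // true
        fmt.Println(types.Is(any, &types.Duration{})) // false
    }
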
+ Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Any) Reset() { *m = Any{} } +func (*Any) ProtoMessage() {} +func (*Any) Descriptor() ([]byte, []int) { + return fileDescriptor_b53526c13ae22eb4, []int{0} +} +func (*Any) XXX_WellKnownType() string { return "Any" } +func (m *Any) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Any) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Any.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Any) XXX_Merge(src proto.Message) { + xxx_messageInfo_Any.Merge(m, src) +} +func (m *Any) XXX_Size() int { + return m.Size() +} +func (m *Any) XXX_DiscardUnknown() { + xxx_messageInfo_Any.DiscardUnknown(m) +} + +var xxx_messageInfo_Any proto.InternalMessageInfo + +func (m *Any) GetTypeUrl() string { + if m != nil { + return m.TypeUrl + } + return "" +} + +func (m *Any) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func (*Any) XXX_MessageName() string { + return "google.protobuf.Any" +} +func init() { + proto.RegisterType((*Any)(nil), "google.protobuf.Any") +} + +func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_b53526c13ae22eb4) } + +var fileDescriptor_b53526c13ae22eb4 = []byte{ + // 211 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4, + 0x03, 0x73, 0x84, 0xf8, 0x21, 0x52, 0x7a, 0x30, 0x29, 0x25, 0x33, 0x2e, 0x66, 0xc7, 0xbc, 0x4a, + 0x21, 0x49, 0x2e, 0x8e, 0x92, 0xca, 0x82, 0xd4, 0xf8, 0xd2, 0xa2, 0x1c, 0x09, 0x46, 0x05, 0x46, + 0x0d, 0xce, 0x20, 0x76, 0x10, 0x3f, 0xb4, 0x28, 0x47, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7, + 0x34, 0x55, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xc2, 0x71, 0xaa, 0xbf, 0xf1, 0x50, 0x8e, + 0xe1, 0xc3, 0x43, 0x39, 0xc6, 0x1f, 0x0f, 0xe5, 0x18, 0x1b, 0x1e, 0xc9, 0x31, 0xae, 0x78, 0x24, + 0xc7, 0x78, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0xbe, 0x78, + 0x24, 0xc7, 0xf0, 0x01, 0x24, 0xfe, 0x58, 0x8e, 0xf1, 0xc4, 0x63, 0x39, 0x46, 0x2e, 0xe1, 0xe4, + 0xfc, 0x5c, 0x3d, 0x34, 0xeb, 0x9d, 0x38, 0x1c, 0xf3, 0x2a, 0x03, 0x40, 0x9c, 0x00, 0xc6, 0x28, + 0x56, 0x90, 0x8d, 0xc5, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c, 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x94, + 0x06, 0x40, 0x95, 0xea, 0x85, 0xa7, 0xe6, 0xe4, 0x78, 0xe7, 0xe5, 0x97, 0xe7, 0x85, 0x80, 0x94, + 0x25, 0xb1, 0x81, 0xcd, 0x30, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xb7, 0x81, 0x82, 0xd3, 0xed, + 0x00, 0x00, 0x00, +} + +func (this *Any) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Any) + if !ok { + that2, ok := that.(Any) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.TypeUrl != that1.TypeUrl { + if this.TypeUrl < that1.TypeUrl { + return -1 + } + return 1 + } + if c := bytes.Compare(this.Value, that1.Value); c != 0 { + return c + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Any) Equal(that 
interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Any) + if !ok { + that2, ok := that.(Any) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.TypeUrl != that1.TypeUrl { + return false + } + if !bytes.Equal(this.Value, that1.Value) { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Any) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&types.Any{") + s = append(s, "TypeUrl: "+fmt.Sprintf("%#v", this.TypeUrl)+",\n") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringAny(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Any) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Any) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Any) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintAny(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + } + if len(m.TypeUrl) > 0 { + i -= len(m.TypeUrl) + copy(dAtA[i:], m.TypeUrl) + i = encodeVarintAny(dAtA, i, uint64(len(m.TypeUrl))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintAny(dAtA []byte, offset int, v uint64) int { + offset -= sovAny(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func NewPopulatedAny(r randyAny, easy bool) *Any { + this := &Any{} + this.TypeUrl = string(randStringAny(r)) + v1 := r.Intn(100) + this.Value = make([]byte, v1) + for i := 0; i < v1; i++ { + this.Value[i] = byte(r.Intn(256)) + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedAny(r, 3) + } + return this +} + +type randyAny interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneAny(r randyAny) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringAny(r randyAny) string { + v2 := r.Intn(100) + tmps := make([]rune, v2) + for i := 0; i < v2; i++ { + tmps[i] = randUTF8RuneAny(r) + } + return string(tmps) +} +func randUnrecognizedAny(r randyAny, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldAny(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldAny(dAtA []byte, r randyAny, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + 
switch wire { + case 0: + dAtA = encodeVarintPopulateAny(dAtA, uint64(key)) + v3 := r.Int63() + if r.Intn(2) == 0 { + v3 *= -1 + } + dAtA = encodeVarintPopulateAny(dAtA, uint64(v3)) + case 1: + dAtA = encodeVarintPopulateAny(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateAny(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateAny(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateAny(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateAny(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *Any) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TypeUrl) + if l > 0 { + n += 1 + l + sovAny(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovAny(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovAny(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozAny(x uint64) (n int) { + return sovAny(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Any) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Any{`, + `TypeUrl:` + fmt.Sprintf("%v", this.TypeUrl) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func valueToStringAny(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Any) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAny + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Any: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Any: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TypeUrl", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAny + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAny + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthAny + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TypeUrl = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAny + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthAny + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthAny + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) + if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAny(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAny + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthAny + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipAny(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAny + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAny + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAny + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthAny + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupAny + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthAny + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthAny = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowAny = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupAny = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/gogo/protobuf/types/api.pb.go b/vendor/github.com/gogo/protobuf/types/api.pb.go new file mode 100644 index 00000000..58bf4b53 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/api.pb.go @@ -0,0 +1,2143 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: google/protobuf/api.proto + +package types + +import ( + bytes "bytes" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Api is a light-weight descriptor for an API Interface. 
+// +// Interfaces are also described as "protocol buffer services" in some contexts, +// such as by the "service" keyword in a .proto file, but they are different +// from API Services, which represent a concrete implementation of an interface +// as opposed to simply a description of methods and bindings. They are also +// sometimes simply referred to as "APIs" in other contexts, such as the name of +// this message itself. See https://cloud.google.com/apis/design/glossary for +// detailed terminology. +type Api struct { + // The fully qualified name of this interface, including package name + // followed by the interface's simple name. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The methods of this interface, in unspecified order. + Methods []*Method `protobuf:"bytes,2,rep,name=methods,proto3" json:"methods,omitempty"` + // Any metadata attached to the interface. + Options []*Option `protobuf:"bytes,3,rep,name=options,proto3" json:"options,omitempty"` + // A version string for this interface. If specified, must have the form + // `major-version.minor-version`, as in `1.10`. If the minor version is + // omitted, it defaults to zero. If the entire version field is empty, the + // major version is derived from the package name, as outlined below. If the + // field is not empty, the version in the package name will be verified to be + // consistent with what is provided here. + // + // The versioning schema uses [semantic + // versioning](http://semver.org) where the major version number + // indicates a breaking change and the minor version an additive, + // non-breaking change. Both version numbers are signals to users + // what to expect from different versions, and should be carefully + // chosen based on the product plan. + // + // The major version is also reflected in the package name of the + // interface, which must end in `v<major-version>`, as in + // `google.feature.v1`. For major versions 0 and 1, the suffix can + // be omitted. Zero major versions must only be used for + // experimental, non-GA interfaces. + // + // + Version string `protobuf:"bytes,4,opt,name=version,proto3" json:"version,omitempty"` + // Source context for the protocol buffer service represented by this + // message. + SourceContext *SourceContext `protobuf:"bytes,5,opt,name=source_context,json=sourceContext,proto3" json:"source_context,omitempty"` + // Included interfaces. See [Mixin][]. + Mixins []*Mixin `protobuf:"bytes,6,rep,name=mixins,proto3" json:"mixins,omitempty"` + // The source syntax of the service.
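+// An illustrative aside, not emitted by protoc-gen-gogo: under the
+// versioning rules above, a populated descriptor could look like
+//
+//	api := &Api{
+//		Name:    "google.feature.v1.FeatureService", // hypothetical interface name
+//		Version: "1.10",                             // major version 1, minor version 10
+//		Syntax:  Syntax_SYNTAX_PROTO3,               // assuming a proto3 source file
+//	}
+//
+// where the `v1` package suffix matches the major version.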
+ Syntax Syntax `protobuf:"varint,7,opt,name=syntax,proto3,enum=google.protobuf.Syntax" json:"syntax,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Api) Reset() { *m = Api{} } +func (*Api) ProtoMessage() {} +func (*Api) Descriptor() ([]byte, []int) { + return fileDescriptor_a2ec32096296c143, []int{0} +} +func (m *Api) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Api) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Api.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Api) XXX_Merge(src proto.Message) { + xxx_messageInfo_Api.Merge(m, src) +} +func (m *Api) XXX_Size() int { + return m.Size() +} +func (m *Api) XXX_DiscardUnknown() { + xxx_messageInfo_Api.DiscardUnknown(m) +} + +var xxx_messageInfo_Api proto.InternalMessageInfo + +func (m *Api) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Api) GetMethods() []*Method { + if m != nil { + return m.Methods + } + return nil +} + +func (m *Api) GetOptions() []*Option { + if m != nil { + return m.Options + } + return nil +} + +func (m *Api) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func (m *Api) GetSourceContext() *SourceContext { + if m != nil { + return m.SourceContext + } + return nil +} + +func (m *Api) GetMixins() []*Mixin { + if m != nil { + return m.Mixins + } + return nil +} + +func (m *Api) GetSyntax() Syntax { + if m != nil { + return m.Syntax + } + return Syntax_SYNTAX_PROTO2 +} + +func (*Api) XXX_MessageName() string { + return "google.protobuf.Api" +} + +// Method represents a method of an API interface. +type Method struct { + // The simple name of this method. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // A URL of the input message type. + RequestTypeUrl string `protobuf:"bytes,2,opt,name=request_type_url,json=requestTypeUrl,proto3" json:"request_type_url,omitempty"` + // If true, the request is streamed. + RequestStreaming bool `protobuf:"varint,3,opt,name=request_streaming,json=requestStreaming,proto3" json:"request_streaming,omitempty"` + // The URL of the output message type. + ResponseTypeUrl string `protobuf:"bytes,4,opt,name=response_type_url,json=responseTypeUrl,proto3" json:"response_type_url,omitempty"` + // If true, the response is streamed. + ResponseStreaming bool `protobuf:"varint,5,opt,name=response_streaming,json=responseStreaming,proto3" json:"response_streaming,omitempty"` + // Any metadata attached to the method. + Options []*Option `protobuf:"bytes,6,rep,name=options,proto3" json:"options,omitempty"` + // The source syntax of this method. 
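+// An illustrative aside, not emitted by protoc-gen-gogo: the GetAcl
+// method from the mixin example documented below would be described
+// roughly as
+//
+//	m := &Method{
+//		Name:            "GetAcl",
+//		RequestTypeUrl:  "type.googleapis.com/google.acl.v1.GetAclRequest",
+//		ResponseTypeUrl: "type.googleapis.com/google.acl.v1.Acl",
+//	}
+//
+// assuming the conventional type.googleapis.com type URL prefix, which
+// this file does not itself mandate.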
+ Syntax Syntax `protobuf:"varint,7,opt,name=syntax,proto3,enum=google.protobuf.Syntax" json:"syntax,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Method) Reset() { *m = Method{} } +func (*Method) ProtoMessage() {} +func (*Method) Descriptor() ([]byte, []int) { + return fileDescriptor_a2ec32096296c143, []int{1} +} +func (m *Method) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Method) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Method.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Method) XXX_Merge(src proto.Message) { + xxx_messageInfo_Method.Merge(m, src) +} +func (m *Method) XXX_Size() int { + return m.Size() +} +func (m *Method) XXX_DiscardUnknown() { + xxx_messageInfo_Method.DiscardUnknown(m) +} + +var xxx_messageInfo_Method proto.InternalMessageInfo + +func (m *Method) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Method) GetRequestTypeUrl() string { + if m != nil { + return m.RequestTypeUrl + } + return "" +} + +func (m *Method) GetRequestStreaming() bool { + if m != nil { + return m.RequestStreaming + } + return false +} + +func (m *Method) GetResponseTypeUrl() string { + if m != nil { + return m.ResponseTypeUrl + } + return "" +} + +func (m *Method) GetResponseStreaming() bool { + if m != nil { + return m.ResponseStreaming + } + return false +} + +func (m *Method) GetOptions() []*Option { + if m != nil { + return m.Options + } + return nil +} + +func (m *Method) GetSyntax() Syntax { + if m != nil { + return m.Syntax + } + return Syntax_SYNTAX_PROTO2 +} + +func (*Method) XXX_MessageName() string { + return "google.protobuf.Method" +} + +// Declares an API Interface to be included in this interface. The including +// interface must redeclare all the methods from the included interface, but +// documentation and options are inherited as follows: +// +// - If after comment and whitespace stripping, the documentation +// string of the redeclared method is empty, it will be inherited +// from the original method. +// +// - Each annotation belonging to the service config (http, +// visibility) which is not set in the redeclared method will be +// inherited. +// +// - If an http annotation is inherited, the path pattern will be +// modified as follows. Any version prefix will be replaced by the +// version of the including interface plus the [root][] path if +// specified. +// +// Example of a simple mixin: +// +// package google.acl.v1; +// service AccessControl { +// // Get the underlying ACL object. +// rpc GetAcl(GetAclRequest) returns (Acl) { +// option (google.api.http).get = "/v1/{resource=**}:getAcl"; +// } +// } +// +// package google.storage.v2; +// service Storage { +// rpc GetAcl(GetAclRequest) returns (Acl); +// +// // Get a data record. +// rpc GetData(GetDataRequest) returns (Data) { +// option (google.api.http).get = "/v2/{resource=**}"; +// } +// } +// +// Example of a mixin configuration: +// +// apis: +// - name: google.storage.v2.Storage +// mixins: +// - name: google.acl.v1.AccessControl +// +// The mixin construct implies that all methods in `AccessControl` are +// also declared with the same name and request/response types in +// `Storage`.
A documentation generator or annotation processor will +// see the effective `Storage.GetAcl` method after inheriting +// documentation and annotations as follows: +// +// service Storage { +// // Get the underlying ACL object. +// rpc GetAcl(GetAclRequest) returns (Acl) { +// option (google.api.http).get = "/v2/{resource=**}:getAcl"; +// } +// ... +// } +// +// Note how the version in the path pattern changed from `v1` to `v2`. +// +// If the `root` field in the mixin is specified, it should be a +// relative path under which inherited HTTP paths are placed. Example: +// +// apis: +// - name: google.storage.v2.Storage +// mixins: +// - name: google.acl.v1.AccessControl +// root: acls +// +// This implies the following inherited HTTP annotation: +// +// service Storage { +// // Get the underlying ACL object. +// rpc GetAcl(GetAclRequest) returns (Acl) { +// option (google.api.http).get = "/v2/acls/{resource=**}:getAcl"; +// } +// ... +// } +type Mixin struct { + // The fully qualified name of the interface which is included. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // If non-empty specifies a path under which inherited HTTP paths + // are rooted. + Root string `protobuf:"bytes,2,opt,name=root,proto3" json:"root,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Mixin) Reset() { *m = Mixin{} } +func (*Mixin) ProtoMessage() {} +func (*Mixin) Descriptor() ([]byte, []int) { + return fileDescriptor_a2ec32096296c143, []int{2} +} +func (m *Mixin) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Mixin) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Mixin.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Mixin) XXX_Merge(src proto.Message) { + xxx_messageInfo_Mixin.Merge(m, src) +} +func (m *Mixin) XXX_Size() int { + return m.Size() +} +func (m *Mixin) XXX_DiscardUnknown() { + xxx_messageInfo_Mixin.DiscardUnknown(m) +} + +var xxx_messageInfo_Mixin proto.InternalMessageInfo + +func (m *Mixin) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Mixin) GetRoot() string { + if m != nil { + return m.Root + } + return "" +} + +func (*Mixin) XXX_MessageName() string { + return "google.protobuf.Mixin" +} +func init() { + proto.RegisterType((*Api)(nil), "google.protobuf.Api") + proto.RegisterType((*Method)(nil), "google.protobuf.Method") + proto.RegisterType((*Mixin)(nil), "google.protobuf.Mixin") +} + +func init() { proto.RegisterFile("google/protobuf/api.proto", fileDescriptor_a2ec32096296c143) } + +var fileDescriptor_a2ec32096296c143 = []byte{ + // 467 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x91, 0x31, 0x6f, 0x13, 0x31, + 0x14, 0xc7, 0xeb, 0xbb, 0xe4, 0x52, 0x5c, 0x91, 0x82, 0x91, 0xc0, 0x64, 0xb0, 0x4e, 0x15, 0xc3, + 0x09, 0xc4, 0x45, 0x94, 0x4f, 0xd0, 0x20, 0xd4, 0x01, 0x21, 0xa2, 0x0b, 0x08, 0x89, 0x25, 0x4a, + 0x83, 0x09, 0x96, 0xee, 0x6c, 0x63, 0x3b, 0x90, 0x4c, 0xf0, 0x59, 0x98, 0x10, 0x23, 0xdf, 0x80, + 0xad, 0x23, 0x23, 0x23, 0xb9, 0x2e, 0x8c, 0x1d, 0x19, 0x91, 0x7d, 0xe7, 0xa6, 0x5c, 0x83, 0x04, + 0x9b, 0xdf, 0xfb, 0xff, 0xfc, 0xf7, 0x7b, 0x7f, 0xc3, 0x9b, 0x33, 0x21, 0x66, 0x39, 0xed, 0x4b, 0x25, 0x8c, 0x38, 0x9a, 0xbf, 0xea, 0x4f, 0x24, 0x4b, 0x5d, 0x81, 0x76, 0x2b, 0x29,
0xf5, 0x52, + 0xef, 0x56, 0x93, 0xd5, 0x62, 0xae, 0xa6, 0x74, 0x3c, 0x15, 0xdc, 0xd0, 0x85, 0xa9, 0xc0, 0x5e, + 0xaf, 0x49, 0x99, 0xa5, 0xac, 0x4d, 0xf6, 0xbe, 0x06, 0x30, 0x3c, 0x90, 0x0c, 0x21, 0xd8, 0xe2, + 0x93, 0x82, 0x62, 0x10, 0x83, 0xe4, 0x52, 0xe6, 0xce, 0xe8, 0x1e, 0xec, 0x14, 0xd4, 0xbc, 0x16, + 0x2f, 0x35, 0x0e, 0xe2, 0x30, 0xd9, 0xd9, 0xbf, 0x91, 0x36, 0x06, 0x48, 0x1f, 0x3b, 0x3d, 0xf3, + 0x9c, 0xbd, 0x22, 0xa4, 0x61, 0x82, 0x6b, 0x1c, 0xfe, 0xe5, 0xca, 0x13, 0xa7, 0x67, 0x9e, 0x43, + 0x18, 0x76, 0xde, 0x52, 0xa5, 0x99, 0xe0, 0xb8, 0xe5, 0x1e, 0xf7, 0x25, 0x7a, 0x08, 0xbb, 0x7f, + 0xee, 0x83, 0xdb, 0x31, 0x48, 0x76, 0xf6, 0xc9, 0x05, 0xcf, 0x91, 0xc3, 0x1e, 0x54, 0x54, 0x76, + 0x59, 0x9f, 0x2f, 0x51, 0x0a, 0xa3, 0x82, 0x2d, 0x18, 0xd7, 0x38, 0x72, 0x23, 0x5d, 0xbf, 0xb8, + 0x85, 0x95, 0xb3, 0x9a, 0x42, 0x7d, 0x18, 0xe9, 0x25, 0x37, 0x93, 0x05, 0xee, 0xc4, 0x20, 0xe9, + 0x6e, 0x58, 0x61, 0xe4, 0xe4, 0xac, 0xc6, 0xf6, 0xbe, 0x04, 0x30, 0xaa, 0x82, 0xd8, 0x18, 0x63, + 0x02, 0xaf, 0x28, 0xfa, 0x66, 0x4e, 0xb5, 0x19, 0xdb, 0xe0, 0xc7, 0x73, 0x95, 0xe3, 0xc0, 0xe9, + 0xdd, 0xba, 0xff, 0x74, 0x29, 0xe9, 0x33, 0x95, 0xa3, 0x3b, 0xf0, 0xaa, 0x27, 0xb5, 0x51, 0x74, + 0x52, 0x30, 0x3e, 0xc3, 0x61, 0x0c, 0x92, 0xed, 0xcc, 0x5b, 0x8c, 0x7c, 0x1f, 0xdd, 0xb6, 0xb0, + 0x96, 0x82, 0x6b, 0xba, 0xf6, 0xad, 0x12, 0xdc, 0xf5, 0x82, 0x37, 0xbe, 0x0b, 0xd1, 0x19, 0xbb, + 0x76, 0x6e, 0x3b, 0xe7, 0x33, 0x97, 0xb5, 0xf5, 0xb9, 0x5f, 0x8c, 0xfe, 0xf1, 0x17, 0xff, 0x3b, + 0xb4, 0x3e, 0x6c, 0xbb, 0xd8, 0x37, 0x46, 0x86, 0x60, 0x4b, 0x09, 0x61, 0xea, 0x98, 0xdc, 0x79, + 0xf0, 0xfe, 0xfb, 0x8a, 0x6c, 0x9d, 0xae, 0x08, 0xf8, 0xb5, 0x22, 0xe0, 0x43, 0x49, 0xc0, 0xa7, + 0x92, 0x80, 0xe3, 0x92, 0x80, 0x6f, 0x25, 0x01, 0x3f, 0x4a, 0x02, 0x7e, 0x96, 0x64, 0xeb, 0xd4, + 0xf6, 0x4f, 0x08, 0x38, 0x3e, 0x21, 0x00, 0x5e, 0x9b, 0x8a, 0xa2, 0x39, 0xc6, 0x60, 0xfb, 0x40, + 0xb2, 0xa1, 0x2d, 0x86, 0xe0, 0x45, 0xdb, 0xe6, 0xa6, 0x3f, 0x06, 0xe1, 0xe1, 0x70, 0xf0, 0x39, + 0x20, 0x87, 0x15, 0x3a, 0xf4, 0x13, 0x3f, 0xa7, 0x79, 0xfe, 0x88, 0x8b, 0x77, 0xdc, 0xc6, 0xa8, + 0x8f, 0x22, 0xe7, 0x71, 0xff, 0x77, 0x00, 0x00, 0x00, 0xff, 0xff, 0x2b, 0x64, 0x40, 0x40, 0xa1, + 0x03, 0x00, 0x00, +} + +func (this *Api) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Api) + if !ok { + that2, ok := that.(Api) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Name != that1.Name { + if this.Name < that1.Name { + return -1 + } + return 1 + } + if len(this.Methods) != len(that1.Methods) { + if len(this.Methods) < len(that1.Methods) { + return -1 + } + return 1 + } + for i := range this.Methods { + if c := this.Methods[i].Compare(that1.Methods[i]); c != 0 { + return c + } + } + if len(this.Options) != len(that1.Options) { + if len(this.Options) < len(that1.Options) { + return -1 + } + return 1 + } + for i := range this.Options { + if c := this.Options[i].Compare(that1.Options[i]); c != 0 { + return c + } + } + if this.Version != that1.Version { + if this.Version < that1.Version { + return -1 + } + return 1 + } + if c := this.SourceContext.Compare(that1.SourceContext); c != 0 { + return c + } + if len(this.Mixins) != len(that1.Mixins) { + if len(this.Mixins) < len(that1.Mixins) { + return -1 + } + return 1 + } + for i := range this.Mixins { + if c := this.Mixins[i].Compare(that1.Mixins[i]); c != 0 { + return c + } + } + if this.Syntax != 
that1.Syntax { + if this.Syntax < that1.Syntax { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Method) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Method) + if !ok { + that2, ok := that.(Method) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Name != that1.Name { + if this.Name < that1.Name { + return -1 + } + return 1 + } + if this.RequestTypeUrl != that1.RequestTypeUrl { + if this.RequestTypeUrl < that1.RequestTypeUrl { + return -1 + } + return 1 + } + if this.RequestStreaming != that1.RequestStreaming { + if !this.RequestStreaming { + return -1 + } + return 1 + } + if this.ResponseTypeUrl != that1.ResponseTypeUrl { + if this.ResponseTypeUrl < that1.ResponseTypeUrl { + return -1 + } + return 1 + } + if this.ResponseStreaming != that1.ResponseStreaming { + if !this.ResponseStreaming { + return -1 + } + return 1 + } + if len(this.Options) != len(that1.Options) { + if len(this.Options) < len(that1.Options) { + return -1 + } + return 1 + } + for i := range this.Options { + if c := this.Options[i].Compare(that1.Options[i]); c != 0 { + return c + } + } + if this.Syntax != that1.Syntax { + if this.Syntax < that1.Syntax { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Mixin) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Mixin) + if !ok { + that2, ok := that.(Mixin) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Name != that1.Name { + if this.Name < that1.Name { + return -1 + } + return 1 + } + if this.Root != that1.Root { + if this.Root < that1.Root { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Api) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Api) + if !ok { + that2, ok := that.(Api) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if len(this.Methods) != len(that1.Methods) { + return false + } + for i := range this.Methods { + if !this.Methods[i].Equal(that1.Methods[i]) { + return false + } + } + if len(this.Options) != len(that1.Options) { + return false + } + for i := range this.Options { + if !this.Options[i].Equal(that1.Options[i]) { + return false + } + } + if this.Version != that1.Version { + return false + } + if !this.SourceContext.Equal(that1.SourceContext) { + return false + } + if len(this.Mixins) != len(that1.Mixins) { + return false + } + for i := range this.Mixins { + if !this.Mixins[i].Equal(that1.Mixins[i]) { + return false + } + } + if this.Syntax != that1.Syntax { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Method) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Method) + if !ok { + that2, ok := that.(Method) + if 
ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if this.RequestTypeUrl != that1.RequestTypeUrl { + return false + } + if this.RequestStreaming != that1.RequestStreaming { + return false + } + if this.ResponseTypeUrl != that1.ResponseTypeUrl { + return false + } + if this.ResponseStreaming != that1.ResponseStreaming { + return false + } + if len(this.Options) != len(that1.Options) { + return false + } + for i := range this.Options { + if !this.Options[i].Equal(that1.Options[i]) { + return false + } + } + if this.Syntax != that1.Syntax { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Mixin) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Mixin) + if !ok { + that2, ok := that.(Mixin) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if this.Root != that1.Root { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Api) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 11) + s = append(s, "&types.Api{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + if this.Methods != nil { + s = append(s, "Methods: "+fmt.Sprintf("%#v", this.Methods)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + s = append(s, "Version: "+fmt.Sprintf("%#v", this.Version)+",\n") + if this.SourceContext != nil { + s = append(s, "SourceContext: "+fmt.Sprintf("%#v", this.SourceContext)+",\n") + } + if this.Mixins != nil { + s = append(s, "Mixins: "+fmt.Sprintf("%#v", this.Mixins)+",\n") + } + s = append(s, "Syntax: "+fmt.Sprintf("%#v", this.Syntax)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Method) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 11) + s = append(s, "&types.Method{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + s = append(s, "RequestTypeUrl: "+fmt.Sprintf("%#v", this.RequestTypeUrl)+",\n") + s = append(s, "RequestStreaming: "+fmt.Sprintf("%#v", this.RequestStreaming)+",\n") + s = append(s, "ResponseTypeUrl: "+fmt.Sprintf("%#v", this.ResponseTypeUrl)+",\n") + s = append(s, "ResponseStreaming: "+fmt.Sprintf("%#v", this.ResponseStreaming)+",\n") + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + s = append(s, "Syntax: "+fmt.Sprintf("%#v", this.Syntax)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Mixin) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&types.Mixin{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + s = append(s, "Root: "+fmt.Sprintf("%#v", this.Root)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return 
strings.Join(s, "") +} +func valueToGoStringApi(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Api) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Api) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Api) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Syntax != 0 { + i = encodeVarintApi(dAtA, i, uint64(m.Syntax)) + i-- + dAtA[i] = 0x38 + } + if len(m.Mixins) > 0 { + for iNdEx := len(m.Mixins) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Mixins[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + if m.SourceContext != nil { + { + size, err := m.SourceContext.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if len(m.Version) > 0 { + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintApi(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x22 + } + if len(m.Options) > 0 { + for iNdEx := len(m.Options) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Options[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Methods) > 0 { + for iNdEx := len(m.Methods) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Methods[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintApi(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Method) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Method) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Method) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Syntax != 0 { + i = encodeVarintApi(dAtA, i, uint64(m.Syntax)) + i-- + dAtA[i] = 0x38 + } + if len(m.Options) > 0 { + for iNdEx := len(m.Options) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Options[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + if m.ResponseStreaming { + i-- + if m.ResponseStreaming { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if len(m.ResponseTypeUrl) > 0 { + i -= len(m.ResponseTypeUrl) + copy(dAtA[i:], m.ResponseTypeUrl) + i = encodeVarintApi(dAtA, i, uint64(len(m.ResponseTypeUrl))) + i-- + 
dAtA[i] = 0x22 + } + if m.RequestStreaming { + i-- + if m.RequestStreaming { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if len(m.RequestTypeUrl) > 0 { + i -= len(m.RequestTypeUrl) + copy(dAtA[i:], m.RequestTypeUrl) + i = encodeVarintApi(dAtA, i, uint64(len(m.RequestTypeUrl))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintApi(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Mixin) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Mixin) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Mixin) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Root) > 0 { + i -= len(m.Root) + copy(dAtA[i:], m.Root) + i = encodeVarintApi(dAtA, i, uint64(len(m.Root))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintApi(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintApi(dAtA []byte, offset int, v uint64) int { + offset -= sovApi(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func NewPopulatedApi(r randyApi, easy bool) *Api { + this := &Api{} + this.Name = string(randStringApi(r)) + if r.Intn(5) != 0 { + v1 := r.Intn(5) + this.Methods = make([]*Method, v1) + for i := 0; i < v1; i++ { + this.Methods[i] = NewPopulatedMethod(r, easy) + } + } + if r.Intn(5) != 0 { + v2 := r.Intn(5) + this.Options = make([]*Option, v2) + for i := 0; i < v2; i++ { + this.Options[i] = NewPopulatedOption(r, easy) + } + } + this.Version = string(randStringApi(r)) + if r.Intn(5) != 0 { + this.SourceContext = NewPopulatedSourceContext(r, easy) + } + if r.Intn(5) != 0 { + v3 := r.Intn(5) + this.Mixins = make([]*Mixin, v3) + for i := 0; i < v3; i++ { + this.Mixins[i] = NewPopulatedMixin(r, easy) + } + } + this.Syntax = Syntax([]int32{0, 1}[r.Intn(2)]) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedApi(r, 8) + } + return this +} + +func NewPopulatedMethod(r randyApi, easy bool) *Method { + this := &Method{} + this.Name = string(randStringApi(r)) + this.RequestTypeUrl = string(randStringApi(r)) + this.RequestStreaming = bool(bool(r.Intn(2) == 0)) + this.ResponseTypeUrl = string(randStringApi(r)) + this.ResponseStreaming = bool(bool(r.Intn(2) == 0)) + if r.Intn(5) != 0 { + v4 := r.Intn(5) + this.Options = make([]*Option, v4) + for i := 0; i < v4; i++ { + this.Options[i] = NewPopulatedOption(r, easy) + } + } + this.Syntax = Syntax([]int32{0, 1}[r.Intn(2)]) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedApi(r, 8) + } + return this +} + +func NewPopulatedMixin(r randyApi, easy bool) *Mixin { + this := &Mixin{} + this.Name = string(randStringApi(r)) + this.Root = string(randStringApi(r)) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedApi(r, 3) + } + return this +} + +type randyApi interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneApi(r 
randyApi) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringApi(r randyApi) string { + v5 := r.Intn(100) + tmps := make([]rune, v5) + for i := 0; i < v5; i++ { + tmps[i] = randUTF8RuneApi(r) + } + return string(tmps) +} +func randUnrecognizedApi(r randyApi, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldApi(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldApi(dAtA []byte, r randyApi, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = encodeVarintPopulateApi(dAtA, uint64(key)) + v6 := r.Int63() + if r.Intn(2) == 0 { + v6 *= -1 + } + dAtA = encodeVarintPopulateApi(dAtA, uint64(v6)) + case 1: + dAtA = encodeVarintPopulateApi(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateApi(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateApi(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateApi(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateApi(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *Api) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if len(m.Methods) > 0 { + for _, e := range m.Methods { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } + if len(m.Options) > 0 { + for _, e := range m.Options { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } + l = len(m.Version) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if m.SourceContext != nil { + l = m.SourceContext.Size() + n += 1 + l + sovApi(uint64(l)) + } + if len(m.Mixins) > 0 { + for _, e := range m.Mixins { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } + if m.Syntax != 0 { + n += 1 + sovApi(uint64(m.Syntax)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Method) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.RequestTypeUrl) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if m.RequestStreaming { + n += 2 + } + l = len(m.ResponseTypeUrl) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if m.ResponseStreaming { + n += 2 + } + if len(m.Options) > 0 { + for _, e := range m.Options { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } + if m.Syntax != 0 { + n += 1 + sovApi(uint64(m.Syntax)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Mixin) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.Root) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovApi(x uint64) (n int) { + return 
(math_bits.Len64(x|1) + 6) / 7 +} +func sozApi(x uint64) (n int) { + return sovApi(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Api) String() string { + if this == nil { + return "nil" + } + repeatedStringForMethods := "[]*Method{" + for _, f := range this.Methods { + repeatedStringForMethods += strings.Replace(f.String(), "Method", "Method", 1) + "," + } + repeatedStringForMethods += "}" + repeatedStringForOptions := "[]*Option{" + for _, f := range this.Options { + repeatedStringForOptions += strings.Replace(fmt.Sprintf("%v", f), "Option", "Option", 1) + "," + } + repeatedStringForOptions += "}" + repeatedStringForMixins := "[]*Mixin{" + for _, f := range this.Mixins { + repeatedStringForMixins += strings.Replace(f.String(), "Mixin", "Mixin", 1) + "," + } + repeatedStringForMixins += "}" + s := strings.Join([]string{`&Api{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Methods:` + repeatedStringForMethods + `,`, + `Options:` + repeatedStringForOptions + `,`, + `Version:` + fmt.Sprintf("%v", this.Version) + `,`, + `SourceContext:` + strings.Replace(fmt.Sprintf("%v", this.SourceContext), "SourceContext", "SourceContext", 1) + `,`, + `Mixins:` + repeatedStringForMixins + `,`, + `Syntax:` + fmt.Sprintf("%v", this.Syntax) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *Method) String() string { + if this == nil { + return "nil" + } + repeatedStringForOptions := "[]*Option{" + for _, f := range this.Options { + repeatedStringForOptions += strings.Replace(fmt.Sprintf("%v", f), "Option", "Option", 1) + "," + } + repeatedStringForOptions += "}" + s := strings.Join([]string{`&Method{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `RequestTypeUrl:` + fmt.Sprintf("%v", this.RequestTypeUrl) + `,`, + `RequestStreaming:` + fmt.Sprintf("%v", this.RequestStreaming) + `,`, + `ResponseTypeUrl:` + fmt.Sprintf("%v", this.ResponseTypeUrl) + `,`, + `ResponseStreaming:` + fmt.Sprintf("%v", this.ResponseStreaming) + `,`, + `Options:` + repeatedStringForOptions + `,`, + `Syntax:` + fmt.Sprintf("%v", this.Syntax) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *Mixin) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Mixin{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Root:` + fmt.Sprintf("%v", this.Root) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func valueToStringApi(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Api) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Api: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Api: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { 
+ if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Methods", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Methods = append(m.Methods, &Method{}) + if err := m.Methods[len(m.Methods)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Options = append(m.Options, &Option{}) + if err := m.Options[len(m.Options)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourceContext", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SourceContext == nil { + m.SourceContext = &SourceContext{} + } + if err := m.SourceContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Mixins", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Mixins = append(m.Mixins, &Mixin{}) + if err := m.Mixins[len(m.Mixins)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Syntax", wireType) + } + m.Syntax = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Syntax |= Syntax(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Method) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Method: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Method: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestTypeUrl", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RequestTypeUrl = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestStreaming", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.RequestStreaming = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResponseTypeUrl", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResponseTypeUrl = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ResponseStreaming", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ResponseStreaming = bool(v != 0) + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Options = append(m.Options, &Option{}) + if err := m.Options[len(m.Options)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Syntax", wireType) + } + m.Syntax = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Syntax |= Syntax(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Mixin) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Mixin: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Mixin: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Root", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Root = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipApi(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowApi + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowApi + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowApi + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthApi + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupApi + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthApi + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthApi = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowApi = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupApi = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/gogo/protobuf/types/doc.go b/vendor/github.com/gogo/protobuf/types/doc.go new file mode 100644 index 00000000..ff2810af --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/doc.go @@ -0,0 +1,35 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package types contains code for interacting with well-known types. +*/ +package types diff --git a/vendor/github.com/gogo/protobuf/types/duration.go b/vendor/github.com/gogo/protobuf/types/duration.go new file mode 100644 index 00000000..979b8e78 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/duration.go @@ -0,0 +1,100 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package types + +// This file implements conversions between google.protobuf.Duration +// and time.Duration. + +import ( + "errors" + "fmt" + "time" +) + +const ( + // Range of a Duration in seconds, as specified in + // google/protobuf/duration.proto. This is about 10,000 years in seconds. + maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) + minSeconds = -maxSeconds +) + +// validateDuration determines whether the Duration is valid according to the +// definition in google/protobuf/duration.proto. A valid Duration +// may still be too large to fit into a time.Duration (the range of Duration +// is about 10,000 years, and the range of time.Duration is about 290 years).
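+
+// exampleDurationRoundTrip is an illustrative sketch rather than part of the
+// generated package; the helper name is hypothetical. It shows the intended
+// pairing of DurationProto (time.Duration to proto) with DurationFromProto
+// (proto back to time.Duration, with validation).
+func exampleDurationRoundTrip() (time.Duration, error) {
+	// 90 minutes and 30 nanoseconds split into Seconds: 5400, Nanos: 30.
+	p := DurationProto(90*time.Minute + 30*time.Nanosecond)
+	// DurationFromProto applies validateDuration first and then checks that
+	// the value still fits into int64 nanoseconds before converting back.
+	return DurationFromProto(p)
+}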
+func validateDuration(d *Duration) error { + if d == nil { + return errors.New("duration: nil Duration") + } + if d.Seconds < minSeconds || d.Seconds > maxSeconds { + return fmt.Errorf("duration: %#v: seconds out of range", d) + } + if d.Nanos <= -1e9 || d.Nanos >= 1e9 { + return fmt.Errorf("duration: %#v: nanos out of range", d) + } + // Seconds and Nanos must have the same sign, unless d.Nanos is zero. + if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) { + return fmt.Errorf("duration: %#v: seconds and nanos have different signs", d) + } + return nil +} + +// DurationFromProto converts a Duration to a time.Duration. DurationFromProto +// returns an error if the Duration is invalid or is too large to be +// represented in a time.Duration. +func DurationFromProto(p *Duration) (time.Duration, error) { + if err := validateDuration(p); err != nil { + return 0, err + } + d := time.Duration(p.Seconds) * time.Second + if int64(d/time.Second) != p.Seconds { + return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p) + } + if p.Nanos != 0 { + d += time.Duration(p.Nanos) * time.Nanosecond + if (d < 0) != (p.Nanos < 0) { + return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p) + } + } + return d, nil +} + +// DurationProto converts a time.Duration to a Duration. +func DurationProto(d time.Duration) *Duration { + nanos := d.Nanoseconds() + secs := nanos / 1e9 + nanos -= secs * 1e9 + return &Duration{ + Seconds: secs, + Nanos: int32(nanos), + } +} diff --git a/vendor/github.com/gogo/protobuf/types/duration.pb.go b/vendor/github.com/gogo/protobuf/types/duration.pb.go new file mode 100644 index 00000000..3959f066 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/duration.pb.go @@ -0,0 +1,520 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: google/protobuf/duration.proto + +package types + +import ( + bytes "bytes" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// A Duration represents a signed, fixed-length span of time represented +// as a count of seconds and fractions of seconds at nanosecond +// resolution. It is independent of any calendar and concepts like "day" +// or "month". It is related to Timestamp in that the difference between +// two Timestamp values is a Duration and it can be added or subtracted +// from a Timestamp. Range is approximately +-10,000 years. +// +// # Examples +// +// Example 1: Compute Duration from two Timestamps in pseudo code. +// +// Timestamp start = ...; +// Timestamp end = ...; +// Duration duration = ...; +// +// duration.seconds = end.seconds - start.seconds; +// duration.nanos = end.nanos - start.nanos; +// +// if (duration.seconds < 0 && duration.nanos > 0) { +// duration.seconds += 1; +// duration.nanos -= 1000000000; +// } else if (duration.seconds > 0 && duration.nanos < 0) { +// duration.seconds -= 1; +// duration.nanos += 1000000000; +// } +// +// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
+// +// Timestamp start = ...; +// Duration duration = ...; +// Timestamp end = ...; +// +// end.seconds = start.seconds + duration.seconds; +// end.nanos = start.nanos + duration.nanos; +// +// if (end.nanos < 0) { +// end.seconds -= 1; +// end.nanos += 1000000000; +// } else if (end.nanos >= 1000000000) { +// end.seconds += 1; +// end.nanos -= 1000000000; +// } +// +// Example 3: Compute Duration from datetime.timedelta in Python. +// +// td = datetime.timedelta(days=3, minutes=10) +// duration = Duration() +// duration.FromTimedelta(td) +// +// # JSON Mapping +// +// In JSON format, the Duration type is encoded as a string rather than an +// object, where the string ends in the suffix "s" (indicating seconds) and +// is preceded by the number of seconds, with nanoseconds expressed as +// fractional seconds. For example, 3 seconds with 0 nanoseconds should be +// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should +// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 +// microsecond should be expressed in JSON format as "3.000001s". +// +// +type Duration struct { + // Signed seconds of the span of time. Must be from -315,576,000,000 + // to +315,576,000,000 inclusive. Note: these bounds are computed from: + // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + // Signed fractions of a second at nanosecond resolution of the span + // of time. Durations less than one second are represented with a 0 + // `seconds` field and a positive or negative `nanos` field. For durations + // of one second or more, a non-zero value for the `nanos` field must be + // of the same sign as the `seconds` field. Must be from -999,999,999 + // to +999,999,999 inclusive. 
+ Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Duration) Reset() { *m = Duration{} } +func (*Duration) ProtoMessage() {} +func (*Duration) Descriptor() ([]byte, []int) { + return fileDescriptor_23597b2ebd7ac6c5, []int{0} +} +func (*Duration) XXX_WellKnownType() string { return "Duration" } +func (m *Duration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Duration.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Duration) XXX_Merge(src proto.Message) { + xxx_messageInfo_Duration.Merge(m, src) +} +func (m *Duration) XXX_Size() int { + return m.Size() +} +func (m *Duration) XXX_DiscardUnknown() { + xxx_messageInfo_Duration.DiscardUnknown(m) +} + +var xxx_messageInfo_Duration proto.InternalMessageInfo + +func (m *Duration) GetSeconds() int64 { + if m != nil { + return m.Seconds + } + return 0 +} + +func (m *Duration) GetNanos() int32 { + if m != nil { + return m.Nanos + } + return 0 +} + +func (*Duration) XXX_MessageName() string { + return "google.protobuf.Duration" +} +func init() { + proto.RegisterType((*Duration)(nil), "google.protobuf.Duration") +} + +func init() { proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_23597b2ebd7ac6c5) } + +var fileDescriptor_23597b2ebd7ac6c5 = []byte{ + // 209 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a, + 0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0x56, + 0x5c, 0x1c, 0x2e, 0x50, 0x25, 0x42, 0x12, 0x5c, 0xec, 0xc5, 0xa9, 0xc9, 0xf9, 0x79, 0x29, 0xc5, + 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0x30, 0xae, 0x90, 0x08, 0x17, 0x6b, 0x5e, 0x62, 0x5e, + 0x7e, 0xb1, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x6b, 0x10, 0x84, 0xe3, 0x54, 0x7f, 0xe3, 0xa1, 0x1c, + 0xc3, 0x87, 0x87, 0x72, 0x8c, 0x2b, 0x1e, 0xc9, 0x31, 0x9e, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, + 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x2f, 0x1e, 0xc9, 0x31, 0x7c, 0x78, 0x24, 0xc7, 0xb8, 0xe2, + 0xb1, 0x1c, 0xe3, 0x89, 0xc7, 0x72, 0x8c, 0x5c, 0xc2, 0xc9, 0xf9, 0xb9, 0x7a, 0x68, 0x56, 0x3b, + 0xf1, 0xc2, 0x2c, 0x0e, 0x00, 0x89, 0x04, 0x30, 0x46, 0xb1, 0x96, 0x54, 0x16, 0xa4, 0x16, 0xff, + 0x60, 0x64, 0x5c, 0xc4, 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0xa2, 0x25, 0x00, + 0xaa, 0x45, 0x2f, 0x3c, 0x35, 0x27, 0xc7, 0x3b, 0x2f, 0xbf, 0x3c, 0x2f, 0x04, 0xa4, 0x32, 0x89, + 0x0d, 0x6c, 0x96, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x8a, 0x1c, 0x64, 0x4e, 0xf6, 0x00, 0x00, + 0x00, +} + +func (this *Duration) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Duration) + if !ok { + that2, ok := that.(Duration) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Seconds != that1.Seconds { + if this.Seconds < that1.Seconds { + return -1 + } + return 1 + } + if this.Nanos != that1.Nanos { + if this.Nanos < that1.Nanos { + return -1 + } + return 1 + } + if c := 
bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Duration) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Duration) + if !ok { + that2, ok := that.(Duration) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Seconds != that1.Seconds { + return false + } + if this.Nanos != that1.Nanos { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Duration) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&types.Duration{") + s = append(s, "Seconds: "+fmt.Sprintf("%#v", this.Seconds)+",\n") + s = append(s, "Nanos: "+fmt.Sprintf("%#v", this.Nanos)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringDuration(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Duration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Duration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Duration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Nanos != 0 { + i = encodeVarintDuration(dAtA, i, uint64(m.Nanos)) + i-- + dAtA[i] = 0x10 + } + if m.Seconds != 0 { + i = encodeVarintDuration(dAtA, i, uint64(m.Seconds)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintDuration(dAtA []byte, offset int, v uint64) int { + offset -= sovDuration(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Duration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Seconds != 0 { + n += 1 + sovDuration(uint64(m.Seconds)) + } + if m.Nanos != 0 { + n += 1 + sovDuration(uint64(m.Nanos)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovDuration(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozDuration(x uint64) (n int) { + return sovDuration(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Duration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDuration + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Duration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Duration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if 
wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Seconds", wireType) + } + m.Seconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDuration + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Seconds |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Nanos", wireType) + } + m.Nanos = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDuration + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Nanos |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipDuration(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDuration + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthDuration + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipDuration(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDuration + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDuration + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDuration + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthDuration + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupDuration + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthDuration + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthDuration = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowDuration = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupDuration = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/gogo/protobuf/types/duration_gogo.go b/vendor/github.com/gogo/protobuf/types/duration_gogo.go new file mode 100644 index 00000000..90e7670e --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/duration_gogo.go @@ -0,0 +1,100 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2016, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package types + +import ( + "fmt" + "time" +) + +func NewPopulatedDuration(r interface { + Int63() int64 +}, easy bool) *Duration { + this := &Duration{} + maxSecs := time.Hour.Nanoseconds() / 1e9 + max := 2 * maxSecs + s := int64(r.Int63()) % max + s -= maxSecs + neg := int64(1) + if s < 0 { + neg = -1 + } + this.Seconds = s + this.Nanos = int32(neg * (r.Int63() % 1e9)) + return this +} + +func (d *Duration) String() string { + td, err := DurationFromProto(d) + if err != nil { + return fmt.Sprintf("(%v)", err) + } + return td.String() +} + +func NewPopulatedStdDuration(r interface { + Int63() int64 +}, easy bool) *time.Duration { + dur := NewPopulatedDuration(r, easy) + d, err := DurationFromProto(dur) + if err != nil { + return nil + } + return &d +} + +func SizeOfStdDuration(d time.Duration) int { + dur := DurationProto(d) + return dur.Size() +} + +func StdDurationMarshal(d time.Duration) ([]byte, error) { + size := SizeOfStdDuration(d) + buf := make([]byte, size) + _, err := StdDurationMarshalTo(d, buf) + return buf, err +} + +func StdDurationMarshalTo(d time.Duration, data []byte) (int, error) { + dur := DurationProto(d) + return dur.MarshalTo(data) +} + +func StdDurationUnmarshal(d *time.Duration, data []byte) error { + dur := &Duration{} + if err := dur.Unmarshal(data); err != nil { + return err + } + dd, err := DurationFromProto(dur) + if err != nil { + return err + } + *d = dd + return nil +} diff --git a/vendor/github.com/gogo/protobuf/types/empty.pb.go b/vendor/github.com/gogo/protobuf/types/empty.pb.go new file mode 100644 index 00000000..17e3aa55 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/empty.pb.go @@ -0,0 +1,465 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: google/protobuf/empty.proto + +package types + +import ( + bytes "bytes" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
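The StdDuration* helpers above encode a bare time.Duration using the google.protobuf.Duration wire format (varint seconds, plus nanos when non-zero); they are the entry points gogo-generated code calls for fields mapped directly to time.Duration. A small round-trip sketch; the main wrapper and error handling are illustrative:

    package main

    import (
    	"fmt"
    	"log"
    	"time"

    	"github.com/gogo/protobuf/types"
    )

    func main() {
    	// Encode 90s as a google.protobuf.Duration message on the wire.
    	buf, err := types.StdDurationMarshal(90 * time.Second)
    	if err != nil {
    		log.Fatal(err)
    	}

    	// Decode back into a plain time.Duration.
    	var d time.Duration
    	if err := types.StdDurationUnmarshal(&d, buf); err != nil {
    		log.Fatal(err)
    	}
    	fmt.Println(d) // 1m30s
    }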
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// A generic empty message that you can re-use to avoid defining duplicated +// empty messages in your APIs. A typical example is to use it as the request +// or the response type of an API method. For instance: +// +// service Foo { +// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); +// } +// +// The JSON representation for `Empty` is empty JSON object `{}`. +type Empty struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Empty) Reset() { *m = Empty{} } +func (*Empty) ProtoMessage() {} +func (*Empty) Descriptor() ([]byte, []int) { + return fileDescriptor_900544acb223d5b8, []int{0} +} +func (*Empty) XXX_WellKnownType() string { return "Empty" } +func (m *Empty) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Empty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Empty.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Empty) XXX_Merge(src proto.Message) { + xxx_messageInfo_Empty.Merge(m, src) +} +func (m *Empty) XXX_Size() int { + return m.Size() +} +func (m *Empty) XXX_DiscardUnknown() { + xxx_messageInfo_Empty.DiscardUnknown(m) +} + +var xxx_messageInfo_Empty proto.InternalMessageInfo + +func (*Empty) XXX_MessageName() string { + return "google.protobuf.Empty" +} +func init() { + proto.RegisterType((*Empty)(nil), "google.protobuf.Empty") +} + +func init() { proto.RegisterFile("google/protobuf/empty.proto", fileDescriptor_900544acb223d5b8) } + +var fileDescriptor_900544acb223d5b8 = []byte{ + // 176 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcd, 0x2d, 0x28, + 0xa9, 0xd4, 0x03, 0x73, 0x85, 0xf8, 0x21, 0x92, 0x7a, 0x30, 0x49, 0x25, 0x76, 0x2e, 0x56, 0x57, + 0x90, 0xbc, 0x53, 0x0b, 0xe3, 0x8d, 0x87, 0x72, 0x0c, 0x1f, 0x1e, 0xca, 0x31, 0xfe, 0x78, 0x28, + 0xc7, 0xd8, 0xf0, 0x48, 0x8e, 0x71, 0xc5, 0x23, 0x39, 0xc6, 0x13, 0x8f, 0xe4, 0x18, 0x2f, 0x3c, + 0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, 0xf1, 0xc5, 0x23, 0x39, 0x86, 0x0f, 0x20, 0xf1, 0xc7, 0x72, + 0x8c, 0x27, 0x1e, 0xcb, 0x31, 0x72, 0x09, 0x27, 0xe7, 0xe7, 0xea, 0xa1, 0x19, 0xe8, 0xc4, 0x05, + 0x36, 0x2e, 0x00, 0xc4, 0x0d, 0x60, 0x8c, 0x62, 0x2d, 0xa9, 0x2c, 0x48, 0x2d, 0xfe, 0xc1, 0xc8, + 0xb8, 0x88, 0x89, 0xd9, 0x3d, 0xc0, 0x69, 0x15, 0x93, 0x9c, 0x3b, 0x44, 0x7d, 0x00, 0x54, 0xbd, + 0x5e, 0x78, 0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x65, 0x12, 0x1b, 0xd8, + 0x20, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x21, 0xbe, 0xb6, 0x31, 0xc6, 0x00, 0x00, 0x00, +} + +func (this *Empty) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Empty) + if !ok { + that2, ok := that.(Empty) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Empty) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Empty) + if !ok { + that2, ok := 
that.(Empty) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Empty) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 4) + s = append(s, "&types.Empty{") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringEmpty(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Empty) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Empty) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Empty) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func encodeVarintEmpty(dAtA []byte, offset int, v uint64) int { + offset -= sovEmpty(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func NewPopulatedEmpty(r randyEmpty, easy bool) *Empty { + this := &Empty{} + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedEmpty(r, 1) + } + return this +} + +type randyEmpty interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneEmpty(r randyEmpty) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringEmpty(r randyEmpty) string { + v1 := r.Intn(100) + tmps := make([]rune, v1) + for i := 0; i < v1; i++ { + tmps[i] = randUTF8RuneEmpty(r) + } + return string(tmps) +} +func randUnrecognizedEmpty(r randyEmpty, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldEmpty(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldEmpty(dAtA []byte, r randyEmpty, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = encodeVarintPopulateEmpty(dAtA, uint64(key)) + v2 := r.Int63() + if r.Intn(2) == 0 { + v2 *= -1 + } + dAtA = encodeVarintPopulateEmpty(dAtA, uint64(v2)) + case 1: + dAtA = encodeVarintPopulateEmpty(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateEmpty(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateEmpty(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateEmpty(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), 
byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateEmpty(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *Empty) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovEmpty(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozEmpty(x uint64) (n int) { + return sovEmpty(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Empty) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Empty{`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func valueToStringEmpty(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Empty) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEmpty + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Empty: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Empty: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipEmpty(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEmpty + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthEmpty + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipEmpty(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEmpty + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEmpty + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEmpty + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthEmpty + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupEmpty + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthEmpty + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthEmpty = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowEmpty = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupEmpty = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/gogo/protobuf/types/field_mask.pb.go b/vendor/github.com/gogo/protobuf/types/field_mask.pb.go new file mode 100644 index 00000000..7226b57f --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/field_mask.pb.go @@ -0,0 +1,741 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: google/protobuf/field_mask.proto + +package types + +import ( + bytes "bytes" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// `FieldMask` represents a set of symbolic field paths, for example: +// +// paths: "f.a" +// paths: "f.b.d" +// +// Here `f` represents a field in some root message, `a` and `b` +// fields in the message found in `f`, and `d` a field found in the +// message in `f.b`. +// +// Field masks are used to specify a subset of fields that should be +// returned by a get operation or modified by an update operation. +// Field masks also have a custom JSON encoding (see below). +// +// # Field Masks in Projections +// +// When used in the context of a projection, a response message or +// sub-message is filtered by the API to only contain those fields as +// specified in the mask. 
For example, if the mask in the previous +// example is applied to a response message as follows: +// +// f { +// a : 22 +// b { +// d : 1 +// x : 2 +// } +// y : 13 +// } +// z: 8 +// +// The result will not contain specific values for fields x,y and z +// (their value will be set to the default, and omitted in proto text +// output): +// +// +// f { +// a : 22 +// b { +// d : 1 +// } +// } +// +// A repeated field is not allowed except at the last position of a +// paths string. +// +// If a FieldMask object is not present in a get operation, the +// operation applies to all fields (as if a FieldMask of all fields +// had been specified). +// +// Note that a field mask does not necessarily apply to the +// top-level response message. In case of a REST get operation, the +// field mask applies directly to the response, but in case of a REST +// list operation, the mask instead applies to each individual message +// in the returned resource list. In case of a REST custom method, +// other definitions may be used. Where the mask applies will be +// clearly documented together with its declaration in the API. In +// any case, the effect on the returned resource/resources is required +// behavior for APIs. +// +// # Field Masks in Update Operations +// +// A field mask in update operations specifies which fields of the +// targeted resource are going to be updated. The API is required +// to only change the values of the fields as specified in the mask +// and leave the others untouched. If a resource is passed in to +// describe the updated values, the API ignores the values of all +// fields not covered by the mask. +// +// If a repeated field is specified for an update operation, new values will +// be appended to the existing repeated field in the target resource. Note that +// a repeated field is only allowed in the last position of a `paths` string. +// +// If a sub-message is specified in the last position of the field mask for an +// update operation, then new value will be merged into the existing sub-message +// in the target resource. +// +// For example, given the target message: +// +// f { +// b { +// d: 1 +// x: 2 +// } +// c: [1] +// } +// +// And an update message: +// +// f { +// b { +// d: 10 +// } +// c: [2] +// } +// +// then if the field mask is: +// +// paths: ["f.b", "f.c"] +// +// then the result will be: +// +// f { +// b { +// d: 10 +// x: 2 +// } +// c: [1, 2] +// } +// +// An implementation may provide options to override this default behavior for +// repeated and message fields. +// +// In order to reset a field's value to the default, the field must +// be in the mask and set to the default value in the provided resource. +// Hence, in order to reset all fields of a resource, provide a default +// instance of the resource and set all fields in the mask, or do +// not provide a mask as described below. +// +// If a field mask is not present on update, the operation applies to +// all fields (as if a field mask of all fields has been specified). +// Note that in the presence of schema evolution, this may mean that +// fields the client does not know and has therefore not filled into +// the request will be reset to their default. If this is unwanted +// behavior, a specific service may require a client to always specify +// a field mask, producing an error if not. +// +// As with get operations, the location of the resource which +// describes the updated values in the request message depends on the +// operation kind. 
In any case, the effect of the field mask is +// required to be honored by the API. +// +// ## Considerations for HTTP REST +// +// The HTTP kind of an update operation which uses a field mask must +// be set to PATCH instead of PUT in order to satisfy HTTP semantics +// (PUT must only be used for full updates). +// +// # JSON Encoding of Field Masks +// +// In JSON, a field mask is encoded as a single string where paths are +// separated by a comma. Field names in each path are converted +// to/from lower-camel naming conventions. +// +// As an example, consider the following message declarations: +// +// message Profile { +// User user = 1; +// Photo photo = 2; +// } +// message User { +// string display_name = 1; +// string address = 2; +// } +// +// In proto a field mask for `Profile` may look as such: +// +// mask { +// paths: "user.display_name" +// paths: "photo" +// } +// +// In JSON, the same mask is represented as below: +// +// { +// mask: "user.displayName,photo" +// } +// +// # Field Masks and Oneof Fields +// +// Field masks treat fields in oneofs just as regular fields. Consider the +// following message: +// +// message SampleMessage { +// oneof test_oneof { +// string name = 4; +// SubMessage sub_message = 9; +// } +// } +// +// The field mask can be: +// +// mask { +// paths: "name" +// } +// +// Or: +// +// mask { +// paths: "sub_message" +// } +// +// Note that oneof type names ("test_oneof" in this case) cannot be used in +// paths. +// +// ## Field Mask Verification +// +// The implementation of any API method which has a FieldMask type field in the +// request should verify the included field paths, and return an +// `INVALID_ARGUMENT` error if any path is duplicated or unmappable. +type FieldMask struct { + // The set of field mask paths.
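A short sketch of the `Profile` mask above using this generated type and the Paths field declared just below; paths keep the snake_case field names, and the JSON mapping described above would render the mask as the single string "user.displayName,photo". The main wrapper is illustrative:

    package main

    import (
    	"fmt"

    	"github.com/gogo/protobuf/types"
    )

    func main() {
    	mask := &types.FieldMask{Paths: []string{"user.display_name", "photo"}}

    	// Round trip through the generated fast-path codec: each path is
    	// written as a length-delimited field 1 entry (tag byte 0xa).
    	raw, err := mask.Marshal()
    	if err != nil {
    		panic(err)
    	}
    	var back types.FieldMask
    	if err := back.Unmarshal(raw); err != nil {
    		panic(err)
    	}
    	fmt.Println(back.Paths) // [user.display_name photo]
    }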
+ Paths []string `protobuf:"bytes,1,rep,name=paths,proto3" json:"paths,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FieldMask) Reset() { *m = FieldMask{} } +func (*FieldMask) ProtoMessage() {} +func (*FieldMask) Descriptor() ([]byte, []int) { + return fileDescriptor_5158202634f0da48, []int{0} +} +func (m *FieldMask) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FieldMask) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_FieldMask.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *FieldMask) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldMask.Merge(m, src) +} +func (m *FieldMask) XXX_Size() int { + return m.Size() +} +func (m *FieldMask) XXX_DiscardUnknown() { + xxx_messageInfo_FieldMask.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldMask proto.InternalMessageInfo + +func (m *FieldMask) GetPaths() []string { + if m != nil { + return m.Paths + } + return nil +} + +func (*FieldMask) XXX_MessageName() string { + return "google.protobuf.FieldMask" +} +func init() { + proto.RegisterType((*FieldMask)(nil), "google.protobuf.FieldMask") +} + +func init() { proto.RegisterFile("google/protobuf/field_mask.proto", fileDescriptor_5158202634f0da48) } + +var fileDescriptor_5158202634f0da48 = []byte{ + // 203 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x48, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcb, 0x4c, 0xcd, + 0x49, 0x89, 0xcf, 0x4d, 0x2c, 0xce, 0xd6, 0x03, 0x8b, 0x09, 0xf1, 0x43, 0x54, 0xe8, 0xc1, 0x54, + 0x28, 0x29, 0x72, 0x71, 0xba, 0x81, 0x14, 0xf9, 0x26, 0x16, 0x67, 0x0b, 0x89, 0x70, 0xb1, 0x16, + 0x24, 0x96, 0x64, 0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6b, 0x70, 0x06, 0x41, 0x38, 0x4e, 0x1d, 0x8c, + 0x37, 0x1e, 0xca, 0x31, 0x7c, 0x78, 0x28, 0xc7, 0xf8, 0xe3, 0xa1, 0x1c, 0x63, 0xc3, 0x23, 0x39, + 0xc6, 0x15, 0x8f, 0xe4, 0x18, 0x4f, 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23, + 0x39, 0xc6, 0x17, 0x8f, 0xe4, 0x18, 0x3e, 0x80, 0xc4, 0x1f, 0xcb, 0x31, 0x9e, 0x78, 0x2c, 0xc7, + 0xc8, 0x25, 0x9c, 0x9c, 0x9f, 0xab, 0x87, 0x66, 0x95, 0x13, 0x1f, 0xdc, 0xa2, 0x00, 0x90, 0x50, + 0x00, 0x63, 0x14, 0x6b, 0x49, 0x65, 0x41, 0x6a, 0xf1, 0x0f, 0x46, 0xc6, 0x45, 0x4c, 0xcc, 0xee, + 0x01, 0x4e, 0xab, 0x98, 0xe4, 0xdc, 0x21, 0x7a, 0x02, 0xa0, 0x7a, 0xf4, 0xc2, 0x53, 0x73, 0x72, + 0xbc, 0xf3, 0xf2, 0xcb, 0xf3, 0x42, 0x40, 0x2a, 0x93, 0xd8, 0xc0, 0x86, 0x19, 0x03, 0x02, 0x00, + 0x00, 0xff, 0xff, 0x43, 0xa0, 0x83, 0xd0, 0xe9, 0x00, 0x00, 0x00, +} + +func (this *FieldMask) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*FieldMask) + if !ok { + that2, ok := that.(FieldMask) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if len(this.Paths) != len(that1.Paths) { + if len(this.Paths) < len(that1.Paths) { + return -1 + } + return 1 + } + for i := range this.Paths { + if this.Paths[i] != that1.Paths[i] { + if this.Paths[i] < that1.Paths[i] { + return -1 + } + return 1 + } + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this 
*FieldMask) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*FieldMask) + if !ok { + that2, ok := that.(FieldMask) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Paths) != len(that1.Paths) { + return false + } + for i := range this.Paths { + if this.Paths[i] != that1.Paths[i] { + return false + } + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *FieldMask) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.FieldMask{") + s = append(s, "Paths: "+fmt.Sprintf("%#v", this.Paths)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringFieldMask(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *FieldMask) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FieldMask) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FieldMask) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Paths) > 0 { + for iNdEx := len(m.Paths) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Paths[iNdEx]) + copy(dAtA[i:], m.Paths[iNdEx]) + i = encodeVarintFieldMask(dAtA, i, uint64(len(m.Paths[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintFieldMask(dAtA []byte, offset int, v uint64) int { + offset -= sovFieldMask(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func NewPopulatedFieldMask(r randyFieldMask, easy bool) *FieldMask { + this := &FieldMask{} + v1 := r.Intn(10) + this.Paths = make([]string, v1) + for i := 0; i < v1; i++ { + this.Paths[i] = string(randStringFieldMask(r)) + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedFieldMask(r, 2) + } + return this +} + +type randyFieldMask interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneFieldMask(r randyFieldMask) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringFieldMask(r randyFieldMask) string { + v2 := r.Intn(100) + tmps := make([]rune, v2) + for i := 0; i < v2; i++ { + tmps[i] = randUTF8RuneFieldMask(r) + } + return string(tmps) +} +func randUnrecognizedFieldMask(r randyFieldMask, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldFieldMask(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldFieldMask(dAtA []byte, r randyFieldMask, fieldNumber int, wire int) []byte { + key := 
uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(key)) + v3 := r.Int63() + if r.Intn(2) == 0 { + v3 *= -1 + } + dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(v3)) + case 1: + dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateFieldMask(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *FieldMask) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Paths) > 0 { + for _, s := range m.Paths { + l = len(s) + n += 1 + l + sovFieldMask(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovFieldMask(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozFieldMask(x uint64) (n int) { + return sovFieldMask(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *FieldMask) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FieldMask{`, + `Paths:` + fmt.Sprintf("%v", this.Paths) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func valueToStringFieldMask(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *FieldMask) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFieldMask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FieldMask: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FieldMask: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFieldMask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthFieldMask + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthFieldMask + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Paths = append(m.Paths, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipFieldMask(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return 
ErrInvalidLengthFieldMask + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthFieldMask + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipFieldMask(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFieldMask + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFieldMask + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFieldMask + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthFieldMask + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupFieldMask + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthFieldMask + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthFieldMask = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowFieldMask = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupFieldMask = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/gogo/protobuf/types/protosize.go b/vendor/github.com/gogo/protobuf/types/protosize.go new file mode 100644 index 00000000..3a2d1b7e --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/protosize.go @@ -0,0 +1,34 @@ +package types + +func (m *Any) ProtoSize() (n int) { return m.Size() } +func (m *Api) ProtoSize() (n int) { return m.Size() } +func (m *Method) ProtoSize() (n int) { return m.Size() } +func (m *Mixin) ProtoSize() (n int) { return m.Size() } +func (m *Duration) ProtoSize() (n int) { return m.Size() } +func (m *Empty) ProtoSize() (n int) { return m.Size() } +func (m *FieldMask) ProtoSize() (n int) { return m.Size() } +func (m *SourceContext) ProtoSize() (n int) { return m.Size() } +func (m *Struct) ProtoSize() (n int) { return m.Size() } +func (m *Value) ProtoSize() (n int) { return m.Size() } +func (m *Value_NullValue) ProtoSize() (n int) { return m.Size() } +func (m *Value_NumberValue) ProtoSize() (n int) { return m.Size() } +func (m *Value_StringValue) ProtoSize() (n int) { return m.Size() } +func (m *Value_BoolValue) ProtoSize() (n int) { return m.Size() } +func (m *Value_StructValue) ProtoSize() (n int) { return m.Size() } +func (m *Value_ListValue) ProtoSize() (n int) { return m.Size() } +func (m *ListValue) ProtoSize() (n int) { return m.Size() } +func (m *Timestamp) ProtoSize() (n int) { return m.Size() } +func (m *Type) ProtoSize() (n int) { return m.Size() } +func (m *Field) ProtoSize() (n int) { return m.Size() } +func (m *Enum) ProtoSize() (n int) { return m.Size() } +func (m *EnumValue) ProtoSize() (n int) { 
return m.Size() } +func (m *Option) ProtoSize() (n int) { return m.Size() } +func (m *DoubleValue) ProtoSize() (n int) { return m.Size() } +func (m *FloatValue) ProtoSize() (n int) { return m.Size() } +func (m *Int64Value) ProtoSize() (n int) { return m.Size() } +func (m *UInt64Value) ProtoSize() (n int) { return m.Size() } +func (m *Int32Value) ProtoSize() (n int) { return m.Size() } +func (m *UInt32Value) ProtoSize() (n int) { return m.Size() } +func (m *BoolValue) ProtoSize() (n int) { return m.Size() } +func (m *StringValue) ProtoSize() (n int) { return m.Size() } +func (m *BytesValue) ProtoSize() (n int) { return m.Size() } diff --git a/vendor/github.com/gogo/protobuf/types/source_context.pb.go b/vendor/github.com/gogo/protobuf/types/source_context.pb.go new file mode 100644 index 00000000..61045ce1 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/source_context.pb.go @@ -0,0 +1,527 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: google/protobuf/source_context.proto + +package types + +import ( + bytes "bytes" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// `SourceContext` represents information about the source of a +// protobuf element, like the file in which it is defined. +type SourceContext struct { + // The path-qualified name of the .proto file that contained the associated + // protobuf element. For example: `"google/protobuf/source_context.proto"`. 
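SourceContext is the smallest message in this set, which makes it a convenient sketch of the fast-path API every type in this package generates (Size, Marshal, Unmarshal, plus getters such as GetFileName, all defined just below). The main wrapper is illustrative:

    package main

    import (
    	"fmt"

    	"github.com/gogo/protobuf/types"
    )

    func main() {
    	sc := &types.SourceContext{FileName: "google/protobuf/source_context.proto"}

    	// Size computes the exact wire length, so Marshal allocates once:
    	// one tag byte, a length varint, then the UTF-8 bytes of FileName.
    	raw, err := sc.Marshal()
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(len(raw) == sc.Size()) // true

    	var back types.SourceContext
    	if err := back.Unmarshal(raw); err != nil {
    		panic(err)
    	}
    	fmt.Println(back.GetFileName()) // google/protobuf/source_context.proto
    }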
+ FileName string `protobuf:"bytes,1,opt,name=file_name,json=fileName,proto3" json:"file_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SourceContext) Reset() { *m = SourceContext{} } +func (*SourceContext) ProtoMessage() {} +func (*SourceContext) Descriptor() ([]byte, []int) { + return fileDescriptor_b686cdb126d509db, []int{0} +} +func (m *SourceContext) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SourceContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SourceContext.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SourceContext) XXX_Merge(src proto.Message) { + xxx_messageInfo_SourceContext.Merge(m, src) +} +func (m *SourceContext) XXX_Size() int { + return m.Size() +} +func (m *SourceContext) XXX_DiscardUnknown() { + xxx_messageInfo_SourceContext.DiscardUnknown(m) +} + +var xxx_messageInfo_SourceContext proto.InternalMessageInfo + +func (m *SourceContext) GetFileName() string { + if m != nil { + return m.FileName + } + return "" +} + +func (*SourceContext) XXX_MessageName() string { + return "google.protobuf.SourceContext" +} +func init() { + proto.RegisterType((*SourceContext)(nil), "google.protobuf.SourceContext") +} + +func init() { + proto.RegisterFile("google/protobuf/source_context.proto", fileDescriptor_b686cdb126d509db) +} + +var fileDescriptor_b686cdb126d509db = []byte{ + // 212 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x49, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xce, 0x2f, 0x2d, + 0x4a, 0x4e, 0x8d, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xad, 0x28, 0xd1, 0x03, 0x8b, 0x0b, 0xf1, 0x43, + 0x54, 0xe9, 0xc1, 0x54, 0x29, 0xe9, 0x70, 0xf1, 0x06, 0x83, 0x15, 0x3a, 0x43, 0xd4, 0x09, 0x49, + 0x73, 0x71, 0xa6, 0x65, 0xe6, 0xa4, 0xc6, 0xe7, 0x25, 0xe6, 0xa6, 0x4a, 0x30, 0x2a, 0x30, 0x6a, + 0x70, 0x06, 0x71, 0x80, 0x04, 0xfc, 0x12, 0x73, 0x53, 0x9d, 0x3a, 0x19, 0x6f, 0x3c, 0x94, 0x63, + 0xf8, 0xf0, 0x50, 0x8e, 0xf1, 0xc7, 0x43, 0x39, 0xc6, 0x86, 0x47, 0x72, 0x8c, 0x2b, 0x1e, 0xc9, + 0x31, 0x9e, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x2f, 0x1e, + 0xc9, 0x31, 0x7c, 0x00, 0x89, 0x3f, 0x96, 0x63, 0x3c, 0xf1, 0x58, 0x8e, 0x91, 0x4b, 0x38, 0x39, + 0x3f, 0x57, 0x0f, 0xcd, 0x56, 0x27, 0x21, 0x14, 0x3b, 0x03, 0x40, 0xc2, 0x01, 0x8c, 0x51, 0xac, + 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c, 0x56, 0x31, 0xc9, 0xb9, 0x43, + 0x34, 0x05, 0x40, 0x35, 0xe9, 0x85, 0xa7, 0xe6, 0xe4, 0x78, 0xe7, 0xe5, 0x97, 0xe7, 0x85, 0x80, + 0x94, 0x25, 0xb1, 0x81, 0x4d, 0x33, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xb8, 0x37, 0x2a, 0xa1, + 0xf9, 0x00, 0x00, 0x00, +} + +func (this *SourceContext) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*SourceContext) + if !ok { + that2, ok := that.(SourceContext) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.FileName != that1.FileName { + if this.FileName < that1.FileName { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + 
return 0 +} +func (this *SourceContext) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*SourceContext) + if !ok { + that2, ok := that.(SourceContext) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.FileName != that1.FileName { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *SourceContext) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.SourceContext{") + s = append(s, "FileName: "+fmt.Sprintf("%#v", this.FileName)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringSourceContext(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *SourceContext) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SourceContext) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SourceContext) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.FileName) > 0 { + i -= len(m.FileName) + copy(dAtA[i:], m.FileName) + i = encodeVarintSourceContext(dAtA, i, uint64(len(m.FileName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintSourceContext(dAtA []byte, offset int, v uint64) int { + offset -= sovSourceContext(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func NewPopulatedSourceContext(r randySourceContext, easy bool) *SourceContext { + this := &SourceContext{} + this.FileName = string(randStringSourceContext(r)) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedSourceContext(r, 2) + } + return this +} + +type randySourceContext interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneSourceContext(r randySourceContext) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringSourceContext(r randySourceContext) string { + v1 := r.Intn(100) + tmps := make([]rune, v1) + for i := 0; i < v1; i++ { + tmps[i] = randUTF8RuneSourceContext(r) + } + return string(tmps) +} +func randUnrecognizedSourceContext(r randySourceContext, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldSourceContext(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldSourceContext(dAtA []byte, r randySourceContext, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = 
encodeVarintPopulateSourceContext(dAtA, uint64(key)) + v2 := r.Int63() + if r.Intn(2) == 0 { + v2 *= -1 + } + dAtA = encodeVarintPopulateSourceContext(dAtA, uint64(v2)) + case 1: + dAtA = encodeVarintPopulateSourceContext(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateSourceContext(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateSourceContext(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateSourceContext(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateSourceContext(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *SourceContext) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.FileName) + if l > 0 { + n += 1 + l + sovSourceContext(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovSourceContext(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozSourceContext(x uint64) (n int) { + return sovSourceContext(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *SourceContext) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SourceContext{`, + `FileName:` + fmt.Sprintf("%v", this.FileName) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func valueToStringSourceContext(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *SourceContext) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSourceContext + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SourceContext: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SourceContext: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FileName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSourceContext + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSourceContext + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthSourceContext + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FileName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSourceContext(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSourceContext + } + 
if (iNdEx + skippy) < 0 { + return ErrInvalidLengthSourceContext + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipSourceContext(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSourceContext + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSourceContext + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSourceContext + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthSourceContext + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupSourceContext + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthSourceContext + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthSourceContext = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowSourceContext = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupSourceContext = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/gogo/protobuf/types/struct.pb.go b/vendor/github.com/gogo/protobuf/types/struct.pb.go new file mode 100644 index 00000000..cea553ee --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/struct.pb.go @@ -0,0 +1,2280 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: google/protobuf/struct.proto + +package types + +import ( + bytes "bytes" + encoding_binary "encoding/binary" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strconv "strconv" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// `NullValue` is a singleton enumeration to represent the null value for the +// `Value` type union. +// +// The JSON representation for `NullValue` is JSON `null`. +type NullValue int32 + +const ( + // Null value. 
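skipSourceContext above walks exactly one field (tracking start/end-group nesting with a depth counter) so unknown fields can be preserved rather than rejected. A tiny sketch of the key split that it and Unmarshal rely on (standalone, hypothetical values):

    package main

    import "fmt"

    func main() {
    	// A field key is a single varint: (fieldNumber << 3) | wireType.
    	key := uint64(0x0a) // what FileName's tag byte decodes to
    	fieldNum := int32(key >> 3)
    	wireType := int(key & 0x7) // 0 varint, 1 fixed64, 2 length-delimited, 5 fixed32
    	fmt.Println(fieldNum, wireType) // 1 2
    }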
+ NullValue_NULL_VALUE NullValue = 0 +) + +var NullValue_name = map[int32]string{ + 0: "NULL_VALUE", +} + +var NullValue_value = map[string]int32{ + "NULL_VALUE": 0, +} + +func (NullValue) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_df322afd6c9fb402, []int{0} +} + +func (NullValue) XXX_WellKnownType() string { return "NullValue" } + +// `Struct` represents a structured data value, consisting of fields +// which map to dynamically typed values. In some languages, `Struct` +// might be supported by a native representation. For example, in +// scripting languages like JS a struct is represented as an +// object. The details of that representation are described together +// with the proto support for the language. +// +// The JSON representation for `Struct` is JSON object. +type Struct struct { + // Unordered map of dynamically typed values. + Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Struct) Reset() { *m = Struct{} } +func (*Struct) ProtoMessage() {} +func (*Struct) Descriptor() ([]byte, []int) { + return fileDescriptor_df322afd6c9fb402, []int{0} +} +func (*Struct) XXX_WellKnownType() string { return "Struct" } +func (m *Struct) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Struct) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Struct.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Struct) XXX_Merge(src proto.Message) { + xxx_messageInfo_Struct.Merge(m, src) +} +func (m *Struct) XXX_Size() int { + return m.Size() +} +func (m *Struct) XXX_DiscardUnknown() { + xxx_messageInfo_Struct.DiscardUnknown(m) +} + +var xxx_messageInfo_Struct proto.InternalMessageInfo + +func (m *Struct) GetFields() map[string]*Value { + if m != nil { + return m.Fields + } + return nil +} + +func (*Struct) XXX_MessageName() string { + return "google.protobuf.Struct" +} + +// `Value` represents a dynamically typed value which can be either +// null, a number, a string, a boolean, a recursive struct value, or a +// list of values. A producer of value is expected to set one of that +// variants, absence of any variant indicates an error. +// +// The JSON representation for `Value` is JSON value. +type Value struct { + // The kind of value. 
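A usage sketch for the Struct generated above, assuming the vendored import path github.com/gogo/protobuf/types and illustrative field names; it builds a Fields map by hand and round-trips it through Marshal/Unmarshal:

    package main

    import (
    	"fmt"

    	"github.com/gogo/protobuf/types"
    )

    func main() {
    	s := &types.Struct{Fields: map[string]*types.Value{
    		"name": {Kind: &types.Value_StringValue{StringValue: "luet"}},
    		"size": {Kind: &types.Value_NumberValue{NumberValue: 42}},
    	}}
    	raw, err := s.Marshal()
    	if err != nil {
    		panic(err)
    	}
    	var out types.Struct
    	if err := out.Unmarshal(raw); err != nil {
    		panic(err)
    	}
    	fmt.Println(out.Fields["name"].GetStringValue()) // luet
    }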
+ // + // Types that are valid to be assigned to Kind: + // *Value_NullValue + // *Value_NumberValue + // *Value_StringValue + // *Value_BoolValue + // *Value_StructValue + // *Value_ListValue + Kind isValue_Kind `protobuf_oneof:"kind"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Value) Reset() { *m = Value{} } +func (*Value) ProtoMessage() {} +func (*Value) Descriptor() ([]byte, []int) { + return fileDescriptor_df322afd6c9fb402, []int{1} +} +func (*Value) XXX_WellKnownType() string { return "Value" } +func (m *Value) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Value.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_Value.Merge(m, src) +} +func (m *Value) XXX_Size() int { + return m.Size() +} +func (m *Value) XXX_DiscardUnknown() { + xxx_messageInfo_Value.DiscardUnknown(m) +} + +var xxx_messageInfo_Value proto.InternalMessageInfo + +type isValue_Kind interface { + isValue_Kind() + Equal(interface{}) bool + MarshalTo([]byte) (int, error) + Size() int + Compare(interface{}) int +} + +type Value_NullValue struct { + NullValue NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof" json:"null_value,omitempty"` +} +type Value_NumberValue struct { + NumberValue float64 `protobuf:"fixed64,2,opt,name=number_value,json=numberValue,proto3,oneof" json:"number_value,omitempty"` +} +type Value_StringValue struct { + StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,proto3,oneof" json:"string_value,omitempty"` +} +type Value_BoolValue struct { + BoolValue bool `protobuf:"varint,4,opt,name=bool_value,json=boolValue,proto3,oneof" json:"bool_value,omitempty"` +} +type Value_StructValue struct { + StructValue *Struct `protobuf:"bytes,5,opt,name=struct_value,json=structValue,proto3,oneof" json:"struct_value,omitempty"` +} +type Value_ListValue struct { + ListValue *ListValue `protobuf:"bytes,6,opt,name=list_value,json=listValue,proto3,oneof" json:"list_value,omitempty"` +} + +func (*Value_NullValue) isValue_Kind() {} +func (*Value_NumberValue) isValue_Kind() {} +func (*Value_StringValue) isValue_Kind() {} +func (*Value_BoolValue) isValue_Kind() {} +func (*Value_StructValue) isValue_Kind() {} +func (*Value_ListValue) isValue_Kind() {} + +func (m *Value) GetKind() isValue_Kind { + if m != nil { + return m.Kind + } + return nil +} + +func (m *Value) GetNullValue() NullValue { + if x, ok := m.GetKind().(*Value_NullValue); ok { + return x.NullValue + } + return NullValue_NULL_VALUE +} + +func (m *Value) GetNumberValue() float64 { + if x, ok := m.GetKind().(*Value_NumberValue); ok { + return x.NumberValue + } + return 0 +} + +func (m *Value) GetStringValue() string { + if x, ok := m.GetKind().(*Value_StringValue); ok { + return x.StringValue + } + return "" +} + +func (m *Value) GetBoolValue() bool { + if x, ok := m.GetKind().(*Value_BoolValue); ok { + return x.BoolValue + } + return false +} + +func (m *Value) GetStructValue() *Struct { + if x, ok := m.GetKind().(*Value_StructValue); ok { + return x.StructValue + } + return nil +} + +func (m *Value) GetListValue() *ListValue { + if x, ok := m.GetKind().(*Value_ListValue); ok { + return 
x.ListValue + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*Value) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Value_NullValue)(nil), + (*Value_NumberValue)(nil), + (*Value_StringValue)(nil), + (*Value_BoolValue)(nil), + (*Value_StructValue)(nil), + (*Value_ListValue)(nil), + } +} + +func (*Value) XXX_MessageName() string { + return "google.protobuf.Value" +} + +// `ListValue` is a wrapper around a repeated field of values. +// +// The JSON representation for `ListValue` is JSON array. +type ListValue struct { + // Repeated field of dynamically typed values. + Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListValue) Reset() { *m = ListValue{} } +func (*ListValue) ProtoMessage() {} +func (*ListValue) Descriptor() ([]byte, []int) { + return fileDescriptor_df322afd6c9fb402, []int{2} +} +func (*ListValue) XXX_WellKnownType() string { return "ListValue" } +func (m *ListValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ListValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ListValue.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ListValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListValue.Merge(m, src) +} +func (m *ListValue) XXX_Size() int { + return m.Size() +} +func (m *ListValue) XXX_DiscardUnknown() { + xxx_messageInfo_ListValue.DiscardUnknown(m) +} + +var xxx_messageInfo_ListValue proto.InternalMessageInfo + +func (m *ListValue) GetValues() []*Value { + if m != nil { + return m.Values + } + return nil +} + +func (*ListValue) XXX_MessageName() string { + return "google.protobuf.ListValue" +} +func init() { + proto.RegisterEnum("google.protobuf.NullValue", NullValue_name, NullValue_value) + proto.RegisterType((*Struct)(nil), "google.protobuf.Struct") + proto.RegisterMapType((map[string]*Value)(nil), "google.protobuf.Struct.FieldsEntry") + proto.RegisterType((*Value)(nil), "google.protobuf.Value") + proto.RegisterType((*ListValue)(nil), "google.protobuf.ListValue") +} + +func init() { proto.RegisterFile("google/protobuf/struct.proto", fileDescriptor_df322afd6c9fb402) } + +var fileDescriptor_df322afd6c9fb402 = []byte{ + // 443 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0xb1, 0x6f, 0xd3, 0x40, + 0x14, 0xc6, 0xfd, 0x9c, 0xc6, 0x22, 0xcf, 0xa8, 0x54, 0x87, 0x04, 0x51, 0x41, 0x47, 0x94, 0x2e, + 0x11, 0x42, 0xae, 0x14, 0x16, 0x44, 0x58, 0x88, 0x54, 0x5a, 0x89, 0xa8, 0x32, 0x86, 0x16, 0x89, + 0x25, 0xc2, 0xae, 0x1b, 0x59, 0xbd, 0xde, 0x55, 0xf6, 0x1d, 0x28, 0x1b, 0x0b, 0xff, 0x03, 0x33, + 0x13, 0x62, 0xe4, 0xaf, 0xe8, 0xc8, 0xc8, 0x48, 0xdc, 0x85, 0xb1, 0x63, 0x47, 0x74, 0x77, 0xb6, + 0x41, 0x8d, 0xb2, 0xf9, 0x7d, 0xf7, 0x7b, 0xdf, 0x7b, 0xdf, 0x33, 0xde, 0x9f, 0x09, 0x31, 0x63, + 0xe9, 0xf6, 0x59, 0x2e, 0xa4, 0x88, 0xd5, 0xf1, 0x76, 0x21, 0x73, 0x95, 0xc8, 0xc0, 0xd4, 0xe4, + 0x96, 0x7d, 0x0d, 0xea, 0xd7, 0xfe, 0x17, 0x40, 0xef, 0xb5, 0x21, 0xc8, 0x08, 0xbd, 0xe3, 0x2c, + 0x65, 0x47, 0x45, 0x17, 0x7a, 0xad, 0x81, 0x3f, 0xdc, 0x0a, 0xae, 0xc1, 0x81, 0x05, 0x83, 0x17, + 0x86, 0xda, 0xe1, 0x32, 0x9f, 0x47, 0x55, 0xcb, 0xe6, 0x2b, 0xf4, 0xff, 0x93, 0xc9, 0x06, 0xb6, + 0x4e, 0xd2, 
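A sketch of consuming the oneof defined above: the generated getters return zero values for the wrong kind, so a type switch on GetKind() is the usual dispatch (the describe helper is illustrative):

    package main

    import (
    	"fmt"

    	"github.com/gogo/protobuf/types"
    )

    func describe(v *types.Value) string {
    	switch k := v.GetKind().(type) {
    	case *types.Value_NullValue:
    		return "null"
    	case *types.Value_NumberValue:
    		return fmt.Sprintf("number %v", k.NumberValue)
    	case *types.Value_StringValue:
    		return fmt.Sprintf("string %q", k.StringValue)
    	case *types.Value_BoolValue:
    		return fmt.Sprintf("bool %v", k.BoolValue)
    	case *types.Value_StructValue:
    		return "struct"
    	case *types.Value_ListValue:
    		return "list"
    	default:
    		return "unset" // nil Kind: no variant was set
    	}
    }

    func main() {
    	fmt.Println(describe(&types.Value{Kind: &types.Value_BoolValue{BoolValue: true}}))
    }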
0x79, 0x17, 0x7a, 0x30, 0xe8, 0x44, 0xfa, 0x93, 0x3c, 0xc2, 0xf6, 0x87, 0xf7, 0x4c, + 0xa5, 0x5d, 0xb7, 0x07, 0x03, 0x7f, 0x78, 0x67, 0xc9, 0xfc, 0x50, 0xbf, 0x46, 0x16, 0x7a, 0xea, + 0x3e, 0x81, 0xfe, 0x0f, 0x17, 0xdb, 0x46, 0x24, 0x23, 0x44, 0xae, 0x18, 0x9b, 0x5a, 0x03, 0x6d, + 0xba, 0x3e, 0xdc, 0x5c, 0x32, 0xd8, 0x57, 0x8c, 0x19, 0x7e, 0xcf, 0x89, 0x3a, 0xbc, 0x2e, 0xc8, + 0x16, 0xde, 0xe4, 0xea, 0x34, 0x4e, 0xf3, 0xe9, 0xbf, 0xf9, 0xb0, 0xe7, 0x44, 0xbe, 0x55, 0x1b, + 0xa8, 0x90, 0x79, 0xc6, 0x67, 0x15, 0xd4, 0xd2, 0x8b, 0x6b, 0xc8, 0xaa, 0x16, 0x7a, 0x80, 0x18, + 0x0b, 0x51, 0xaf, 0xb1, 0xd6, 0x83, 0xc1, 0x0d, 0x3d, 0x4a, 0x6b, 0x16, 0x78, 0x66, 0x5c, 0x54, + 0x22, 0x2b, 0xa4, 0x6d, 0xa2, 0xde, 0x5d, 0x71, 0xc7, 0xca, 0x5e, 0x25, 0xb2, 0x49, 0xc9, 0xb2, + 0xa2, 0xee, 0xf5, 0x4c, 0xef, 0x72, 0xca, 0x49, 0x56, 0xc8, 0x26, 0x25, 0xab, 0x8b, 0xb1, 0x87, + 0x6b, 0x27, 0x19, 0x3f, 0xea, 0x8f, 0xb0, 0xd3, 0x10, 0x24, 0x40, 0xcf, 0x98, 0xd5, 0x7f, 0x74, + 0xd5, 0xd1, 0x2b, 0xea, 0xe1, 0x3d, 0xec, 0x34, 0x47, 0x24, 0xeb, 0x88, 0xfb, 0x07, 0x93, 0xc9, + 0xf4, 0xf0, 0xf9, 0xe4, 0x60, 0x67, 0xc3, 0x19, 0x7f, 0x86, 0x5f, 0x0b, 0xea, 0x5c, 0x2e, 0x28, + 0x5c, 0x2d, 0x28, 0x7c, 0x2a, 0x29, 0x7c, 0x2b, 0x29, 0x9c, 0x97, 0x14, 0x7e, 0x96, 0x14, 0x7e, + 0x97, 0x14, 0xfe, 0x94, 0xd4, 0xb9, 0xd4, 0xfa, 0x05, 0x85, 0xf3, 0x0b, 0x0a, 0x78, 0x3b, 0x11, + 0xa7, 0xd7, 0x47, 0x8e, 0x7d, 0x9b, 0x3e, 0xd4, 0x75, 0x08, 0xef, 0xda, 0x72, 0x7e, 0x96, 0x16, + 0x57, 0x00, 0x5f, 0xdd, 0xd6, 0x6e, 0x38, 0xfe, 0xee, 0xd2, 0x5d, 0xdb, 0x10, 0xd6, 0x3b, 0xbe, + 0x4d, 0x19, 0x7b, 0xc9, 0xc5, 0x47, 0xfe, 0x46, 0x93, 0xb1, 0x67, 0x9c, 0x1e, 0xff, 0x0d, 0x00, + 0x00, 0xff, 0xff, 0x26, 0x30, 0xdb, 0xbe, 0xe9, 0x02, 0x00, 0x00, +} + +func (this *Struct) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Struct) + if !ok { + that2, ok := that.(Struct) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if len(this.Fields) != len(that1.Fields) { + if len(this.Fields) < len(that1.Fields) { + return -1 + } + return 1 + } + for i := range this.Fields { + if c := this.Fields[i].Compare(that1.Fields[i]); c != 0 { + return c + } + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Value) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Value) + if !ok { + that2, ok := that.(Value) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if that1.Kind == nil { + if this.Kind != nil { + return 1 + } + } else if this.Kind == nil { + return -1 + } else { + thisType := -1 + switch this.Kind.(type) { + case *Value_NullValue: + thisType = 0 + case *Value_NumberValue: + thisType = 1 + case *Value_StringValue: + thisType = 2 + case *Value_BoolValue: + thisType = 3 + case *Value_StructValue: + thisType = 4 + case *Value_ListValue: + thisType = 5 + default: + panic(fmt.Sprintf("compare: unexpected type %T in oneof", this.Kind)) + } + that1Type := -1 + switch that1.Kind.(type) { + case *Value_NullValue: + that1Type = 0 + case *Value_NumberValue: + that1Type = 1 + case *Value_StringValue: + that1Type = 2 + case *Value_BoolValue: + that1Type = 3 + case *Value_StructValue: + that1Type = 4 + case *Value_ListValue: 
+ that1Type = 5 + default: + panic(fmt.Sprintf("compare: unexpected type %T in oneof", that1.Kind)) + } + if thisType == that1Type { + if c := this.Kind.Compare(that1.Kind); c != 0 { + return c + } + } else if thisType < that1Type { + return -1 + } else if thisType > that1Type { + return 1 + } + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Value_NullValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Value_NullValue) + if !ok { + that2, ok := that.(Value_NullValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.NullValue != that1.NullValue { + if this.NullValue < that1.NullValue { + return -1 + } + return 1 + } + return 0 +} +func (this *Value_NumberValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Value_NumberValue) + if !ok { + that2, ok := that.(Value_NumberValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.NumberValue != that1.NumberValue { + if this.NumberValue < that1.NumberValue { + return -1 + } + return 1 + } + return 0 +} +func (this *Value_StringValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Value_StringValue) + if !ok { + that2, ok := that.(Value_StringValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.StringValue != that1.StringValue { + if this.StringValue < that1.StringValue { + return -1 + } + return 1 + } + return 0 +} +func (this *Value_BoolValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Value_BoolValue) + if !ok { + that2, ok := that.(Value_BoolValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.BoolValue != that1.BoolValue { + if !this.BoolValue { + return -1 + } + return 1 + } + return 0 +} +func (this *Value_StructValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Value_StructValue) + if !ok { + that2, ok := that.(Value_StructValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if c := this.StructValue.Compare(that1.StructValue); c != 0 { + return c + } + return 0 +} +func (this *Value_ListValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Value_ListValue) + if !ok { + that2, ok := that.(Value_ListValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if c := this.ListValue.Compare(that1.ListValue); c != 0 { + return c + } + return 0 +} +func (this *ListValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok 
:= that.(*ListValue) + if !ok { + that2, ok := that.(ListValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if len(this.Values) != len(that1.Values) { + if len(this.Values) < len(that1.Values) { + return -1 + } + return 1 + } + for i := range this.Values { + if c := this.Values[i].Compare(that1.Values[i]); c != 0 { + return c + } + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (x NullValue) String() string { + s, ok := NullValue_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (this *Struct) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Struct) + if !ok { + that2, ok := that.(Struct) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Fields) != len(that1.Fields) { + return false + } + for i := range this.Fields { + if !this.Fields[i].Equal(that1.Fields[i]) { + return false + } + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Value) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Value) + if !ok { + that2, ok := that.(Value) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if that1.Kind == nil { + if this.Kind != nil { + return false + } + } else if this.Kind == nil { + return false + } else if !this.Kind.Equal(that1.Kind) { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Value_NullValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Value_NullValue) + if !ok { + that2, ok := that.(Value_NullValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.NullValue != that1.NullValue { + return false + } + return true +} +func (this *Value_NumberValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Value_NumberValue) + if !ok { + that2, ok := that.(Value_NumberValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.NumberValue != that1.NumberValue { + return false + } + return true +} +func (this *Value_StringValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Value_StringValue) + if !ok { + that2, ok := that.(Value_StringValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.StringValue != that1.StringValue { + return false + } + return true +} +func (this *Value_BoolValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Value_BoolValue) + if !ok { + that2, ok := that.(Value_BoolValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.BoolValue != that1.BoolValue { + return false + } + 
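An illustrative sketch of what the Compare methods above provide: a deterministic total order (kinds rank null < number < string < bool < struct < list, then values compare within a kind), which makes *Value usable directly as a sort key:

    package main

    import (
    	"fmt"
    	"sort"

    	"github.com/gogo/protobuf/types"
    )

    func main() {
    	vals := []*types.Value{
    		{Kind: &types.Value_StringValue{StringValue: "b"}},
    		{Kind: &types.Value_NumberValue{NumberValue: 7}},
    		{Kind: &types.Value_StringValue{StringValue: "a"}},
    	}
    	sort.Slice(vals, func(i, j int) bool { return vals[i].Compare(vals[j]) < 0 })
    	for _, v := range vals {
    		fmt.Println(v.String())
    	}
    	// The number sorts before both strings: kind rank wins before value.
    }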
return true +} +func (this *Value_StructValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Value_StructValue) + if !ok { + that2, ok := that.(Value_StructValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.StructValue.Equal(that1.StructValue) { + return false + } + return true +} +func (this *Value_ListValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Value_ListValue) + if !ok { + that2, ok := that.(Value_ListValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.ListValue.Equal(that1.ListValue) { + return false + } + return true +} +func (this *ListValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ListValue) + if !ok { + that2, ok := that.(ListValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Values) != len(that1.Values) { + return false + } + for i := range this.Values { + if !this.Values[i].Equal(that1.Values[i]) { + return false + } + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Struct) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.Struct{") + keysForFields := make([]string, 0, len(this.Fields)) + for k := range this.Fields { + keysForFields = append(keysForFields, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForFields) + mapStringForFields := "map[string]*Value{" + for _, k := range keysForFields { + mapStringForFields += fmt.Sprintf("%#v: %#v,", k, this.Fields[k]) + } + mapStringForFields += "}" + if this.Fields != nil { + s = append(s, "Fields: "+mapStringForFields+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Value) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 10) + s = append(s, "&types.Value{") + if this.Kind != nil { + s = append(s, "Kind: "+fmt.Sprintf("%#v", this.Kind)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Value_NullValue) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&types.Value_NullValue{` + + `NullValue:` + fmt.Sprintf("%#v", this.NullValue) + `}`}, ", ") + return s +} +func (this *Value_NumberValue) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&types.Value_NumberValue{` + + `NumberValue:` + fmt.Sprintf("%#v", this.NumberValue) + `}`}, ", ") + return s +} +func (this *Value_StringValue) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&types.Value_StringValue{` + + `StringValue:` + fmt.Sprintf("%#v", this.StringValue) + `}`}, ", ") + return s +} +func (this *Value_BoolValue) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&types.Value_BoolValue{` + + `BoolValue:` + fmt.Sprintf("%#v", this.BoolValue) + `}`}, ", ") + 
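A quick sketch of the generated Equal methods above: deep equality over the oneof variant plus any preserved unknown bytes, with nil handled explicitly:

    package main

    import (
    	"fmt"

    	"github.com/gogo/protobuf/types"
    )

    func main() {
    	a := &types.Value{Kind: &types.Value_NumberValue{NumberValue: 1}}
    	b := &types.Value{Kind: &types.Value_NumberValue{NumberValue: 1}}
    	fmt.Println(a.Equal(b), a.Equal(nil)) // true false
    }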
return s +} +func (this *Value_StructValue) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&types.Value_StructValue{` + + `StructValue:` + fmt.Sprintf("%#v", this.StructValue) + `}`}, ", ") + return s +} +func (this *Value_ListValue) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&types.Value_ListValue{` + + `ListValue:` + fmt.Sprintf("%#v", this.ListValue) + `}`}, ", ") + return s +} +func (this *ListValue) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.ListValue{") + if this.Values != nil { + s = append(s, "Values: "+fmt.Sprintf("%#v", this.Values)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringStruct(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Struct) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Struct) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Struct) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Fields) > 0 { + for k := range m.Fields { + v := m.Fields[k] + baseI := i + if v != nil { + { + size, err := v.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintStruct(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintStruct(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintStruct(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Value) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Value) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Value) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Kind != nil { + { + size := m.Kind.Size() + i -= size + if _, err := m.Kind.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + return len(dAtA) - i, nil +} + +func (m *Value_NullValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Value_NullValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i = encodeVarintStruct(dAtA, i, uint64(m.NullValue)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} +func (m *Value_NumberValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Value_NumberValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i -= 8 + 
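// Note on the NumberValue marshaling step below: the 8 bytes reserved by
// "i -= 8" hold the float64 bit pattern, written little-endian from
// math.Float64bits, since field 2 uses wire type 1 (fixed64); the 0x11 key
// that follows is (2<<3)|1.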
encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.NumberValue)))) + i-- + dAtA[i] = 0x11 + return len(dAtA) - i, nil +} +func (m *Value_StringValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Value_StringValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.StringValue) + copy(dAtA[i:], m.StringValue) + i = encodeVarintStruct(dAtA, i, uint64(len(m.StringValue))) + i-- + dAtA[i] = 0x1a + return len(dAtA) - i, nil +} +func (m *Value_BoolValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Value_BoolValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i-- + if m.BoolValue { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + return len(dAtA) - i, nil +} +func (m *Value_StructValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Value_StructValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.StructValue != nil { + { + size, err := m.StructValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintStruct(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *Value_ListValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Value_ListValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ListValue != nil { + { + size, err := m.ListValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintStruct(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + return len(dAtA) - i, nil +} +func (m *ListValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ListValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Values) > 0 { + for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Values[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintStruct(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintStruct(dAtA []byte, offset int, v uint64) int { + offset -= sovStruct(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func NewPopulatedStruct(r randyStruct, easy bool) *Struct { + this := &Struct{} + if r.Intn(5) == 0 { + v1 := r.Intn(10) + this.Fields = make(map[string]*Value) + for i := 0; i < v1; i++ { + this.Fields[randStringStruct(r)] = NewPopulatedValue(r, easy) + } + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedStruct(r, 2) + } + return this +} + +func NewPopulatedValue(r randyStruct, easy bool) *Value { + this := &Value{} + oneofNumber_Kind := []int32{1, 2, 3, 4, 5, 6}[r.Intn(6)] + switch oneofNumber_Kind { + case 1: + this.Kind = 
NewPopulatedValue_NullValue(r, easy) + case 2: + this.Kind = NewPopulatedValue_NumberValue(r, easy) + case 3: + this.Kind = NewPopulatedValue_StringValue(r, easy) + case 4: + this.Kind = NewPopulatedValue_BoolValue(r, easy) + case 5: + this.Kind = NewPopulatedValue_StructValue(r, easy) + case 6: + this.Kind = NewPopulatedValue_ListValue(r, easy) + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedStruct(r, 7) + } + return this +} + +func NewPopulatedValue_NullValue(r randyStruct, easy bool) *Value_NullValue { + this := &Value_NullValue{} + this.NullValue = NullValue([]int32{0}[r.Intn(1)]) + return this +} +func NewPopulatedValue_NumberValue(r randyStruct, easy bool) *Value_NumberValue { + this := &Value_NumberValue{} + this.NumberValue = float64(r.Float64()) + if r.Intn(2) == 0 { + this.NumberValue *= -1 + } + return this +} +func NewPopulatedValue_StringValue(r randyStruct, easy bool) *Value_StringValue { + this := &Value_StringValue{} + this.StringValue = string(randStringStruct(r)) + return this +} +func NewPopulatedValue_BoolValue(r randyStruct, easy bool) *Value_BoolValue { + this := &Value_BoolValue{} + this.BoolValue = bool(bool(r.Intn(2) == 0)) + return this +} +func NewPopulatedValue_StructValue(r randyStruct, easy bool) *Value_StructValue { + this := &Value_StructValue{} + this.StructValue = NewPopulatedStruct(r, easy) + return this +} +func NewPopulatedValue_ListValue(r randyStruct, easy bool) *Value_ListValue { + this := &Value_ListValue{} + this.ListValue = NewPopulatedListValue(r, easy) + return this +} +func NewPopulatedListValue(r randyStruct, easy bool) *ListValue { + this := &ListValue{} + if r.Intn(5) == 0 { + v2 := r.Intn(5) + this.Values = make([]*Value, v2) + for i := 0; i < v2; i++ { + this.Values[i] = NewPopulatedValue(r, easy) + } + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedStruct(r, 2) + } + return this +} + +type randyStruct interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneStruct(r randyStruct) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringStruct(r randyStruct) string { + v3 := r.Intn(100) + tmps := make([]rune, v3) + for i := 0; i < v3; i++ { + tmps[i] = randUTF8RuneStruct(r) + } + return string(tmps) +} +func randUnrecognizedStruct(r randyStruct, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldStruct(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldStruct(dAtA []byte, r randyStruct, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = encodeVarintPopulateStruct(dAtA, uint64(key)) + v4 := r.Int63() + if r.Intn(2) == 0 { + v4 *= -1 + } + dAtA = encodeVarintPopulateStruct(dAtA, uint64(v4)) + case 1: + dAtA = encodeVarintPopulateStruct(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateStruct(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateStruct(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = 
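The NewPopulated* and rand* helpers here exist for gogoproto's generated tests, which fuzz Marshal/Unmarshal round-trips. A sketch of that use, assuming *rand.Rand (which satisfies the randyStruct interface):

    package main

    import (
    	"fmt"
    	"math/rand"

    	"github.com/gogo/protobuf/types"
    )

    func main() {
    	r := rand.New(rand.NewSource(42))
    	msg := types.NewPopulatedStruct(r, false) // easy=false: may add unknown fields too
    	raw, err := msg.Marshal()
    	if err != nil {
    		panic(err)
    	}
    	var out types.Struct
    	if err := out.Unmarshal(raw); err != nil {
    		panic(err)
    	}
    	fmt.Println(msg.Equal(&out)) // true: the round-trip preserves everything
    }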
encodeVarintPopulateStruct(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateStruct(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *Struct) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Fields) > 0 { + for k, v := range m.Fields { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovStruct(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovStruct(uint64(len(k))) + l + n += mapEntrySize + 1 + sovStruct(uint64(mapEntrySize)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Value) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Kind != nil { + n += m.Kind.Size() + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Value_NullValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovStruct(uint64(m.NullValue)) + return n +} +func (m *Value_NumberValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 9 + return n +} +func (m *Value_StringValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.StringValue) + n += 1 + l + sovStruct(uint64(l)) + return n +} +func (m *Value_BoolValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + return n +} +func (m *Value_StructValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.StructValue != nil { + l = m.StructValue.Size() + n += 1 + l + sovStruct(uint64(l)) + } + return n +} +func (m *Value_ListValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ListValue != nil { + l = m.ListValue.Size() + n += 1 + l + sovStruct(uint64(l)) + } + return n +} +func (m *ListValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Values) > 0 { + for _, e := range m.Values { + l = e.Size() + n += 1 + l + sovStruct(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovStruct(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozStruct(x uint64) (n int) { + return sovStruct(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Struct) String() string { + if this == nil { + return "nil" + } + keysForFields := make([]string, 0, len(this.Fields)) + for k := range this.Fields { + keysForFields = append(keysForFields, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForFields) + mapStringForFields := "map[string]*Value{" + for _, k := range keysForFields { + mapStringForFields += fmt.Sprintf("%v: %v,", k, this.Fields[k]) + } + mapStringForFields += "}" + s := strings.Join([]string{`&Struct{`, + `Fields:` + mapStringForFields + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *Value) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Value{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *Value_NullValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Value_NullValue{`, + `NullValue:` + fmt.Sprintf("%v", this.NullValue) + `,`, + `}`, 
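A sketch of what the Size methods above guarantee: they precompute the exact encoded length, which is how Marshal allocates its buffer up front:

    package main

    import (
    	"fmt"

    	"github.com/gogo/protobuf/types"
    )

    func main() {
    	v := &types.Value{Kind: &types.Value_StringValue{StringValue: "hello"}}
    	raw, _ := v.Marshal()
    	// Both print 7: 1 key byte + 1 length byte + 5 payload bytes.
    	fmt.Println(v.Size(), len(raw))
    }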
+ }, "") + return s +} +func (this *Value_NumberValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Value_NumberValue{`, + `NumberValue:` + fmt.Sprintf("%v", this.NumberValue) + `,`, + `}`, + }, "") + return s +} +func (this *Value_StringValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Value_StringValue{`, + `StringValue:` + fmt.Sprintf("%v", this.StringValue) + `,`, + `}`, + }, "") + return s +} +func (this *Value_BoolValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Value_BoolValue{`, + `BoolValue:` + fmt.Sprintf("%v", this.BoolValue) + `,`, + `}`, + }, "") + return s +} +func (this *Value_StructValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Value_StructValue{`, + `StructValue:` + strings.Replace(fmt.Sprintf("%v", this.StructValue), "Struct", "Struct", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Value_ListValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Value_ListValue{`, + `ListValue:` + strings.Replace(fmt.Sprintf("%v", this.ListValue), "ListValue", "ListValue", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ListValue) String() string { + if this == nil { + return "nil" + } + repeatedStringForValues := "[]*Value{" + for _, f := range this.Values { + repeatedStringForValues += strings.Replace(f.String(), "Value", "Value", 1) + "," + } + repeatedStringForValues += "}" + s := strings.Join([]string{`&ListValue{`, + `Values:` + repeatedStringForValues + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func valueToStringStruct(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Struct) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Struct: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Struct: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Fields", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStruct + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthStruct + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Fields == nil { + m.Fields = make(map[string]*Value) + } + var mapkey string + var mapvalue *Value + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum 
:= int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthStruct + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthStruct + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthStruct + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthStruct + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &Value{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipStruct(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthStruct + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Fields[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipStruct(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthStruct + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthStruct + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
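A sketch of the map-entry layout that Struct.Unmarshal parses above: each Fields entry is itself a nested message (outer field 1, key 0x0a) whose field 1 is the string key and field 2 the *Value:

    package main

    import (
    	"fmt"

    	"github.com/gogo/protobuf/types"
    )

    func main() {
    	s := &types.Struct{Fields: map[string]*types.Value{
    		"a": {Kind: &types.Value_BoolValue{BoolValue: true}},
    	}}
    	raw, _ := s.Marshal()
    	fmt.Printf("% x\n", raw)
    	// 0a 07 | 0a 01 61 | 12 02 20 01
    	// ^Fields entry, len 7; key "a"; value: bool field 4 (key 0x20) = 1
    }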
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Value) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NullValue", wireType) + } + var v NullValue + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= NullValue(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Kind = &Value_NullValue{v} + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field NumberValue", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Kind = &Value_NumberValue{float64(math.Float64frombits(v))} + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StringValue", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthStruct + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthStruct + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = &Value_StringValue{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BoolValue", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Kind = &Value_BoolValue{b} + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StructValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStruct + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthStruct + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Struct{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Kind = &Value_StructValue{v} + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStruct + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthStruct + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ListValue{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Kind = &Value_ListValue{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipStruct(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthStruct + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthStruct + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStruct + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthStruct + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Values = append(m.Values, &Value{}) + if err := m.Values[len(m.Values)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipStruct(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthStruct + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthStruct + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
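The default branches here are what give the generated code forward compatibility: bytes for a field number these messages do not define are measured by skipStruct, stashed in XXX_unrecognized, and re-emitted on Marshal. A sketch with hand-built wire bytes:

    package main

    import (
    	"fmt"

    	"github.com/gogo/protobuf/types"
    )

    func main() {
    	// An unknown varint field 9: key (9<<3)|0 = 0x48, value 5.
    	raw := []byte{0x48, 0x05}
    	var lv types.ListValue
    	if err := lv.Unmarshal(raw); err != nil {
    		panic(err)
    	}
    	out, _ := lv.Marshal()
    	fmt.Printf("% x\n", out) // 48 05: the unknown field round-trips intact
    }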
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipStruct(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowStruct + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowStruct + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowStruct + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthStruct + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupStruct + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthStruct + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthStruct = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowStruct = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupStruct = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/gogo/protobuf/types/timestamp.go b/vendor/github.com/gogo/protobuf/types/timestamp.go new file mode 100644 index 00000000..232ada57 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/timestamp.go @@ -0,0 +1,130 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package types + +// This file implements operations on google.protobuf.Timestamp. + +import ( + "errors" + "fmt" + "time" +) + +const ( + // Seconds field of the earliest valid Timestamp. + // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + minValidSeconds = -62135596800 + // Seconds field just after the latest valid Timestamp. + // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + maxValidSeconds = 253402300800 +) + +// validateTimestamp determines whether a Timestamp is valid. +// A valid timestamp represents a time in the range +// [0001-01-01, 10000-01-01) and has a Nanos field +// in the range [0, 1e9). +// +// If the Timestamp is valid, validateTimestamp returns nil. +// Otherwise, it returns an error that describes +// the problem. +// +// Every valid Timestamp can be represented by a time.Time, but the converse is not true. +func validateTimestamp(ts *Timestamp) error { + if ts == nil { + return errors.New("timestamp: nil Timestamp") + } + if ts.Seconds < minValidSeconds { + return fmt.Errorf("timestamp: %#v before 0001-01-01", ts) + } + if ts.Seconds >= maxValidSeconds { + return fmt.Errorf("timestamp: %#v after 10000-01-01", ts) + } + if ts.Nanos < 0 || ts.Nanos >= 1e9 { + return fmt.Errorf("timestamp: %#v: nanos not in range [0, 1e9)", ts) + } + return nil +} + +// TimestampFromProto converts a google.protobuf.Timestamp proto to a time.Time. +// It returns an error if the argument is invalid. +// +// Unlike most Go functions, if Timestamp returns an error, the first return value +// is not the zero time.Time. Instead, it is the value obtained from the +// time.Unix function when passed the contents of the Timestamp, in the UTC +// locale. This may or may not be a meaningful time; many invalid Timestamps +// do map to valid time.Times. +// +// A nil Timestamp returns an error. The first return value in that case is +// undefined. +func TimestampFromProto(ts *Timestamp) (time.Time, error) { + // Don't return the zero value on error, because corresponds to a valid + // timestamp. Instead return whatever time.Unix gives us. + var t time.Time + if ts == nil { + t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp + } else { + t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC() + } + return t, validateTimestamp(ts) +} + +// TimestampNow returns a google.protobuf.Timestamp for the current time. +func TimestampNow() *Timestamp { + ts, err := TimestampProto(time.Now()) + if err != nil { + panic("ptypes: time.Now() out of Timestamp range") + } + return ts +} + +// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. +// It returns an error if the resulting Timestamp is invalid. +func TimestampProto(t time.Time) (*Timestamp, error) { + ts := &Timestamp{ + Seconds: t.Unix(), + Nanos: int32(t.Nanosecond()), + } + if err := validateTimestamp(ts); err != nil { + return nil, err + } + return ts, nil +} + +// TimestampString returns the RFC 3339 string for valid Timestamps. 
For invalid +// Timestamps, it returns an error message in parentheses. +func TimestampString(ts *Timestamp) string { + t, err := TimestampFromProto(ts) + if err != nil { + return fmt.Sprintf("(%v)", err) + } + return t.Format(time.RFC3339Nano) +} diff --git a/vendor/github.com/gogo/protobuf/types/timestamp.pb.go b/vendor/github.com/gogo/protobuf/types/timestamp.pb.go new file mode 100644 index 00000000..b8187526 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/timestamp.pb.go @@ -0,0 +1,542 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: google/protobuf/timestamp.proto + +package types + +import ( + bytes "bytes" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// A Timestamp represents a point in time independent of any time zone or local +// calendar, encoded as a count of seconds and fractions of seconds at +// nanosecond resolution. The count is relative to an epoch at UTC midnight on +// January 1, 1970, in the proleptic Gregorian calendar which extends the +// Gregorian calendar backwards to year one. +// +// All minutes are 60 seconds long. Leap seconds are "smeared" so that no leap +// second table is needed for interpretation, using a [24-hour linear +// smear](https://developers.google.com/time/smear). +// +// The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. By +// restricting to that range, we ensure that we can convert to and from [RFC +// 3339](https://www.ietf.org/rfc/rfc3339.txt) date strings. +// +// # Examples +// +// Example 1: Compute Timestamp from POSIX `time()`. +// +// Timestamp timestamp; +// timestamp.set_seconds(time(NULL)); +// timestamp.set_nanos(0); +// +// Example 2: Compute Timestamp from POSIX `gettimeofday()`. +// +// struct timeval tv; +// gettimeofday(&tv, NULL); +// +// Timestamp timestamp; +// timestamp.set_seconds(tv.tv_sec); +// timestamp.set_nanos(tv.tv_usec * 1000); +// +// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. +// +// FILETIME ft; +// GetSystemTimeAsFileTime(&ft); +// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; +// +// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z +// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. +// Timestamp timestamp; +// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); +// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); +// +// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. +// +// long millis = System.currentTimeMillis(); +// +// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) +// .setNanos((int) ((millis % 1000) * 1000000)).build(); +// +// +// Example 5: Compute Timestamp from current time in Python. +// +// timestamp = Timestamp() +// timestamp.GetCurrentTime() +// +// # JSON Mapping +// +// In JSON format, the Timestamp type is encoded as a string in the +// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. 
That is, the +// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" +// where {year} is always expressed using four digits while {month}, {day}, +// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional +// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), +// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone +// is required. A proto3 JSON serializer should always use UTC (as indicated by +// "Z") when printing the Timestamp type and a proto3 JSON parser should be +// able to accept both UTC and other timezones (as indicated by an offset). +// +// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past +// 01:30 UTC on January 15, 2017. +// +// In JavaScript, one can convert a Date object to this format using the +// standard +// [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString) +// method. In Python, a standard `datetime.datetime` object can be converted +// to this format using +// [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with +// the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use +// the Joda Time's [`ISODateTimeFormat.dateTime()`]( +// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D +// ) to obtain a formatter capable of generating timestamps in this format. +// +// +type Timestamp struct { + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. 
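+	// For example, the instant 0.5 seconds before the Unix epoch is
+	// encoded as {Seconds: -1, Nanos: 500000000}, not as
+	// {Seconds: 0, Nanos: -500000000}.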
+ Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Timestamp) Reset() { *m = Timestamp{} } +func (*Timestamp) ProtoMessage() {} +func (*Timestamp) Descriptor() ([]byte, []int) { + return fileDescriptor_292007bbfe81227e, []int{0} +} +func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" } +func (m *Timestamp) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Timestamp) XXX_Merge(src proto.Message) { + xxx_messageInfo_Timestamp.Merge(m, src) +} +func (m *Timestamp) XXX_Size() int { + return m.Size() +} +func (m *Timestamp) XXX_DiscardUnknown() { + xxx_messageInfo_Timestamp.DiscardUnknown(m) +} + +var xxx_messageInfo_Timestamp proto.InternalMessageInfo + +func (m *Timestamp) GetSeconds() int64 { + if m != nil { + return m.Seconds + } + return 0 +} + +func (m *Timestamp) GetNanos() int32 { + if m != nil { + return m.Nanos + } + return 0 +} + +func (*Timestamp) XXX_MessageName() string { + return "google.protobuf.Timestamp" +} +func init() { + proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp") +} + +func init() { proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_292007bbfe81227e) } + +var fileDescriptor_292007bbfe81227e = []byte{ + // 212 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d, + 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0xd0, 0x03, 0x0b, 0x09, 0xf1, 0x43, 0x14, 0xe8, 0xc1, 0x14, 0x28, + 0x59, 0x73, 0x71, 0x86, 0xc0, 0xd4, 0x08, 0x49, 0x70, 0xb1, 0x17, 0xa7, 0x26, 0xe7, 0xe7, 0xa5, + 0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0xc1, 0xb8, 0x42, 0x22, 0x5c, 0xac, 0x79, 0x89, + 0x79, 0xf9, 0xc5, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xac, 0x41, 0x10, 0x8e, 0x53, 0x03, 0xe3, 0x8d, + 0x87, 0x72, 0x0c, 0x1f, 0x1e, 0xca, 0x31, 0xae, 0x78, 0x24, 0xc7, 0x78, 0xe2, 0x91, 0x1c, 0xe3, + 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0xbe, 0x78, 0x24, 0xc7, 0xf0, 0xe1, 0x91, 0x1c, + 0xe3, 0x8a, 0xc7, 0x72, 0x8c, 0x27, 0x1e, 0xcb, 0x31, 0x72, 0x09, 0x27, 0xe7, 0xe7, 0xea, 0xa1, + 0x59, 0xee, 0xc4, 0x07, 0xb7, 0x3a, 0x00, 0x24, 0x14, 0xc0, 0x18, 0xc5, 0x5a, 0x52, 0x59, 0x90, + 0x5a, 0xfc, 0x83, 0x91, 0x71, 0x11, 0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, + 0x9e, 0x00, 0xa8, 0x1e, 0xbd, 0xf0, 0xd4, 0x9c, 0x1c, 0xef, 0xbc, 0xfc, 0xf2, 0xbc, 0x10, 0x90, + 0xca, 0x24, 0x36, 0xb0, 0x61, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x0b, 0x23, 0x83, 0xdd, + 0xfa, 0x00, 0x00, 0x00, +} + +func (this *Timestamp) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Timestamp) + if !ok { + that2, ok := that.(Timestamp) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Seconds != that1.Seconds { + if this.Seconds < that1.Seconds { + return -1 + } + return 1 + } + if this.Nanos != that1.Nanos { + if this.Nanos < that1.Nanos { 
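+			// Seconds already compared equal above, and Nanos always lies in
+			// [0, 1e9), so ordering by Nanos here matches chronological order
+			// even for pre-epoch values.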
+ return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Timestamp) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Timestamp) + if !ok { + that2, ok := that.(Timestamp) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Seconds != that1.Seconds { + return false + } + if this.Nanos != that1.Nanos { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Timestamp) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&types.Timestamp{") + s = append(s, "Seconds: "+fmt.Sprintf("%#v", this.Seconds)+",\n") + s = append(s, "Nanos: "+fmt.Sprintf("%#v", this.Nanos)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringTimestamp(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Timestamp) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Timestamp) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Timestamp) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Nanos != 0 { + i = encodeVarintTimestamp(dAtA, i, uint64(m.Nanos)) + i-- + dAtA[i] = 0x10 + } + if m.Seconds != 0 { + i = encodeVarintTimestamp(dAtA, i, uint64(m.Seconds)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintTimestamp(dAtA []byte, offset int, v uint64) int { + offset -= sovTimestamp(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Timestamp) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Seconds != 0 { + n += 1 + sovTimestamp(uint64(m.Seconds)) + } + if m.Nanos != 0 { + n += 1 + sovTimestamp(uint64(m.Nanos)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovTimestamp(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTimestamp(x uint64) (n int) { + return sovTimestamp(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Timestamp) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTimestamp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Timestamp: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Timestamp: illegal tag %d (wire type 
%d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Seconds", wireType) + } + m.Seconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTimestamp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Seconds |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Nanos", wireType) + } + m.Nanos = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTimestamp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Nanos |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTimestamp(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTimestamp + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTimestamp + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTimestamp(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTimestamp + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTimestamp + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTimestamp + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthTimestamp + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupTimestamp + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthTimestamp + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthTimestamp = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTimestamp = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupTimestamp = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/gogo/protobuf/types/timestamp_gogo.go b/vendor/github.com/gogo/protobuf/types/timestamp_gogo.go new file mode 100644 index 00000000..e03fa131 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/timestamp_gogo.go @@ -0,0 +1,94 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2016, The GoGo Authors. All rights reserved. 
+// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package types + +import ( + "time" +) + +func NewPopulatedTimestamp(r interface { + Int63() int64 +}, easy bool) *Timestamp { + this := &Timestamp{} + ns := int64(r.Int63()) + this.Seconds = ns / 1e9 + this.Nanos = int32(ns % 1e9) + return this +} + +func (ts *Timestamp) String() string { + return TimestampString(ts) +} + +func NewPopulatedStdTime(r interface { + Int63() int64 +}, easy bool) *time.Time { + timestamp := NewPopulatedTimestamp(r, easy) + t, err := TimestampFromProto(timestamp) + if err != nil { + return nil + } + return &t +} + +func SizeOfStdTime(t time.Time) int { + ts, err := TimestampProto(t) + if err != nil { + return 0 + } + return ts.Size() +} + +func StdTimeMarshal(t time.Time) ([]byte, error) { + size := SizeOfStdTime(t) + buf := make([]byte, size) + _, err := StdTimeMarshalTo(t, buf) + return buf, err +} + +func StdTimeMarshalTo(t time.Time, data []byte) (int, error) { + ts, err := TimestampProto(t) + if err != nil { + return 0, err + } + return ts.MarshalTo(data) +} + +func StdTimeUnmarshal(t *time.Time, data []byte) error { + ts := &Timestamp{} + if err := ts.Unmarshal(data); err != nil { + return err + } + tt, err := TimestampFromProto(ts) + if err != nil { + return err + } + *t = tt + return nil +} diff --git a/vendor/github.com/gogo/protobuf/types/type.pb.go b/vendor/github.com/gogo/protobuf/types/type.pb.go new file mode 100644 index 00000000..13b7ec02 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/type.pb.go @@ -0,0 +1,3370 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: google/protobuf/type.proto + +package types + +import ( + bytes "bytes" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strconv "strconv" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
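+// The blank identifier below performs that assertion: it references a
+// constant that only a sufficiently recent proto runtime defines, so an
+// outdated runtime fails at compile time rather than misbehaving at run time.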
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// The syntax in which a protocol buffer element is defined. +type Syntax int32 + +const ( + // Syntax `proto2`. + Syntax_SYNTAX_PROTO2 Syntax = 0 + // Syntax `proto3`. + Syntax_SYNTAX_PROTO3 Syntax = 1 +) + +var Syntax_name = map[int32]string{ + 0: "SYNTAX_PROTO2", + 1: "SYNTAX_PROTO3", +} + +var Syntax_value = map[string]int32{ + "SYNTAX_PROTO2": 0, + "SYNTAX_PROTO3": 1, +} + +func (Syntax) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_dd271cc1e348c538, []int{0} +} + +// Basic field types. +type Field_Kind int32 + +const ( + // Field type unknown. + Field_TYPE_UNKNOWN Field_Kind = 0 + // Field type double. + Field_TYPE_DOUBLE Field_Kind = 1 + // Field type float. + Field_TYPE_FLOAT Field_Kind = 2 + // Field type int64. + Field_TYPE_INT64 Field_Kind = 3 + // Field type uint64. + Field_TYPE_UINT64 Field_Kind = 4 + // Field type int32. + Field_TYPE_INT32 Field_Kind = 5 + // Field type fixed64. + Field_TYPE_FIXED64 Field_Kind = 6 + // Field type fixed32. + Field_TYPE_FIXED32 Field_Kind = 7 + // Field type bool. + Field_TYPE_BOOL Field_Kind = 8 + // Field type string. + Field_TYPE_STRING Field_Kind = 9 + // Field type group. Proto2 syntax only, and deprecated. + Field_TYPE_GROUP Field_Kind = 10 + // Field type message. + Field_TYPE_MESSAGE Field_Kind = 11 + // Field type bytes. + Field_TYPE_BYTES Field_Kind = 12 + // Field type uint32. + Field_TYPE_UINT32 Field_Kind = 13 + // Field type enum. + Field_TYPE_ENUM Field_Kind = 14 + // Field type sfixed32. + Field_TYPE_SFIXED32 Field_Kind = 15 + // Field type sfixed64. + Field_TYPE_SFIXED64 Field_Kind = 16 + // Field type sint32. + Field_TYPE_SINT32 Field_Kind = 17 + // Field type sint64. + Field_TYPE_SINT64 Field_Kind = 18 +) + +var Field_Kind_name = map[int32]string{ + 0: "TYPE_UNKNOWN", + 1: "TYPE_DOUBLE", + 2: "TYPE_FLOAT", + 3: "TYPE_INT64", + 4: "TYPE_UINT64", + 5: "TYPE_INT32", + 6: "TYPE_FIXED64", + 7: "TYPE_FIXED32", + 8: "TYPE_BOOL", + 9: "TYPE_STRING", + 10: "TYPE_GROUP", + 11: "TYPE_MESSAGE", + 12: "TYPE_BYTES", + 13: "TYPE_UINT32", + 14: "TYPE_ENUM", + 15: "TYPE_SFIXED32", + 16: "TYPE_SFIXED64", + 17: "TYPE_SINT32", + 18: "TYPE_SINT64", +} + +var Field_Kind_value = map[string]int32{ + "TYPE_UNKNOWN": 0, + "TYPE_DOUBLE": 1, + "TYPE_FLOAT": 2, + "TYPE_INT64": 3, + "TYPE_UINT64": 4, + "TYPE_INT32": 5, + "TYPE_FIXED64": 6, + "TYPE_FIXED32": 7, + "TYPE_BOOL": 8, + "TYPE_STRING": 9, + "TYPE_GROUP": 10, + "TYPE_MESSAGE": 11, + "TYPE_BYTES": 12, + "TYPE_UINT32": 13, + "TYPE_ENUM": 14, + "TYPE_SFIXED32": 15, + "TYPE_SFIXED64": 16, + "TYPE_SINT32": 17, + "TYPE_SINT64": 18, +} + +func (Field_Kind) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_dd271cc1e348c538, []int{1, 0} +} + +// Whether a field is optional, required, or repeated. +type Field_Cardinality int32 + +const ( + // For fields with unknown cardinality. + Field_CARDINALITY_UNKNOWN Field_Cardinality = 0 + // For optional fields. + Field_CARDINALITY_OPTIONAL Field_Cardinality = 1 + // For required fields. Proto2 syntax only. + Field_CARDINALITY_REQUIRED Field_Cardinality = 2 + // For repeated fields. 
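+	// For example, the Type.Fields slice below is generated with "rep" in
+	// its protobuf struct tag and carries this cardinality.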
+ Field_CARDINALITY_REPEATED Field_Cardinality = 3 +) + +var Field_Cardinality_name = map[int32]string{ + 0: "CARDINALITY_UNKNOWN", + 1: "CARDINALITY_OPTIONAL", + 2: "CARDINALITY_REQUIRED", + 3: "CARDINALITY_REPEATED", +} + +var Field_Cardinality_value = map[string]int32{ + "CARDINALITY_UNKNOWN": 0, + "CARDINALITY_OPTIONAL": 1, + "CARDINALITY_REQUIRED": 2, + "CARDINALITY_REPEATED": 3, +} + +func (Field_Cardinality) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_dd271cc1e348c538, []int{1, 1} +} + +// A protocol buffer message type. +type Type struct { + // The fully qualified message name. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The list of fields. + Fields []*Field `protobuf:"bytes,2,rep,name=fields,proto3" json:"fields,omitempty"` + // The list of types appearing in `oneof` definitions in this type. + Oneofs []string `protobuf:"bytes,3,rep,name=oneofs,proto3" json:"oneofs,omitempty"` + // The protocol buffer options. + Options []*Option `protobuf:"bytes,4,rep,name=options,proto3" json:"options,omitempty"` + // The source context. + SourceContext *SourceContext `protobuf:"bytes,5,opt,name=source_context,json=sourceContext,proto3" json:"source_context,omitempty"` + // The source syntax. + Syntax Syntax `protobuf:"varint,6,opt,name=syntax,proto3,enum=google.protobuf.Syntax" json:"syntax,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Type) Reset() { *m = Type{} } +func (*Type) ProtoMessage() {} +func (*Type) Descriptor() ([]byte, []int) { + return fileDescriptor_dd271cc1e348c538, []int{0} +} +func (m *Type) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Type) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Type.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Type) XXX_Merge(src proto.Message) { + xxx_messageInfo_Type.Merge(m, src) +} +func (m *Type) XXX_Size() int { + return m.Size() +} +func (m *Type) XXX_DiscardUnknown() { + xxx_messageInfo_Type.DiscardUnknown(m) +} + +var xxx_messageInfo_Type proto.InternalMessageInfo + +func (m *Type) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Type) GetFields() []*Field { + if m != nil { + return m.Fields + } + return nil +} + +func (m *Type) GetOneofs() []string { + if m != nil { + return m.Oneofs + } + return nil +} + +func (m *Type) GetOptions() []*Option { + if m != nil { + return m.Options + } + return nil +} + +func (m *Type) GetSourceContext() *SourceContext { + if m != nil { + return m.SourceContext + } + return nil +} + +func (m *Type) GetSyntax() Syntax { + if m != nil { + return m.Syntax + } + return Syntax_SYNTAX_PROTO2 +} + +func (*Type) XXX_MessageName() string { + return "google.protobuf.Type" +} + +// A single field of a message type. +type Field struct { + // The field type. + Kind Field_Kind `protobuf:"varint,1,opt,name=kind,proto3,enum=google.protobuf.Field_Kind" json:"kind,omitempty"` + // The field cardinality. + Cardinality Field_Cardinality `protobuf:"varint,2,opt,name=cardinality,proto3,enum=google.protobuf.Field_Cardinality" json:"cardinality,omitempty"` + // The field number. + Number int32 `protobuf:"varint,3,opt,name=number,proto3" json:"number,omitempty"` + // The field name. 
+ Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + // The field type URL, without the scheme, for message or enumeration + // types. Example: `"type.googleapis.com/google.protobuf.Timestamp"`. + TypeUrl string `protobuf:"bytes,6,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` + // The index of the field type in `Type.oneofs`, for message or enumeration + // types. The first type has index 1; zero means the type is not in the list. + OneofIndex int32 `protobuf:"varint,7,opt,name=oneof_index,json=oneofIndex,proto3" json:"oneof_index,omitempty"` + // Whether to use alternative packed wire representation. + Packed bool `protobuf:"varint,8,opt,name=packed,proto3" json:"packed,omitempty"` + // The protocol buffer options. + Options []*Option `protobuf:"bytes,9,rep,name=options,proto3" json:"options,omitempty"` + // The field JSON name. + JsonName string `protobuf:"bytes,10,opt,name=json_name,json=jsonName,proto3" json:"json_name,omitempty"` + // The string value of the default value of this field. Proto2 syntax only. + DefaultValue string `protobuf:"bytes,11,opt,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Field) Reset() { *m = Field{} } +func (*Field) ProtoMessage() {} +func (*Field) Descriptor() ([]byte, []int) { + return fileDescriptor_dd271cc1e348c538, []int{1} +} +func (m *Field) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Field) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Field.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Field) XXX_Merge(src proto.Message) { + xxx_messageInfo_Field.Merge(m, src) +} +func (m *Field) XXX_Size() int { + return m.Size() +} +func (m *Field) XXX_DiscardUnknown() { + xxx_messageInfo_Field.DiscardUnknown(m) +} + +var xxx_messageInfo_Field proto.InternalMessageInfo + +func (m *Field) GetKind() Field_Kind { + if m != nil { + return m.Kind + } + return Field_TYPE_UNKNOWN +} + +func (m *Field) GetCardinality() Field_Cardinality { + if m != nil { + return m.Cardinality + } + return Field_CARDINALITY_UNKNOWN +} + +func (m *Field) GetNumber() int32 { + if m != nil { + return m.Number + } + return 0 +} + +func (m *Field) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Field) GetTypeUrl() string { + if m != nil { + return m.TypeUrl + } + return "" +} + +func (m *Field) GetOneofIndex() int32 { + if m != nil { + return m.OneofIndex + } + return 0 +} + +func (m *Field) GetPacked() bool { + if m != nil { + return m.Packed + } + return false +} + +func (m *Field) GetOptions() []*Option { + if m != nil { + return m.Options + } + return nil +} + +func (m *Field) GetJsonName() string { + if m != nil { + return m.JsonName + } + return "" +} + +func (m *Field) GetDefaultValue() string { + if m != nil { + return m.DefaultValue + } + return "" +} + +func (*Field) XXX_MessageName() string { + return "google.protobuf.Field" +} + +// Enum type definition. +type Enum struct { + // Enum type name. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Enum value definitions. + Enumvalue []*EnumValue `protobuf:"bytes,2,rep,name=enumvalue,proto3" json:"enumvalue,omitempty"` + // Protocol buffer options. 
+ Options []*Option `protobuf:"bytes,3,rep,name=options,proto3" json:"options,omitempty"` + // The source context. + SourceContext *SourceContext `protobuf:"bytes,4,opt,name=source_context,json=sourceContext,proto3" json:"source_context,omitempty"` + // The source syntax. + Syntax Syntax `protobuf:"varint,5,opt,name=syntax,proto3,enum=google.protobuf.Syntax" json:"syntax,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Enum) Reset() { *m = Enum{} } +func (*Enum) ProtoMessage() {} +func (*Enum) Descriptor() ([]byte, []int) { + return fileDescriptor_dd271cc1e348c538, []int{2} +} +func (m *Enum) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Enum) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Enum.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Enum) XXX_Merge(src proto.Message) { + xxx_messageInfo_Enum.Merge(m, src) +} +func (m *Enum) XXX_Size() int { + return m.Size() +} +func (m *Enum) XXX_DiscardUnknown() { + xxx_messageInfo_Enum.DiscardUnknown(m) +} + +var xxx_messageInfo_Enum proto.InternalMessageInfo + +func (m *Enum) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Enum) GetEnumvalue() []*EnumValue { + if m != nil { + return m.Enumvalue + } + return nil +} + +func (m *Enum) GetOptions() []*Option { + if m != nil { + return m.Options + } + return nil +} + +func (m *Enum) GetSourceContext() *SourceContext { + if m != nil { + return m.SourceContext + } + return nil +} + +func (m *Enum) GetSyntax() Syntax { + if m != nil { + return m.Syntax + } + return Syntax_SYNTAX_PROTO2 +} + +func (*Enum) XXX_MessageName() string { + return "google.protobuf.Enum" +} + +// Enum value definition. +type EnumValue struct { + // Enum value name. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Enum value number. + Number int32 `protobuf:"varint,2,opt,name=number,proto3" json:"number,omitempty"` + // Protocol buffer options. 
+ Options []*Option `protobuf:"bytes,3,rep,name=options,proto3" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumValue) Reset() { *m = EnumValue{} } +func (*EnumValue) ProtoMessage() {} +func (*EnumValue) Descriptor() ([]byte, []int) { + return fileDescriptor_dd271cc1e348c538, []int{3} +} +func (m *EnumValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EnumValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EnumValue.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EnumValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumValue.Merge(m, src) +} +func (m *EnumValue) XXX_Size() int { + return m.Size() +} +func (m *EnumValue) XXX_DiscardUnknown() { + xxx_messageInfo_EnumValue.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumValue proto.InternalMessageInfo + +func (m *EnumValue) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *EnumValue) GetNumber() int32 { + if m != nil { + return m.Number + } + return 0 +} + +func (m *EnumValue) GetOptions() []*Option { + if m != nil { + return m.Options + } + return nil +} + +func (*EnumValue) XXX_MessageName() string { + return "google.protobuf.EnumValue" +} + +// A protocol buffer option, which can be attached to a message, field, +// enumeration, etc. +type Option struct { + // The option's name. For protobuf built-in options (options defined in + // descriptor.proto), this is the short name. For example, `"map_entry"`. + // For custom options, it should be the fully-qualified name. For example, + // `"google.api.http"`. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The option's value packed in an Any message. If the value is a primitive, + // the corresponding wrapper type defined in google/protobuf/wrappers.proto + // should be used. If the value is an enum, it should be stored as an int32 + // value using the google.protobuf.Int32Value type. 
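+	// For example, a boolean option value travels as a
+	// google.protobuf.BoolValue message packed into this Any.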
+ Value *Any `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Option) Reset() { *m = Option{} } +func (*Option) ProtoMessage() {} +func (*Option) Descriptor() ([]byte, []int) { + return fileDescriptor_dd271cc1e348c538, []int{4} +} +func (m *Option) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Option) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Option.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Option) XXX_Merge(src proto.Message) { + xxx_messageInfo_Option.Merge(m, src) +} +func (m *Option) XXX_Size() int { + return m.Size() +} +func (m *Option) XXX_DiscardUnknown() { + xxx_messageInfo_Option.DiscardUnknown(m) +} + +var xxx_messageInfo_Option proto.InternalMessageInfo + +func (m *Option) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Option) GetValue() *Any { + if m != nil { + return m.Value + } + return nil +} + +func (*Option) XXX_MessageName() string { + return "google.protobuf.Option" +} +func init() { + proto.RegisterEnum("google.protobuf.Syntax", Syntax_name, Syntax_value) + proto.RegisterEnum("google.protobuf.Field_Kind", Field_Kind_name, Field_Kind_value) + proto.RegisterEnum("google.protobuf.Field_Cardinality", Field_Cardinality_name, Field_Cardinality_value) + proto.RegisterType((*Type)(nil), "google.protobuf.Type") + proto.RegisterType((*Field)(nil), "google.protobuf.Field") + proto.RegisterType((*Enum)(nil), "google.protobuf.Enum") + proto.RegisterType((*EnumValue)(nil), "google.protobuf.EnumValue") + proto.RegisterType((*Option)(nil), "google.protobuf.Option") +} + +func init() { proto.RegisterFile("google/protobuf/type.proto", fileDescriptor_dd271cc1e348c538) } + +var fileDescriptor_dd271cc1e348c538 = []byte{ + // 840 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0xcf, 0x73, 0xda, 0x46, + 0x14, 0xf6, 0x0a, 0x21, 0xa3, 0x87, 0xc1, 0x9b, 0x4d, 0x26, 0x51, 0x9c, 0x19, 0x95, 0xa1, 0x3d, + 0x30, 0x39, 0xe0, 0x29, 0x78, 0x3c, 0xbd, 0x82, 0x91, 0x29, 0x63, 0x22, 0xa9, 0x8b, 0x68, 0xe2, + 0x5e, 0x18, 0x0c, 0x72, 0x86, 0x44, 0xac, 0x18, 0x24, 0x5a, 0x73, 0xeb, 0x4c, 0xcf, 0xfd, 0x27, + 0x7a, 0xea, 0xf4, 0xdc, 0x3f, 0xc2, 0xc7, 0x1e, 0x7b, 0xac, 0xc9, 0xa5, 0xc7, 0x1c, 0x73, 0x6b, + 0x67, 0x57, 0x20, 0x8b, 0x1f, 0x9d, 0x49, 0xdb, 0x1b, 0xef, 0xfb, 0xbe, 0xf7, 0x73, 0x9f, 0x1e, + 0x70, 0xf4, 0xda, 0xf7, 0x5f, 0x7b, 0xee, 0xf1, 0x64, 0xea, 0x87, 0xfe, 0xd5, 0xec, 0xfa, 0x38, + 0x9c, 0x4f, 0xdc, 0xb2, 0xb0, 0xc8, 0x61, 0xc4, 0x95, 0x57, 0xdc, 0xd1, 0xd3, 0x4d, 0x71, 0x9f, + 0xcd, 0x23, 0xf6, 0xe8, 0xb3, 0x4d, 0x2a, 0xf0, 0x67, 0xd3, 0x81, 0xdb, 0x1b, 0xf8, 0x2c, 0x74, + 0x6f, 0xc2, 0x48, 0x55, 0xfc, 0x51, 0x02, 0xd9, 0x99, 0x4f, 0x5c, 0x42, 0x40, 0x66, 0xfd, 0xb1, + 0xab, 0xa1, 0x02, 0x2a, 0xa9, 0x54, 0xfc, 0x26, 0x65, 0x50, 0xae, 0x47, 0xae, 0x37, 0x0c, 0x34, + 0xa9, 0x90, 0x2a, 0x65, 0x2b, 0x8f, 0xcb, 0x1b, 0xf9, 0xcb, 0xe7, 0x9c, 0xa6, 0x4b, 0x15, 0x79, + 0x0c, 0x8a, 0xcf, 0x5c, 0xff, 0x3a, 0xd0, 0x52, 0x85, 0x54, 0x49, 0xa5, 0x4b, 0x8b, 0x7c, 0x0e, + 0xfb, 0xfe, 0x24, 0x1c, 0xf9, 0x2c, 0xd0, 0x64, 0x11, 0xe8, 0xc9, 0x56, 0x20, 0x4b, 0xf0, 0x74, + 0xa5, 0x23, 0x06, 0xe4, 0xd7, 0xeb, 0xd5, 0xd2, 0x05, 0x54, 0xca, 0x56, 0xf4, 0x2d, 
0xcf, 0x8e, + 0x90, 0x9d, 0x45, 0x2a, 0x9a, 0x0b, 0x92, 0x26, 0x39, 0x06, 0x25, 0x98, 0xb3, 0xb0, 0x7f, 0xa3, + 0x29, 0x05, 0x54, 0xca, 0xef, 0x48, 0xdc, 0x11, 0x34, 0x5d, 0xca, 0x8a, 0xbf, 0x2a, 0x90, 0x16, + 0x4d, 0x91, 0x63, 0x90, 0xdf, 0x8e, 0xd8, 0x50, 0x0c, 0x24, 0x5f, 0x79, 0xb6, 0xbb, 0xf5, 0xf2, + 0xc5, 0x88, 0x0d, 0xa9, 0x10, 0x92, 0x06, 0x64, 0x07, 0xfd, 0xe9, 0x70, 0xc4, 0xfa, 0xde, 0x28, + 0x9c, 0x6b, 0x92, 0xf0, 0x2b, 0xfe, 0x83, 0xdf, 0xd9, 0xbd, 0x92, 0x26, 0xdd, 0xf8, 0x0c, 0xd9, + 0x6c, 0x7c, 0xe5, 0x4e, 0xb5, 0x54, 0x01, 0x95, 0xd2, 0x74, 0x69, 0xc5, 0xef, 0x23, 0x27, 0xde, + 0xe7, 0x29, 0x64, 0xf8, 0x72, 0xf4, 0x66, 0x53, 0x4f, 0xf4, 0xa7, 0xd2, 0x7d, 0x6e, 0x77, 0xa7, + 0x1e, 0xf9, 0x04, 0xb2, 0x62, 0xf8, 0xbd, 0x11, 0x1b, 0xba, 0x37, 0xda, 0xbe, 0x88, 0x05, 0x02, + 0x6a, 0x71, 0x84, 0xe7, 0x99, 0xf4, 0x07, 0x6f, 0xdd, 0xa1, 0x96, 0x29, 0xa0, 0x52, 0x86, 0x2e, + 0xad, 0xe4, 0x5b, 0xa9, 0x1f, 0xf9, 0x56, 0xcf, 0x40, 0x7d, 0x13, 0xf8, 0xac, 0x27, 0xea, 0x03, + 0x51, 0x47, 0x86, 0x03, 0x26, 0xaf, 0xf1, 0x53, 0xc8, 0x0d, 0xdd, 0xeb, 0xfe, 0xcc, 0x0b, 0x7b, + 0xdf, 0xf6, 0xbd, 0x99, 0xab, 0x65, 0x85, 0xe0, 0x60, 0x09, 0x7e, 0xcd, 0xb1, 0xe2, 0xad, 0x04, + 0x32, 0x9f, 0x24, 0xc1, 0x70, 0xe0, 0x5c, 0xda, 0x46, 0xaf, 0x6b, 0x5e, 0x98, 0xd6, 0x4b, 0x13, + 0xef, 0x91, 0x43, 0xc8, 0x0a, 0xa4, 0x61, 0x75, 0xeb, 0x6d, 0x03, 0x23, 0x92, 0x07, 0x10, 0xc0, + 0x79, 0xdb, 0xaa, 0x39, 0x58, 0x8a, 0xed, 0x96, 0xe9, 0x9c, 0x9e, 0xe0, 0x54, 0xec, 0xd0, 0x8d, + 0x00, 0x39, 0x29, 0xa8, 0x56, 0x70, 0x3a, 0xce, 0x71, 0xde, 0x7a, 0x65, 0x34, 0x4e, 0x4f, 0xb0, + 0xb2, 0x8e, 0x54, 0x2b, 0x78, 0x9f, 0xe4, 0x40, 0x15, 0x48, 0xdd, 0xb2, 0xda, 0x38, 0x13, 0xc7, + 0xec, 0x38, 0xb4, 0x65, 0x36, 0xb1, 0x1a, 0xc7, 0x6c, 0x52, 0xab, 0x6b, 0x63, 0x88, 0x23, 0xbc, + 0x30, 0x3a, 0x9d, 0x5a, 0xd3, 0xc0, 0xd9, 0x58, 0x51, 0xbf, 0x74, 0x8c, 0x0e, 0x3e, 0x58, 0x2b, + 0xab, 0x5a, 0xc1, 0xb9, 0x38, 0x85, 0x61, 0x76, 0x5f, 0xe0, 0x3c, 0x79, 0x00, 0xb9, 0x28, 0xc5, + 0xaa, 0x88, 0xc3, 0x0d, 0xe8, 0xf4, 0x04, 0xe3, 0xfb, 0x42, 0xa2, 0x28, 0x0f, 0xd6, 0x80, 0xd3, + 0x13, 0x4c, 0x8a, 0x21, 0x64, 0x13, 0xbb, 0x45, 0x9e, 0xc0, 0xc3, 0xb3, 0x1a, 0x6d, 0xb4, 0xcc, + 0x5a, 0xbb, 0xe5, 0x5c, 0x26, 0xe6, 0xaa, 0xc1, 0xa3, 0x24, 0x61, 0xd9, 0x4e, 0xcb, 0x32, 0x6b, + 0x6d, 0x8c, 0x36, 0x19, 0x6a, 0x7c, 0xd5, 0x6d, 0x51, 0xa3, 0x81, 0xa5, 0x6d, 0xc6, 0x36, 0x6a, + 0x8e, 0xd1, 0xc0, 0xa9, 0xe2, 0x5f, 0x08, 0x64, 0x83, 0xcd, 0xc6, 0x3b, 0xcf, 0xc8, 0x17, 0xa0, + 0xba, 0x6c, 0x36, 0x8e, 0x9e, 0x3f, 0xba, 0x24, 0x47, 0x5b, 0x4b, 0xc5, 0xbd, 0xc5, 0x32, 0xd0, + 0x7b, 0x71, 0x72, 0x19, 0x53, 0xff, 0xf9, 0x70, 0xc8, 0xff, 0xef, 0x70, 0xa4, 0x3f, 0xee, 0x70, + 0xbc, 0x01, 0x35, 0x6e, 0x61, 0xe7, 0x14, 0xee, 0x3f, 0x6c, 0x69, 0xed, 0xc3, 0xfe, 0xf7, 0x3d, + 0x16, 0xbf, 0x04, 0x25, 0x82, 0x76, 0x26, 0x7a, 0x0e, 0xe9, 0xd5, 0xa8, 0x79, 0xe3, 0x8f, 0xb6, + 0xc2, 0xd5, 0xd8, 0x9c, 0x46, 0x92, 0xe7, 0x65, 0x50, 0xa2, 0x3e, 0xf8, 0xb2, 0x75, 0x2e, 0x4d, + 0xa7, 0xf6, 0xaa, 0x67, 0x53, 0xcb, 0xb1, 0x2a, 0x78, 0x6f, 0x13, 0xaa, 0x62, 0x54, 0xff, 0x01, + 0xfd, 0x7e, 0xa7, 0xef, 0xbd, 0xbf, 0xd3, 0xd1, 0x87, 0x3b, 0x1d, 0x7d, 0xbf, 0xd0, 0xd1, 0xcf, + 0x0b, 0x1d, 0xdd, 0x2e, 0x74, 0xf4, 0xdb, 0x42, 0x47, 0x7f, 0x2c, 0x74, 0xf4, 0xe7, 0x42, 0xdf, + 0x7b, 0xcf, 0xf1, 0x77, 0x3a, 0xba, 0x7d, 0xa7, 0x23, 0x78, 0x38, 0xf0, 0xc7, 0x9b, 0x25, 0xd4, + 0x55, 0xfe, 0x9f, 0x63, 0x73, 0xcb, 0x46, 0xdf, 0xa4, 0xf9, 0xd1, 0x0a, 0x3e, 0x20, 0xf4, 0x93, + 0x94, 0x6a, 0xda, 0xf5, 0x5f, 0x24, 0xbd, 0x19, 0xc9, 0xed, 0x55, 0xc5, 0x2f, 0x5d, 0xcf, 0xbb, + 0x60, 0xfe, 
0x77, 0x8c, 0xbb, 0x05, 0x57, 0x8a, 0x88, 0x53, 0xfd, 0x3b, 0x00, 0x00, 0xff, 0xff, + 0xbc, 0x2a, 0x5e, 0x82, 0x2b, 0x07, 0x00, 0x00, +} + +func (this *Type) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Type) + if !ok { + that2, ok := that.(Type) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Name != that1.Name { + if this.Name < that1.Name { + return -1 + } + return 1 + } + if len(this.Fields) != len(that1.Fields) { + if len(this.Fields) < len(that1.Fields) { + return -1 + } + return 1 + } + for i := range this.Fields { + if c := this.Fields[i].Compare(that1.Fields[i]); c != 0 { + return c + } + } + if len(this.Oneofs) != len(that1.Oneofs) { + if len(this.Oneofs) < len(that1.Oneofs) { + return -1 + } + return 1 + } + for i := range this.Oneofs { + if this.Oneofs[i] != that1.Oneofs[i] { + if this.Oneofs[i] < that1.Oneofs[i] { + return -1 + } + return 1 + } + } + if len(this.Options) != len(that1.Options) { + if len(this.Options) < len(that1.Options) { + return -1 + } + return 1 + } + for i := range this.Options { + if c := this.Options[i].Compare(that1.Options[i]); c != 0 { + return c + } + } + if c := this.SourceContext.Compare(that1.SourceContext); c != 0 { + return c + } + if this.Syntax != that1.Syntax { + if this.Syntax < that1.Syntax { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Field) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Field) + if !ok { + that2, ok := that.(Field) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Kind != that1.Kind { + if this.Kind < that1.Kind { + return -1 + } + return 1 + } + if this.Cardinality != that1.Cardinality { + if this.Cardinality < that1.Cardinality { + return -1 + } + return 1 + } + if this.Number != that1.Number { + if this.Number < that1.Number { + return -1 + } + return 1 + } + if this.Name != that1.Name { + if this.Name < that1.Name { + return -1 + } + return 1 + } + if this.TypeUrl != that1.TypeUrl { + if this.TypeUrl < that1.TypeUrl { + return -1 + } + return 1 + } + if this.OneofIndex != that1.OneofIndex { + if this.OneofIndex < that1.OneofIndex { + return -1 + } + return 1 + } + if this.Packed != that1.Packed { + if !this.Packed { + return -1 + } + return 1 + } + if len(this.Options) != len(that1.Options) { + if len(this.Options) < len(that1.Options) { + return -1 + } + return 1 + } + for i := range this.Options { + if c := this.Options[i].Compare(that1.Options[i]); c != 0 { + return c + } + } + if this.JsonName != that1.JsonName { + if this.JsonName < that1.JsonName { + return -1 + } + return 1 + } + if this.DefaultValue != that1.DefaultValue { + if this.DefaultValue < that1.DefaultValue { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Enum) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Enum) + if !ok { + that2, ok := that.(Enum) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + 
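+		// that1 is nil while this is not: in these generated Compare methods
+		// a nil message always sorts before a non-nil one.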
return 1 + } else if this == nil { + return -1 + } + if this.Name != that1.Name { + if this.Name < that1.Name { + return -1 + } + return 1 + } + if len(this.Enumvalue) != len(that1.Enumvalue) { + if len(this.Enumvalue) < len(that1.Enumvalue) { + return -1 + } + return 1 + } + for i := range this.Enumvalue { + if c := this.Enumvalue[i].Compare(that1.Enumvalue[i]); c != 0 { + return c + } + } + if len(this.Options) != len(that1.Options) { + if len(this.Options) < len(that1.Options) { + return -1 + } + return 1 + } + for i := range this.Options { + if c := this.Options[i].Compare(that1.Options[i]); c != 0 { + return c + } + } + if c := this.SourceContext.Compare(that1.SourceContext); c != 0 { + return c + } + if this.Syntax != that1.Syntax { + if this.Syntax < that1.Syntax { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *EnumValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*EnumValue) + if !ok { + that2, ok := that.(EnumValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Name != that1.Name { + if this.Name < that1.Name { + return -1 + } + return 1 + } + if this.Number != that1.Number { + if this.Number < that1.Number { + return -1 + } + return 1 + } + if len(this.Options) != len(that1.Options) { + if len(this.Options) < len(that1.Options) { + return -1 + } + return 1 + } + for i := range this.Options { + if c := this.Options[i].Compare(that1.Options[i]); c != 0 { + return c + } + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Option) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Option) + if !ok { + that2, ok := that.(Option) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Name != that1.Name { + if this.Name < that1.Name { + return -1 + } + return 1 + } + if c := this.Value.Compare(that1.Value); c != 0 { + return c + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (x Syntax) String() string { + s, ok := Syntax_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (x Field_Kind) String() string { + s, ok := Field_Kind_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (x Field_Cardinality) String() string { + s, ok := Field_Cardinality_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (this *Type) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Type) + if !ok { + that2, ok := that.(Type) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if len(this.Fields) != len(that1.Fields) { + return false + } + for i := range this.Fields { + if !this.Fields[i].Equal(that1.Fields[i]) { + return false + } + } + if len(this.Oneofs) != len(that1.Oneofs) { + return false + } + for i := range this.Oneofs { + if this.Oneofs[i] != that1.Oneofs[i] { + return false + } + } + if 
len(this.Options) != len(that1.Options) { + return false + } + for i := range this.Options { + if !this.Options[i].Equal(that1.Options[i]) { + return false + } + } + if !this.SourceContext.Equal(that1.SourceContext) { + return false + } + if this.Syntax != that1.Syntax { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Field) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Field) + if !ok { + that2, ok := that.(Field) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Kind != that1.Kind { + return false + } + if this.Cardinality != that1.Cardinality { + return false + } + if this.Number != that1.Number { + return false + } + if this.Name != that1.Name { + return false + } + if this.TypeUrl != that1.TypeUrl { + return false + } + if this.OneofIndex != that1.OneofIndex { + return false + } + if this.Packed != that1.Packed { + return false + } + if len(this.Options) != len(that1.Options) { + return false + } + for i := range this.Options { + if !this.Options[i].Equal(that1.Options[i]) { + return false + } + } + if this.JsonName != that1.JsonName { + return false + } + if this.DefaultValue != that1.DefaultValue { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Enum) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Enum) + if !ok { + that2, ok := that.(Enum) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if len(this.Enumvalue) != len(that1.Enumvalue) { + return false + } + for i := range this.Enumvalue { + if !this.Enumvalue[i].Equal(that1.Enumvalue[i]) { + return false + } + } + if len(this.Options) != len(that1.Options) { + return false + } + for i := range this.Options { + if !this.Options[i].Equal(that1.Options[i]) { + return false + } + } + if !this.SourceContext.Equal(that1.SourceContext) { + return false + } + if this.Syntax != that1.Syntax { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *EnumValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*EnumValue) + if !ok { + that2, ok := that.(EnumValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if this.Number != that1.Number { + return false + } + if len(this.Options) != len(that1.Options) { + return false + } + for i := range this.Options { + if !this.Options[i].Equal(that1.Options[i]) { + return false + } + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Option) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Option) + if !ok { + that2, ok := that.(Option) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if !this.Value.Equal(that1.Value) { + return false + } + 
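+	// Unknown fields participate in equality as well: two Options differing
+	// only in unparsed wire bytes are not Equal.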
if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Type) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 10) + s = append(s, "&types.Type{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + if this.Fields != nil { + s = append(s, "Fields: "+fmt.Sprintf("%#v", this.Fields)+",\n") + } + s = append(s, "Oneofs: "+fmt.Sprintf("%#v", this.Oneofs)+",\n") + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.SourceContext != nil { + s = append(s, "SourceContext: "+fmt.Sprintf("%#v", this.SourceContext)+",\n") + } + s = append(s, "Syntax: "+fmt.Sprintf("%#v", this.Syntax)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Field) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 14) + s = append(s, "&types.Field{") + s = append(s, "Kind: "+fmt.Sprintf("%#v", this.Kind)+",\n") + s = append(s, "Cardinality: "+fmt.Sprintf("%#v", this.Cardinality)+",\n") + s = append(s, "Number: "+fmt.Sprintf("%#v", this.Number)+",\n") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + s = append(s, "TypeUrl: "+fmt.Sprintf("%#v", this.TypeUrl)+",\n") + s = append(s, "OneofIndex: "+fmt.Sprintf("%#v", this.OneofIndex)+",\n") + s = append(s, "Packed: "+fmt.Sprintf("%#v", this.Packed)+",\n") + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + s = append(s, "JsonName: "+fmt.Sprintf("%#v", this.JsonName)+",\n") + s = append(s, "DefaultValue: "+fmt.Sprintf("%#v", this.DefaultValue)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Enum) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&types.Enum{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + if this.Enumvalue != nil { + s = append(s, "Enumvalue: "+fmt.Sprintf("%#v", this.Enumvalue)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.SourceContext != nil { + s = append(s, "SourceContext: "+fmt.Sprintf("%#v", this.SourceContext)+",\n") + } + s = append(s, "Syntax: "+fmt.Sprintf("%#v", this.Syntax)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumValue) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&types.EnumValue{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + s = append(s, "Number: "+fmt.Sprintf("%#v", this.Number)+",\n") + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Option) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&types.Option{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + if this.Value != nil { + s = append(s, "Value: "+fmt.Sprintf("%#v", 
this.Value)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringType(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Type) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Type) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Type) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Syntax != 0 { + i = encodeVarintType(dAtA, i, uint64(m.Syntax)) + i-- + dAtA[i] = 0x30 + } + if m.SourceContext != nil { + { + size, err := m.SourceContext.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintType(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if len(m.Options) > 0 { + for iNdEx := len(m.Options) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Options[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintType(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.Oneofs) > 0 { + for iNdEx := len(m.Oneofs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Oneofs[iNdEx]) + copy(dAtA[i:], m.Oneofs[iNdEx]) + i = encodeVarintType(dAtA, i, uint64(len(m.Oneofs[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Fields) > 0 { + for iNdEx := len(m.Fields) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Fields[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintType(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintType(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Field) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Field) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Field) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.DefaultValue) > 0 { + i -= len(m.DefaultValue) + copy(dAtA[i:], m.DefaultValue) + i = encodeVarintType(dAtA, i, uint64(len(m.DefaultValue))) + i-- + dAtA[i] = 0x5a + } + if len(m.JsonName) > 0 { + i -= len(m.JsonName) + copy(dAtA[i:], m.JsonName) + i = encodeVarintType(dAtA, i, uint64(len(m.JsonName))) + i-- + dAtA[i] = 0x52 + } + if len(m.Options) > 0 { + for iNdEx := len(m.Options) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Options[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintType(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + } + if m.Packed { + i-- + if m.Packed { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + 
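+			// This inner else is unreachable, since the enclosing "if m.Packed"
+			// guard already holds; the generator emits its generic bool-encoding
+			// pattern unconditionally.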
} + i-- + dAtA[i] = 0x40 + } + if m.OneofIndex != 0 { + i = encodeVarintType(dAtA, i, uint64(m.OneofIndex)) + i-- + dAtA[i] = 0x38 + } + if len(m.TypeUrl) > 0 { + i -= len(m.TypeUrl) + copy(dAtA[i:], m.TypeUrl) + i = encodeVarintType(dAtA, i, uint64(len(m.TypeUrl))) + i-- + dAtA[i] = 0x32 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintType(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x22 + } + if m.Number != 0 { + i = encodeVarintType(dAtA, i, uint64(m.Number)) + i-- + dAtA[i] = 0x18 + } + if m.Cardinality != 0 { + i = encodeVarintType(dAtA, i, uint64(m.Cardinality)) + i-- + dAtA[i] = 0x10 + } + if m.Kind != 0 { + i = encodeVarintType(dAtA, i, uint64(m.Kind)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Enum) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Enum) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Enum) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Syntax != 0 { + i = encodeVarintType(dAtA, i, uint64(m.Syntax)) + i-- + dAtA[i] = 0x28 + } + if m.SourceContext != nil { + { + size, err := m.SourceContext.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintType(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if len(m.Options) > 0 { + for iNdEx := len(m.Options) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Options[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintType(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Enumvalue) > 0 { + for iNdEx := len(m.Enumvalue) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Enumvalue[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintType(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintType(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *EnumValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EnumValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EnumValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Options) > 0 { + for iNdEx := len(m.Options) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Options[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintType(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.Number != 0 { + i = encodeVarintType(dAtA, i, uint64(m.Number)) + i-- + dAtA[i] = 0x10 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintType(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Option) Marshal() 
(dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Option) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Option) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Value != nil { + { + size, err := m.Value.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintType(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintType(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintType(dAtA []byte, offset int, v uint64) int { + offset -= sovType(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func NewPopulatedType(r randyType, easy bool) *Type { + this := &Type{} + this.Name = string(randStringType(r)) + if r.Intn(5) != 0 { + v1 := r.Intn(5) + this.Fields = make([]*Field, v1) + for i := 0; i < v1; i++ { + this.Fields[i] = NewPopulatedField(r, easy) + } + } + v2 := r.Intn(10) + this.Oneofs = make([]string, v2) + for i := 0; i < v2; i++ { + this.Oneofs[i] = string(randStringType(r)) + } + if r.Intn(5) != 0 { + v3 := r.Intn(5) + this.Options = make([]*Option, v3) + for i := 0; i < v3; i++ { + this.Options[i] = NewPopulatedOption(r, easy) + } + } + if r.Intn(5) != 0 { + this.SourceContext = NewPopulatedSourceContext(r, easy) + } + this.Syntax = Syntax([]int32{0, 1}[r.Intn(2)]) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedType(r, 7) + } + return this +} + +func NewPopulatedField(r randyType, easy bool) *Field { + this := &Field{} + this.Kind = Field_Kind([]int32{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18}[r.Intn(19)]) + this.Cardinality = Field_Cardinality([]int32{0, 1, 2, 3}[r.Intn(4)]) + this.Number = int32(r.Int31()) + if r.Intn(2) == 0 { + this.Number *= -1 + } + this.Name = string(randStringType(r)) + this.TypeUrl = string(randStringType(r)) + this.OneofIndex = int32(r.Int31()) + if r.Intn(2) == 0 { + this.OneofIndex *= -1 + } + this.Packed = bool(bool(r.Intn(2) == 0)) + if r.Intn(5) != 0 { + v4 := r.Intn(5) + this.Options = make([]*Option, v4) + for i := 0; i < v4; i++ { + this.Options[i] = NewPopulatedOption(r, easy) + } + } + this.JsonName = string(randStringType(r)) + this.DefaultValue = string(randStringType(r)) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedType(r, 12) + } + return this +} + +func NewPopulatedEnum(r randyType, easy bool) *Enum { + this := &Enum{} + this.Name = string(randStringType(r)) + if r.Intn(5) != 0 { + v5 := r.Intn(5) + this.Enumvalue = make([]*EnumValue, v5) + for i := 0; i < v5; i++ { + this.Enumvalue[i] = NewPopulatedEnumValue(r, easy) + } + } + if r.Intn(5) != 0 { + v6 := r.Intn(5) + this.Options = make([]*Option, v6) + for i := 0; i < v6; i++ { + this.Options[i] = NewPopulatedOption(r, easy) + } + } + if r.Intn(5) != 0 { + this.SourceContext = NewPopulatedSourceContext(r, easy) + } + this.Syntax = Syntax([]int32{0, 1}[r.Intn(2)]) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedType(r, 6) + } + return this +} + +func 
NewPopulatedEnumValue(r randyType, easy bool) *EnumValue { + this := &EnumValue{} + this.Name = string(randStringType(r)) + this.Number = int32(r.Int31()) + if r.Intn(2) == 0 { + this.Number *= -1 + } + if r.Intn(5) != 0 { + v7 := r.Intn(5) + this.Options = make([]*Option, v7) + for i := 0; i < v7; i++ { + this.Options[i] = NewPopulatedOption(r, easy) + } + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedType(r, 4) + } + return this +} + +func NewPopulatedOption(r randyType, easy bool) *Option { + this := &Option{} + this.Name = string(randStringType(r)) + if r.Intn(5) != 0 { + this.Value = NewPopulatedAny(r, easy) + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedType(r, 3) + } + return this +} + +type randyType interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneType(r randyType) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringType(r randyType) string { + v8 := r.Intn(100) + tmps := make([]rune, v8) + for i := 0; i < v8; i++ { + tmps[i] = randUTF8RuneType(r) + } + return string(tmps) +} +func randUnrecognizedType(r randyType, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldType(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldType(dAtA []byte, r randyType, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = encodeVarintPopulateType(dAtA, uint64(key)) + v9 := r.Int63() + if r.Intn(2) == 0 { + v9 *= -1 + } + dAtA = encodeVarintPopulateType(dAtA, uint64(v9)) + case 1: + dAtA = encodeVarintPopulateType(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateType(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateType(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateType(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateType(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *Type) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovType(uint64(l)) + } + if len(m.Fields) > 0 { + for _, e := range m.Fields { + l = e.Size() + n += 1 + l + sovType(uint64(l)) + } + } + if len(m.Oneofs) > 0 { + for _, s := range m.Oneofs { + l = len(s) + n += 1 + l + sovType(uint64(l)) + } + } + if len(m.Options) > 0 { + for _, e := range m.Options { + l = e.Size() + n += 1 + l + sovType(uint64(l)) + } + } + if m.SourceContext != nil { + l = m.SourceContext.Size() + n += 1 + l + sovType(uint64(l)) + } + if m.Syntax != 0 { + n += 1 + sovType(uint64(m.Syntax)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Field) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Kind != 0 { + n += 1 + 
sovType(uint64(m.Kind)) + } + if m.Cardinality != 0 { + n += 1 + sovType(uint64(m.Cardinality)) + } + if m.Number != 0 { + n += 1 + sovType(uint64(m.Number)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovType(uint64(l)) + } + l = len(m.TypeUrl) + if l > 0 { + n += 1 + l + sovType(uint64(l)) + } + if m.OneofIndex != 0 { + n += 1 + sovType(uint64(m.OneofIndex)) + } + if m.Packed { + n += 2 + } + if len(m.Options) > 0 { + for _, e := range m.Options { + l = e.Size() + n += 1 + l + sovType(uint64(l)) + } + } + l = len(m.JsonName) + if l > 0 { + n += 1 + l + sovType(uint64(l)) + } + l = len(m.DefaultValue) + if l > 0 { + n += 1 + l + sovType(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Enum) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovType(uint64(l)) + } + if len(m.Enumvalue) > 0 { + for _, e := range m.Enumvalue { + l = e.Size() + n += 1 + l + sovType(uint64(l)) + } + } + if len(m.Options) > 0 { + for _, e := range m.Options { + l = e.Size() + n += 1 + l + sovType(uint64(l)) + } + } + if m.SourceContext != nil { + l = m.SourceContext.Size() + n += 1 + l + sovType(uint64(l)) + } + if m.Syntax != 0 { + n += 1 + sovType(uint64(m.Syntax)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *EnumValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovType(uint64(l)) + } + if m.Number != 0 { + n += 1 + sovType(uint64(m.Number)) + } + if len(m.Options) > 0 { + for _, e := range m.Options { + l = e.Size() + n += 1 + l + sovType(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Option) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovType(uint64(l)) + } + if m.Value != nil { + l = m.Value.Size() + n += 1 + l + sovType(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovType(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozType(x uint64) (n int) { + return sovType(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Type) String() string { + if this == nil { + return "nil" + } + repeatedStringForFields := "[]*Field{" + for _, f := range this.Fields { + repeatedStringForFields += strings.Replace(f.String(), "Field", "Field", 1) + "," + } + repeatedStringForFields += "}" + repeatedStringForOptions := "[]*Option{" + for _, f := range this.Options { + repeatedStringForOptions += strings.Replace(f.String(), "Option", "Option", 1) + "," + } + repeatedStringForOptions += "}" + s := strings.Join([]string{`&Type{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Fields:` + repeatedStringForFields + `,`, + `Oneofs:` + fmt.Sprintf("%v", this.Oneofs) + `,`, + `Options:` + repeatedStringForOptions + `,`, + `SourceContext:` + strings.Replace(fmt.Sprintf("%v", this.SourceContext), "SourceContext", "SourceContext", 1) + `,`, + `Syntax:` + fmt.Sprintf("%v", this.Syntax) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *Field) String() string { + if this == nil { + return "nil" + } + repeatedStringForOptions := "[]*Option{" + for _, f := range this.Options { + repeatedStringForOptions += strings.Replace(f.String(), "Option", "Option", 1) + "," + } + repeatedStringForOptions += "}" 
+ s := strings.Join([]string{`&Field{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Cardinality:` + fmt.Sprintf("%v", this.Cardinality) + `,`, + `Number:` + fmt.Sprintf("%v", this.Number) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `TypeUrl:` + fmt.Sprintf("%v", this.TypeUrl) + `,`, + `OneofIndex:` + fmt.Sprintf("%v", this.OneofIndex) + `,`, + `Packed:` + fmt.Sprintf("%v", this.Packed) + `,`, + `Options:` + repeatedStringForOptions + `,`, + `JsonName:` + fmt.Sprintf("%v", this.JsonName) + `,`, + `DefaultValue:` + fmt.Sprintf("%v", this.DefaultValue) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *Enum) String() string { + if this == nil { + return "nil" + } + repeatedStringForEnumvalue := "[]*EnumValue{" + for _, f := range this.Enumvalue { + repeatedStringForEnumvalue += strings.Replace(f.String(), "EnumValue", "EnumValue", 1) + "," + } + repeatedStringForEnumvalue += "}" + repeatedStringForOptions := "[]*Option{" + for _, f := range this.Options { + repeatedStringForOptions += strings.Replace(f.String(), "Option", "Option", 1) + "," + } + repeatedStringForOptions += "}" + s := strings.Join([]string{`&Enum{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Enumvalue:` + repeatedStringForEnumvalue + `,`, + `Options:` + repeatedStringForOptions + `,`, + `SourceContext:` + strings.Replace(fmt.Sprintf("%v", this.SourceContext), "SourceContext", "SourceContext", 1) + `,`, + `Syntax:` + fmt.Sprintf("%v", this.Syntax) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *EnumValue) String() string { + if this == nil { + return "nil" + } + repeatedStringForOptions := "[]*Option{" + for _, f := range this.Options { + repeatedStringForOptions += strings.Replace(f.String(), "Option", "Option", 1) + "," + } + repeatedStringForOptions += "}" + s := strings.Join([]string{`&EnumValue{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Number:` + fmt.Sprintf("%v", this.Number) + `,`, + `Options:` + repeatedStringForOptions + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *Option) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Option{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Value:` + strings.Replace(fmt.Sprintf("%v", this.Value), "Any", "Any", 1) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func valueToStringType(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Type) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Type: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Type: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift 
:= uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Fields", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Fields = append(m.Fields, &Field{}) + if err := m.Fields[len(m.Fields)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Oneofs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Oneofs = append(m.Oneofs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Options = append(m.Options, &Option{}) + if err := m.Options[len(m.Options)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourceContext", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SourceContext == nil { + m.SourceContext = &SourceContext{} + } + if err := m.SourceContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Syntax", wireType) + } + m.Syntax = 0 + for shift := uint(0); 
; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Syntax |= Syntax(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipType(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthType + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthType + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Field) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Field: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Field: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + m.Kind = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Kind |= Field_Kind(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Cardinality", wireType) + } + m.Cardinality = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Cardinality |= Field_Cardinality(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Number", wireType) + } + m.Number = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Number |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TypeUrl", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + 
return ErrInvalidLengthType + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TypeUrl = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OneofIndex", wireType) + } + m.OneofIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.OneofIndex |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Packed", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Packed = bool(v != 0) + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Options = append(m.Options, &Option{}) + if err := m.Options[len(m.Options)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field JsonName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.JsonName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultValue", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DefaultValue = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipType(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthType + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthType + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Enum) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Enum: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Enum: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Enumvalue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Enumvalue = append(m.Enumvalue, &EnumValue{}) + if err := m.Enumvalue[len(m.Enumvalue)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Options = append(m.Options, &Option{}) + if err := m.Options[len(m.Options)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourceContext", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SourceContext == nil { + m.SourceContext = &SourceContext{} + } + if err := 
m.SourceContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Syntax", wireType) + } + m.Syntax = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Syntax |= Syntax(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipType(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthType + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthType + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EnumValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EnumValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EnumValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Number", wireType) + } + m.Number = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Number |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Options = append(m.Options, &Option{}) + if err := m.Options[len(m.Options)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipType(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthType + } + if (iNdEx + skippy) < 0 { + return 
ErrInvalidLengthType + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Option) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Option: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Option: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Value == nil { + m.Value = &Any{} + } + if err := m.Value.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipType(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthType + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthType + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipType(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowType + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowType + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowType + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthType + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupType + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthType + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthType = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowType = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupType = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/gogo/protobuf/types/wrappers.pb.go b/vendor/github.com/gogo/protobuf/types/wrappers.pb.go new file mode 100644 index 00000000..8f1edb57 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/wrappers.pb.go @@ -0,0 +1,2730 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: google/protobuf/wrappers.proto + +package types + +import ( + bytes "bytes" + encoding_binary "encoding/binary" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Wrapper message for `double`. +// +// The JSON representation for `DoubleValue` is JSON number. +type DoubleValue struct { + // The double value. 
+ Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DoubleValue) Reset() { *m = DoubleValue{} } +func (*DoubleValue) ProtoMessage() {} +func (*DoubleValue) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{0} +} +func (*DoubleValue) XXX_WellKnownType() string { return "DoubleValue" } +func (m *DoubleValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DoubleValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DoubleValue.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DoubleValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_DoubleValue.Merge(m, src) +} +func (m *DoubleValue) XXX_Size() int { + return m.Size() +} +func (m *DoubleValue) XXX_DiscardUnknown() { + xxx_messageInfo_DoubleValue.DiscardUnknown(m) +} + +var xxx_messageInfo_DoubleValue proto.InternalMessageInfo + +func (m *DoubleValue) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +func (*DoubleValue) XXX_MessageName() string { + return "google.protobuf.DoubleValue" +} + +// Wrapper message for `float`. +// +// The JSON representation for `FloatValue` is JSON number. +type FloatValue struct { + // The float value. + Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FloatValue) Reset() { *m = FloatValue{} } +func (*FloatValue) ProtoMessage() {} +func (*FloatValue) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{1} +} +func (*FloatValue) XXX_WellKnownType() string { return "FloatValue" } +func (m *FloatValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FloatValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_FloatValue.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *FloatValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_FloatValue.Merge(m, src) +} +func (m *FloatValue) XXX_Size() int { + return m.Size() +} +func (m *FloatValue) XXX_DiscardUnknown() { + xxx_messageInfo_FloatValue.DiscardUnknown(m) +} + +var xxx_messageInfo_FloatValue proto.InternalMessageInfo + +func (m *FloatValue) GetValue() float32 { + if m != nil { + return m.Value + } + return 0 +} + +func (*FloatValue) XXX_MessageName() string { + return "google.protobuf.FloatValue" +} + +// Wrapper message for `int64`. +// +// The JSON representation for `Int64Value` is JSON string. +type Int64Value struct { + // The int64 value. 
+ Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Int64Value) Reset() { *m = Int64Value{} } +func (*Int64Value) ProtoMessage() {} +func (*Int64Value) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{2} +} +func (*Int64Value) XXX_WellKnownType() string { return "Int64Value" } +func (m *Int64Value) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Int64Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Int64Value.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Int64Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_Int64Value.Merge(m, src) +} +func (m *Int64Value) XXX_Size() int { + return m.Size() +} +func (m *Int64Value) XXX_DiscardUnknown() { + xxx_messageInfo_Int64Value.DiscardUnknown(m) +} + +var xxx_messageInfo_Int64Value proto.InternalMessageInfo + +func (m *Int64Value) GetValue() int64 { + if m != nil { + return m.Value + } + return 0 +} + +func (*Int64Value) XXX_MessageName() string { + return "google.protobuf.Int64Value" +} + +// Wrapper message for `uint64`. +// +// The JSON representation for `UInt64Value` is JSON string. +type UInt64Value struct { + // The uint64 value. + Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UInt64Value) Reset() { *m = UInt64Value{} } +func (*UInt64Value) ProtoMessage() {} +func (*UInt64Value) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{3} +} +func (*UInt64Value) XXX_WellKnownType() string { return "UInt64Value" } +func (m *UInt64Value) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UInt64Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_UInt64Value.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *UInt64Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_UInt64Value.Merge(m, src) +} +func (m *UInt64Value) XXX_Size() int { + return m.Size() +} +func (m *UInt64Value) XXX_DiscardUnknown() { + xxx_messageInfo_UInt64Value.DiscardUnknown(m) +} + +var xxx_messageInfo_UInt64Value proto.InternalMessageInfo + +func (m *UInt64Value) GetValue() uint64 { + if m != nil { + return m.Value + } + return 0 +} + +func (*UInt64Value) XXX_MessageName() string { + return "google.protobuf.UInt64Value" +} + +// Wrapper message for `int32`. +// +// The JSON representation for `Int32Value` is JSON number. +type Int32Value struct { + // The int32 value. 
+ Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Int32Value) Reset() { *m = Int32Value{} } +func (*Int32Value) ProtoMessage() {} +func (*Int32Value) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{4} +} +func (*Int32Value) XXX_WellKnownType() string { return "Int32Value" } +func (m *Int32Value) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Int32Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Int32Value.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Int32Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_Int32Value.Merge(m, src) +} +func (m *Int32Value) XXX_Size() int { + return m.Size() +} +func (m *Int32Value) XXX_DiscardUnknown() { + xxx_messageInfo_Int32Value.DiscardUnknown(m) +} + +var xxx_messageInfo_Int32Value proto.InternalMessageInfo + +func (m *Int32Value) GetValue() int32 { + if m != nil { + return m.Value + } + return 0 +} + +func (*Int32Value) XXX_MessageName() string { + return "google.protobuf.Int32Value" +} + +// Wrapper message for `uint32`. +// +// The JSON representation for `UInt32Value` is JSON number. +type UInt32Value struct { + // The uint32 value. + Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UInt32Value) Reset() { *m = UInt32Value{} } +func (*UInt32Value) ProtoMessage() {} +func (*UInt32Value) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{5} +} +func (*UInt32Value) XXX_WellKnownType() string { return "UInt32Value" } +func (m *UInt32Value) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UInt32Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_UInt32Value.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *UInt32Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_UInt32Value.Merge(m, src) +} +func (m *UInt32Value) XXX_Size() int { + return m.Size() +} +func (m *UInt32Value) XXX_DiscardUnknown() { + xxx_messageInfo_UInt32Value.DiscardUnknown(m) +} + +var xxx_messageInfo_UInt32Value proto.InternalMessageInfo + +func (m *UInt32Value) GetValue() uint32 { + if m != nil { + return m.Value + } + return 0 +} + +func (*UInt32Value) XXX_MessageName() string { + return "google.protobuf.UInt32Value" +} + +// Wrapper message for `bool`. +// +// The JSON representation for `BoolValue` is JSON `true` and `false`. +type BoolValue struct { + // The bool value. 
+ Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BoolValue) Reset() { *m = BoolValue{} } +func (*BoolValue) ProtoMessage() {} +func (*BoolValue) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{6} +} +func (*BoolValue) XXX_WellKnownType() string { return "BoolValue" } +func (m *BoolValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BoolValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BoolValue.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BoolValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_BoolValue.Merge(m, src) +} +func (m *BoolValue) XXX_Size() int { + return m.Size() +} +func (m *BoolValue) XXX_DiscardUnknown() { + xxx_messageInfo_BoolValue.DiscardUnknown(m) +} + +var xxx_messageInfo_BoolValue proto.InternalMessageInfo + +func (m *BoolValue) GetValue() bool { + if m != nil { + return m.Value + } + return false +} + +func (*BoolValue) XXX_MessageName() string { + return "google.protobuf.BoolValue" +} + +// Wrapper message for `string`. +// +// The JSON representation for `StringValue` is JSON string. +type StringValue struct { + // The string value. + Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StringValue) Reset() { *m = StringValue{} } +func (*StringValue) ProtoMessage() {} +func (*StringValue) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{7} +} +func (*StringValue) XXX_WellKnownType() string { return "StringValue" } +func (m *StringValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StringValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StringValue.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *StringValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_StringValue.Merge(m, src) +} +func (m *StringValue) XXX_Size() int { + return m.Size() +} +func (m *StringValue) XXX_DiscardUnknown() { + xxx_messageInfo_StringValue.DiscardUnknown(m) +} + +var xxx_messageInfo_StringValue proto.InternalMessageInfo + +func (m *StringValue) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +func (*StringValue) XXX_MessageName() string { + return "google.protobuf.StringValue" +} + +// Wrapper message for `bytes`. +// +// The JSON representation for `BytesValue` is JSON string. +type BytesValue struct { + // The bytes value. 
+ Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BytesValue) Reset() { *m = BytesValue{} } +func (*BytesValue) ProtoMessage() {} +func (*BytesValue) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{8} +} +func (*BytesValue) XXX_WellKnownType() string { return "BytesValue" } +func (m *BytesValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BytesValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BytesValue.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BytesValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_BytesValue.Merge(m, src) +} +func (m *BytesValue) XXX_Size() int { + return m.Size() +} +func (m *BytesValue) XXX_DiscardUnknown() { + xxx_messageInfo_BytesValue.DiscardUnknown(m) +} + +var xxx_messageInfo_BytesValue proto.InternalMessageInfo + +func (m *BytesValue) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func (*BytesValue) XXX_MessageName() string { + return "google.protobuf.BytesValue" +} +func init() { + proto.RegisterType((*DoubleValue)(nil), "google.protobuf.DoubleValue") + proto.RegisterType((*FloatValue)(nil), "google.protobuf.FloatValue") + proto.RegisterType((*Int64Value)(nil), "google.protobuf.Int64Value") + proto.RegisterType((*UInt64Value)(nil), "google.protobuf.UInt64Value") + proto.RegisterType((*Int32Value)(nil), "google.protobuf.Int32Value") + proto.RegisterType((*UInt32Value)(nil), "google.protobuf.UInt32Value") + proto.RegisterType((*BoolValue)(nil), "google.protobuf.BoolValue") + proto.RegisterType((*StringValue)(nil), "google.protobuf.StringValue") + proto.RegisterType((*BytesValue)(nil), "google.protobuf.BytesValue") +} + +func init() { proto.RegisterFile("google/protobuf/wrappers.proto", fileDescriptor_5377b62bda767935) } + +var fileDescriptor_5377b62bda767935 = []byte{ + // 285 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x2f, 0x4a, 0x2c, + 0x28, 0x48, 0x2d, 0x2a, 0xd6, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0xca, + 0x5c, 0xdc, 0x2e, 0xf9, 0xa5, 0x49, 0x39, 0xa9, 0x61, 0x89, 0x39, 0xa5, 0xa9, 0x42, 0x22, 0x5c, + 0xac, 0x65, 0x20, 0x86, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x63, 0x10, 0x84, 0xa3, 0xa4, 0xc4, 0xc5, + 0xe5, 0x96, 0x93, 0x9f, 0x58, 0x82, 0x45, 0x0d, 0x13, 0x92, 0x1a, 0xcf, 0xbc, 0x12, 0x33, 0x13, + 0x2c, 0x6a, 0x98, 0x61, 0x6a, 0x94, 0xb9, 0xb8, 0x43, 0x71, 0x29, 0x62, 0x41, 0x35, 0xc8, 0xd8, + 0x08, 0x8b, 0x1a, 0x56, 0x34, 0x83, 0xb0, 0x2a, 0xe2, 0x85, 0x29, 0x52, 0xe4, 0xe2, 0x74, 0xca, + 0xcf, 0xcf, 0xc1, 0xa2, 0x84, 0x03, 0xc9, 0x9c, 0xe0, 0x92, 0xa2, 0xcc, 0xbc, 0x74, 0x2c, 0x8a, + 0x38, 0x91, 0x1c, 0xe4, 0x54, 0x59, 0x92, 0x5a, 0x8c, 0x45, 0x0d, 0x0f, 0x54, 0x8d, 0x53, 0x3b, + 0xe3, 0x8d, 0x87, 0x72, 0x0c, 0x1f, 0x1e, 0xca, 0x31, 0xfe, 0x78, 0x28, 0xc7, 0xd8, 0xf0, 0x48, + 0x8e, 0x71, 0xc5, 0x23, 0x39, 0xc6, 0x13, 0x8f, 0xe4, 0x18, 0x2f, 0x3c, 0x92, 0x63, 0x7c, 0xf0, + 0x48, 0x8e, 0xf1, 0xc5, 0x23, 0x39, 0x86, 0x0f, 0x20, 0xf1, 0xc7, 0x72, 0x8c, 0x27, 0x1e, 0xcb, + 0x31, 0x72, 0x09, 0x27, 0xe7, 0xe7, 0xea, 0xa1, 0x45, 0x87, 0x13, 
0x6f, 0x38, 0x34, 0xbe, 0x02, + 0x40, 0x22, 0x01, 0x8c, 0x51, 0xac, 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x3f, 0x18, 0x19, 0x17, 0x31, + 0x31, 0xbb, 0x07, 0x38, 0xad, 0x62, 0x92, 0x73, 0x87, 0x68, 0x09, 0x80, 0x6a, 0xd1, 0x0b, 0x4f, + 0xcd, 0xc9, 0xf1, 0xce, 0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0xa9, 0x4c, 0x62, 0x03, 0x9b, 0x65, 0x0c, + 0x08, 0x00, 0x00, 0xff, 0xff, 0x31, 0x55, 0x64, 0x90, 0x0a, 0x02, 0x00, 0x00, +} + +func (this *DoubleValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*DoubleValue) + if !ok { + that2, ok := that.(DoubleValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Value != that1.Value { + if this.Value < that1.Value { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *FloatValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*FloatValue) + if !ok { + that2, ok := that.(FloatValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Value != that1.Value { + if this.Value < that1.Value { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Int64Value) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Int64Value) + if !ok { + that2, ok := that.(Int64Value) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Value != that1.Value { + if this.Value < that1.Value { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *UInt64Value) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*UInt64Value) + if !ok { + that2, ok := that.(UInt64Value) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Value != that1.Value { + if this.Value < that1.Value { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Int32Value) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Int32Value) + if !ok { + that2, ok := that.(Int32Value) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Value != that1.Value { + if this.Value < that1.Value { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *UInt32Value) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*UInt32Value) + if !ok { + that2, ok := that.(UInt32Value) + if ok { + that1 = &that2 + } else { + return 1 + } + } 
+ if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Value != that1.Value { + if this.Value < that1.Value { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *BoolValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*BoolValue) + if !ok { + that2, ok := that.(BoolValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Value != that1.Value { + if !this.Value { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *StringValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*StringValue) + if !ok { + that2, ok := that.(StringValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Value != that1.Value { + if this.Value < that1.Value { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *BytesValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*BytesValue) + if !ok { + that2, ok := that.(BytesValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if c := bytes.Compare(this.Value, that1.Value); c != 0 { + return c + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *DoubleValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*DoubleValue) + if !ok { + that2, ok := that.(DoubleValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *FloatValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*FloatValue) + if !ok { + that2, ok := that.(FloatValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Int64Value) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Int64Value) + if !ok { + that2, ok := that.(Int64Value) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *UInt64Value) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + 
+ that1, ok := that.(*UInt64Value) + if !ok { + that2, ok := that.(UInt64Value) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Int32Value) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Int32Value) + if !ok { + that2, ok := that.(Int32Value) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *UInt32Value) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*UInt32Value) + if !ok { + that2, ok := that.(UInt32Value) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *BoolValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*BoolValue) + if !ok { + that2, ok := that.(BoolValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *StringValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*StringValue) + if !ok { + that2, ok := that.(StringValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *BytesValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*BytesValue) + if !ok { + that2, ok := that.(BytesValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !bytes.Equal(this.Value, that1.Value) { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *DoubleValue) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.DoubleValue{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FloatValue) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.FloatValue{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, 
"}") + return strings.Join(s, "") +} +func (this *Int64Value) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.Int64Value{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *UInt64Value) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.UInt64Value{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Int32Value) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.Int32Value{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *UInt32Value) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.UInt32Value{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *BoolValue) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.BoolValue{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *StringValue) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.StringValue{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *BytesValue) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.BytesValue{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringWrappers(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *DoubleValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DoubleValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DoubleValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= 
len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Value != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func (m *FloatValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FloatValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FloatValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Value != 0 { + i -= 4 + encoding_binary.LittleEndian.PutUint32(dAtA[i:], uint32(math.Float32bits(float32(m.Value)))) + i-- + dAtA[i] = 0xd + } + return len(dAtA) - i, nil +} + +func (m *Int64Value) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Int64Value) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Int64Value) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Value != 0 { + i = encodeVarintWrappers(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *UInt64Value) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UInt64Value) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UInt64Value) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Value != 0 { + i = encodeVarintWrappers(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Int32Value) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Int32Value) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Int32Value) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Value != 0 { + i = encodeVarintWrappers(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *UInt32Value) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UInt32Value) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UInt32Value) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := 
len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Value != 0 { + i = encodeVarintWrappers(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *BoolValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BoolValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BoolValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Value { + i-- + if m.Value { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *StringValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StringValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StringValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintWrappers(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *BytesValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BytesValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BytesValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintWrappers(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintWrappers(dAtA []byte, offset int, v uint64) int { + offset -= sovWrappers(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func NewPopulatedDoubleValue(r randyWrappers, easy bool) *DoubleValue { + this := &DoubleValue{} + this.Value = float64(r.Float64()) + if r.Intn(2) == 0 { + this.Value *= -1 + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedWrappers(r, 2) + } + return this +} + +func NewPopulatedFloatValue(r randyWrappers, easy bool) *FloatValue { + this := &FloatValue{} + this.Value = float32(r.Float32()) + if r.Intn(2) == 0 { + this.Value *= -1 + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedWrappers(r, 2) + } + return this +} + +func NewPopulatedInt64Value(r randyWrappers, easy bool) *Int64Value { + this := &Int64Value{} + this.Value = int64(r.Int63()) + if r.Intn(2) == 0 { + this.Value *= -1 + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = 
randUnrecognizedWrappers(r, 2) + } + return this +} + +func NewPopulatedUInt64Value(r randyWrappers, easy bool) *UInt64Value { + this := &UInt64Value{} + this.Value = uint64(uint64(r.Uint32())) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedWrappers(r, 2) + } + return this +} + +func NewPopulatedInt32Value(r randyWrappers, easy bool) *Int32Value { + this := &Int32Value{} + this.Value = int32(r.Int31()) + if r.Intn(2) == 0 { + this.Value *= -1 + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedWrappers(r, 2) + } + return this +} + +func NewPopulatedUInt32Value(r randyWrappers, easy bool) *UInt32Value { + this := &UInt32Value{} + this.Value = uint32(r.Uint32()) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedWrappers(r, 2) + } + return this +} + +func NewPopulatedBoolValue(r randyWrappers, easy bool) *BoolValue { + this := &BoolValue{} + this.Value = bool(bool(r.Intn(2) == 0)) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedWrappers(r, 2) + } + return this +} + +func NewPopulatedStringValue(r randyWrappers, easy bool) *StringValue { + this := &StringValue{} + this.Value = string(randStringWrappers(r)) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedWrappers(r, 2) + } + return this +} + +func NewPopulatedBytesValue(r randyWrappers, easy bool) *BytesValue { + this := &BytesValue{} + v1 := r.Intn(100) + this.Value = make([]byte, v1) + for i := 0; i < v1; i++ { + this.Value[i] = byte(r.Intn(256)) + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedWrappers(r, 2) + } + return this +} + +type randyWrappers interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneWrappers(r randyWrappers) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringWrappers(r randyWrappers) string { + v2 := r.Intn(100) + tmps := make([]rune, v2) + for i := 0; i < v2; i++ { + tmps[i] = randUTF8RuneWrappers(r) + } + return string(tmps) +} +func randUnrecognizedWrappers(r randyWrappers, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldWrappers(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldWrappers(dAtA []byte, r randyWrappers, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = encodeVarintPopulateWrappers(dAtA, uint64(key)) + v3 := r.Int63() + if r.Intn(2) == 0 { + v3 *= -1 + } + dAtA = encodeVarintPopulateWrappers(dAtA, uint64(v3)) + case 1: + dAtA = encodeVarintPopulateWrappers(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateWrappers(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateWrappers(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateWrappers(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateWrappers(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = 
append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *DoubleValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 9 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *FloatValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 5 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Int64Value) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 1 + sovWrappers(uint64(m.Value)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *UInt64Value) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 1 + sovWrappers(uint64(m.Value)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Int32Value) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 1 + sovWrappers(uint64(m.Value)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *UInt32Value) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 1 + sovWrappers(uint64(m.Value)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *BoolValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *StringValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Value) + if l > 0 { + n += 1 + l + sovWrappers(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *BytesValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Value) + if l > 0 { + n += 1 + l + sovWrappers(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovWrappers(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozWrappers(x uint64) (n int) { + return sovWrappers(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *DoubleValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DoubleValue{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *FloatValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FloatValue{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *Int64Value) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Int64Value{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *UInt64Value) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UInt64Value{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *Int32Value) String() string { + if this == nil { + return "nil" + 
} + s := strings.Join([]string{`&Int32Value{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *UInt32Value) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UInt32Value{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *BoolValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BoolValue{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *StringValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StringValue{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *BytesValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BytesValue{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func valueToStringWrappers(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *DoubleValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DoubleValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DoubleValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = float64(math.Float64frombits(v)) + default: + iNdEx = preIndex + skippy, err := skipWrappers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FloatValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FloatValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FloatValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 5 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint32 + if (iNdEx + 4) > l { + return io.ErrUnexpectedEOF + } + v = uint32(encoding_binary.LittleEndian.Uint32(dAtA[iNdEx:])) + iNdEx += 4 + m.Value = float32(math.Float32frombits(v)) + default: + iNdEx = preIndex + skippy, err := skipWrappers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Int64Value) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Int64Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Int64Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipWrappers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UInt64Value) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UInt64Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UInt64Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipWrappers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Int32Value) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Int32Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Int32Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipWrappers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UInt32Value) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UInt32Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UInt32Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipWrappers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BoolValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BoolValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BoolValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Value = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipWrappers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StringValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StringValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StringValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWrappers + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthWrappers + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWrappers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BytesValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BytesValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BytesValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthWrappers + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthWrappers + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWrappers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipWrappers(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWrappers + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWrappers + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWrappers + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthWrappers + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupWrappers + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthWrappers + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthWrappers = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowWrappers = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupWrappers = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/gogo/protobuf/types/wrappers_gogo.go b/vendor/github.com/gogo/protobuf/types/wrappers_gogo.go new file mode 100644 index 00000000..d905df36 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/wrappers_gogo.go @@ -0,0 +1,300 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package types + +func NewPopulatedStdDouble(r randyWrappers, easy bool) *float64 { + v := NewPopulatedDoubleValue(r, easy) + return &v.Value +} + +func SizeOfStdDouble(v float64) int { + pv := &DoubleValue{Value: v} + return pv.Size() +} + +func StdDoubleMarshal(v float64) ([]byte, error) { + size := SizeOfStdDouble(v) + buf := make([]byte, size) + _, err := StdDoubleMarshalTo(v, buf) + return buf, err +} + +func StdDoubleMarshalTo(v float64, data []byte) (int, error) { + pv := &DoubleValue{Value: v} + return pv.MarshalTo(data) +} + +func StdDoubleUnmarshal(v *float64, data []byte) error { + pv := &DoubleValue{} + if err := pv.Unmarshal(data); err != nil { + return err + } + *v = pv.Value + return nil +} +func NewPopulatedStdFloat(r randyWrappers, easy bool) *float32 { + v := NewPopulatedFloatValue(r, easy) + return &v.Value +} + +func SizeOfStdFloat(v float32) int { + pv := &FloatValue{Value: v} + return pv.Size() +} + +func StdFloatMarshal(v float32) ([]byte, error) { + size := SizeOfStdFloat(v) + buf := make([]byte, size) + _, err := StdFloatMarshalTo(v, buf) + return buf, err +} + +func StdFloatMarshalTo(v float32, data []byte) (int, error) { + pv := &FloatValue{Value: v} + return pv.MarshalTo(data) +} + +func StdFloatUnmarshal(v *float32, data []byte) error { + pv := &FloatValue{} + if err := pv.Unmarshal(data); err != nil { + return err + } + *v = pv.Value + return nil +} +func NewPopulatedStdInt64(r randyWrappers, easy bool) *int64 { + v := NewPopulatedInt64Value(r, easy) + return &v.Value +} + +func SizeOfStdInt64(v int64) int { + pv := &Int64Value{Value: v} + return pv.Size() +} + +func StdInt64Marshal(v int64) ([]byte, error) { + size := SizeOfStdInt64(v) + buf := make([]byte, size) + _, err := StdInt64MarshalTo(v, buf) + return buf, err +} + +func StdInt64MarshalTo(v int64, data []byte) (int, error) { + pv := &Int64Value{Value: v} + return pv.MarshalTo(data) +} + +func StdInt64Unmarshal(v *int64, data []byte) error { + pv := &Int64Value{} + if err := pv.Unmarshal(data); err != nil { + return err + } + *v = pv.Value + return nil +} +func NewPopulatedStdUInt64(r randyWrappers, easy bool) *uint64 { + v := NewPopulatedUInt64Value(r, easy) + return &v.Value +} + +func SizeOfStdUInt64(v uint64) int { + pv := &UInt64Value{Value: v} + return pv.Size() +} + +func StdUInt64Marshal(v uint64) ([]byte, error) { + size := SizeOfStdUInt64(v) + buf := make([]byte, size) + _, err := StdUInt64MarshalTo(v, buf) + return buf, err +} + +func StdUInt64MarshalTo(v uint64, data []byte) (int, error) { + pv := &UInt64Value{Value: v} + return pv.MarshalTo(data) +} + +func StdUInt64Unmarshal(v *uint64, data []byte) error { + pv := &UInt64Value{} + if err := pv.Unmarshal(data); err != nil { + return err + } + *v = pv.Value + return nil +} +func NewPopulatedStdInt32(r randyWrappers, easy bool) *int32 { + v := NewPopulatedInt32Value(r, easy) + return &v.Value +} + +func SizeOfStdInt32(v int32) int { + pv := &Int32Value{Value: v} + return pv.Size() +} + +func 
StdInt32Marshal(v int32) ([]byte, error) { + size := SizeOfStdInt32(v) + buf := make([]byte, size) + _, err := StdInt32MarshalTo(v, buf) + return buf, err +} + +func StdInt32MarshalTo(v int32, data []byte) (int, error) { + pv := &Int32Value{Value: v} + return pv.MarshalTo(data) +} + +func StdInt32Unmarshal(v *int32, data []byte) error { + pv := &Int32Value{} + if err := pv.Unmarshal(data); err != nil { + return err + } + *v = pv.Value + return nil +} +func NewPopulatedStdUInt32(r randyWrappers, easy bool) *uint32 { + v := NewPopulatedUInt32Value(r, easy) + return &v.Value +} + +func SizeOfStdUInt32(v uint32) int { + pv := &UInt32Value{Value: v} + return pv.Size() +} + +func StdUInt32Marshal(v uint32) ([]byte, error) { + size := SizeOfStdUInt32(v) + buf := make([]byte, size) + _, err := StdUInt32MarshalTo(v, buf) + return buf, err +} + +func StdUInt32MarshalTo(v uint32, data []byte) (int, error) { + pv := &UInt32Value{Value: v} + return pv.MarshalTo(data) +} + +func StdUInt32Unmarshal(v *uint32, data []byte) error { + pv := &UInt32Value{} + if err := pv.Unmarshal(data); err != nil { + return err + } + *v = pv.Value + return nil +} +func NewPopulatedStdBool(r randyWrappers, easy bool) *bool { + v := NewPopulatedBoolValue(r, easy) + return &v.Value +} + +func SizeOfStdBool(v bool) int { + pv := &BoolValue{Value: v} + return pv.Size() +} + +func StdBoolMarshal(v bool) ([]byte, error) { + size := SizeOfStdBool(v) + buf := make([]byte, size) + _, err := StdBoolMarshalTo(v, buf) + return buf, err +} + +func StdBoolMarshalTo(v bool, data []byte) (int, error) { + pv := &BoolValue{Value: v} + return pv.MarshalTo(data) +} + +func StdBoolUnmarshal(v *bool, data []byte) error { + pv := &BoolValue{} + if err := pv.Unmarshal(data); err != nil { + return err + } + *v = pv.Value + return nil +} +func NewPopulatedStdString(r randyWrappers, easy bool) *string { + v := NewPopulatedStringValue(r, easy) + return &v.Value +} + +func SizeOfStdString(v string) int { + pv := &StringValue{Value: v} + return pv.Size() +} + +func StdStringMarshal(v string) ([]byte, error) { + size := SizeOfStdString(v) + buf := make([]byte, size) + _, err := StdStringMarshalTo(v, buf) + return buf, err +} + +func StdStringMarshalTo(v string, data []byte) (int, error) { + pv := &StringValue{Value: v} + return pv.MarshalTo(data) +} + +func StdStringUnmarshal(v *string, data []byte) error { + pv := &StringValue{} + if err := pv.Unmarshal(data); err != nil { + return err + } + *v = pv.Value + return nil +} +func NewPopulatedStdBytes(r randyWrappers, easy bool) *[]byte { + v := NewPopulatedBytesValue(r, easy) + return &v.Value +} + +func SizeOfStdBytes(v []byte) int { + pv := &BytesValue{Value: v} + return pv.Size() +} + +func StdBytesMarshal(v []byte) ([]byte, error) { + size := SizeOfStdBytes(v) + buf := make([]byte, size) + _, err := StdBytesMarshalTo(v, buf) + return buf, err +} + +func StdBytesMarshalTo(v []byte, data []byte) (int, error) { + pv := &BytesValue{Value: v} + return pv.MarshalTo(data) +} + +func StdBytesUnmarshal(v *[]byte, data []byte) error { + pv := &BytesValue{} + if err := pv.Unmarshal(data); err != nil { + return err + } + *v = pv.Value + return nil +} diff --git a/vendor/github.com/google/shlex/COPYING b/vendor/github.com/google/shlex/COPYING new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/github.com/google/shlex/COPYING @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. 
Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/google/shlex/README b/vendor/github.com/google/shlex/README
new file mode 100644
index 00000000..c86bcc06
--- /dev/null
+++ b/vendor/github.com/google/shlex/README
@@ -0,0 +1,2 @@
+go-shlex is a simple lexer for go that supports shell-style quoting,
+commenting, and escaping.
diff --git a/vendor/github.com/google/shlex/shlex.go b/vendor/github.com/google/shlex/shlex.go
new file mode 100644
index 00000000..3cb37b7e
--- /dev/null
+++ b/vendor/github.com/google/shlex/shlex.go
@@ -0,0 +1,417 @@
+/*
+Copyright 2012 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package shlex implements a simple lexer which splits input into tokens using
+shell-style rules for quoting and commenting.
+
+The basic use case uses the default ASCII lexer to split a string into sub-strings:
+
+	shlex.Split("one \"two three\" four") -> []string{"one", "two three", "four"}
+
+To process a stream of strings:
+
+	l := NewLexer(os.Stdin)
+	for {
+		token, err := l.Next()
+		if err != nil {
+			break // io.EOF signals the end of the stream
+		}
+		// process token
+	}
+
+To access the raw token stream (which includes tokens for comments):
+
+	t := NewTokenizer(os.Stdin)
+	for {
+		token, err := t.Next()
+		if err != nil {
+			break // io.EOF signals the end of the stream
+		}
+		// process token
+	}
+
+*/
+package shlex
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"strings"
+)
+
+// TokenType is a top-level token classification: A word, space, comment, unknown.
+type TokenType int
+
+// runeTokenClass is the type of a UTF-8 character classification: A quote, space, escape.
+type runeTokenClass int
+
+// the internal state used by the lexer state machine
+type lexerState int
+
+// Token is a (type, value) pair representing a lexical token.
+type Token struct {
+	tokenType TokenType
+	value     string
+}
+
+// Equal reports whether tokens a and b are equal.
+// Two tokens are equal if both their types and values are equal. A nil token can
+// never be equal to another token.
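+// For example, comparing two WordToken tokens that both hold the value "foo"
+// yields true, while tokens that differ in type or in value compare as false.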
+func (a *Token) Equal(b *Token) bool {
+	if a == nil || b == nil {
+		return false
+	}
+	if a.tokenType != b.tokenType {
+		return false
+	}
+	return a.value == b.value
+}
+
+// Named classes of UTF-8 runes
+const (
+	spaceRunes            = " \t\r\n"
+	escapingQuoteRunes    = `"`
+	nonEscapingQuoteRunes = "'"
+	escapeRunes           = `\`
+	commentRunes          = "#"
+)
+
+// Classes of rune token
+const (
+	unknownRuneClass runeTokenClass = iota
+	spaceRuneClass
+	escapingQuoteRuneClass
+	nonEscapingQuoteRuneClass
+	escapeRuneClass
+	commentRuneClass
+	eofRuneClass
+)
+
+// Classes of lexical token
+const (
+	UnknownToken TokenType = iota
+	WordToken
+	SpaceToken
+	CommentToken
+)
+
+// Lexer state machine states
+const (
+	startState           lexerState = iota // no runes have been seen
+	inWordState                            // processing regular runes in a word
+	escapingState                          // we have just consumed an escape rune; the next rune is literal
+	escapingQuotedState                    // we have just consumed an escape rune within a quoted string
+	quotingEscapingState                   // we are within a quoted string that supports escaping ("...")
+	quotingState                           // we are within a string that does not support escaping ('...')
+	commentState                           // we are within a comment (everything following an unquoted or unescaped #)
+)
+
+// tokenClassifier is used for classifying rune characters.
+type tokenClassifier map[rune]runeTokenClass
+
+func (typeMap tokenClassifier) addRuneClass(runes string, tokenType runeTokenClass) {
+	for _, runeChar := range runes {
+		typeMap[runeChar] = tokenType
+	}
+}
+
+// newDefaultClassifier creates a new classifier for ASCII characters.
+func newDefaultClassifier() tokenClassifier {
+	t := tokenClassifier{}
+	t.addRuneClass(spaceRunes, spaceRuneClass)
+	t.addRuneClass(escapingQuoteRunes, escapingQuoteRuneClass)
+	t.addRuneClass(nonEscapingQuoteRunes, nonEscapingQuoteRuneClass)
+	t.addRuneClass(escapeRunes, escapeRuneClass)
+	t.addRuneClass(commentRunes, commentRuneClass)
+	return t
+}
+
+// ClassifyRune classifies a rune.
+func (t tokenClassifier) ClassifyRune(runeVal rune) runeTokenClass {
+	return t[runeVal]
+}
+
+// Lexer turns an input stream into a sequence of tokens. Whitespace and comments are skipped.
+type Lexer Tokenizer
+
+// NewLexer creates a new lexer from an input stream.
+func NewLexer(r io.Reader) *Lexer {
+	return (*Lexer)(NewTokenizer(r))
+}
+
+// Next returns the next word, or an error. If there are no more words,
+// the error will be io.EOF.
+func (l *Lexer) Next() (string, error) {
+	for {
+		token, err := (*Tokenizer)(l).Next()
+		if err != nil {
+			return "", err
+		}
+		switch token.tokenType {
+		case WordToken:
+			return token.value, nil
+		case CommentToken:
+			// skip comments
+		default:
+			return "", fmt.Errorf("Unknown token type: %v", token.tokenType)
+		}
+	}
+}
+
+// Tokenizer turns an input stream into a sequence of typed tokens.
+type Tokenizer struct {
+	input      bufio.Reader
+	classifier tokenClassifier
+}
+
+// NewTokenizer creates a new tokenizer from an input stream.
+func NewTokenizer(r io.Reader) *Tokenizer {
+	input := bufio.NewReader(r)
+	classifier := newDefaultClassifier()
+	return &Tokenizer{
+		input:      *input,
+		classifier: classifier}
+}
+
+// scanStream scans the stream for the next token using the internal state machine.
+// Runes it does not recognize are classified as ordinary word characters, and an
+// unexpected internal state is reported as an error rather than a panic.
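+// The machine begins in startState, moves into a word, quote, escape, or
+// comment state on the first significant rune, and emits the accumulated
+// token when it reads EOF or an unquoted space.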
+func (t *Tokenizer) scanStream() (*Token, error) { + state := startState + var tokenType TokenType + var value []rune + var nextRune rune + var nextRuneType runeTokenClass + var err error + + for { + nextRune, _, err = t.input.ReadRune() + nextRuneType = t.classifier.ClassifyRune(nextRune) + + if err == io.EOF { + nextRuneType = eofRuneClass + err = nil + } else if err != nil { + return nil, err + } + + switch state { + case startState: // no runes read yet + { + switch nextRuneType { + case eofRuneClass: + { + return nil, io.EOF + } + case spaceRuneClass: + { + } + case escapingQuoteRuneClass: + { + tokenType = WordToken + state = quotingEscapingState + } + case nonEscapingQuoteRuneClass: + { + tokenType = WordToken + state = quotingState + } + case escapeRuneClass: + { + tokenType = WordToken + state = escapingState + } + case commentRuneClass: + { + tokenType = CommentToken + state = commentState + } + default: + { + tokenType = WordToken + value = append(value, nextRune) + state = inWordState + } + } + } + case inWordState: // in a regular word + { + switch nextRuneType { + case eofRuneClass: + { + token := &Token{ + tokenType: tokenType, + value: string(value)} + return token, err + } + case spaceRuneClass: + { + t.input.UnreadRune() + token := &Token{ + tokenType: tokenType, + value: string(value)} + return token, err + } + case escapingQuoteRuneClass: + { + state = quotingEscapingState + } + case nonEscapingQuoteRuneClass: + { + state = quotingState + } + case escapeRuneClass: + { + state = escapingState + } + default: + { + value = append(value, nextRune) + } + } + } + case escapingState: // the rune after an escape character + { + switch nextRuneType { + case eofRuneClass: + { + err = fmt.Errorf("EOF found after escape character") + token := &Token{ + tokenType: tokenType, + value: string(value)} + return token, err + } + default: + { + state = inWordState + value = append(value, nextRune) + } + } + } + case escapingQuotedState: // the next rune after an escape character, in double quotes + { + switch nextRuneType { + case eofRuneClass: + { + err = fmt.Errorf("EOF found after escape character") + token := &Token{ + tokenType: tokenType, + value: string(value)} + return token, err + } + default: + { + state = quotingEscapingState + value = append(value, nextRune) + } + } + } + case quotingEscapingState: // in escaping double quotes + { + switch nextRuneType { + case eofRuneClass: + { + err = fmt.Errorf("EOF found when expecting closing quote") + token := &Token{ + tokenType: tokenType, + value: string(value)} + return token, err + } + case escapingQuoteRuneClass: + { + state = inWordState + } + case escapeRuneClass: + { + state = escapingQuotedState + } + default: + { + value = append(value, nextRune) + } + } + } + case quotingState: // in non-escaping single quotes + { + switch nextRuneType { + case eofRuneClass: + { + err = fmt.Errorf("EOF found when expecting closing quote") + token := &Token{ + tokenType: tokenType, + value: string(value)} + return token, err + } + case nonEscapingQuoteRuneClass: + { + state = inWordState + } + default: + { + value = append(value, nextRune) + } + } + } + case commentState: // in a comment + { + switch nextRuneType { + case eofRuneClass: + { + token := &Token{ + tokenType: tokenType, + value: string(value)} + return token, err + } + case spaceRuneClass: + { + if nextRune == '\n' { + state = startState + token := &Token{ + tokenType: tokenType, + value: string(value)} + return token, err + } else { + value = append(value, nextRune) + } + } + 
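+				// any other rune inside the comment is accumulated into its text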
default: + { + value = append(value, nextRune) + } + } + } + default: + { + return nil, fmt.Errorf("Unexpected state: %v", state) + } + } + } +} + +// Next returns the next token in the stream. +func (t *Tokenizer) Next() (*Token, error) { + return t.scanStream() +} + +// Split partitions a string into a slice of strings. +func Split(s string) ([]string, error) { + l := NewLexer(strings.NewReader(s)) + subStrings := make([]string, 0) + for { + word, err := l.Next() + if err != nil { + if err == io.EOF { + return subStrings, nil + } + return subStrings, err + } + subStrings = append(subStrings, word) + } +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-opentracing/LICENSE b/vendor/github.com/grpc-ecosystem/grpc-opentracing/LICENSE new file mode 100644 index 00000000..abe5fe17 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-opentracing/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2016, gRPC Ecosystem +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of grpc-opentracing nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/grpc-ecosystem/grpc-opentracing/PATENTS b/vendor/github.com/grpc-ecosystem/grpc-opentracing/PATENTS new file mode 100644 index 00000000..5cfe0175 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-opentracing/PATENTS @@ -0,0 +1,23 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the GRPC project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of GRPC, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of GRPC. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. 
If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of GRPC or any code incorporated within this
+implementation of GRPC constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of GRPC
+shall terminate as of the date such litigation is filed.
diff --git a/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/README.md b/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/README.md
new file mode 100644
index 00000000..78c49dbb
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/README.md
@@ -0,0 +1,57 @@
+# OpenTracing support for gRPC in Go
+
+The `otgrpc` package makes it easy to add OpenTracing support to gRPC-based
+systems in Go.
+
+## Installation
+
+```
+go get github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc
+```
+
+## Documentation
+
+See the basic usage examples below and the [package documentation on
+godoc.org](https://godoc.org/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc).
+
+## Client-side usage example
+
+Wherever you call `grpc.Dial`:
+
+```go
+// You must have some sort of OpenTracing Tracer instance on hand.
+var tracer opentracing.Tracer = ...
+...
+
+// Set up a connection to the server peer.
+conn, err := grpc.Dial(
+    address,
+    ... // other options
+    grpc.WithUnaryInterceptor(
+        otgrpc.OpenTracingClientInterceptor(tracer)),
+    grpc.WithStreamInterceptor(
+        otgrpc.OpenTracingStreamClientInterceptor(tracer)))
+
+// All future RPC activity involving `conn` will be automatically traced.
+```
+
+## Server-side usage example
+
+Wherever you call `grpc.NewServer`:
+
+```go
+// You must have some sort of OpenTracing Tracer instance on hand.
+var tracer opentracing.Tracer = ...
+...
+
+// Initialize the gRPC server.
+s := grpc.NewServer(
+    ... // other options
+    grpc.UnaryInterceptor(
+        otgrpc.OpenTracingServerInterceptor(tracer)),
+    grpc.StreamInterceptor(
+        otgrpc.OpenTracingStreamServerInterceptor(tracer)))
+
+// All future RPC activity involving `s` will be automatically traced.
+```
+
diff --git a/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/client.go b/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/client.go
new file mode 100644
index 00000000..3414e55c
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/client.go
@@ -0,0 +1,239 @@
+package otgrpc
+
+import (
+	"github.com/opentracing/opentracing-go"
+	"github.com/opentracing/opentracing-go/ext"
+	"github.com/opentracing/opentracing-go/log"
+	"golang.org/x/net/context"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/metadata"
+	"io"
+	"runtime"
+	"sync/atomic"
+)
+
+// OpenTracingClientInterceptor returns a grpc.UnaryClientInterceptor suitable
+// for use in a grpc.Dial call.
+//
+// For example:
+//
+//     conn, err := grpc.Dial(
+//         address,
+//         ...,  // (existing DialOptions)
+//         grpc.WithUnaryInterceptor(otgrpc.OpenTracingClientInterceptor(tracer)))
+//
+// All gRPC client spans will inject the OpenTracing SpanContext into the gRPC
+// metadata; they will also look in the context.Context for an active
+// in-process parent Span and establish a ChildOf reference if such a parent
+// Span could be found.
+func OpenTracingClientInterceptor(tracer opentracing.Tracer, optFuncs ...Option) grpc.UnaryClientInterceptor { + otgrpcOpts := newOptions() + otgrpcOpts.apply(optFuncs...) + return func( + ctx context.Context, + method string, + req, resp interface{}, + cc *grpc.ClientConn, + invoker grpc.UnaryInvoker, + opts ...grpc.CallOption, + ) error { + var err error + var parentCtx opentracing.SpanContext + if parent := opentracing.SpanFromContext(ctx); parent != nil { + parentCtx = parent.Context() + } + if otgrpcOpts.inclusionFunc != nil && + !otgrpcOpts.inclusionFunc(parentCtx, method, req, resp) { + return invoker(ctx, method, req, resp, cc, opts...) + } + clientSpan := tracer.StartSpan( + method, + opentracing.ChildOf(parentCtx), + ext.SpanKindRPCClient, + gRPCComponentTag, + ) + defer clientSpan.Finish() + ctx = injectSpanContext(ctx, tracer, clientSpan) + if otgrpcOpts.logPayloads { + clientSpan.LogFields(log.Object("gRPC request", req)) + } + err = invoker(ctx, method, req, resp, cc, opts...) + if err == nil { + if otgrpcOpts.logPayloads { + clientSpan.LogFields(log.Object("gRPC response", resp)) + } + } else { + SetSpanTags(clientSpan, err, true) + clientSpan.LogFields(log.String("event", "error"), log.String("message", err.Error())) + } + if otgrpcOpts.decorator != nil { + otgrpcOpts.decorator(clientSpan, method, req, resp, err) + } + return err + } +} + +// OpenTracingStreamClientInterceptor returns a grpc.StreamClientInterceptor suitable +// for use in a grpc.Dial call. The interceptor instruments streaming RPCs by creating +// a single span to correspond to the lifetime of the RPC's stream. +// +// For example: +// +// conn, err := grpc.Dial( +// address, +// ..., // (existing DialOptions) +// grpc.WithStreamInterceptor(otgrpc.OpenTracingStreamClientInterceptor(tracer))) +// +// All gRPC client spans will inject the OpenTracing SpanContext into the gRPC +// metadata; they will also look in the context.Context for an active +// in-process parent Span and establish a ChildOf reference if such a parent +// Span could be found. +func OpenTracingStreamClientInterceptor(tracer opentracing.Tracer, optFuncs ...Option) grpc.StreamClientInterceptor { + otgrpcOpts := newOptions() + otgrpcOpts.apply(optFuncs...) + return func( + ctx context.Context, + desc *grpc.StreamDesc, + cc *grpc.ClientConn, + method string, + streamer grpc.Streamer, + opts ...grpc.CallOption, + ) (grpc.ClientStream, error) { + var err error + var parentCtx opentracing.SpanContext + if parent := opentracing.SpanFromContext(ctx); parent != nil { + parentCtx = parent.Context() + } + if otgrpcOpts.inclusionFunc != nil && + !otgrpcOpts.inclusionFunc(parentCtx, method, nil, nil) { + return streamer(ctx, desc, cc, method, opts...) + } + + clientSpan := tracer.StartSpan( + method, + opentracing.ChildOf(parentCtx), + ext.SpanKindRPCClient, + gRPCComponentTag, + ) + ctx = injectSpanContext(ctx, tracer, clientSpan) + cs, err := streamer(ctx, desc, cc, method, opts...) 
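+		// If the stream could not be established, finish the client span
+		// immediately and surface the error; there will be no stream events
+		// left to trace.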
+ if err != nil { + clientSpan.LogFields(log.String("event", "error"), log.String("message", err.Error())) + SetSpanTags(clientSpan, err, true) + clientSpan.Finish() + return cs, err + } + return newOpenTracingClientStream(cs, method, desc, clientSpan, otgrpcOpts), nil + } +} + +func newOpenTracingClientStream(cs grpc.ClientStream, method string, desc *grpc.StreamDesc, clientSpan opentracing.Span, otgrpcOpts *options) grpc.ClientStream { + finishChan := make(chan struct{}) + + isFinished := new(int32) + *isFinished = 0 + finishFunc := func(err error) { + // The current OpenTracing specification forbids finishing a span more than + // once. Since we have multiple code paths that could concurrently call + // `finishFunc`, we need to add some sort of synchronization to guard against + // multiple finishing. + if !atomic.CompareAndSwapInt32(isFinished, 0, 1) { + return + } + close(finishChan) + defer clientSpan.Finish() + if err != nil { + clientSpan.LogFields(log.String("event", "error"), log.String("message", err.Error())) + SetSpanTags(clientSpan, err, true) + } + if otgrpcOpts.decorator != nil { + otgrpcOpts.decorator(clientSpan, method, nil, nil, err) + } + } + go func() { + select { + case <-finishChan: + // The client span is being finished by another code path; hence, no + // action is necessary. + case <-cs.Context().Done(): + finishFunc(cs.Context().Err()) + } + }() + otcs := &openTracingClientStream{ + ClientStream: cs, + desc: desc, + finishFunc: finishFunc, + } + + // The `ClientStream` interface allows one to omit calling `Recv` if it's + // known that the result will be `io.EOF`. See + // http://stackoverflow.com/q/42915337 + // In such cases, there's nothing that triggers the span to finish. We, + // therefore, set a finalizer so that the span and the context goroutine will + // at least be cleaned up when the garbage collector is run. 
+	runtime.SetFinalizer(otcs, func(otcs *openTracingClientStream) {
+		otcs.finishFunc(nil)
+	})
+	return otcs
+}
+
+type openTracingClientStream struct {
+	grpc.ClientStream
+	desc       *grpc.StreamDesc
+	finishFunc func(error)
+}
+
+func (cs *openTracingClientStream) Header() (metadata.MD, error) {
+	md, err := cs.ClientStream.Header()
+	if err != nil {
+		cs.finishFunc(err)
+	}
+	return md, err
+}
+
+func (cs *openTracingClientStream) SendMsg(m interface{}) error {
+	err := cs.ClientStream.SendMsg(m)
+	if err != nil {
+		cs.finishFunc(err)
+	}
+	return err
+}
+
+func (cs *openTracingClientStream) RecvMsg(m interface{}) error {
+	err := cs.ClientStream.RecvMsg(m)
+	if err == io.EOF {
+		cs.finishFunc(nil)
+		return err
+	} else if err != nil {
+		cs.finishFunc(err)
+		return err
+	}
+	if !cs.desc.ServerStreams {
+		cs.finishFunc(nil)
+	}
+	return err
+}
+
+func (cs *openTracingClientStream) CloseSend() error {
+	err := cs.ClientStream.CloseSend()
+	if err != nil {
+		cs.finishFunc(err)
+	}
+	return err
+}
+
+func injectSpanContext(ctx context.Context, tracer opentracing.Tracer, clientSpan opentracing.Span) context.Context {
+	md, ok := metadata.FromOutgoingContext(ctx)
+	if !ok {
+		md = metadata.New(nil)
+	} else {
+		md = md.Copy()
+	}
+	mdWriter := metadataReaderWriter{md}
+	err := tracer.Inject(clientSpan.Context(), opentracing.HTTPHeaders, mdWriter)
+	// We have no better place to record an error than the Span itself :-/
+	if err != nil {
+		clientSpan.LogFields(log.String("event", "Tracer.Inject() failed"), log.Error(err))
+	}
+	return metadata.NewOutgoingContext(ctx, md)
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/errors.go b/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/errors.go
new file mode 100644
index 00000000..41a6346f
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/errors.go
@@ -0,0 +1,69 @@
+package otgrpc
+
+import (
+	"github.com/opentracing/opentracing-go"
+	"github.com/opentracing/opentracing-go/ext"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+// A Class is a set of types of outcomes (including errors) that will often
+// be handled in the same way.
+type Class string
+
+const (
+	// Unknown represents outcomes that could not be classified.
+	Unknown Class = "0xx"
+	// Success represents outcomes that achieved the desired results.
+	Success Class = "2xx"
+	// ClientError represents errors that were the client's fault.
+	ClientError Class = "4xx"
+	// ServerError represents errors that were the server's fault.
+	ServerError Class = "5xx"
+)
+
+// ErrorClass returns the class of the given error.
+func ErrorClass(err error) Class {
+	if s, ok := status.FromError(err); ok {
+		switch s.Code() {
+		// Success or "success"
+		case codes.OK, codes.Canceled:
+			return Success
+
+		// Client errors
+		case codes.InvalidArgument, codes.NotFound, codes.AlreadyExists,
+			codes.PermissionDenied, codes.Unauthenticated, codes.FailedPrecondition,
+			codes.OutOfRange:
+			return ClientError
+
+		// Server errors
+		case codes.DeadlineExceeded, codes.ResourceExhausted, codes.Aborted,
+			codes.Unimplemented, codes.Internal, codes.Unavailable, codes.DataLoss:
+			return ServerError
+
+		// Not sure
+		case codes.Unknown:
+			fallthrough
+		default:
+			return Unknown
+		}
+	}
+	return Unknown
+}
+
+// SetSpanTags sets one or more tags on the given span according to the
+// error.
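+// When err is non-nil, spans on the client side are always tagged as errors,
+// while spans on the server side are tagged only for ServerError-class codes.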
+func SetSpanTags(span opentracing.Span, err error, client bool) {
+	c := ErrorClass(err)
+	code := codes.Unknown
+	if s, ok := status.FromError(err); ok {
+		code = s.Code()
+	}
+	span.SetTag("response_code", code)
+	span.SetTag("response_class", c)
+	if err == nil {
+		return
+	}
+	if client || c == ServerError {
+		ext.Error.Set(span, true)
+	}
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/options.go b/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/options.go
new file mode 100644
index 00000000..903e8382
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/options.go
@@ -0,0 +1,76 @@
+package otgrpc
+
+import "github.com/opentracing/opentracing-go"
+
+// Option instances may be used in OpenTracing(Server|Client)Interceptor
+// initialization.
+//
+// See this post about the "functional options" pattern:
+// http://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis
+type Option func(o *options)
+
+// LogPayloads returns an Option that tells the OpenTracing instrumentation to
+// try to log application payloads in both directions.
+func LogPayloads() Option {
+	return func(o *options) {
+		o.logPayloads = true
+	}
+}
+
+// SpanInclusionFunc provides an optional mechanism to decide whether or not
+// to trace a given gRPC call. Return true to create a Span and initiate
+// tracing, false to not create a Span and not trace.
+//
+// parentSpanCtx may be nil if no parent could be extracted from either the Go
+// context.Context (on the client) or the RPC (on the server).
+type SpanInclusionFunc func(
+	parentSpanCtx opentracing.SpanContext,
+	method string,
+	req, resp interface{}) bool
+
+// IncludingSpans binds a SpanInclusionFunc to the options.
+func IncludingSpans(inclusionFunc SpanInclusionFunc) Option {
+	return func(o *options) {
+		o.inclusionFunc = inclusionFunc
+	}
+}
+
+// SpanDecoratorFunc provides an (optional) mechanism for otgrpc users to add
+// arbitrary tags/logs/etc to the opentracing.Span associated with client
+// and/or server RPCs.
+type SpanDecoratorFunc func(
+	span opentracing.Span,
+	method string,
+	req, resp interface{},
+	grpcError error)
+
+// SpanDecorator binds a function that decorates gRPC Spans.
+func SpanDecorator(decorator SpanDecoratorFunc) Option {
+	return func(o *options) {
+		o.decorator = decorator
+	}
+}
+
+// The internal-only options struct. Obviously overkill at the moment, but it
+// will scale well as production use dictates other configuration and tuning
+// parameters.
+type options struct {
+	logPayloads bool
+	decorator   SpanDecoratorFunc
+	// May be nil.
+	inclusionFunc SpanInclusionFunc
+}
+
+// newOptions returns the default options.
+func newOptions() *options {
+	return &options{
+		logPayloads:   false,
+		inclusionFunc: nil,
+	}
+}
+
+func (o *options) apply(opts ...Option) {
+	for _, opt := range opts {
+		opt(o)
+	}
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/package.go b/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/package.go
new file mode 100644
index 00000000..4ff3d199
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/package.go
@@ -0,0 +1,5 @@
+// Package otgrpc provides OpenTracing support for any gRPC client or server.
+//
+// See the README for simple usage examples:
+// https://github.com/grpc-ecosystem/grpc-opentracing/blob/master/go/otgrpc/README.md
+package otgrpc
diff --git a/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/server.go b/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/server.go
new file mode 100644
index 00000000..62cf54d2
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/server.go
@@ -0,0 +1,141 @@
+package otgrpc
+
+import (
+	"github.com/opentracing/opentracing-go"
+	"github.com/opentracing/opentracing-go/ext"
+	"github.com/opentracing/opentracing-go/log"
+	"golang.org/x/net/context"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/metadata"
+)
+
+// OpenTracingServerInterceptor returns a grpc.UnaryServerInterceptor suitable
+// for use in a grpc.NewServer call.
+//
+// For example:
+//
+//     s := grpc.NewServer(
+//         ...,  // (existing ServerOptions)
+//         grpc.UnaryInterceptor(otgrpc.OpenTracingServerInterceptor(tracer)))
+//
+// All gRPC server spans will look for an OpenTracing SpanContext in the gRPC
+// metadata; if found, the server span will act as a ChildOf of that RPC
+// SpanContext.
+//
+// Root or not, the server Span will be embedded in the context.Context for the
+// application-specific gRPC handler(s) to access.
+func OpenTracingServerInterceptor(tracer opentracing.Tracer, optFuncs ...Option) grpc.UnaryServerInterceptor {
+	otgrpcOpts := newOptions()
+	otgrpcOpts.apply(optFuncs...)
+	return func(
+		ctx context.Context,
+		req interface{},
+		info *grpc.UnaryServerInfo,
+		handler grpc.UnaryHandler,
+	) (resp interface{}, err error) {
+		spanContext, err := extractSpanContext(ctx, tracer)
+		if err != nil && err != opentracing.ErrSpanContextNotFound {
+			// TODO: establish some sort of error reporting mechanism here. We
+			// don't know where to put such an error and must rely on Tracer
+			// implementations to do something appropriate for the time being.
+		}
+		if otgrpcOpts.inclusionFunc != nil &&
+			!otgrpcOpts.inclusionFunc(spanContext, info.FullMethod, req, nil) {
+			return handler(ctx, req)
+		}
+		serverSpan := tracer.StartSpan(
+			info.FullMethod,
+			ext.RPCServerOption(spanContext),
+			gRPCComponentTag,
+		)
+		defer serverSpan.Finish()
+
+		ctx = opentracing.ContextWithSpan(ctx, serverSpan)
+		if otgrpcOpts.logPayloads {
+			serverSpan.LogFields(log.Object("gRPC request", req))
+		}
+		resp, err = handler(ctx, req)
+		if err == nil {
+			if otgrpcOpts.logPayloads {
+				serverSpan.LogFields(log.Object("gRPC response", resp))
+			}
+		} else {
+			SetSpanTags(serverSpan, err, false)
+			serverSpan.LogFields(log.String("event", "error"), log.String("message", err.Error()))
+		}
+		if otgrpcOpts.decorator != nil {
+			otgrpcOpts.decorator(serverSpan, info.FullMethod, req, resp, err)
+		}
+		return resp, err
+	}
+}
+
+// OpenTracingStreamServerInterceptor returns a grpc.StreamServerInterceptor suitable
+// for use in a grpc.NewServer call. The interceptor instruments streaming RPCs by
+// creating a single span to correspond to the lifetime of the RPC's stream.
+//
+// For example:
+//
+//     s := grpc.NewServer(
+//         ...,  // (existing ServerOptions)
+//         grpc.StreamInterceptor(otgrpc.OpenTracingStreamServerInterceptor(tracer)))
+//
+// All gRPC server spans will look for an OpenTracing SpanContext in the gRPC
+// metadata; if found, the server span will act as a ChildOf of that RPC
+// SpanContext.
+//
+// Root or not, the server Span will be embedded in the context.Context for the
+// application-specific gRPC handler(s) to access.
+func OpenTracingStreamServerInterceptor(tracer opentracing.Tracer, optFuncs ...Option) grpc.StreamServerInterceptor { + otgrpcOpts := newOptions() + otgrpcOpts.apply(optFuncs...) + return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + spanContext, err := extractSpanContext(ss.Context(), tracer) + if err != nil && err != opentracing.ErrSpanContextNotFound { + // TODO: establish some sort of error reporting mechanism here. We + // don't know where to put such an error and must rely on Tracer + // implementations to do something appropriate for the time being. + } + if otgrpcOpts.inclusionFunc != nil && + !otgrpcOpts.inclusionFunc(spanContext, info.FullMethod, nil, nil) { + return handler(srv, ss) + } + + serverSpan := tracer.StartSpan( + info.FullMethod, + ext.RPCServerOption(spanContext), + gRPCComponentTag, + ) + defer serverSpan.Finish() + ss = &openTracingServerStream{ + ServerStream: ss, + ctx: opentracing.ContextWithSpan(ss.Context(), serverSpan), + } + err = handler(srv, ss) + if err != nil { + SetSpanTags(serverSpan, err, false) + serverSpan.LogFields(log.String("event", "error"), log.String("message", err.Error())) + } + if otgrpcOpts.decorator != nil { + otgrpcOpts.decorator(serverSpan, info.FullMethod, nil, nil, err) + } + return err + } +} + +type openTracingServerStream struct { + grpc.ServerStream + ctx context.Context +} + +func (ss *openTracingServerStream) Context() context.Context { + return ss.ctx +} + +func extractSpanContext(ctx context.Context, tracer opentracing.Tracer) (opentracing.SpanContext, error) { + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + md = metadata.New(nil) + } + return tracer.Extract(opentracing.HTTPHeaders, metadataReaderWriter{md}) +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/shared.go b/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/shared.go new file mode 100644 index 00000000..9abd5eaa --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/shared.go @@ -0,0 +1,42 @@ +package otgrpc + +import ( + "strings" + + opentracing "github.com/opentracing/opentracing-go" + "github.com/opentracing/opentracing-go/ext" + "google.golang.org/grpc/metadata" +) + +var ( + // Morally a const: + gRPCComponentTag = opentracing.Tag{string(ext.Component), "gRPC"} +) + +// metadataReaderWriter satisfies both the opentracing.TextMapReader and +// opentracing.TextMapWriter interfaces. +type metadataReaderWriter struct { + metadata.MD +} + +func (w metadataReaderWriter) Set(key, val string) { + // The GRPC HPACK implementation rejects any uppercase keys here. + // + // As such, since the HTTP_HEADERS format is case-insensitive anyway, we + // blindly lowercase the key (which is guaranteed to work in the + // Inject/Extract sense per the OpenTracing spec). 
+ key = strings.ToLower(key) + w.MD[key] = append(w.MD[key], val) +} + +func (w metadataReaderWriter) ForeachKey(handler func(key, val string) error) error { + for k, vals := range w.MD { + for _, v := range vals { + if err := handler(k, v); err != nil { + return err + } + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/go-immutable-radix/.gitignore b/vendor/github.com/hashicorp/go-immutable-radix/.gitignore new file mode 100644 index 00000000..daf913b1 --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/hashicorp/go-immutable-radix/.travis.yml b/vendor/github.com/hashicorp/go-immutable-radix/.travis.yml new file mode 100644 index 00000000..1a0bbea6 --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/.travis.yml @@ -0,0 +1,3 @@ +language: go +go: + - tip diff --git a/vendor/github.com/hashicorp/go-immutable-radix/LICENSE b/vendor/github.com/hashicorp/go-immutable-radix/LICENSE new file mode 100644 index 00000000..e87a115e --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/LICENSE @@ -0,0 +1,363 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. 
"Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. 
Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/go-immutable-radix/README.md b/vendor/github.com/hashicorp/go-immutable-radix/README.md new file mode 100644 index 00000000..8910fcc0 --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/README.md @@ -0,0 +1,41 @@ +go-immutable-radix [![Build Status](https://travis-ci.org/hashicorp/go-immutable-radix.png)](https://travis-ci.org/hashicorp/go-immutable-radix) +========= + +Provides the `iradix` package that implements an immutable [radix tree](http://en.wikipedia.org/wiki/Radix_tree). +The package only provides a single `Tree` implementation, optimized for sparse nodes. + +As a radix tree, it provides the following: + * O(k) operations. In many cases, this can be faster than a hash table since + the hash function is an O(k) operation, and hash tables have very poor cache locality. + * Minimum / Maximum value lookups + * Ordered iteration + +A tree supports using a transaction to batch multiple updates (insert, delete) +in a more efficient manner than performing each operation one at a time. + +For a mutable variant, see [go-radix](https://github.com/armon/go-radix). + +Documentation +============= + +The full documentation is available on [Godoc](http://godoc.org/github.com/hashicorp/go-immutable-radix). + +Example +======= + +Below is a simple example of usage + +```go +// Create a tree +r := iradix.New() +r, _, _ = r.Insert([]byte("foo"), 1) +r, _, _ = r.Insert([]byte("bar"), 2) +r, _, _ = r.Insert([]byte("foobar"), 2) + +// Find the longest prefix match +m, _, _ := r.Root().LongestPrefix([]byte("foozip")) +if string(m) != "foo" { + panic("should be foo") +} +``` + diff --git a/vendor/github.com/hashicorp/go-immutable-radix/edges.go b/vendor/github.com/hashicorp/go-immutable-radix/edges.go new file mode 100644 index 00000000..a6367477 --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/edges.go @@ -0,0 +1,21 @@ +package iradix + +import "sort" + +type edges []edge + +func (e edges) Len() int { + return len(e) +} + +func (e edges) Less(i, j int) bool { + return e[i].label < e[j].label +} + +func (e edges) Swap(i, j int) { + e[i], e[j] = e[j], e[i] +} + +func (e edges) Sort() { + sort.Sort(e) +} diff --git a/vendor/github.com/hashicorp/go-immutable-radix/iradix.go b/vendor/github.com/hashicorp/go-immutable-radix/iradix.go new file mode 100644 index 00000000..c7172c40 --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/iradix.go @@ -0,0 +1,657 @@ +package iradix + +import ( + "bytes" + "strings" + + "github.com/hashicorp/golang-lru/simplelru" +) + +const ( + // defaultModifiedCache is the default size of the modified node + // cache used per transaction. This is used to cache the updates + // to the nodes near the root, while the leaves do not need to be + // cached. This is important for very large transactions to prevent + // the modified cache from growing to be enormous. This is also used + // to set the max size of the mutation notify maps since those should + // also be bounded in a similar way. + defaultModifiedCache = 8192 +) + +// Tree implements an immutable radix tree. This can be treated as a +// Dictionary abstract data type. The main advantage over a standard +// hash map is prefix-based lookups and ordered iteration. 
The immutability +// means that it is safe to concurrently read from a Tree without any +// coordination. +type Tree struct { + root *Node + size int +} + +// New returns an empty Tree +func New() *Tree { + t := &Tree{ + root: &Node{ + mutateCh: make(chan struct{}), + }, + } + return t +} + +// Len is used to return the number of elements in the tree +func (t *Tree) Len() int { + return t.size +} + +// Txn is a transaction on the tree. This transaction is applied +// atomically and returns a new tree when committed. A transaction +// is not thread safe, and should only be used by a single goroutine. +type Txn struct { + // root is the modified root for the transaction. + root *Node + + // snap is a snapshot of the root node for use if we have to run the + // slow notify algorithm. + snap *Node + + // size tracks the size of the tree as it is modified during the + // transaction. + size int + + // writable is a cache of writable nodes that have been created during + // the course of the transaction. This allows us to re-use the same + // nodes for further writes and avoid unnecessary copies of nodes that + // have never been exposed outside the transaction. This will only hold + // up to defaultModifiedCache number of entries. + writable *simplelru.LRU + + // trackChannels is used to hold channels that need to be notified to + // signal mutation of the tree. This will only hold up to + // defaultModifiedCache number of entries, after which we will set the + // trackOverflow flag, which will cause us to use a more expensive + // algorithm to perform the notifications. Mutation tracking is only + // performed if trackMutate is true. + trackChannels map[chan struct{}]struct{} + trackOverflow bool + trackMutate bool +} + +// Txn starts a new transaction that can be used to mutate the tree +func (t *Tree) Txn() *Txn { + txn := &Txn{ + root: t.root, + snap: t.root, + size: t.size, + } + return txn +} + +// TrackMutate can be used to toggle if mutations are tracked. If this is enabled +// then notifications will be issued for affected internal nodes and leaves when +// the transaction is committed. +func (t *Txn) TrackMutate(track bool) { + t.trackMutate = track +} + +// trackChannel safely attempts to track the given mutation channel, setting the +// overflow flag if we can no longer track any more. This limits the amount of +// state that will accumulate during a transaction and we have a slower algorithm +// to switch to if we overflow. +func (t *Txn) trackChannel(ch chan struct{}) { + // In overflow, make sure we don't store any more objects. + if t.trackOverflow { + return + } + + // If this would overflow the state we reject it and set the flag (since + // we aren't tracking everything that's required any longer). + if len(t.trackChannels) >= defaultModifiedCache { + // Mark that we are in the overflow state + t.trackOverflow = true + + // Clear the map so that the channels can be garbage collected. It is + // safe to do this since we have already overflowed and will be using + // the slow notify algorithm. + t.trackChannels = nil + return + } + + // Create the map on the fly when we need it. + if t.trackChannels == nil { + t.trackChannels = make(map[chan struct{}]struct{}) + } + + // Otherwise we are good to track it. + t.trackChannels[ch] = struct{}{} +} + +// writeNode returns a node to be modified, if the current node has already been +// modified during the course of the transaction, it is used in-place. 
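+// Otherwise the node is copied once and the copy is cached in the writable
+// LRU so that later writes in the same transaction reuse it directly.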
Set +// forLeafUpdate to true if you are getting a write node to update the leaf, +// which will set leaf mutation tracking appropriately as well. +func (t *Txn) writeNode(n *Node, forLeafUpdate bool) *Node { + // Ensure the writable set exists. + if t.writable == nil { + lru, err := simplelru.NewLRU(defaultModifiedCache, nil) + if err != nil { + panic(err) + } + t.writable = lru + } + + // If this node has already been modified, we can continue to use it + // during this transaction. We know that we don't need to track it for + // a node update since the node is writable, but if this is for a leaf + // update we track it, in case the initial write to this node didn't + // update the leaf. + if _, ok := t.writable.Get(n); ok { + if t.trackMutate && forLeafUpdate && n.leaf != nil { + t.trackChannel(n.leaf.mutateCh) + } + return n + } + + // Mark this node as being mutated. + if t.trackMutate { + t.trackChannel(n.mutateCh) + } + + // Mark its leaf as being mutated, if appropriate. + if t.trackMutate && forLeafUpdate && n.leaf != nil { + t.trackChannel(n.leaf.mutateCh) + } + + // Copy the existing node. If you have set forLeafUpdate it will be + // safe to replace this leaf with another after you get your node for + // writing. You MUST replace it, because the channel associated with + // this leaf will be closed when this transaction is committed. + nc := &Node{ + mutateCh: make(chan struct{}), + leaf: n.leaf, + } + if n.prefix != nil { + nc.prefix = make([]byte, len(n.prefix)) + copy(nc.prefix, n.prefix) + } + if len(n.edges) != 0 { + nc.edges = make([]edge, len(n.edges)) + copy(nc.edges, n.edges) + } + + // Mark this node as writable. + t.writable.Add(nc, nil) + return nc +} + +// Visit all the nodes in the tree under n, and add their mutateChannels to the transaction +// Returns the size of the subtree visited +func (t *Txn) trackChannelsAndCount(n *Node) int { + // Count only leaf nodes + leaves := 0 + if n.leaf != nil { + leaves = 1 + } + // Mark this node as being mutated. + if t.trackMutate { + t.trackChannel(n.mutateCh) + } + + // Mark its leaf as being mutated, if appropriate. + if t.trackMutate && n.leaf != nil { + t.trackChannel(n.leaf.mutateCh) + } + + // Recurse on the children + for _, e := range n.edges { + leaves += t.trackChannelsAndCount(e.node) + } + return leaves +} + +// mergeChild is called to collapse the given node with its child. This is only +// called when the given node is not a leaf and has a single edge. +func (t *Txn) mergeChild(n *Node) { + // Mark the child node as being mutated since we are about to abandon + // it. We don't need to mark the leaf since we are retaining it if it + // is there. + e := n.edges[0] + child := e.node + if t.trackMutate { + t.trackChannel(child.mutateCh) + } + + // Merge the nodes. 
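+	// The parent absorbs the child's prefix, leaf, and edges below,
+	// collapsing one level of the tree. This is the path-compression
+	// step that keeps lookups proportional to the key length.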
+	n.prefix = concat(n.prefix, child.prefix)
+	n.leaf = child.leaf
+	if len(child.edges) != 0 {
+		n.edges = make([]edge, len(child.edges))
+		copy(n.edges, child.edges)
+	} else {
+		n.edges = nil
+	}
+}
+
+// insert does a recursive insertion
+func (t *Txn) insert(n *Node, k, search []byte, v interface{}) (*Node, interface{}, bool) {
+	// Handle key exhaustion
+	if len(search) == 0 {
+		var oldVal interface{}
+		didUpdate := false
+		if n.isLeaf() {
+			oldVal = n.leaf.val
+			didUpdate = true
+		}
+
+		nc := t.writeNode(n, true)
+		nc.leaf = &leafNode{
+			mutateCh: make(chan struct{}),
+			key:      k,
+			val:      v,
+		}
+		return nc, oldVal, didUpdate
+	}
+
+	// Look for the edge
+	idx, child := n.getEdge(search[0])
+
+	// No edge, create one
+	if child == nil {
+		e := edge{
+			label: search[0],
+			node: &Node{
+				mutateCh: make(chan struct{}),
+				leaf: &leafNode{
+					mutateCh: make(chan struct{}),
+					key:      k,
+					val:      v,
+				},
+				prefix: search,
+			},
+		}
+		nc := t.writeNode(n, false)
+		nc.addEdge(e)
+		return nc, nil, false
+	}
+
+	// Determine longest prefix of the search key on match
+	commonPrefix := longestPrefix(search, child.prefix)
+	if commonPrefix == len(child.prefix) {
+		search = search[commonPrefix:]
+		newChild, oldVal, didUpdate := t.insert(child, k, search, v)
+		if newChild != nil {
+			nc := t.writeNode(n, false)
+			nc.edges[idx].node = newChild
+			return nc, oldVal, didUpdate
+		}
+		return nil, oldVal, didUpdate
+	}
+
+	// Split the node
+	nc := t.writeNode(n, false)
+	splitNode := &Node{
+		mutateCh: make(chan struct{}),
+		prefix:   search[:commonPrefix],
+	}
+	nc.replaceEdge(edge{
+		label: search[0],
+		node:  splitNode,
+	})
+
+	// Restore the existing child node
+	modChild := t.writeNode(child, false)
+	splitNode.addEdge(edge{
+		label: modChild.prefix[commonPrefix],
+		node:  modChild,
+	})
+	modChild.prefix = modChild.prefix[commonPrefix:]
+
+	// Create a new leaf node
+	leaf := &leafNode{
+		mutateCh: make(chan struct{}),
+		key:      k,
+		val:      v,
+	}
+
+	// If the new key is a subset, add it to this node
+	search = search[commonPrefix:]
+	if len(search) == 0 {
+		splitNode.leaf = leaf
+		return nc, nil, false
+	}
+
+	// Create a new edge for the node
+	splitNode.addEdge(edge{
+		label: search[0],
+		node: &Node{
+			mutateCh: make(chan struct{}),
+			leaf:     leaf,
+			prefix:   search,
+		},
+	})
+	return nc, nil, false
+}
+
+// delete does a recursive deletion
+func (t *Txn) delete(parent, n *Node, search []byte) (*Node, *leafNode) {
+	// Check for key exhaustion
+	if len(search) == 0 {
+		if !n.isLeaf() {
+			return nil, nil
+		}
+
+		// Remove the leaf node
+		nc := t.writeNode(n, true)
+		nc.leaf = nil
+
+		// Check if this node should be merged
+		if n != t.root && len(nc.edges) == 1 {
+			t.mergeChild(nc)
+		}
+		return nc, n.leaf
+	}
+
+	// Look for an edge
+	label := search[0]
+	idx, child := n.getEdge(label)
+	if child == nil || !bytes.HasPrefix(search, child.prefix) {
+		return nil, nil
+	}
+
+	// Consume the search prefix
+	search = search[len(child.prefix):]
+	newChild, leaf := t.delete(n, child, search)
+	if newChild == nil {
+		return nil, nil
+	}
+
+	// Copy this node. WATCH OUT - it's safe to pass "false" here because we
+	// will only ADD a leaf via nc.mergeChild() if there isn't one due to
+	// the !nc.isLeaf() check in the logic just below. This is pretty subtle,
+	// so be careful if you change any of the logic here.
+	nc := t.writeNode(n, false)
+
+	// Delete the edge if the node has no edges
+	if newChild.leaf == nil && len(newChild.edges) == 0 {
+		nc.delEdge(label)
+		if n != t.root && len(nc.edges) == 1 && !nc.isLeaf() {
+			t.mergeChild(nc)
+		}
+	} else {
+		nc.edges[idx].node = newChild
+	}
+	return nc, leaf
+}
+
+// deletePrefix does a recursive deletion of all keys under the given prefix
+func (t *Txn) deletePrefix(parent, n *Node, search []byte) (*Node, int) {
+	// Check for key exhaustion
+	if len(search) == 0 {
+		nc := t.writeNode(n, true)
+		if n.isLeaf() {
+			nc.leaf = nil
+		}
+		nc.edges = nil
+		return nc, t.trackChannelsAndCount(n)
+	}
+
+	// Look for an edge
+	label := search[0]
+	idx, child := n.getEdge(label)
+	// We make sure that either the child node's prefix starts with the search term, or the search term starts with the child node's prefix
+	// Need to do both so that we can delete prefixes that don't correspond to any node in the tree
+	if child == nil || (!bytes.HasPrefix(child.prefix, search) && !bytes.HasPrefix(search, child.prefix)) {
+		return nil, 0
+	}
+
+	// Consume the search prefix
+	if len(child.prefix) > len(search) {
+		search = []byte("")
+	} else {
+		search = search[len(child.prefix):]
+	}
+	newChild, numDeletions := t.deletePrefix(n, child, search)
+	if newChild == nil {
+		return nil, 0
+	}
+	// Copy this node. WATCH OUT - it's safe to pass "false" here because we
+	// will only ADD a leaf via nc.mergeChild() if there isn't one due to
+	// the !nc.isLeaf() check in the logic just below. This is pretty subtle,
+	// so be careful if you change any of the logic here.
+
+	nc := t.writeNode(n, false)
+
+	// Delete the edge if the node has no edges
+	if newChild.leaf == nil && len(newChild.edges) == 0 {
+		nc.delEdge(label)
+		if n != t.root && len(nc.edges) == 1 && !nc.isLeaf() {
+			t.mergeChild(nc)
+		}
+	} else {
+		nc.edges[idx].node = newChild
+	}
+	return nc, numDeletions
+}
+
+// Insert is used to add or update a given key. The return provides
+// the previous value and a bool indicating if any was set.
+func (t *Txn) Insert(k []byte, v interface{}) (interface{}, bool) {
+	newRoot, oldVal, didUpdate := t.insert(t.root, k, k, v)
+	if newRoot != nil {
+		t.root = newRoot
+	}
+	if !didUpdate {
+		t.size++
+	}
+	return oldVal, didUpdate
+}
+
+// Delete is used to delete a given key. Returns the old value if any,
+// and a bool indicating if the key was set.
+func (t *Txn) Delete(k []byte) (interface{}, bool) {
+	newRoot, leaf := t.delete(nil, t.root, k)
+	if newRoot != nil {
+		t.root = newRoot
+	}
+	if leaf != nil {
+		t.size--
+		return leaf.val, true
+	}
+	return nil, false
+}
+
+// DeletePrefix is used to delete an entire subtree that matches the prefix.
+// This will delete all nodes under that prefix
+func (t *Txn) DeletePrefix(prefix []byte) bool {
+	newRoot, numDeletions := t.deletePrefix(nil, t.root, prefix)
+	if newRoot != nil {
+		t.root = newRoot
+		t.size = t.size - numDeletions
+		return true
+	}
+	return false
+}
+
+// Root returns the current root of the radix tree within this
+// transaction. The root is not safe across insert and delete operations,
+// but can be used to read the current state during a transaction.
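+//
+// A minimal read-your-writes sketch inside an open transaction (the key
+// and value are illustrative only):
+//
+//	txn := tree.Txn()
+//	txn.Insert([]byte("k"), 1)
+//	v, ok := txn.Root().Get([]byte("k")) // ok == true, v == 1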
+func (t *Txn) Root() *Node { + return t.root +} + +// Get is used to lookup a specific key, returning +// the value and if it was found +func (t *Txn) Get(k []byte) (interface{}, bool) { + return t.root.Get(k) +} + +// GetWatch is used to lookup a specific key, returning +// the watch channel, value and if it was found +func (t *Txn) GetWatch(k []byte) (<-chan struct{}, interface{}, bool) { + return t.root.GetWatch(k) +} + +// Commit is used to finalize the transaction and return a new tree. If mutation +// tracking is turned on then notifications will also be issued. +func (t *Txn) Commit() *Tree { + nt := t.CommitOnly() + if t.trackMutate { + t.Notify() + } + return nt +} + +// CommitOnly is used to finalize the transaction and return a new tree, but +// does not issue any notifications until Notify is called. +func (t *Txn) CommitOnly() *Tree { + nt := &Tree{t.root, t.size} + t.writable = nil + return nt +} + +// slowNotify does a complete comparison of the before and after trees in order +// to trigger notifications. This doesn't require any additional state but it +// is very expensive to compute. +func (t *Txn) slowNotify() { + snapIter := t.snap.rawIterator() + rootIter := t.root.rawIterator() + for snapIter.Front() != nil || rootIter.Front() != nil { + // If we've exhausted the nodes in the old snapshot, we know + // there's nothing remaining to notify. + if snapIter.Front() == nil { + return + } + snapElem := snapIter.Front() + + // If we've exhausted the nodes in the new root, we know we need + // to invalidate everything that remains in the old snapshot. We + // know from the loop condition there's something in the old + // snapshot. + if rootIter.Front() == nil { + close(snapElem.mutateCh) + if snapElem.isLeaf() { + close(snapElem.leaf.mutateCh) + } + snapIter.Next() + continue + } + + // Do one string compare so we can check the various conditions + // below without repeating the compare. + cmp := strings.Compare(snapIter.Path(), rootIter.Path()) + + // If the snapshot is behind the root, then we must have deleted + // this node during the transaction. + if cmp < 0 { + close(snapElem.mutateCh) + if snapElem.isLeaf() { + close(snapElem.leaf.mutateCh) + } + snapIter.Next() + continue + } + + // If the snapshot is ahead of the root, then we must have added + // this node during the transaction. + if cmp > 0 { + rootIter.Next() + continue + } + + // If we have the same path, then we need to see if we mutated a + // node and possibly the leaf. + rootElem := rootIter.Front() + if snapElem != rootElem { + close(snapElem.mutateCh) + if snapElem.leaf != nil && (snapElem.leaf != rootElem.leaf) { + close(snapElem.leaf.mutateCh) + } + } + snapIter.Next() + rootIter.Next() + } +} + +// Notify is used along with TrackMutate to trigger notifications. This must +// only be done once a transaction is committed via CommitOnly, and it is called +// automatically by Commit. +func (t *Txn) Notify() { + if !t.trackMutate { + return + } + + // If we've overflowed the tracking state we can't use it in any way and + // need to do a full tree compare. + if t.trackOverflow { + t.slowNotify() + } else { + for ch := range t.trackChannels { + close(ch) + } + } + + // Clean up the tracking state so that a re-notify is safe (will trigger + // the else clause above which will be a no-op). + t.trackChannels = nil + t.trackOverflow = false +} + +// Insert is used to add or update a given key. The return provides +// the new tree, previous value and a bool indicating if any was set. 
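+//
+// Because the tree is immutable, the receiver is left untouched and the
+// update is only visible through the returned tree. A minimal sketch
+// (illustrative values):
+//
+//	t1 := iradix.New()
+//	t2, _, _ := t1.Insert([]byte("a"), 1)
+//	// t1.Len() == 0, t2.Len() == 1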
+func (t *Tree) Insert(k []byte, v interface{}) (*Tree, interface{}, bool) {
+	txn := t.Txn()
+	old, ok := txn.Insert(k, v)
+	return txn.Commit(), old, ok
+}
+
+// Delete is used to delete a given key. Returns the new tree,
+// old value if any, and a bool indicating if the key was set.
+func (t *Tree) Delete(k []byte) (*Tree, interface{}, bool) {
+	txn := t.Txn()
+	old, ok := txn.Delete(k)
+	return txn.Commit(), old, ok
+}
+
+// DeletePrefix is used to delete all nodes starting with a given prefix. Returns the new tree,
+// and a bool indicating if the prefix matched any nodes
+func (t *Tree) DeletePrefix(k []byte) (*Tree, bool) {
+	txn := t.Txn()
+	ok := txn.DeletePrefix(k)
+	return txn.Commit(), ok
+}
+
+// Root returns the root node of the tree which can be used for richer
+// query operations.
+func (t *Tree) Root() *Node {
+	return t.root
+}
+
+// Get is used to lookup a specific key, returning
+// the value and if it was found
+func (t *Tree) Get(k []byte) (interface{}, bool) {
+	return t.root.Get(k)
+}
+
+// longestPrefix finds the length of the shared prefix
+// of two strings
+func longestPrefix(k1, k2 []byte) int {
+	max := len(k1)
+	if l := len(k2); l < max {
+		max = l
+	}
+	var i int
+	for i = 0; i < max; i++ {
+		if k1[i] != k2[i] {
+			break
+		}
+	}
+	return i
+}
+
+// concat two byte slices, returning a third new copy
+func concat(a, b []byte) []byte {
+	c := make([]byte, len(a)+len(b))
+	copy(c, a)
+	copy(c[len(a):], b)
+	return c
+}
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/iter.go b/vendor/github.com/hashicorp/go-immutable-radix/iter.go
new file mode 100644
index 00000000..9815e025
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-immutable-radix/iter.go
@@ -0,0 +1,91 @@
+package iradix
+
+import "bytes"
+
+// Iterator is used to iterate over a set of nodes
+// in pre-order
+type Iterator struct {
+	node  *Node
+	stack []edges
+}
+
+// SeekPrefixWatch is used to seek the iterator to a given prefix
+// and returns the watch channel of the finest granularity
+func (i *Iterator) SeekPrefixWatch(prefix []byte) (watch <-chan struct{}) {
+	// Wipe the stack
+	i.stack = nil
+	n := i.node
+	watch = n.mutateCh
+	search := prefix
+	for {
+		// Check for key exhaustion
+		if len(search) == 0 {
+			i.node = n
+			return
+		}
+
+		// Look for an edge
+		_, n = n.getEdge(search[0])
+		if n == nil {
+			i.node = nil
+			return
+		}
+
+		// Update to the finest granularity as the search makes progress
+		watch = n.mutateCh
+
+		// Consume the search prefix
+		if bytes.HasPrefix(search, n.prefix) {
+			search = search[len(n.prefix):]
+
+		} else if bytes.HasPrefix(n.prefix, search) {
+			i.node = n
+			return
+		} else {
+			i.node = nil
+			return
+		}
+	}
+}
+
+// SeekPrefix is used to seek the iterator to a given prefix
+func (i *Iterator) SeekPrefix(prefix []byte) {
+	i.SeekPrefixWatch(prefix)
+}
+
+// Next returns the next node in order
+func (i *Iterator) Next() ([]byte, interface{}, bool) {
+	// Initialize our stack if needed
+	if i.stack == nil && i.node != nil {
+		i.stack = []edges{
+			edges{
+				edge{node: i.node},
+			},
+		}
+	}
+
+	for len(i.stack) > 0 {
+		// Inspect the last element of the stack
+		n := len(i.stack)
+		last := i.stack[n-1]
+		elem := last[0].node
+
+		// Update the stack
+		if len(last) > 1 {
+			i.stack[n-1] = last[1:]
+		} else {
+			i.stack = i.stack[:n-1]
+		}
+
+		// Push the edges onto the frontier
+		if len(elem.edges) > 0 {
+			i.stack = append(i.stack, elem.edges)
+		}
+
+		// Return the leaf values if any
+		if elem.leaf != nil {
+			return elem.leaf.key, elem.leaf.val, true
+		}
+	}
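+	// Stack is exhausted: every reachable leaf has been yielded.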
return nil, nil, false +} diff --git a/vendor/github.com/hashicorp/go-immutable-radix/node.go b/vendor/github.com/hashicorp/go-immutable-radix/node.go new file mode 100644 index 00000000..ef494fa7 --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/node.go @@ -0,0 +1,352 @@ +package iradix + +import ( + "bytes" + "sort" +) + +// WalkFn is used when walking the tree. Takes a +// key and value, returning if iteration should +// be terminated. +type WalkFn func(k []byte, v interface{}) bool + +// leafNode is used to represent a value +type leafNode struct { + mutateCh chan struct{} + key []byte + val interface{} +} + +// edge is used to represent an edge node +type edge struct { + label byte + node *Node +} + +// Node is an immutable node in the radix tree +type Node struct { + // mutateCh is closed if this node is modified + mutateCh chan struct{} + + // leaf is used to store possible leaf + leaf *leafNode + + // prefix is the common prefix we ignore + prefix []byte + + // Edges should be stored in-order for iteration. + // We avoid a fully materialized slice to save memory, + // since in most cases we expect to be sparse + edges edges +} + +func (n *Node) isLeaf() bool { + return n.leaf != nil +} + +func (n *Node) addEdge(e edge) { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= e.label + }) + n.edges = append(n.edges, e) + if idx != num { + copy(n.edges[idx+1:], n.edges[idx:num]) + n.edges[idx] = e + } +} + +func (n *Node) replaceEdge(e edge) { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= e.label + }) + if idx < num && n.edges[idx].label == e.label { + n.edges[idx].node = e.node + return + } + panic("replacing missing edge") +} + +func (n *Node) getEdge(label byte) (int, *Node) { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= label + }) + if idx < num && n.edges[idx].label == label { + return idx, n.edges[idx].node + } + return -1, nil +} + +func (n *Node) delEdge(label byte) { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= label + }) + if idx < num && n.edges[idx].label == label { + copy(n.edges[idx:], n.edges[idx+1:]) + n.edges[len(n.edges)-1] = edge{} + n.edges = n.edges[:len(n.edges)-1] + } +} + +func (n *Node) GetWatch(k []byte) (<-chan struct{}, interface{}, bool) { + search := k + watch := n.mutateCh + for { + // Check for key exhaustion + if len(search) == 0 { + if n.isLeaf() { + return n.leaf.mutateCh, n.leaf.val, true + } + break + } + + // Look for an edge + _, n = n.getEdge(search[0]) + if n == nil { + break + } + + // Update to the finest granularity as the search makes progress + watch = n.mutateCh + + // Consume the search prefix + if bytes.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + } else { + break + } + } + return watch, nil, false +} + +func (n *Node) Get(k []byte) (interface{}, bool) { + _, val, ok := n.GetWatch(k) + return val, ok +} + +// LongestPrefix is like Get, but instead of an +// exact match, it will return the longest prefix match. 
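+//
+// For example, with "foo" and "foobar" in the tree (as in the README
+// example), looking up "foozip" returns the entry for "foo":
+//
+//	m, _, _ := tree.Root().LongestPrefix([]byte("foozip")) // m == "foo"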
+func (n *Node) LongestPrefix(k []byte) ([]byte, interface{}, bool) {
+	var last *leafNode
+	search := k
+	for {
+		// Look for a leaf node
+		if n.isLeaf() {
+			last = n.leaf
+		}
+
+		// Check for key exhaustion
+		if len(search) == 0 {
+			break
+		}
+
+		// Look for an edge
+		_, n = n.getEdge(search[0])
+		if n == nil {
+			break
+		}
+
+		// Consume the search prefix
+		if bytes.HasPrefix(search, n.prefix) {
+			search = search[len(n.prefix):]
+		} else {
+			break
+		}
+	}
+	if last != nil {
+		return last.key, last.val, true
+	}
+	return nil, nil, false
+}
+
+// Minimum is used to return the minimum value in the tree
+func (n *Node) Minimum() ([]byte, interface{}, bool) {
+	for {
+		if n.isLeaf() {
+			return n.leaf.key, n.leaf.val, true
+		}
+		if len(n.edges) > 0 {
+			n = n.edges[0].node
+		} else {
+			break
+		}
+	}
+	return nil, nil, false
+}
+
+// Maximum is used to return the maximum value in the tree
+func (n *Node) Maximum() ([]byte, interface{}, bool) {
+	for {
+		if num := len(n.edges); num > 0 {
+			n = n.edges[num-1].node
+			continue
+		}
+		if n.isLeaf() {
+			return n.leaf.key, n.leaf.val, true
+		} else {
+			break
+		}
+	}
+	return nil, nil, false
+}
+
+// Iterator is used to return an iterator at
+// the given node to walk the tree
+func (n *Node) Iterator() *Iterator {
+	return &Iterator{node: n}
+}
+
+// rawIterator is used to return a raw iterator at the given node to walk the
+// tree.
+func (n *Node) rawIterator() *rawIterator {
+	iter := &rawIterator{node: n}
+	iter.Next()
+	return iter
+}
+
+// Walk is used to walk the tree
+func (n *Node) Walk(fn WalkFn) {
+	recursiveWalk(n, fn)
+}
+
+// WalkPrefix is used to walk the tree under a prefix
+func (n *Node) WalkPrefix(prefix []byte, fn WalkFn) {
+	search := prefix
+	for {
+		// Check for key exhaustion
+		if len(search) == 0 {
+			recursiveWalk(n, fn)
+			return
+		}
+
+		// Look for an edge
+		_, n = n.getEdge(search[0])
+		if n == nil {
+			break
+		}
+
+		// Consume the search prefix
+		if bytes.HasPrefix(search, n.prefix) {
+			search = search[len(n.prefix):]
+
+		} else if bytes.HasPrefix(n.prefix, search) {
+			// Child may be under our search prefix
+			recursiveWalk(n, fn)
+			return
+		} else {
+			break
+		}
+	}
+}
+
+// WalkPath is used to walk the tree, but only visiting nodes
+// from the root down to a given leaf. Where WalkPrefix walks
+// all the entries *under* the given prefix, this walks the
+// entries *above* the given prefix.
+func (n *Node) WalkPath(path []byte, fn WalkFn) {
+	search := path
+	for {
+		// Visit the leaf values if any
+		if n.leaf != nil && fn(n.leaf.key, n.leaf.val) {
+			return
+		}
+
+		// Check for key exhaustion
+		if len(search) == 0 {
+			return
+		}
+
+		// Look for an edge
+		_, n = n.getEdge(search[0])
+		if n == nil {
+			return
+		}
+
+		// Consume the search prefix
+		if bytes.HasPrefix(search, n.prefix) {
+			search = search[len(n.prefix):]
+		} else {
+			break
+		}
+	}
+}
+
+func (n *Node) Seek(prefix []byte) *Seeker {
+	search := prefix
+	p := &pos{n: n}
+	for {
+		// Check for key exhaustion
+		if len(search) == 0 {
+			return &Seeker{p}
+		}
+
+		num := len(n.edges)
+		idx := sort.Search(num, func(i int) bool {
+			return n.edges[i].label >= search[0]
+		})
+		p.current = idx
+		if idx < len(n.edges) {
+			n = n.edges[idx].node
+			if bytes.HasPrefix(search, n.prefix) && len(n.edges) > 0 {
+				search = search[len(n.prefix):]
+				p.current++
+				p = &pos{n: n, prev: p}
+				continue
+			}
+		}
+		p.current++
+		return &Seeker{p}
+	}
+}
+
+type Seeker struct {
+	*pos
+}
+
+type pos struct {
+	n       *Node
+	current int
+	prev    *pos
+	isLeaf  bool
+}
+
+func (s *Seeker) Next() (k []byte, v interface{}, ok bool) {
+	if s.current >= len(s.n.edges) {
+		if s.prev == nil {
+			return nil, nil, false
+		}
+		s.pos = s.prev
+		return s.Next()
+	}
+
+	edge := s.n.edges[s.current]
+	s.current++
+	if edge.node.leaf != nil && !s.isLeaf {
+		s.isLeaf = true
+		s.current--
+		return edge.node.leaf.key, edge.node.leaf.val, true
+	}
+	s.isLeaf = false
+	s.pos = &pos{n: edge.node, prev: s.pos}
+	return s.Next()
+}
+
+// recursiveWalk is used to do a pre-order walk of a node
+// recursively. Returns true if the walk should be aborted
+func recursiveWalk(n *Node, fn WalkFn) bool {
+	// Visit the leaf values if any
+	if n.leaf != nil && fn(n.leaf.key, n.leaf.val) {
+		return true
+	}
+
+	// Recurse on the children
+	for _, e := range n.edges {
+		if recursiveWalk(e.node, fn) {
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go b/vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go
new file mode 100644
index 00000000..04814c13
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go
@@ -0,0 +1,78 @@
+package iradix
+
+// rawIterator visits each of the nodes in the tree, even the ones that are not
+// leaves. It keeps track of the effective path (what a leaf at a given node
+// would be called), which is useful for comparing trees.
+type rawIterator struct {
+	// node is the starting node in the tree for the iterator.
+	node *Node
+
+	// stack keeps track of edges in the frontier.
+	stack []rawStackEntry
+
+	// pos is the current position of the iterator.
+	pos *Node
+
+	// path is the effective path of the current iterator position,
+	// regardless of whether the current node is a leaf.
+	path string
+}
+
+// rawStackEntry is used to keep track of the cumulative common path as well as
+// its associated edges in the frontier.
+type rawStackEntry struct {
+	path  string
+	edges edges
+}
+
+// Front returns the current node that has been iterated to.
+func (i *rawIterator) Front() *Node {
+	return i.pos
+}
+
+// Path returns the effective path of the current node, even if it's not actually
+// a leaf.
+func (i *rawIterator) Path() string {
+	return i.path
+}
+
+// Next advances the iterator to the next node.
+func (i *rawIterator) Next() {
+	// Initialize our stack if needed.
+ if i.stack == nil && i.node != nil { + i.stack = []rawStackEntry{ + rawStackEntry{ + edges: edges{ + edge{node: i.node}, + }, + }, + } + } + + for len(i.stack) > 0 { + // Inspect the last element of the stack. + n := len(i.stack) + last := i.stack[n-1] + elem := last.edges[0].node + + // Update the stack. + if len(last.edges) > 1 { + i.stack[n-1].edges = last.edges[1:] + } else { + i.stack = i.stack[:n-1] + } + + // Push the edges onto the frontier. + if len(elem.edges) > 0 { + path := last.path + string(elem.prefix) + i.stack = append(i.stack, rawStackEntry{path, elem.edges}) + } + + i.pos = elem + i.path = last.path + string(elem.prefix) + return + } + + i.pos = nil + i.path = "" +} diff --git a/vendor/github.com/hashicorp/golang-lru/LICENSE b/vendor/github.com/hashicorp/golang-lru/LICENSE new file mode 100644 index 00000000..be2cc4df --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/LICENSE @@ -0,0 +1,362 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. 
"You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. 
You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. 
However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. 
Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go new file mode 100644 index 00000000..a86c8539 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go @@ -0,0 +1,177 @@ +package simplelru + +import ( + "container/list" + "errors" +) + +// EvictCallback is used to get a callback when a cache entry is evicted +type EvictCallback func(key interface{}, value interface{}) + +// LRU implements a non-thread safe fixed size LRU cache +type LRU struct { + size int + evictList *list.List + items map[interface{}]*list.Element + onEvict EvictCallback +} + +// entry is used to hold a value in the evictList +type entry struct { + key interface{} + value interface{} +} + +// NewLRU constructs an LRU of the given size +func NewLRU(size int, onEvict EvictCallback) (*LRU, error) { + if size <= 0 { + return nil, errors.New("Must provide a positive size") + } + c := &LRU{ + size: size, + evictList: list.New(), + items: make(map[interface{}]*list.Element), + onEvict: onEvict, + } + return c, nil +} + +// Purge is used to completely clear the cache. 
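+// The eviction callback, if one was provided to NewLRU, is invoked for
+// every entry as it is removed.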
+func (c *LRU) Purge() { + for k, v := range c.items { + if c.onEvict != nil { + c.onEvict(k, v.Value.(*entry).value) + } + delete(c.items, k) + } + c.evictList.Init() +} + +// Add adds a value to the cache. Returns true if an eviction occurred. +func (c *LRU) Add(key, value interface{}) (evicted bool) { + // Check for existing item + if ent, ok := c.items[key]; ok { + c.evictList.MoveToFront(ent) + ent.Value.(*entry).value = value + return false + } + + // Add new item + ent := &entry{key, value} + entry := c.evictList.PushFront(ent) + c.items[key] = entry + + evict := c.evictList.Len() > c.size + // Verify size not exceeded + if evict { + c.removeOldest() + } + return evict +} + +// Get looks up a key's value from the cache. +func (c *LRU) Get(key interface{}) (value interface{}, ok bool) { + if ent, ok := c.items[key]; ok { + c.evictList.MoveToFront(ent) + if ent.Value.(*entry) == nil { + return nil, false + } + return ent.Value.(*entry).value, true + } + return +} + +// Contains checks if a key is in the cache, without updating the recent-ness +// or deleting it for being stale. +func (c *LRU) Contains(key interface{}) (ok bool) { + _, ok = c.items[key] + return ok +} + +// Peek returns the key value (or undefined if not found) without updating +// the "recently used"-ness of the key. +func (c *LRU) Peek(key interface{}) (value interface{}, ok bool) { + var ent *list.Element + if ent, ok = c.items[key]; ok { + return ent.Value.(*entry).value, true + } + return nil, ok +} + +// Remove removes the provided key from the cache, returning if the +// key was contained. +func (c *LRU) Remove(key interface{}) (present bool) { + if ent, ok := c.items[key]; ok { + c.removeElement(ent) + return true + } + return false +} + +// RemoveOldest removes the oldest item from the cache. +func (c *LRU) RemoveOldest() (key interface{}, value interface{}, ok bool) { + ent := c.evictList.Back() + if ent != nil { + c.removeElement(ent) + kv := ent.Value.(*entry) + return kv.key, kv.value, true + } + return nil, nil, false +} + +// GetOldest returns the oldest entry +func (c *LRU) GetOldest() (key interface{}, value interface{}, ok bool) { + ent := c.evictList.Back() + if ent != nil { + kv := ent.Value.(*entry) + return kv.key, kv.value, true + } + return nil, nil, false +} + +// Keys returns a slice of the keys in the cache, from oldest to newest. +func (c *LRU) Keys() []interface{} { + keys := make([]interface{}, len(c.items)) + i := 0 + for ent := c.evictList.Back(); ent != nil; ent = ent.Prev() { + keys[i] = ent.Value.(*entry).key + i++ + } + return keys +} + +// Len returns the number of items in the cache. +func (c *LRU) Len() int { + return c.evictList.Len() +} + +// Resize changes the cache size. +func (c *LRU) Resize(size int) (evicted int) { + diff := c.Len() - size + if diff < 0 { + diff = 0 + } + for i := 0; i < diff; i++ { + c.removeOldest() + } + c.size = size + return diff +} + +// removeOldest removes the oldest item from the cache. 
+func (c *LRU) removeOldest() { + ent := c.evictList.Back() + if ent != nil { + c.removeElement(ent) + } +} + +// removeElement is used to remove a given list element from the cache +func (c *LRU) removeElement(e *list.Element) { + c.evictList.Remove(e) + kv := e.Value.(*entry) + delete(c.items, kv.key) + if c.onEvict != nil { + c.onEvict(kv.key, kv.value) + } +} diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go new file mode 100644 index 00000000..92d70934 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go @@ -0,0 +1,39 @@ +package simplelru + +// LRUCache is the interface for simple LRU cache. +type LRUCache interface { + // Adds a value to the cache, returns true if an eviction occurred and + // updates the "recently used"-ness of the key. + Add(key, value interface{}) bool + + // Returns key's value from the cache and + // updates the "recently used"-ness of the key. #value, isFound + Get(key interface{}) (value interface{}, ok bool) + + // Checks if a key exists in cache without updating the recent-ness. + Contains(key interface{}) (ok bool) + + // Returns key's value without updating the "recently used"-ness of the key. + Peek(key interface{}) (value interface{}, ok bool) + + // Removes a key from the cache. + Remove(key interface{}) bool + + // Removes the oldest entry from cache. + RemoveOldest() (interface{}, interface{}, bool) + + // Returns the oldest entry from the cache. #key, value, isFound + GetOldest() (interface{}, interface{}, bool) + + // Returns a slice of the keys in the cache, from oldest to newest. + Keys() []interface{} + + // Returns the number of items in the cache. + Len() int + + // Clears all cache entries. + Purge() + + // Resizes cache, returning number evicted + Resize(int) int +} diff --git a/vendor/github.com/ishidawataru/sctp/.gitignore b/vendor/github.com/ishidawataru/sctp/.gitignore new file mode 100644 index 00000000..cf2d826c --- /dev/null +++ b/vendor/github.com/ishidawataru/sctp/.gitignore @@ -0,0 +1,16 @@ +# Binaries for programs and plugins +*.exe +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 +.glide/ + +example/example diff --git a/vendor/github.com/ishidawataru/sctp/.travis.yml b/vendor/github.com/ishidawataru/sctp/.travis.yml new file mode 100644 index 00000000..01a76be9 --- /dev/null +++ b/vendor/github.com/ishidawataru/sctp/.travis.yml @@ -0,0 +1,18 @@ +language: go +go: + - 1.9.x + - 1.10.x + - 1.11.x + - 1.12.x + - 1.13.x + +script: + - go test -v -race ./... + - GOOS=linux GOARCH=amd64 go build . + - GOOS=linux GOARCH=arm go build . + - GOOS=linux GOARCH=arm64 go build . + - GOOS=linux GOARCH=ppc64le go build . + - (go version | grep go1.6 > /dev/null) || GOOS=linux GOARCH=s390x go build . +# can be compiled but not functional: + - GOOS=linux GOARCH=386 go build . + - GOOS=windows GOARCH=amd64 go build . diff --git a/vendor/github.com/ishidawataru/sctp/GO_LICENSE b/vendor/github.com/ishidawataru/sctp/GO_LICENSE new file mode 100644 index 00000000..6a66aea5 --- /dev/null +++ b/vendor/github.com/ishidawataru/sctp/GO_LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/ishidawataru/sctp/LICENSE b/vendor/github.com/ishidawataru/sctp/LICENSE new file mode 100644 index 00000000..8dada3ed --- /dev/null +++ b/vendor/github.com/ishidawataru/sctp/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/ishidawataru/sctp/NOTICE b/vendor/github.com/ishidawataru/sctp/NOTICE new file mode 100644 index 00000000..cfb675fd --- /dev/null +++ b/vendor/github.com/ishidawataru/sctp/NOTICE @@ -0,0 +1,3 @@ +This source code includes the following third-party code + +- ipsock_linux.go : licensed by the Go authors; see the GO_LICENSE file for the license that applies to the code diff --git a/vendor/github.com/ishidawataru/sctp/README.md b/vendor/github.com/ishidawataru/sctp/README.md new file mode 100644 index 00000000..574ececa --- /dev/null +++ b/vendor/github.com/ishidawataru/sctp/README.md @@ -0,0 +1,18 @@ +Stream Control Transmission Protocol (SCTP) +---- + +[![Build Status](https://travis-ci.org/ishidawataru/sctp.svg?branch=master)](https://travis-ci.org/ishidawataru/sctp/builds) + +Examples +---- + +See `example/sctp.go` + +```sh +$ cd example +$ go build +$ # run example SCTP server +$ ./example -server -port 1000 -ip 10.10.0.1,10.20.0.1 +$ # run example SCTP client +$ ./example -port 1000 -ip 10.10.0.1,10.20.0.1 +``` diff --git a/vendor/github.com/ishidawataru/sctp/go.mod b/vendor/github.com/ishidawataru/sctp/go.mod new file mode 100644 index 00000000..5adf982b --- /dev/null +++ b/vendor/github.com/ishidawataru/sctp/go.mod @@ -0,0 +1,3 @@ +module github.com/ishidawataru/sctp + +go 1.12 diff --git a/vendor/github.com/ishidawataru/sctp/ipsock_linux.go b/vendor/github.com/ishidawataru/sctp/ipsock_linux.go new file mode 100644 index 00000000..3df30fa4 --- /dev/null +++ b/vendor/github.com/ishidawataru/sctp/ipsock_linux.go @@ -0,0 +1,222 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the GO_LICENSE file. + +package sctp + +import ( + "net" + "os" + "sync" + "syscall" +) + +//from https://github.com/golang/go +// Boolean to int. +func boolint(b bool) int { + if b { + return 1 + } + return 0 +} + +//from https://github.com/golang/go +func ipToSockaddr(family int, ip net.IP, port int, zone string) (syscall.Sockaddr, error) { + switch family { + case syscall.AF_INET: + if len(ip) == 0 { + ip = net.IPv4zero + } + ip4 := ip.To4() + if ip4 == nil { + return nil, &net.AddrError{Err: "non-IPv4 address", Addr: ip.String()} + } + sa := &syscall.SockaddrInet4{Port: port} + copy(sa.Addr[:], ip4) + return sa, nil + case syscall.AF_INET6: + // In general, an IP wildcard address, which is either + // "0.0.0.0" or "::", means the entire IP addressing + // space. For some historical reason, it is used to + // specify "any available address" on some operations + // of IP node. + // + // When the IP node supports IPv4-mapped IPv6 address, + // we allow a listener to listen to the wildcard + // address of both IP addressing spaces by specifying + // IPv6 wildcard address. + if len(ip) == 0 || ip.Equal(net.IPv4zero) { + ip = net.IPv6zero + } + // We accept any IPv6 address including IPv4-mapped + // IPv6 address.
+ ip6 := ip.To16() + if ip6 == nil { + return nil, &net.AddrError{Err: "non-IPv6 address", Addr: ip.String()} + } + //we set ZoneId to 0, as currently we use this function only to probe the IP capabilities of the host + //if real Zone handling is required, the zone cache implementation in golang/net should be pulled here + sa := &syscall.SockaddrInet6{Port: port, ZoneId: 0} + copy(sa.Addr[:], ip6) + return sa, nil + } + return nil, &net.AddrError{Err: "invalid address family", Addr: ip.String()} +} + +//from https://github.com/golang/go +func sockaddr(a *net.TCPAddr, family int) (syscall.Sockaddr, error) { + if a == nil { + return nil, nil + } + return ipToSockaddr(family, a.IP, a.Port, a.Zone) +} + +//from https://github.com/golang/go +type ipStackCapabilities struct { + sync.Once // guards following + ipv4Enabled bool + ipv6Enabled bool + ipv4MappedIPv6Enabled bool +} + +//from https://github.com/golang/go +var ipStackCaps ipStackCapabilities + +//from https://github.com/golang/go +// supportsIPv4 reports whether the platform supports IPv4 networking +// functionality. +func supportsIPv4() bool { + ipStackCaps.Once.Do(ipStackCaps.probe) + return ipStackCaps.ipv4Enabled +} + +//from https://github.com/golang/go +// supportsIPv6 reports whether the platform supports IPv6 networking +// functionality. +func supportsIPv6() bool { + ipStackCaps.Once.Do(ipStackCaps.probe) + return ipStackCaps.ipv6Enabled +} + +//from https://github.com/golang/go +// supportsIPv4map reports whether the platform supports mapping an +// IPv4 address inside an IPv6 address at transport layer +// protocols. See RFC 4291, RFC 4038 and RFC 3493. +func supportsIPv4map() bool { + ipStackCaps.Once.Do(ipStackCaps.probe) + return ipStackCaps.ipv4MappedIPv6Enabled +} + +//from https://github.com/golang/go +// Probe probes IPv4, IPv6 and IPv4-mapped IPv6 communication +// capabilities which are controlled by the IPV6_V6ONLY socket option +// and kernel configuration. +// +// Should we try to use the IPv4 socket interface if we're only +// dealing with IPv4 sockets? As long as the host system understands +// IPv4-mapped IPv6, it's okay to pass IPv4-mapped IPv6 addresses to +// the IPv6 interface. That simplifies our code and is most +// general. Unfortunately, we need to run on kernels built without +// IPv6 support too. So probe the kernel to figure it out.
+func (p *ipStackCapabilities) probe() { + s, err := syscall.Socket(syscall.AF_INET, syscall.SOCK_STREAM, syscall.IPPROTO_TCP) + switch err { + case syscall.EAFNOSUPPORT, syscall.EPROTONOSUPPORT: + case nil: + syscall.Close(s) + p.ipv4Enabled = true + } + var probes = []struct { + laddr net.TCPAddr + value int + }{ + // IPv6 communication capability + {laddr: net.TCPAddr{IP: net.IPv6loopback}, value: 1}, + // IPv4-mapped IPv6 address communication capability + {laddr: net.TCPAddr{IP: net.IPv4(127, 0, 0, 1)}, value: 0}, + } + + for i := range probes { + s, err := syscall.Socket(syscall.AF_INET6, syscall.SOCK_STREAM, syscall.IPPROTO_TCP) + if err != nil { + continue + } + defer syscall.Close(s) + syscall.SetsockoptInt(s, syscall.IPPROTO_IPV6, syscall.IPV6_V6ONLY, probes[i].value) + sa, err := sockaddr(&(probes[i].laddr), syscall.AF_INET6) + if err != nil { + continue + } + if err := syscall.Bind(s, sa); err != nil { + continue + } + if i == 0 { + p.ipv6Enabled = true + } else { + p.ipv4MappedIPv6Enabled = true + } + } +} + +//from https://github.com/golang/go +//Change: we check the first IP address in the list of candidate SCTP IP addresses +func (a *SCTPAddr) isWildcard() bool { + if a == nil { + return true + } + if 0 == len(a.IPAddrs) { + return true + } + + return a.IPAddrs[0].IP.IsUnspecified() +} + +func (a *SCTPAddr) family() int { + if a != nil { + for _, ip := range a.IPAddrs { + if ip.IP.To4() == nil { + return syscall.AF_INET6 + } + } + } + return syscall.AF_INET +} + +//from https://github.com/golang/go +func favoriteAddrFamily(network string, laddr *SCTPAddr, raddr *SCTPAddr, mode string) (family int, ipv6only bool) { + switch network[len(network)-1] { + case '4': + return syscall.AF_INET, false + case '6': + return syscall.AF_INET6, true + } + + if mode == "listen" && (laddr == nil || laddr.isWildcard()) { + if supportsIPv4map() || !supportsIPv4() { + return syscall.AF_INET6, false + } + if laddr == nil { + return syscall.AF_INET, false + } + return laddr.family(), false + } + + if (laddr == nil || laddr.family() == syscall.AF_INET) && + (raddr == nil || raddr.family() == syscall.AF_INET) { + return syscall.AF_INET, false + } + return syscall.AF_INET6, false +} + +//from https://github.com/golang/go +//Changes: it is for SCTP only +func setDefaultSockopts(s int, family int, ipv6only bool) error { + if family == syscall.AF_INET6 { + // Allow both IP versions even if the OS default + // is otherwise. Note that some operating systems + // never admit this option. + syscall.SetsockoptInt(s, syscall.IPPROTO_IPV6, syscall.IPV6_V6ONLY, boolint(ipv6only)) + } + // Allow broadcast. + return os.NewSyscallError("setsockopt", syscall.SetsockoptInt(s, syscall.SOL_SOCKET, syscall.SO_BROADCAST, 1)) +} diff --git a/vendor/github.com/ishidawataru/sctp/sctp.go b/vendor/github.com/ishidawataru/sctp/sctp.go new file mode 100644 index 00000000..94842f42 --- /dev/null +++ b/vendor/github.com/ishidawataru/sctp/sctp.go @@ -0,0 +1,729 @@ +// Copyright 2019 Wataru Ishida. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +// implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package sctp + +import ( + "bytes" + "encoding/binary" + "fmt" + "net" + "strconv" + "strings" + "sync" + "sync/atomic" + "syscall" + "time" + "unsafe" +) + +const ( + SOL_SCTP = 132 + + SCTP_BINDX_ADD_ADDR = 0x01 + SCTP_BINDX_REM_ADDR = 0x02 + + MSG_NOTIFICATION = 0x8000 +) + +const ( + SCTP_RTOINFO = iota + SCTP_ASSOCINFO + SCTP_INITMSG + SCTP_NODELAY + SCTP_AUTOCLOSE + SCTP_SET_PEER_PRIMARY_ADDR + SCTP_PRIMARY_ADDR + SCTP_ADAPTATION_LAYER + SCTP_DISABLE_FRAGMENTS + SCTP_PEER_ADDR_PARAMS + SCTP_DEFAULT_SENT_PARAM + SCTP_EVENTS + SCTP_I_WANT_MAPPED_V4_ADDR + SCTP_MAXSEG + SCTP_STATUS + SCTP_GET_PEER_ADDR_INFO + SCTP_DELAYED_ACK_TIME + SCTP_DELAYED_ACK = SCTP_DELAYED_ACK_TIME + SCTP_DELAYED_SACK = SCTP_DELAYED_ACK_TIME + + SCTP_SOCKOPT_BINDX_ADD = 100 + SCTP_SOCKOPT_BINDX_REM = 101 + SCTP_SOCKOPT_PEELOFF = 102 + SCTP_GET_PEER_ADDRS = 108 + SCTP_GET_LOCAL_ADDRS = 109 + SCTP_SOCKOPT_CONNECTX = 110 + SCTP_SOCKOPT_CONNECTX3 = 111 +) + +const ( + SCTP_EVENT_DATA_IO = 1 << iota + SCTP_EVENT_ASSOCIATION + SCTP_EVENT_ADDRESS + SCTP_EVENT_SEND_FAILURE + SCTP_EVENT_PEER_ERROR + SCTP_EVENT_SHUTDOWN + SCTP_EVENT_PARTIAL_DELIVERY + SCTP_EVENT_ADAPTATION_LAYER + SCTP_EVENT_AUTHENTICATION + SCTP_EVENT_SENDER_DRY + + SCTP_EVENT_ALL = SCTP_EVENT_DATA_IO | SCTP_EVENT_ASSOCIATION | SCTP_EVENT_ADDRESS | SCTP_EVENT_SEND_FAILURE | SCTP_EVENT_PEER_ERROR | SCTP_EVENT_SHUTDOWN | SCTP_EVENT_PARTIAL_DELIVERY | SCTP_EVENT_ADAPTATION_LAYER | SCTP_EVENT_AUTHENTICATION | SCTP_EVENT_SENDER_DRY +) + +type SCTPNotificationType int + +const ( + SCTP_SN_TYPE_BASE = SCTPNotificationType(iota + (1 << 15)) + SCTP_ASSOC_CHANGE + SCTP_PEER_ADDR_CHANGE + SCTP_SEND_FAILED + SCTP_REMOTE_ERROR + SCTP_SHUTDOWN_EVENT + SCTP_PARTIAL_DELIVERY_EVENT + SCTP_ADAPTATION_INDICATION + SCTP_AUTHENTICATION_INDICATION + SCTP_SENDER_DRY_EVENT +) + +type NotificationHandler func([]byte) error + +type EventSubscribe struct { + DataIO uint8 + Association uint8 + Address uint8 + SendFailure uint8 + PeerError uint8 + Shutdown uint8 + PartialDelivery uint8 + AdaptationLayer uint8 + Authentication uint8 + SenderDry uint8 +} + +const ( + SCTP_CMSG_INIT = iota + SCTP_CMSG_SNDRCV + SCTP_CMSG_SNDINFO + SCTP_CMSG_RCVINFO + SCTP_CMSG_NXTINFO +) + +const ( + SCTP_UNORDERED = 1 << iota + SCTP_ADDR_OVER + SCTP_ABORT + SCTP_SACK_IMMEDIATELY + SCTP_EOF +) + +const ( + SCTP_MAX_STREAM = 0xffff +) + +type InitMsg struct { + NumOstreams uint16 + MaxInstreams uint16 + MaxAttempts uint16 + MaxInitTimeout uint16 +} + +type SndRcvInfo struct { + Stream uint16 + SSN uint16 + Flags uint16 + _ uint16 + PPID uint32 + Context uint32 + TTL uint32 + TSN uint32 + CumTSN uint32 + AssocID int32 +} + +type SndInfo struct { + SID uint16 + Flags uint16 + PPID uint32 + Context uint32 + AssocID int32 +} + +type GetAddrsOld struct { + AssocID int32 + AddrNum int32 + Addrs uintptr +} + +type NotificationHeader struct { + Type uint16 + Flags uint16 + Length uint32 +} + +type SCTPState uint16 + +const ( + SCTP_COMM_UP = SCTPState(iota) + SCTP_COMM_LOST + SCTP_RESTART + SCTP_SHUTDOWN_COMP + SCTP_CANT_STR_ASSOC +) + +var nativeEndian binary.ByteOrder +var sndRcvInfoSize uintptr + +func init() { + i := uint16(1) + if *(*byte)(unsafe.Pointer(&i)) == 0 { + nativeEndian = binary.BigEndian + } else { + nativeEndian = binary.LittleEndian + } + info := SndRcvInfo{} + sndRcvInfoSize = unsafe.Sizeof(info) +} + +func toBuf(v interface{}) []byte { + var buf bytes.Buffer + binary.Write(&buf, 
nativeEndian, v) + return buf.Bytes() +} + +func htons(h uint16) uint16 { + if nativeEndian == binary.LittleEndian { + return (h << 8 & 0xff00) | (h >> 8 & 0xff) + } + return h +} + +var ntohs = htons + +// setInitOpts sets options for an SCTP association initialization +// see https://tools.ietf.org/html/rfc4960#page-25 +func setInitOpts(fd int, options InitMsg) error { + optlen := unsafe.Sizeof(options) + _, _, err := setsockopt(fd, SCTP_INITMSG, uintptr(unsafe.Pointer(&options)), uintptr(optlen)) + return err +} + +func setNumOstreams(fd, num int) error { + return setInitOpts(fd, InitMsg{NumOstreams: uint16(num)}) +} + +type SCTPAddr struct { + IPAddrs []net.IPAddr + Port int +} + +func (a *SCTPAddr) ToRawSockAddrBuf() []byte { + p := htons(uint16(a.Port)) + if len(a.IPAddrs) == 0 { // if a.IPAddrs list is empty - fall back to IPv4 zero addr + s := syscall.RawSockaddrInet4{ + Family: syscall.AF_INET, + Port: p, + } + copy(s.Addr[:], net.IPv4zero) + return toBuf(s) + } + buf := []byte{} + for _, ip := range a.IPAddrs { + ipBytes := ip.IP + if len(ipBytes) == 0 { + ipBytes = net.IPv4zero + } + if ip4 := ipBytes.To4(); ip4 != nil { + s := syscall.RawSockaddrInet4{ + Family: syscall.AF_INET, + Port: p, + } + copy(s.Addr[:], ip4) + buf = append(buf, toBuf(s)...) + } else { + var scopeid uint32 + ifi, err := net.InterfaceByName(ip.Zone) + if err == nil { + scopeid = uint32(ifi.Index) + } + s := syscall.RawSockaddrInet6{ + Family: syscall.AF_INET6, + Port: p, + Scope_id: scopeid, + } + copy(s.Addr[:], ipBytes) + buf = append(buf, toBuf(s)...) + } + } + return buf +} + +func (a *SCTPAddr) String() string { + var b bytes.Buffer + + for n, i := range a.IPAddrs { + if i.IP.To4() != nil { + b.WriteString(i.String()) + } else if i.IP.To16() != nil { + b.WriteRune('[') + b.WriteString(i.String()) + b.WriteRune(']') + } + if n < len(a.IPAddrs)-1 { + b.WriteRune('/') + } + } + b.WriteRune(':') + b.WriteString(strconv.Itoa(a.Port)) + return b.String() +} + +func (a *SCTPAddr) Network() string { return "sctp" } + +func ResolveSCTPAddr(network, addrs string) (*SCTPAddr, error) { + tcpnet := "" + switch network { + case "", "sctp": + tcpnet = "tcp" + case "sctp4": + tcpnet = "tcp4" + case "sctp6": + tcpnet = "tcp6" + default: + return nil, fmt.Errorf("invalid net: %s", network) + } + elems := strings.Split(addrs, "/") + if len(elems) == 0 { + return nil, fmt.Errorf("invalid input: %s", addrs) + } + ipaddrs := make([]net.IPAddr, 0, len(elems)) + for _, e := range elems[:len(elems)-1] { + tcpa, err := net.ResolveTCPAddr(tcpnet, e+":") + if err != nil { + return nil, err + } + ipaddrs = append(ipaddrs, net.IPAddr{IP: tcpa.IP, Zone: tcpa.Zone}) + } + tcpa, err := net.ResolveTCPAddr(tcpnet, elems[len(elems)-1]) + if err != nil { + return nil, err + } + if tcpa.IP != nil { + ipaddrs = append(ipaddrs, net.IPAddr{IP: tcpa.IP, Zone: tcpa.Zone}) + } else { + ipaddrs = nil + } + return &SCTPAddr{ + IPAddrs: ipaddrs, + Port: tcpa.Port, + }, nil +} + +func SCTPConnect(fd int, addr *SCTPAddr) (int, error) { + buf := addr.ToRawSockAddrBuf() + param := GetAddrsOld{ + AddrNum: int32(len(buf)), + Addrs: uintptr(uintptr(unsafe.Pointer(&buf[0]))), + } + optlen := unsafe.Sizeof(param) + _, _, err := getsockopt(fd, SCTP_SOCKOPT_CONNECTX3, uintptr(unsafe.Pointer(¶m)), uintptr(unsafe.Pointer(&optlen))) + if err == nil { + return int(param.AssocID), nil + } else if err != syscall.ENOPROTOOPT { + return 0, err + } + r0, _, err := setsockopt(fd, SCTP_SOCKOPT_CONNECTX, uintptr(unsafe.Pointer(&buf[0])), uintptr(len(buf))) + return 
int(r0), err +} + +func SCTPBind(fd int, addr *SCTPAddr, flags int) error { + var option uintptr + switch flags { + case SCTP_BINDX_ADD_ADDR: + option = SCTP_SOCKOPT_BINDX_ADD + case SCTP_BINDX_REM_ADDR: + option = SCTP_SOCKOPT_BINDX_REM + default: + return syscall.EINVAL + } + + buf := addr.ToRawSockAddrBuf() + _, _, err := setsockopt(fd, option, uintptr(unsafe.Pointer(&buf[0])), uintptr(len(buf))) + return err +} + +type SCTPConn struct { + _fd int32 + notificationHandler NotificationHandler +} + +func (c *SCTPConn) fd() int { + return int(atomic.LoadInt32(&c._fd)) +} + +func NewSCTPConn(fd int, handler NotificationHandler) *SCTPConn { + conn := &SCTPConn{ + _fd: int32(fd), + notificationHandler: handler, + } + return conn +} + +func (c *SCTPConn) Write(b []byte) (int, error) { + return c.SCTPWrite(b, nil) +} + +func (c *SCTPConn) Read(b []byte) (int, error) { + n, _, err := c.SCTPRead(b) + if n < 0 { + n = 0 + } + return n, err +} + +func (c *SCTPConn) SetInitMsg(numOstreams, maxInstreams, maxAttempts, maxInitTimeout int) error { + return setInitOpts(c.fd(), InitMsg{ + NumOstreams: uint16(numOstreams), + MaxInstreams: uint16(maxInstreams), + MaxAttempts: uint16(maxAttempts), + MaxInitTimeout: uint16(maxInitTimeout), + }) +} + +func (c *SCTPConn) SubscribeEvents(flags int) error { + var d, a, ad, sf, p, sh, pa, ada, au, se uint8 + if flags&SCTP_EVENT_DATA_IO > 0 { + d = 1 + } + if flags&SCTP_EVENT_ASSOCIATION > 0 { + a = 1 + } + if flags&SCTP_EVENT_ADDRESS > 0 { + ad = 1 + } + if flags&SCTP_EVENT_SEND_FAILURE > 0 { + sf = 1 + } + if flags&SCTP_EVENT_PEER_ERROR > 0 { + p = 1 + } + if flags&SCTP_EVENT_SHUTDOWN > 0 { + sh = 1 + } + if flags&SCTP_EVENT_PARTIAL_DELIVERY > 0 { + pa = 1 + } + if flags&SCTP_EVENT_ADAPTATION_LAYER > 0 { + ada = 1 + } + if flags&SCTP_EVENT_AUTHENTICATION > 0 { + au = 1 + } + if flags&SCTP_EVENT_SENDER_DRY > 0 { + se = 1 + } + param := EventSubscribe{ + DataIO: d, + Association: a, + Address: ad, + SendFailure: sf, + PeerError: p, + Shutdown: sh, + PartialDelivery: pa, + AdaptationLayer: ada, + Authentication: au, + SenderDry: se, + } + optlen := unsafe.Sizeof(param) + _, _, err := setsockopt(c.fd(), SCTP_EVENTS, uintptr(unsafe.Pointer(¶m)), uintptr(optlen)) + return err +} + +func (c *SCTPConn) SubscribedEvents() (int, error) { + param := EventSubscribe{} + optlen := unsafe.Sizeof(param) + _, _, err := getsockopt(c.fd(), SCTP_EVENTS, uintptr(unsafe.Pointer(¶m)), uintptr(unsafe.Pointer(&optlen))) + if err != nil { + return 0, err + } + var flags int + if param.DataIO > 0 { + flags |= SCTP_EVENT_DATA_IO + } + if param.Association > 0 { + flags |= SCTP_EVENT_ASSOCIATION + } + if param.Address > 0 { + flags |= SCTP_EVENT_ADDRESS + } + if param.SendFailure > 0 { + flags |= SCTP_EVENT_SEND_FAILURE + } + if param.PeerError > 0 { + flags |= SCTP_EVENT_PEER_ERROR + } + if param.Shutdown > 0 { + flags |= SCTP_EVENT_SHUTDOWN + } + if param.PartialDelivery > 0 { + flags |= SCTP_EVENT_PARTIAL_DELIVERY + } + if param.AdaptationLayer > 0 { + flags |= SCTP_EVENT_ADAPTATION_LAYER + } + if param.Authentication > 0 { + flags |= SCTP_EVENT_AUTHENTICATION + } + if param.SenderDry > 0 { + flags |= SCTP_EVENT_SENDER_DRY + } + return flags, nil +} + +func (c *SCTPConn) SetDefaultSentParam(info *SndRcvInfo) error { + optlen := unsafe.Sizeof(*info) + _, _, err := setsockopt(c.fd(), SCTP_DEFAULT_SENT_PARAM, uintptr(unsafe.Pointer(info)), uintptr(optlen)) + return err +} + +func (c *SCTPConn) GetDefaultSentParam() (*SndRcvInfo, error) { + info := &SndRcvInfo{} + optlen := 
unsafe.Sizeof(*info) + _, _, err := getsockopt(c.fd(), SCTP_DEFAULT_SENT_PARAM, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(&optlen))) + return info, err +} + +func resolveFromRawAddr(ptr unsafe.Pointer, n int) (*SCTPAddr, error) { + addr := &SCTPAddr{ + IPAddrs: make([]net.IPAddr, n), + } + + switch family := (*(*syscall.RawSockaddrAny)(ptr)).Addr.Family; family { + case syscall.AF_INET: + addr.Port = int(ntohs(uint16((*(*syscall.RawSockaddrInet4)(ptr)).Port))) + tmp := syscall.RawSockaddrInet4{} + size := unsafe.Sizeof(tmp) + for i := 0; i < n; i++ { + a := *(*syscall.RawSockaddrInet4)(unsafe.Pointer( + uintptr(ptr) + size*uintptr(i))) + addr.IPAddrs[i] = net.IPAddr{IP: a.Addr[:]} + } + case syscall.AF_INET6: + addr.Port = int(ntohs(uint16((*(*syscall.RawSockaddrInet4)(ptr)).Port))) + tmp := syscall.RawSockaddrInet6{} + size := unsafe.Sizeof(tmp) + for i := 0; i < n; i++ { + a := *(*syscall.RawSockaddrInet6)(unsafe.Pointer( + uintptr(ptr) + size*uintptr(i))) + var zone string + ifi, err := net.InterfaceByIndex(int(a.Scope_id)) + if err == nil { + zone = ifi.Name + } + addr.IPAddrs[i] = net.IPAddr{IP: a.Addr[:], Zone: zone} + } + default: + return nil, fmt.Errorf("unknown address family: %d", family) + } + return addr, nil +} + +func sctpGetAddrs(fd, id, optname int) (*SCTPAddr, error) { + + type getaddrs struct { + assocId int32 + addrNum uint32 + addrs [4096]byte + } + param := getaddrs{ + assocId: int32(id), + } + optlen := unsafe.Sizeof(param) + _, _, err := getsockopt(fd, uintptr(optname), uintptr(unsafe.Pointer(¶m)), uintptr(unsafe.Pointer(&optlen))) + if err != nil { + return nil, err + } + return resolveFromRawAddr(unsafe.Pointer(¶m.addrs), int(param.addrNum)) +} + +func (c *SCTPConn) SCTPGetPrimaryPeerAddr() (*SCTPAddr, error) { + + type sctpGetSetPrim struct { + assocId int32 + addrs [128]byte + } + param := sctpGetSetPrim{ + assocId: int32(0), + } + optlen := unsafe.Sizeof(param) + _, _, err := getsockopt(c.fd(), SCTP_PRIMARY_ADDR, uintptr(unsafe.Pointer(¶m)), uintptr(unsafe.Pointer(&optlen))) + if err != nil { + return nil, err + } + return resolveFromRawAddr(unsafe.Pointer(¶m.addrs), 1) +} + +func (c *SCTPConn) SCTPLocalAddr(id int) (*SCTPAddr, error) { + return sctpGetAddrs(c.fd(), id, SCTP_GET_LOCAL_ADDRS) +} + +func (c *SCTPConn) SCTPRemoteAddr(id int) (*SCTPAddr, error) { + return sctpGetAddrs(c.fd(), id, SCTP_GET_PEER_ADDRS) +} + +func (c *SCTPConn) LocalAddr() net.Addr { + addr, err := sctpGetAddrs(c.fd(), 0, SCTP_GET_LOCAL_ADDRS) + if err != nil { + return nil + } + return addr +} + +func (c *SCTPConn) RemoteAddr() net.Addr { + addr, err := sctpGetAddrs(c.fd(), 0, SCTP_GET_PEER_ADDRS) + if err != nil { + return nil + } + return addr +} + +func (c *SCTPConn) PeelOff(id int) (*SCTPConn, error) { + type peeloffArg struct { + assocId int32 + sd int + } + param := peeloffArg{ + assocId: int32(id), + } + optlen := unsafe.Sizeof(param) + _, _, err := getsockopt(c.fd(), SCTP_SOCKOPT_PEELOFF, uintptr(unsafe.Pointer(¶m)), uintptr(unsafe.Pointer(&optlen))) + if err != nil { + return nil, err + } + return &SCTPConn{_fd: int32(param.sd)}, nil +} + +func (c *SCTPConn) SetDeadline(t time.Time) error { + return syscall.EOPNOTSUPP +} + +func (c *SCTPConn) SetReadDeadline(t time.Time) error { + return syscall.EOPNOTSUPP +} + +func (c *SCTPConn) SetWriteDeadline(t time.Time) error { + return syscall.EOPNOTSUPP +} + +type SCTPListener struct { + fd int + m sync.Mutex +} + +func (ln *SCTPListener) Addr() net.Addr { + laddr, err := sctpGetAddrs(ln.fd, 0, SCTP_GET_LOCAL_ADDRS) + if 
err != nil { + return nil + } + return laddr +} + +type SCTPSndRcvInfoWrappedConn struct { + conn *SCTPConn +} + +func NewSCTPSndRcvInfoWrappedConn(conn *SCTPConn) *SCTPSndRcvInfoWrappedConn { + conn.SubscribeEvents(SCTP_EVENT_DATA_IO) + return &SCTPSndRcvInfoWrappedConn{conn} +} + +func (c *SCTPSndRcvInfoWrappedConn) Write(b []byte) (int, error) { + if len(b) < int(sndRcvInfoSize) { + return 0, syscall.EINVAL + } + info := (*SndRcvInfo)(unsafe.Pointer(&b[0])) + n, err := c.conn.SCTPWrite(b[sndRcvInfoSize:], info) + return n + int(sndRcvInfoSize), err +} + +func (c *SCTPSndRcvInfoWrappedConn) Read(b []byte) (int, error) { + if len(b) < int(sndRcvInfoSize) { + return 0, syscall.EINVAL + } + n, info, err := c.conn.SCTPRead(b[sndRcvInfoSize:]) + if err != nil { + return n, err + } + copy(b, toBuf(info)) + return n + int(sndRcvInfoSize), err +} + +func (c *SCTPSndRcvInfoWrappedConn) Close() error { + return c.conn.Close() +} + +func (c *SCTPSndRcvInfoWrappedConn) LocalAddr() net.Addr { + return c.conn.LocalAddr() +} + +func (c *SCTPSndRcvInfoWrappedConn) RemoteAddr() net.Addr { + return c.conn.RemoteAddr() +} + +func (c *SCTPSndRcvInfoWrappedConn) SetDeadline(t time.Time) error { + return c.conn.SetDeadline(t) +} + +func (c *SCTPSndRcvInfoWrappedConn) SetReadDeadline(t time.Time) error { + return c.conn.SetReadDeadline(t) +} + +func (c *SCTPSndRcvInfoWrappedConn) SetWriteDeadline(t time.Time) error { + return c.conn.SetWriteDeadline(t) +} + +func (c *SCTPSndRcvInfoWrappedConn) SetWriteBuffer(bytes int) error { + return c.conn.SetWriteBuffer(bytes) +} + +func (c *SCTPSndRcvInfoWrappedConn) GetWriteBuffer() (int, error) { + return c.conn.GetWriteBuffer() +} + +func (c *SCTPSndRcvInfoWrappedConn) SetReadBuffer(bytes int) error { + return c.conn.SetReadBuffer(bytes) +} + +func (c *SCTPSndRcvInfoWrappedConn) GetReadBuffer() (int, error) { + return c.conn.GetReadBuffer() +} + +// SocketConfig contains options for the SCTP socket. +type SocketConfig struct { + // If Control is not nil it is called after the socket is created but before + // it is bound or connected. + Control func(network, address string, c syscall.RawConn) error + + // InitMsg is the options to send in the initial SCTP message + InitMsg InitMsg +} + +func (cfg *SocketConfig) Listen(net string, laddr *SCTPAddr) (*SCTPListener, error) { + return listenSCTPExtConfig(net, laddr, cfg.InitMsg, cfg.Control) +} + +func (cfg *SocketConfig) Dial(net string, laddr, raddr *SCTPAddr) (*SCTPConn, error) { + return dialSCTPExtConfig(net, laddr, raddr, cfg.InitMsg, cfg.Control) +} diff --git a/vendor/github.com/ishidawataru/sctp/sctp_linux.go b/vendor/github.com/ishidawataru/sctp/sctp_linux.go new file mode 100644 index 00000000..ac340ddf --- /dev/null +++ b/vendor/github.com/ishidawataru/sctp/sctp_linux.go @@ -0,0 +1,305 @@ +// +build linux,!386 +// Copyright 2019 Wataru Ishida. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +// implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package sctp + +import ( + "io" + "net" + "sync/atomic" + "syscall" + "unsafe" +) + +func setsockopt(fd int, optname, optval, optlen uintptr) (uintptr, uintptr, error) { + // FIXME: syscall.SYS_SETSOCKOPT is undefined on 386 + r0, r1, errno := syscall.Syscall6(syscall.SYS_SETSOCKOPT, + uintptr(fd), + SOL_SCTP, + optname, + optval, + optlen, + 0) + if errno != 0 { + return r0, r1, errno + } + return r0, r1, nil +} + +func getsockopt(fd int, optname, optval, optlen uintptr) (uintptr, uintptr, error) { + // FIXME: syscall.SYS_GETSOCKOPT is undefined on 386 + r0, r1, errno := syscall.Syscall6(syscall.SYS_GETSOCKOPT, + uintptr(fd), + SOL_SCTP, + optname, + optval, + optlen, + 0) + if errno != 0 { + return r0, r1, errno + } + return r0, r1, nil +} + +type rawConn struct { + sockfd int +} + +func (r rawConn) Control(f func(fd uintptr)) error { + f(uintptr(r.sockfd)) + return nil +} + +func (r rawConn) Read(f func(fd uintptr) (done bool)) error { + panic("not implemented") +} + +func (r rawConn) Write(f func(fd uintptr) (done bool)) error { + panic("not implemented") +} + +func (c *SCTPConn) SCTPWrite(b []byte, info *SndRcvInfo) (int, error) { + var cbuf []byte + if info != nil { + cmsgBuf := toBuf(info) + hdr := &syscall.Cmsghdr{ + Level: syscall.IPPROTO_SCTP, + Type: SCTP_CMSG_SNDRCV, + } + + // bitwidth of hdr.Len is platform-specific, + // so we use hdr.SetLen() rather than directly setting hdr.Len + hdr.SetLen(syscall.CmsgSpace(len(cmsgBuf))) + cbuf = append(toBuf(hdr), cmsgBuf...) + } + return syscall.SendmsgN(c.fd(), b, cbuf, nil, 0) +} + +func parseSndRcvInfo(b []byte) (*SndRcvInfo, error) { + msgs, err := syscall.ParseSocketControlMessage(b) + if err != nil { + return nil, err + } + for _, m := range msgs { + if m.Header.Level == syscall.IPPROTO_SCTP { + switch m.Header.Type { + case SCTP_CMSG_SNDRCV: + return (*SndRcvInfo)(unsafe.Pointer(&m.Data[0])), nil + } + } + } + return nil, nil +} + +func (c *SCTPConn) SCTPRead(b []byte) (int, *SndRcvInfo, error) { + oob := make([]byte, 254) + for { + n, oobn, recvflags, _, err := syscall.Recvmsg(c.fd(), b, oob, 0) + if err != nil { + return n, nil, err + } + + if n == 0 && oobn == 0 { + return 0, nil, io.EOF + } + + if recvflags&MSG_NOTIFICATION > 0 && c.notificationHandler != nil { + if err := c.notificationHandler(b[:n]); err != nil { + return 0, nil, err + } + } else { + var info *SndRcvInfo + if oobn > 0 { + info, err = parseSndRcvInfo(oob[:oobn]) + } + return n, info, err + } + } +} + +func (c *SCTPConn) Close() error { + if c != nil { + fd := atomic.SwapInt32(&c._fd, -1) + if fd > 0 { + info := &SndRcvInfo{ + Flags: SCTP_EOF, + } + c.SCTPWrite(nil, info) + syscall.Shutdown(int(fd), syscall.SHUT_RDWR) + return syscall.Close(int(fd)) + } + } + return syscall.EBADF +} + +func (c *SCTPConn) SetWriteBuffer(bytes int) error { + return syscall.SetsockoptInt(c.fd(), syscall.SOL_SOCKET, syscall.SO_SNDBUF, bytes) +} + +func (c *SCTPConn) GetWriteBuffer() (int, error) { + return syscall.GetsockoptInt(c.fd(), syscall.SOL_SOCKET, syscall.SO_SNDBUF) +} + +func (c *SCTPConn) SetReadBuffer(bytes int) error { + return syscall.SetsockoptInt(c.fd(), syscall.SOL_SOCKET, syscall.SO_RCVBUF, bytes) +} + +func (c *SCTPConn) GetReadBuffer() (int, error) { + return syscall.GetsockoptInt(c.fd(), syscall.SOL_SOCKET, syscall.SO_RCVBUF) +} + +// ListenSCTP - start listener on specified address/port +func ListenSCTP(net string, laddr *SCTPAddr) (*SCTPListener, error) { + return ListenSCTPExt(net, laddr, InitMsg{NumOstreams: SCTP_MAX_STREAM}) +} + +// ListenSCTPExt - 
start listener on specified address/port with given SCTP options +func ListenSCTPExt(network string, laddr *SCTPAddr, options InitMsg) (*SCTPListener, error) { + return listenSCTPExtConfig(network, laddr, options, nil) +} + +// listenSCTPExtConfig - start listener on specified address/port with given SCTP options and socket configuration +func listenSCTPExtConfig(network string, laddr *SCTPAddr, options InitMsg, control func(network, address string, c syscall.RawConn) error) (*SCTPListener, error) { + af, ipv6only := favoriteAddrFamily(network, laddr, nil, "listen") + sock, err := syscall.Socket( + af, + syscall.SOCK_STREAM, + syscall.IPPROTO_SCTP, + ) + if err != nil { + return nil, err + } + + // close socket on error + defer func() { + if err != nil { + syscall.Close(sock) + } + }() + if err = setDefaultSockopts(sock, af, ipv6only); err != nil { + return nil, err + } + if control != nil { + rc := rawConn{sockfd: sock} + if err = control(network, laddr.String(), rc); err != nil { + return nil, err + } + } + err = setInitOpts(sock, options) + if err != nil { + return nil, err + } + + if laddr != nil { + // If IP address and/or port was not provided so far, let's use the unspecified IPv4 or IPv6 address + if len(laddr.IPAddrs) == 0 { + if af == syscall.AF_INET { + laddr.IPAddrs = append(laddr.IPAddrs, net.IPAddr{IP: net.IPv4zero}) + } else if af == syscall.AF_INET6 { + laddr.IPAddrs = append(laddr.IPAddrs, net.IPAddr{IP: net.IPv6zero}) + } + } + err := SCTPBind(sock, laddr, SCTP_BINDX_ADD_ADDR) + if err != nil { + return nil, err + } + } + err = syscall.Listen(sock, syscall.SOMAXCONN) + if err != nil { + return nil, err + } + return &SCTPListener{ + fd: sock, + }, nil +} + +// AcceptSCTP waits for and returns the next SCTP connection to the listener. +func (ln *SCTPListener) AcceptSCTP() (*SCTPConn, error) { + fd, _, err := syscall.Accept4(ln.fd, 0) + return NewSCTPConn(fd, nil), err +} + +// Accept waits for and returns the next connection to the listener.
+func (ln *SCTPListener) Accept() (net.Conn, error) { + return ln.AcceptSCTP() +} + +func (ln *SCTPListener) Close() error { + syscall.Shutdown(ln.fd, syscall.SHUT_RDWR) + return syscall.Close(ln.fd) +} + +// DialSCTP - bind socket to laddr (if given) and connect to raddr +func DialSCTP(net string, laddr, raddr *SCTPAddr) (*SCTPConn, error) { + return DialSCTPExt(net, laddr, raddr, InitMsg{NumOstreams: SCTP_MAX_STREAM}) +} + +// DialSCTPExt - same as DialSCTP but with given SCTP options +func DialSCTPExt(network string, laddr, raddr *SCTPAddr, options InitMsg) (*SCTPConn, error) { + return dialSCTPExtConfig(network, laddr, raddr, options, nil) +} + +// dialSCTPExtConfig - same as DialSCTP but with given SCTP options and socket configuration +func dialSCTPExtConfig(network string, laddr, raddr *SCTPAddr, options InitMsg, control func(network, address string, c syscall.RawConn) error) (*SCTPConn, error) { + af, ipv6only := favoriteAddrFamily(network, laddr, raddr, "dial") + sock, err := syscall.Socket( + af, + syscall.SOCK_STREAM, + syscall.IPPROTO_SCTP, + ) + if err != nil { + return nil, err + } + + // close socket on error + defer func() { + if err != nil { + syscall.Close(sock) + } + }() + if err = setDefaultSockopts(sock, af, ipv6only); err != nil { + return nil, err + } + if control != nil { + rc := rawConn{sockfd: sock} + if err = control(network, laddr.String(), rc); err != nil { + return nil, err + } + } + err = setInitOpts(sock, options) + if err != nil { + return nil, err + } + if laddr != nil { + // If IP address and/or port was not provided so far, let's use the unspecified IPv4 or IPv6 address + if len(laddr.IPAddrs) == 0 { + if af == syscall.AF_INET { + laddr.IPAddrs = append(laddr.IPAddrs, net.IPAddr{IP: net.IPv4zero}) + } else if af == syscall.AF_INET6 { + laddr.IPAddrs = append(laddr.IPAddrs, net.IPAddr{IP: net.IPv6zero}) + } + } + err := SCTPBind(sock, laddr, SCTP_BINDX_ADD_ADDR) + if err != nil { + return nil, err + } + } + _, err = SCTPConnect(sock, raddr) + if err != nil { + return nil, err + } + return NewSCTPConn(sock, nil), nil +} diff --git a/vendor/github.com/ishidawataru/sctp/sctp_unsupported.go b/vendor/github.com/ishidawataru/sctp/sctp_unsupported.go new file mode 100644 index 00000000..118fe159 --- /dev/null +++ b/vendor/github.com/ishidawataru/sctp/sctp_unsupported.go @@ -0,0 +1,98 @@ +// +build !linux linux,386 +// Copyright 2019 Wataru Ishida. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +// implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package sctp + +import ( + "errors" + "net" + "runtime" + "syscall" +) + +var ErrUnsupported = errors.New("SCTP is unsupported on " + runtime.GOOS + "/" + runtime.GOARCH) + +func setsockopt(fd int, optname, optval, optlen uintptr) (uintptr, uintptr, error) { + return 0, 0, ErrUnsupported +} + +func getsockopt(fd int, optname, optval, optlen uintptr) (uintptr, uintptr, error) { + return 0, 0, ErrUnsupported +} + +func (c *SCTPConn) SCTPWrite(b []byte, info *SndRcvInfo) (int, error) { + return 0, ErrUnsupported +} + +func (c *SCTPConn) SCTPRead(b []byte) (int, *SndRcvInfo, error) { + return 0, nil, ErrUnsupported +} + +func (c *SCTPConn) Close() error { + return ErrUnsupported +} + +func (c *SCTPConn) SetWriteBuffer(bytes int) error { + return ErrUnsupported +} + +func (c *SCTPConn) GetWriteBuffer() (int, error) { + return 0, ErrUnsupported +} + +func (c *SCTPConn) SetReadBuffer(bytes int) error { + return ErrUnsupported +} + +func (c *SCTPConn) GetReadBuffer() (int, error) { + return 0, ErrUnsupported +} + +func ListenSCTP(net string, laddr *SCTPAddr) (*SCTPListener, error) { + return nil, ErrUnsupported +} + +func ListenSCTPExt(net string, laddr *SCTPAddr, options InitMsg) (*SCTPListener, error) { + return nil, ErrUnsupported +} + +func listenSCTPExtConfig(network string, laddr *SCTPAddr, options InitMsg, control func(network, address string, c syscall.RawConn) error) (*SCTPListener, error) { + return nil, ErrUnsupported +} + +func (ln *SCTPListener) Accept() (net.Conn, error) { + return nil, ErrUnsupported +} + +func (ln *SCTPListener) AcceptSCTP() (*SCTPConn, error) { + return nil, ErrUnsupported +} + +func (ln *SCTPListener) Close() error { + return ErrUnsupported +} + +func DialSCTP(net string, laddr, raddr *SCTPAddr) (*SCTPConn, error) { + return nil, ErrUnsupported +} + +func DialSCTPExt(network string, laddr, raddr *SCTPAddr, options InitMsg) (*SCTPConn, error) { + return nil, ErrUnsupported +} + +func dialSCTPExtConfig(network string, laddr, raddr *SCTPAddr, options InitMsg, control func(network, address string, c syscall.RawConn) error) (*SCTPConn, error) { + return nil, ErrUnsupported +} diff --git a/vendor/github.com/mitchellh/hashstructure/LICENSE b/vendor/github.com/mitchellh/hashstructure/LICENSE new file mode 100644 index 00000000..a3866a29 --- /dev/null +++ b/vendor/github.com/mitchellh/hashstructure/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
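For orientation, the dial/listen surface of the vendored package above is small: ResolveSCTPAddr parses `/`-separated candidate addresses, SocketConfig carries the association InitMsg, and Listen/DialSCTP wrap the raw socket calls in sctp_linux.go. Below is a minimal loopback sketch of how these pieces fit together, assuming a Linux kernel with SCTP support (elsewhere the sctp_unsupported.go stubs return ErrUnsupported); the address, port 0, and stream counts are illustrative, not part of the vendored code.

```go
package main

import (
	"log"

	"github.com/ishidawataru/sctp"
)

func main() {
	// Port 0 lets the kernel pick a free port; multi-homed candidates
	// would be separated by '/', e.g. "10.10.0.1/10.20.0.1:2000".
	laddr, err := sctp.ResolveSCTPAddr("sctp", "127.0.0.1:0")
	if err != nil {
		log.Fatal(err)
	}

	// SocketConfig carries the association-init parameters (InitMsg).
	cfg := sctp.SocketConfig{InitMsg: sctp.InitMsg{NumOstreams: 8, MaxInstreams: 8}}
	ln, err := cfg.Listen("sctp", laddr)
	if err != nil {
		log.Fatal(err)
	}
	defer ln.Close()

	done := make(chan struct{})
	go func() {
		defer close(done)
		conn, err := ln.Accept()
		if err != nil {
			return
		}
		defer conn.Close()
		buf := make([]byte, 256)
		n, err := conn.Read(buf)
		if err != nil {
			return
		}
		log.Printf("server received: %s", buf[:n])
	}()

	// ln.Addr() reports the address actually bound, including the port.
	raddr, err := sctp.ResolveSCTPAddr("sctp", ln.Addr().String())
	if err != nil {
		log.Fatal(err)
	}
	conn, err := sctp.DialSCTP("sctp", nil, raddr)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := conn.Write([]byte("hello over SCTP")); err != nil {
		log.Fatal(err)
	}
	conn.Close()
	<-done
}
```

Passing a nil laddr to DialSCTP skips the bind step entirely, and listening on port 0 works because ln.Addr() recovers the kernel-assigned port through SCTP_GET_LOCAL_ADDRS.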
diff --git a/vendor/github.com/mitchellh/hashstructure/README.md b/vendor/github.com/mitchellh/hashstructure/README.md new file mode 100644 index 00000000..28ce45a3 --- /dev/null +++ b/vendor/github.com/mitchellh/hashstructure/README.md @@ -0,0 +1,65 @@ +# hashstructure [![GoDoc](https://godoc.org/github.com/mitchellh/hashstructure?status.svg)](https://godoc.org/github.com/mitchellh/hashstructure) + +hashstructure is a Go library for creating a unique hash value +for arbitrary values in Go. + +This can be used to key values in a hash (for use in a map, set, etc.) +that are complex. Common use cases are comparing two values without +sending data across the network, caching values locally (de-dup), and so on. + +## Features + + * Hash any arbitrary Go value, including complex types. + + * Tag a struct field to ignore it and not affect the hash value. + + * Tag a slice type struct field to treat it as a set where ordering + doesn't affect the hash code but the field itself is still taken into + account to create the hash value. + + * Optionally specify a custom hash function to optimize for speed, collision + avoidance for your data set, etc. + + * Optionally hash the output of `.String()` on structs that implement fmt.Stringer, + allowing effective hashing of time.Time + +## Installation + +Standard `go get`: + +``` +$ go get github.com/mitchellh/hashstructure +``` + +## Usage & Example + +For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/hashstructure). + +A quick code example is shown below: + +```go +type ComplexStruct struct { + Name string + Age uint + Metadata map[string]interface{} +} + +v := ComplexStruct{ + Name: "mitchellh", + Age: 64, + Metadata: map[string]interface{}{ + "car": true, + "location": "California", + "siblings": []string{"Bob", "John"}, + }, +} + +hash, err := hashstructure.Hash(v, nil) +if err != nil { + panic(err) +} + +fmt.Printf("%d", hash) +// Output: +// 2307517237273902113 +``` diff --git a/vendor/github.com/mitchellh/hashstructure/go.mod b/vendor/github.com/mitchellh/hashstructure/go.mod new file mode 100644 index 00000000..966582aa --- /dev/null +++ b/vendor/github.com/mitchellh/hashstructure/go.mod @@ -0,0 +1 @@ +module github.com/mitchellh/hashstructure diff --git a/vendor/github.com/mitchellh/hashstructure/hashstructure.go b/vendor/github.com/mitchellh/hashstructure/hashstructure.go new file mode 100644 index 00000000..ea13a158 --- /dev/null +++ b/vendor/github.com/mitchellh/hashstructure/hashstructure.go @@ -0,0 +1,358 @@ +package hashstructure + +import ( + "encoding/binary" + "fmt" + "hash" + "hash/fnv" + "reflect" +) + +// ErrNotStringer is returned when there's an error with hash:"string" +type ErrNotStringer struct { + Field string +} + +// Error implements error for ErrNotStringer +func (ens *ErrNotStringer) Error() string { + return fmt.Sprintf("hashstructure: %s has hash:\"string\" set, but does not implement fmt.Stringer", ens.Field) +} + +// HashOptions are options that are available for hashing. +type HashOptions struct { + // Hasher is the hash function to use. If this isn't set, it will + // default to FNV. + Hasher hash.Hash64 + + // TagName is the struct tag to look at when hashing the structure. + // By default this is "hash". + TagName string + + // ZeroNil is a flag determining if a nil pointer should be treated equal + // to the zero value of the pointed-to type. By default this is false. + ZeroNil bool +} + +// Hash returns the hash value of an arbitrary value.
+// +// If opts is nil, then default options will be used. See HashOptions +// for the default values. The same *HashOptions value cannot be used +// concurrently. None of the values within a *HashOptions struct are +// safe to read/write while hashing is being done. +// +// Notes on the value: +// +// * Unexported fields on structs are ignored and do not affect the +// hash value. +// +// * Adding an exported field to a struct with the zero value will change +// the hash value. +// +// For structs, the hashing can be controlled using tags. For example: +// +// struct { +// Name string +// UUID string `hash:"ignore"` +// } +// +// The available tag values are: +// +// * "ignore" or "-" - The field will be ignored and not affect the hash code. +// +// * "set" - The field will be treated as a set, where ordering doesn't +// affect the hash code. This only works for slices. +// +// * "string" - The field will be hashed as a string, only works when the +// field implements fmt.Stringer +// +func Hash(v interface{}, opts *HashOptions) (uint64, error) { + // Create default options + if opts == nil { + opts = &HashOptions{} + } + if opts.Hasher == nil { + opts.Hasher = fnv.New64() + } + if opts.TagName == "" { + opts.TagName = "hash" + } + + // Reset the hash + opts.Hasher.Reset() + + // Create our walker and walk the structure + w := &walker{ + h: opts.Hasher, + tag: opts.TagName, + zeronil: opts.ZeroNil, + } + return w.visit(reflect.ValueOf(v), nil) +} + +type walker struct { + h hash.Hash64 + tag string + zeronil bool +} + +type visitOpts struct { + // Flags are a bitmask of flags to affect behavior of this visit + Flags visitFlag + + // Information about the struct containing this field + Struct interface{} + StructField string +} + +func (w *walker) visit(v reflect.Value, opts *visitOpts) (uint64, error) { + t := reflect.TypeOf(0) + + // Loop since these can be wrapped in multiple layers of pointers + // and interfaces. + for { + // If we have an interface, dereference it. We have to do this up + // here because it might be a nil in there and the check below must + // catch that. + if v.Kind() == reflect.Interface { + v = v.Elem() + continue + } + + if v.Kind() == reflect.Ptr { + if w.zeronil { + t = v.Type().Elem() + } + v = reflect.Indirect(v) + continue + } + + break + } + + // If it is nil, treat it like a zero. + if !v.IsValid() { + v = reflect.Zero(t) + } + + // Binary writing can use raw ints, we have to convert to + // a sized-int, we'll choose the largest... + switch v.Kind() { + case reflect.Int: + v = reflect.ValueOf(int64(v.Int())) + case reflect.Uint: + v = reflect.ValueOf(uint64(v.Uint())) + case reflect.Bool: + var tmp int8 + if v.Bool() { + tmp = 1 + } + v = reflect.ValueOf(tmp) + } + + k := v.Kind() + + // We can shortcut numeric values by directly binary writing them + if k >= reflect.Int && k <= reflect.Complex64 { + // A direct hash calculation + w.h.Reset() + err := binary.Write(w.h, binary.LittleEndian, v.Interface()) + return w.h.Sum64(), err + } + + switch k { + case reflect.Array: + var h uint64 + l := v.Len() + for i := 0; i < l; i++ { + current, err := w.visit(v.Index(i), nil) + if err != nil { + return 0, err + } + + h = hashUpdateOrdered(w.h, h, current) + } + + return h, nil + + case reflect.Map: + var includeMap IncludableMap + if opts != nil && opts.Struct != nil { + if v, ok := opts.Struct.(IncludableMap); ok { + includeMap = v + } + } + + // Build the hash for the map. We do this by XOR-ing all the key + // and value hashes. 
This makes it deterministic despite ordering. + var h uint64 + for _, k := range v.MapKeys() { + v := v.MapIndex(k) + if includeMap != nil { + incl, err := includeMap.HashIncludeMap( + opts.StructField, k.Interface(), v.Interface()) + if err != nil { + return 0, err + } + if !incl { + continue + } + } + + kh, err := w.visit(k, nil) + if err != nil { + return 0, err + } + vh, err := w.visit(v, nil) + if err != nil { + return 0, err + } + + fieldHash := hashUpdateOrdered(w.h, kh, vh) + h = hashUpdateUnordered(h, fieldHash) + } + + return h, nil + + case reflect.Struct: + parent := v.Interface() + var include Includable + if impl, ok := parent.(Includable); ok { + include = impl + } + + t := v.Type() + h, err := w.visit(reflect.ValueOf(t.Name()), nil) + if err != nil { + return 0, err + } + + l := v.NumField() + for i := 0; i < l; i++ { + if innerV := v.Field(i); v.CanSet() || t.Field(i).Name != "_" { + var f visitFlag + fieldType := t.Field(i) + if fieldType.PkgPath != "" { + // Unexported + continue + } + + tag := fieldType.Tag.Get(w.tag) + if tag == "ignore" || tag == "-" { + // Ignore this field + continue + } + + // if string is set, use the string value + if tag == "string" { + if impl, ok := innerV.Interface().(fmt.Stringer); ok { + innerV = reflect.ValueOf(impl.String()) + } else { + return 0, &ErrNotStringer{ + Field: v.Type().Field(i).Name, + } + } + } + + // Check if we implement includable and check it + if include != nil { + incl, err := include.HashInclude(fieldType.Name, innerV) + if err != nil { + return 0, err + } + if !incl { + continue + } + } + + switch tag { + case "set": + f |= visitFlagSet + } + + kh, err := w.visit(reflect.ValueOf(fieldType.Name), nil) + if err != nil { + return 0, err + } + + vh, err := w.visit(innerV, &visitOpts{ + Flags: f, + Struct: parent, + StructField: fieldType.Name, + }) + if err != nil { + return 0, err + } + + fieldHash := hashUpdateOrdered(w.h, kh, vh) + h = hashUpdateUnordered(h, fieldHash) + } + } + + return h, nil + + case reflect.Slice: + // We have two behaviors here. If it isn't a set, then we just + // visit all the elements. If it is a set, then we do a deterministic + // hash code. + var h uint64 + var set bool + if opts != nil { + set = (opts.Flags & visitFlagSet) != 0 + } + l := v.Len() + for i := 0; i < l; i++ { + current, err := w.visit(v.Index(i), nil) + if err != nil { + return 0, err + } + + if set { + h = hashUpdateUnordered(h, current) + } else { + h = hashUpdateOrdered(w.h, h, current) + } + } + + return h, nil + + case reflect.String: + // Directly hash + w.h.Reset() + _, err := w.h.Write([]byte(v.String())) + return w.h.Sum64(), err + + default: + return 0, fmt.Errorf("unknown kind to hash: %s", k) + } + +} + +func hashUpdateOrdered(h hash.Hash64, a, b uint64) uint64 { + // For ordered updates, use a real hash function + h.Reset() + + // We just panic if the binary writes fail because we are writing + // an int64 which should never be fail-able. 
+ e1 := binary.Write(h, binary.LittleEndian, a) + e2 := binary.Write(h, binary.LittleEndian, b) + if e1 != nil { + panic(e1) + } + if e2 != nil { + panic(e2) + } + + return h.Sum64() +} + +func hashUpdateUnordered(a, b uint64) uint64 { + return a ^ b +} + +// visitFlag is used as a bitmask for affecting visit behavior +type visitFlag uint + +const ( + visitFlagInvalid visitFlag = iota + visitFlagSet = iota << 1 +) diff --git a/vendor/github.com/mitchellh/hashstructure/include.go b/vendor/github.com/mitchellh/hashstructure/include.go new file mode 100644 index 00000000..b6289c0b --- /dev/null +++ b/vendor/github.com/mitchellh/hashstructure/include.go @@ -0,0 +1,15 @@ +package hashstructure + +// Includable is an interface that can optionally be implemented by +// a struct. It will be called for each field in the struct to check whether +// it should be included in the hash. +type Includable interface { + HashInclude(field string, v interface{}) (bool, error) +} + +// IncludableMap is an interface that can optionally be implemented by +// a struct. It will be called when a map-type field is found to ask the +// struct if the map item should be included in the hash. +type IncludableMap interface { + HashIncludeMap(field string, k, v interface{}) (bool, error) +} diff --git a/vendor/github.com/moby/buildkit/AUTHORS b/vendor/github.com/moby/buildkit/AUTHORS new file mode 100644 index 00000000..c1dce655 --- /dev/null +++ b/vendor/github.com/moby/buildkit/AUTHORS @@ -0,0 +1,66 @@ +# This file lists all individuals having contributed content to the repository. +# For how it is generated, see `scripts/generate-authors.sh`. + +Aaron L. Xu +Aaron Lehmann +Akihiro Suda +Alexander Morozov +Alice Frosi +Allen Sun +Anda Xu +Anthony Sottile +Arnaud Bailly +Bin Liu +Brian Goff +Daniel Nephin +Dave Chen +David Calavera +Dennis Chen +Derek McGowan +Doug Davis +Edgar Lee +Eli Uriegas +f0 +Fernando Miguel +Hao Hu +Helen Xie +Himanshu Pandey +Hiromu Nakamura +Ian Campbell +Iskander (Alex) Sharipov +Jean-Pierre Huynh +Jessica Frazelle +John Howard +Jonathan Stoppani +Justas Brazauskas +Justin Cormack +Kunal Kushwaha +Lajos Papp +Matt Rickard +Michael Crosby +Miyachi Katsuya +Nao YONASHIRO +Natasha Jarus +Noel Georgi <18496730+frezbo@users.noreply.github.com> +Ondrej Fabry +Patrick Van Stee +Ri Xu +Sebastiaan van Stijn +Shev Yan +Simon Ferquel +Stefan Weil +Thomas Leonard +Thomas Shaw +Tibor Vass +Tiffany Jernigan +Tino Rusch +Tobias Klauser +Tomas Tomecek +Tomohiro Kusumoto +Tõnis Tiigi +Vincent Demeester +Wei Fu +Yong Tang +Yuichiro Kaneko +Ziv Tsarfati +郑泽宇 diff --git a/vendor/github.com/moby/buildkit/LICENSE b/vendor/github.com/moby/buildkit/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/github.com/moby/buildkit/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
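The two hashstructure files above drive everything from struct tags (hash:"set", hash:"string", hash:"ignore" / hash:"-") and the Includable/IncludableMap hooks, combining element hashes with XOR (hashUpdateUnordered) wherever ordering must not matter. As a minimal sketch of how a consumer leans on that — assuming the package's public v1 entry point hashstructure.Hash, whose definition precedes this excerpt:

package main

import (
	"fmt"

	"github.com/mitchellh/hashstructure"
)

type Spec struct {
	Name     string
	Packages []string `hash:"set"`    // hashed order-independently via hashUpdateUnordered
	Scratch  string   `hash:"ignore"` // skipped entirely by the struct walker
}

func main() {
	a := Spec{Name: "foo", Packages: []string{"x", "y"}, Scratch: "tmp"}
	b := Spec{Name: "foo", Packages: []string{"y", "x"}, Scratch: "other"}

	ha, _ := hashstructure.Hash(a, nil)
	hb, _ := hashstructure.Hash(b, nil)
	fmt.Println(ha == hb) // true: set ordering and ignored fields don't affect the hash
}

The XOR combination is what makes the map and "set" cases order-insensitive: XOR is commutative and associative, so visiting elements in any iteration order yields the same value.

The next vendored file, control.pb.go, is the gogo-generated moby.buildkit.v1 Control API (DiskUsage, Prune, Solve, Status, Session, ListWorkers). As a rough, hypothetical illustration of the client side (caller code, not part of the generated file; cc stands in for an already-dialed *grpc.ClientConn):

package main

import (
	"context"
	"fmt"

	controlapi "github.com/moby/buildkit/api/services/control"
	"google.golang.org/grpc"
)

func inspectCache(ctx context.Context, cc *grpc.ClientConn) error {
	c := controlapi.NewControlClient(cc)

	// Unary RPC: one request, one DiskUsageResponse.
	du, err := c.DiskUsage(ctx, &controlapi.DiskUsageRequest{})
	if err != nil {
		return err
	}
	for _, rec := range du.GetRecord() {
		fmt.Println(rec.GetID(), rec.GetSize_(), rec.GetDescription())
	}

	// Server-streaming RPC: Prune sends one UsageRecord per deleted entry.
	pruneStream, err := c.Prune(ctx, &controlapi.PruneRequest{All: true})
	if err != nil {
		return err
	}
	for {
		rec, err := pruneStream.Recv()
		if err != nil { // io.EOF once the daemon finishes pruning
			break
		}
		fmt.Println("pruned", rec.GetID())
	}
	return nil
}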
diff --git a/vendor/github.com/moby/buildkit/api/services/control/control.pb.go b/vendor/github.com/moby/buildkit/api/services/control/control.pb.go new file mode 100644 index 00000000..fdc10596 --- /dev/null +++ b/vendor/github.com/moby/buildkit/api/services/control/control.pb.go @@ -0,0 +1,6477 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: control.proto + +package moby_buildkit_v1 + +import ( + context "context" + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" + _ "github.com/golang/protobuf/ptypes/timestamp" + types "github.com/moby/buildkit/api/types" + pb "github.com/moby/buildkit/solver/pb" + github_com_moby_buildkit_util_entitlements "github.com/moby/buildkit/util/entitlements" + github_com_opencontainers_go_digest "github.com/opencontainers/go-digest" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" + time "time" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type PruneRequest struct { + Filter []string `protobuf:"bytes,1,rep,name=filter,proto3" json:"filter,omitempty"` + All bool `protobuf:"varint,2,opt,name=all,proto3" json:"all,omitempty"` + KeepDuration int64 `protobuf:"varint,3,opt,name=keepDuration,proto3" json:"keepDuration,omitempty"` + KeepBytes int64 `protobuf:"varint,4,opt,name=keepBytes,proto3" json:"keepBytes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PruneRequest) Reset() { *m = PruneRequest{} } +func (m *PruneRequest) String() string { return proto.CompactTextString(m) } +func (*PruneRequest) ProtoMessage() {} +func (*PruneRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_0c5120591600887d, []int{0} +} +func (m *PruneRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PruneRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PruneRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PruneRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_PruneRequest.Merge(m, src) +} +func (m *PruneRequest) XXX_Size() int { + return m.Size() +} +func (m *PruneRequest) XXX_DiscardUnknown() { + xxx_messageInfo_PruneRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_PruneRequest proto.InternalMessageInfo + +func (m *PruneRequest) GetFilter() []string { + if m != nil { + return m.Filter + } + return nil +} + +func (m *PruneRequest) GetAll() bool { + if m != nil { + return m.All + } + return false +} + +func (m *PruneRequest) GetKeepDuration() int64 { + if m != nil { + return m.KeepDuration + } + return 0 +} + +func (m *PruneRequest) GetKeepBytes() int64 { + if m != nil { + return m.KeepBytes + } + return 0 +} + +type DiskUsageRequest struct { + Filter []string 
`protobuf:"bytes,1,rep,name=filter,proto3" json:"filter,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DiskUsageRequest) Reset() { *m = DiskUsageRequest{} } +func (m *DiskUsageRequest) String() string { return proto.CompactTextString(m) } +func (*DiskUsageRequest) ProtoMessage() {} +func (*DiskUsageRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_0c5120591600887d, []int{1} +} +func (m *DiskUsageRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DiskUsageRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DiskUsageRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DiskUsageRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DiskUsageRequest.Merge(m, src) +} +func (m *DiskUsageRequest) XXX_Size() int { + return m.Size() +} +func (m *DiskUsageRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DiskUsageRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DiskUsageRequest proto.InternalMessageInfo + +func (m *DiskUsageRequest) GetFilter() []string { + if m != nil { + return m.Filter + } + return nil +} + +type DiskUsageResponse struct { + Record []*UsageRecord `protobuf:"bytes,1,rep,name=record,proto3" json:"record,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DiskUsageResponse) Reset() { *m = DiskUsageResponse{} } +func (m *DiskUsageResponse) String() string { return proto.CompactTextString(m) } +func (*DiskUsageResponse) ProtoMessage() {} +func (*DiskUsageResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_0c5120591600887d, []int{2} +} +func (m *DiskUsageResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DiskUsageResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DiskUsageResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DiskUsageResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_DiskUsageResponse.Merge(m, src) +} +func (m *DiskUsageResponse) XXX_Size() int { + return m.Size() +} +func (m *DiskUsageResponse) XXX_DiscardUnknown() { + xxx_messageInfo_DiskUsageResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_DiskUsageResponse proto.InternalMessageInfo + +func (m *DiskUsageResponse) GetRecord() []*UsageRecord { + if m != nil { + return m.Record + } + return nil +} + +type UsageRecord struct { + ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` + Mutable bool `protobuf:"varint,2,opt,name=Mutable,proto3" json:"Mutable,omitempty"` + InUse bool `protobuf:"varint,3,opt,name=InUse,proto3" json:"InUse,omitempty"` + Size_ int64 `protobuf:"varint,4,opt,name=Size,proto3" json:"Size,omitempty"` + Parent string `protobuf:"bytes,5,opt,name=Parent,proto3" json:"Parent,omitempty"` + CreatedAt time.Time `protobuf:"bytes,6,opt,name=CreatedAt,proto3,stdtime" json:"CreatedAt"` + LastUsedAt *time.Time `protobuf:"bytes,7,opt,name=LastUsedAt,proto3,stdtime" json:"LastUsedAt,omitempty"` + UsageCount int64 `protobuf:"varint,8,opt,name=UsageCount,proto3" json:"UsageCount,omitempty"` + Description string 
`protobuf:"bytes,9,opt,name=Description,proto3" json:"Description,omitempty"` + RecordType string `protobuf:"bytes,10,opt,name=RecordType,proto3" json:"RecordType,omitempty"` + Shared bool `protobuf:"varint,11,opt,name=Shared,proto3" json:"Shared,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UsageRecord) Reset() { *m = UsageRecord{} } +func (m *UsageRecord) String() string { return proto.CompactTextString(m) } +func (*UsageRecord) ProtoMessage() {} +func (*UsageRecord) Descriptor() ([]byte, []int) { + return fileDescriptor_0c5120591600887d, []int{3} +} +func (m *UsageRecord) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UsageRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_UsageRecord.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *UsageRecord) XXX_Merge(src proto.Message) { + xxx_messageInfo_UsageRecord.Merge(m, src) +} +func (m *UsageRecord) XXX_Size() int { + return m.Size() +} +func (m *UsageRecord) XXX_DiscardUnknown() { + xxx_messageInfo_UsageRecord.DiscardUnknown(m) +} + +var xxx_messageInfo_UsageRecord proto.InternalMessageInfo + +func (m *UsageRecord) GetID() string { + if m != nil { + return m.ID + } + return "" +} + +func (m *UsageRecord) GetMutable() bool { + if m != nil { + return m.Mutable + } + return false +} + +func (m *UsageRecord) GetInUse() bool { + if m != nil { + return m.InUse + } + return false +} + +func (m *UsageRecord) GetSize_() int64 { + if m != nil { + return m.Size_ + } + return 0 +} + +func (m *UsageRecord) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *UsageRecord) GetCreatedAt() time.Time { + if m != nil { + return m.CreatedAt + } + return time.Time{} +} + +func (m *UsageRecord) GetLastUsedAt() *time.Time { + if m != nil { + return m.LastUsedAt + } + return nil +} + +func (m *UsageRecord) GetUsageCount() int64 { + if m != nil { + return m.UsageCount + } + return 0 +} + +func (m *UsageRecord) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *UsageRecord) GetRecordType() string { + if m != nil { + return m.RecordType + } + return "" +} + +func (m *UsageRecord) GetShared() bool { + if m != nil { + return m.Shared + } + return false +} + +type SolveRequest struct { + Ref string `protobuf:"bytes,1,opt,name=Ref,proto3" json:"Ref,omitempty"` + Definition *pb.Definition `protobuf:"bytes,2,opt,name=Definition,proto3" json:"Definition,omitempty"` + Exporter string `protobuf:"bytes,3,opt,name=Exporter,proto3" json:"Exporter,omitempty"` + ExporterAttrs map[string]string `protobuf:"bytes,4,rep,name=ExporterAttrs,proto3" json:"ExporterAttrs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Session string `protobuf:"bytes,5,opt,name=Session,proto3" json:"Session,omitempty"` + Frontend string `protobuf:"bytes,6,opt,name=Frontend,proto3" json:"Frontend,omitempty"` + FrontendAttrs map[string]string `protobuf:"bytes,7,rep,name=FrontendAttrs,proto3" json:"FrontendAttrs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Cache CacheOptions `protobuf:"bytes,8,opt,name=Cache,proto3" json:"Cache"` + Entitlements []github_com_moby_buildkit_util_entitlements.Entitlement 
`protobuf:"bytes,9,rep,name=Entitlements,proto3,customtype=github.com/moby/buildkit/util/entitlements.Entitlement" json:"Entitlements,omitempty"`
+	FrontendInputs       map[string]*pb.Definition `protobuf:"bytes,10,rep,name=FrontendInputs,proto3" json:"FrontendInputs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{}                  `json:"-"`
+	XXX_unrecognized     []byte                    `json:"-"`
+	XXX_sizecache        int32                     `json:"-"`
+}
+
+func (m *SolveRequest) Reset()         { *m = SolveRequest{} }
+func (m *SolveRequest) String() string { return proto.CompactTextString(m) }
+func (*SolveRequest) ProtoMessage()    {}
+func (*SolveRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_0c5120591600887d, []int{4}
+}
+func (m *SolveRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *SolveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_SolveRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *SolveRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SolveRequest.Merge(m, src)
+}
+func (m *SolveRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *SolveRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_SolveRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SolveRequest proto.InternalMessageInfo
+
+func (m *SolveRequest) GetRef() string {
+	if m != nil {
+		return m.Ref
+	}
+	return ""
+}
+
+func (m *SolveRequest) GetDefinition() *pb.Definition {
+	if m != nil {
+		return m.Definition
+	}
+	return nil
+}
+
+func (m *SolveRequest) GetExporter() string {
+	if m != nil {
+		return m.Exporter
+	}
+	return ""
+}
+
+func (m *SolveRequest) GetExporterAttrs() map[string]string {
+	if m != nil {
+		return m.ExporterAttrs
+	}
+	return nil
+}
+
+func (m *SolveRequest) GetSession() string {
+	if m != nil {
+		return m.Session
+	}
+	return ""
+}
+
+func (m *SolveRequest) GetFrontend() string {
+	if m != nil {
+		return m.Frontend
+	}
+	return ""
+}
+
+func (m *SolveRequest) GetFrontendAttrs() map[string]string {
+	if m != nil {
+		return m.FrontendAttrs
+	}
+	return nil
+}
+
+func (m *SolveRequest) GetCache() CacheOptions {
+	if m != nil {
+		return m.Cache
+	}
+	return CacheOptions{}
+}
+
+func (m *SolveRequest) GetFrontendInputs() map[string]*pb.Definition {
+	if m != nil {
+		return m.FrontendInputs
+	}
+	return nil
+}
+
+type CacheOptions struct {
+	// ExportRefDeprecated is deprecated in favor of the new Exports since BuildKit v0.4.0.
+	// When ExportRefDeprecated is set, the solver appends
+	// {.Type = "registry", .Attrs = ExportAttrs.add("ref", ExportRef)}
+	// to Exports for compatibility. (planned to be removed)
+	ExportRefDeprecated string `protobuf:"bytes,1,opt,name=ExportRefDeprecated,proto3" json:"ExportRefDeprecated,omitempty"`
+	// ImportRefsDeprecated is deprecated in favor of the new Imports since BuildKit v0.4.0.
+	// When ImportRefsDeprecated is set, the solver appends
+	// {.Type = "registry", .Attrs = {"ref": importRef}}
+	// for each ImportRefs entry to Imports for compatibility. (planned to be removed)
+	ImportRefsDeprecated []string `protobuf:"bytes,2,rep,name=ImportRefsDeprecated,proto3" json:"ImportRefsDeprecated,omitempty"`
+	// ExportAttrsDeprecated is deprecated since BuildKit v0.4.0.
+	// See the description of ExportRefDeprecated.
+ ExportAttrsDeprecated map[string]string `protobuf:"bytes,3,rep,name=ExportAttrsDeprecated,proto3" json:"ExportAttrsDeprecated,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Exports was introduced in BuildKit v0.4.0. + Exports []*CacheOptionsEntry `protobuf:"bytes,4,rep,name=Exports,proto3" json:"Exports,omitempty"` + // Imports was introduced in BuildKit v0.4.0. + Imports []*CacheOptionsEntry `protobuf:"bytes,5,rep,name=Imports,proto3" json:"Imports,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CacheOptions) Reset() { *m = CacheOptions{} } +func (m *CacheOptions) String() string { return proto.CompactTextString(m) } +func (*CacheOptions) ProtoMessage() {} +func (*CacheOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_0c5120591600887d, []int{5} +} +func (m *CacheOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CacheOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CacheOptions.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CacheOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_CacheOptions.Merge(m, src) +} +func (m *CacheOptions) XXX_Size() int { + return m.Size() +} +func (m *CacheOptions) XXX_DiscardUnknown() { + xxx_messageInfo_CacheOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_CacheOptions proto.InternalMessageInfo + +func (m *CacheOptions) GetExportRefDeprecated() string { + if m != nil { + return m.ExportRefDeprecated + } + return "" +} + +func (m *CacheOptions) GetImportRefsDeprecated() []string { + if m != nil { + return m.ImportRefsDeprecated + } + return nil +} + +func (m *CacheOptions) GetExportAttrsDeprecated() map[string]string { + if m != nil { + return m.ExportAttrsDeprecated + } + return nil +} + +func (m *CacheOptions) GetExports() []*CacheOptionsEntry { + if m != nil { + return m.Exports + } + return nil +} + +func (m *CacheOptions) GetImports() []*CacheOptionsEntry { + if m != nil { + return m.Imports + } + return nil +} + +type CacheOptionsEntry struct { + // Type is like "registry" or "local" + Type string `protobuf:"bytes,1,opt,name=Type,proto3" json:"Type,omitempty"` + // Attrs are like mode=(min,max), ref=example.com:5000/foo/bar . + // See cache importer/exporter implementations' documentation. 
+ Attrs map[string]string `protobuf:"bytes,2,rep,name=Attrs,proto3" json:"Attrs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CacheOptionsEntry) Reset() { *m = CacheOptionsEntry{} } +func (m *CacheOptionsEntry) String() string { return proto.CompactTextString(m) } +func (*CacheOptionsEntry) ProtoMessage() {} +func (*CacheOptionsEntry) Descriptor() ([]byte, []int) { + return fileDescriptor_0c5120591600887d, []int{6} +} +func (m *CacheOptionsEntry) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CacheOptionsEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CacheOptionsEntry.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CacheOptionsEntry) XXX_Merge(src proto.Message) { + xxx_messageInfo_CacheOptionsEntry.Merge(m, src) +} +func (m *CacheOptionsEntry) XXX_Size() int { + return m.Size() +} +func (m *CacheOptionsEntry) XXX_DiscardUnknown() { + xxx_messageInfo_CacheOptionsEntry.DiscardUnknown(m) +} + +var xxx_messageInfo_CacheOptionsEntry proto.InternalMessageInfo + +func (m *CacheOptionsEntry) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *CacheOptionsEntry) GetAttrs() map[string]string { + if m != nil { + return m.Attrs + } + return nil +} + +type SolveResponse struct { + ExporterResponse map[string]string `protobuf:"bytes,1,rep,name=ExporterResponse,proto3" json:"ExporterResponse,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SolveResponse) Reset() { *m = SolveResponse{} } +func (m *SolveResponse) String() string { return proto.CompactTextString(m) } +func (*SolveResponse) ProtoMessage() {} +func (*SolveResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_0c5120591600887d, []int{7} +} +func (m *SolveResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SolveResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SolveResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SolveResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SolveResponse.Merge(m, src) +} +func (m *SolveResponse) XXX_Size() int { + return m.Size() +} +func (m *SolveResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SolveResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SolveResponse proto.InternalMessageInfo + +func (m *SolveResponse) GetExporterResponse() map[string]string { + if m != nil { + return m.ExporterResponse + } + return nil +} + +type StatusRequest struct { + Ref string `protobuf:"bytes,1,opt,name=Ref,proto3" json:"Ref,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StatusRequest) Reset() { *m = StatusRequest{} } +func (m *StatusRequest) String() string { return proto.CompactTextString(m) } +func (*StatusRequest) ProtoMessage() {} +func (*StatusRequest) Descriptor() ([]byte, []int) { + return 
fileDescriptor_0c5120591600887d, []int{8} +} +func (m *StatusRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StatusRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *StatusRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatusRequest.Merge(m, src) +} +func (m *StatusRequest) XXX_Size() int { + return m.Size() +} +func (m *StatusRequest) XXX_DiscardUnknown() { + xxx_messageInfo_StatusRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_StatusRequest proto.InternalMessageInfo + +func (m *StatusRequest) GetRef() string { + if m != nil { + return m.Ref + } + return "" +} + +type StatusResponse struct { + Vertexes []*Vertex `protobuf:"bytes,1,rep,name=vertexes,proto3" json:"vertexes,omitempty"` + Statuses []*VertexStatus `protobuf:"bytes,2,rep,name=statuses,proto3" json:"statuses,omitempty"` + Logs []*VertexLog `protobuf:"bytes,3,rep,name=logs,proto3" json:"logs,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StatusResponse) Reset() { *m = StatusResponse{} } +func (m *StatusResponse) String() string { return proto.CompactTextString(m) } +func (*StatusResponse) ProtoMessage() {} +func (*StatusResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_0c5120591600887d, []int{9} +} +func (m *StatusResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatusResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StatusResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *StatusResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatusResponse.Merge(m, src) +} +func (m *StatusResponse) XXX_Size() int { + return m.Size() +} +func (m *StatusResponse) XXX_DiscardUnknown() { + xxx_messageInfo_StatusResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_StatusResponse proto.InternalMessageInfo + +func (m *StatusResponse) GetVertexes() []*Vertex { + if m != nil { + return m.Vertexes + } + return nil +} + +func (m *StatusResponse) GetStatuses() []*VertexStatus { + if m != nil { + return m.Statuses + } + return nil +} + +func (m *StatusResponse) GetLogs() []*VertexLog { + if m != nil { + return m.Logs + } + return nil +} + +type Vertex struct { + Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,1,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"` + Inputs []github_com_opencontainers_go_digest.Digest `protobuf:"bytes,2,rep,name=inputs,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"inputs"` + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + Cached bool `protobuf:"varint,4,opt,name=cached,proto3" json:"cached,omitempty"` + Started *time.Time `protobuf:"bytes,5,opt,name=started,proto3,stdtime" json:"started,omitempty"` + Completed *time.Time `protobuf:"bytes,6,opt,name=completed,proto3,stdtime" json:"completed,omitempty"` + Error string `protobuf:"bytes,7,opt,name=error,proto3" json:"error,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 
`json:"-"` +} + +func (m *Vertex) Reset() { *m = Vertex{} } +func (m *Vertex) String() string { return proto.CompactTextString(m) } +func (*Vertex) ProtoMessage() {} +func (*Vertex) Descriptor() ([]byte, []int) { + return fileDescriptor_0c5120591600887d, []int{10} +} +func (m *Vertex) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Vertex) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Vertex.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Vertex) XXX_Merge(src proto.Message) { + xxx_messageInfo_Vertex.Merge(m, src) +} +func (m *Vertex) XXX_Size() int { + return m.Size() +} +func (m *Vertex) XXX_DiscardUnknown() { + xxx_messageInfo_Vertex.DiscardUnknown(m) +} + +var xxx_messageInfo_Vertex proto.InternalMessageInfo + +func (m *Vertex) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Vertex) GetCached() bool { + if m != nil { + return m.Cached + } + return false +} + +func (m *Vertex) GetStarted() *time.Time { + if m != nil { + return m.Started + } + return nil +} + +func (m *Vertex) GetCompleted() *time.Time { + if m != nil { + return m.Completed + } + return nil +} + +func (m *Vertex) GetError() string { + if m != nil { + return m.Error + } + return "" +} + +type VertexStatus struct { + ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` + Vertex github_com_opencontainers_go_digest.Digest `protobuf:"bytes,2,opt,name=vertex,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"vertex"` + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + Current int64 `protobuf:"varint,4,opt,name=current,proto3" json:"current,omitempty"` + Total int64 `protobuf:"varint,5,opt,name=total,proto3" json:"total,omitempty"` + // TODO: add started, completed + Timestamp time.Time `protobuf:"bytes,6,opt,name=timestamp,proto3,stdtime" json:"timestamp"` + Started *time.Time `protobuf:"bytes,7,opt,name=started,proto3,stdtime" json:"started,omitempty"` + Completed *time.Time `protobuf:"bytes,8,opt,name=completed,proto3,stdtime" json:"completed,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VertexStatus) Reset() { *m = VertexStatus{} } +func (m *VertexStatus) String() string { return proto.CompactTextString(m) } +func (*VertexStatus) ProtoMessage() {} +func (*VertexStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_0c5120591600887d, []int{11} +} +func (m *VertexStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VertexStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_VertexStatus.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *VertexStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_VertexStatus.Merge(m, src) +} +func (m *VertexStatus) XXX_Size() int { + return m.Size() +} +func (m *VertexStatus) XXX_DiscardUnknown() { + xxx_messageInfo_VertexStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_VertexStatus proto.InternalMessageInfo + +func (m *VertexStatus) GetID() string { + if m != nil { + return m.ID + } + return "" +} + +func (m *VertexStatus) GetName() string { + if m != nil { + return m.Name + } + return "" 
+} + +func (m *VertexStatus) GetCurrent() int64 { + if m != nil { + return m.Current + } + return 0 +} + +func (m *VertexStatus) GetTotal() int64 { + if m != nil { + return m.Total + } + return 0 +} + +func (m *VertexStatus) GetTimestamp() time.Time { + if m != nil { + return m.Timestamp + } + return time.Time{} +} + +func (m *VertexStatus) GetStarted() *time.Time { + if m != nil { + return m.Started + } + return nil +} + +func (m *VertexStatus) GetCompleted() *time.Time { + if m != nil { + return m.Completed + } + return nil +} + +type VertexLog struct { + Vertex github_com_opencontainers_go_digest.Digest `protobuf:"bytes,1,opt,name=vertex,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"vertex"` + Timestamp time.Time `protobuf:"bytes,2,opt,name=timestamp,proto3,stdtime" json:"timestamp"` + Stream int64 `protobuf:"varint,3,opt,name=stream,proto3" json:"stream,omitempty"` + Msg []byte `protobuf:"bytes,4,opt,name=msg,proto3" json:"msg,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VertexLog) Reset() { *m = VertexLog{} } +func (m *VertexLog) String() string { return proto.CompactTextString(m) } +func (*VertexLog) ProtoMessage() {} +func (*VertexLog) Descriptor() ([]byte, []int) { + return fileDescriptor_0c5120591600887d, []int{12} +} +func (m *VertexLog) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VertexLog) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_VertexLog.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *VertexLog) XXX_Merge(src proto.Message) { + xxx_messageInfo_VertexLog.Merge(m, src) +} +func (m *VertexLog) XXX_Size() int { + return m.Size() +} +func (m *VertexLog) XXX_DiscardUnknown() { + xxx_messageInfo_VertexLog.DiscardUnknown(m) +} + +var xxx_messageInfo_VertexLog proto.InternalMessageInfo + +func (m *VertexLog) GetTimestamp() time.Time { + if m != nil { + return m.Timestamp + } + return time.Time{} +} + +func (m *VertexLog) GetStream() int64 { + if m != nil { + return m.Stream + } + return 0 +} + +func (m *VertexLog) GetMsg() []byte { + if m != nil { + return m.Msg + } + return nil +} + +type BytesMessage struct { + Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BytesMessage) Reset() { *m = BytesMessage{} } +func (m *BytesMessage) String() string { return proto.CompactTextString(m) } +func (*BytesMessage) ProtoMessage() {} +func (*BytesMessage) Descriptor() ([]byte, []int) { + return fileDescriptor_0c5120591600887d, []int{13} +} +func (m *BytesMessage) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BytesMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BytesMessage.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BytesMessage) XXX_Merge(src proto.Message) { + xxx_messageInfo_BytesMessage.Merge(m, src) +} +func (m *BytesMessage) XXX_Size() int { + return m.Size() +} +func (m *BytesMessage) XXX_DiscardUnknown() { + xxx_messageInfo_BytesMessage.DiscardUnknown(m) +} + +var xxx_messageInfo_BytesMessage 
proto.InternalMessageInfo + +func (m *BytesMessage) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +type ListWorkersRequest struct { + Filter []string `protobuf:"bytes,1,rep,name=filter,proto3" json:"filter,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListWorkersRequest) Reset() { *m = ListWorkersRequest{} } +func (m *ListWorkersRequest) String() string { return proto.CompactTextString(m) } +func (*ListWorkersRequest) ProtoMessage() {} +func (*ListWorkersRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_0c5120591600887d, []int{14} +} +func (m *ListWorkersRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ListWorkersRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ListWorkersRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ListWorkersRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListWorkersRequest.Merge(m, src) +} +func (m *ListWorkersRequest) XXX_Size() int { + return m.Size() +} +func (m *ListWorkersRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListWorkersRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListWorkersRequest proto.InternalMessageInfo + +func (m *ListWorkersRequest) GetFilter() []string { + if m != nil { + return m.Filter + } + return nil +} + +type ListWorkersResponse struct { + Record []*types.WorkerRecord `protobuf:"bytes,1,rep,name=record,proto3" json:"record,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListWorkersResponse) Reset() { *m = ListWorkersResponse{} } +func (m *ListWorkersResponse) String() string { return proto.CompactTextString(m) } +func (*ListWorkersResponse) ProtoMessage() {} +func (*ListWorkersResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_0c5120591600887d, []int{15} +} +func (m *ListWorkersResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ListWorkersResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ListWorkersResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ListWorkersResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListWorkersResponse.Merge(m, src) +} +func (m *ListWorkersResponse) XXX_Size() int { + return m.Size() +} +func (m *ListWorkersResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListWorkersResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListWorkersResponse proto.InternalMessageInfo + +func (m *ListWorkersResponse) GetRecord() []*types.WorkerRecord { + if m != nil { + return m.Record + } + return nil +} + +func init() { + proto.RegisterType((*PruneRequest)(nil), "moby.buildkit.v1.PruneRequest") + proto.RegisterType((*DiskUsageRequest)(nil), "moby.buildkit.v1.DiskUsageRequest") + proto.RegisterType((*DiskUsageResponse)(nil), "moby.buildkit.v1.DiskUsageResponse") + proto.RegisterType((*UsageRecord)(nil), "moby.buildkit.v1.UsageRecord") + proto.RegisterType((*SolveRequest)(nil), "moby.buildkit.v1.SolveRequest") + proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.SolveRequest.ExporterAttrsEntry") + 
proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.SolveRequest.FrontendAttrsEntry") + proto.RegisterMapType((map[string]*pb.Definition)(nil), "moby.buildkit.v1.SolveRequest.FrontendInputsEntry") + proto.RegisterType((*CacheOptions)(nil), "moby.buildkit.v1.CacheOptions") + proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.CacheOptions.ExportAttrsDeprecatedEntry") + proto.RegisterType((*CacheOptionsEntry)(nil), "moby.buildkit.v1.CacheOptionsEntry") + proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.CacheOptionsEntry.AttrsEntry") + proto.RegisterType((*SolveResponse)(nil), "moby.buildkit.v1.SolveResponse") + proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.SolveResponse.ExporterResponseEntry") + proto.RegisterType((*StatusRequest)(nil), "moby.buildkit.v1.StatusRequest") + proto.RegisterType((*StatusResponse)(nil), "moby.buildkit.v1.StatusResponse") + proto.RegisterType((*Vertex)(nil), "moby.buildkit.v1.Vertex") + proto.RegisterType((*VertexStatus)(nil), "moby.buildkit.v1.VertexStatus") + proto.RegisterType((*VertexLog)(nil), "moby.buildkit.v1.VertexLog") + proto.RegisterType((*BytesMessage)(nil), "moby.buildkit.v1.BytesMessage") + proto.RegisterType((*ListWorkersRequest)(nil), "moby.buildkit.v1.ListWorkersRequest") + proto.RegisterType((*ListWorkersResponse)(nil), "moby.buildkit.v1.ListWorkersResponse") +} + +func init() { proto.RegisterFile("control.proto", fileDescriptor_0c5120591600887d) } + +var fileDescriptor_0c5120591600887d = []byte{ + // 1397 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x57, 0x4d, 0x6f, 0x1b, 0xc5, + 0x1b, 0xef, 0xda, 0xf1, 0xdb, 0x63, 0x27, 0x4a, 0xa7, 0xfd, 0x57, 0xab, 0xfd, 0x8b, 0xc4, 0x6c, + 0x8b, 0x64, 0x55, 0xed, 0x3a, 0x35, 0x14, 0x95, 0x08, 0x50, 0xeb, 0xb8, 0xa8, 0xa9, 0x1a, 0x51, + 0x36, 0x2d, 0x95, 0x7a, 0x40, 0x5a, 0xdb, 0x13, 0x77, 0x95, 0xf5, 0xce, 0x32, 0x33, 0x1b, 0x6a, + 0x3e, 0x00, 0x67, 0xbe, 0x03, 0x07, 0x4e, 0x9c, 0x38, 0xf0, 0x09, 0x90, 0x7a, 0xe4, 0xdc, 0x43, + 0x40, 0xb9, 0xc3, 0x9d, 0x1b, 0x9a, 0x97, 0x75, 0xc6, 0xb1, 0x9d, 0xc4, 0xe9, 0xc9, 0xf3, 0x8c, + 0x9f, 0xdf, 0x6f, 0x9f, 0xd7, 0x99, 0x79, 0x60, 0xb9, 0x47, 0x62, 0x4e, 0x49, 0xe4, 0x25, 0x94, + 0x70, 0x82, 0x56, 0x87, 0xa4, 0x3b, 0xf2, 0xba, 0x69, 0x18, 0xf5, 0xf7, 0x43, 0xee, 0x1d, 0xdc, + 0x71, 0x6e, 0x0f, 0x42, 0xfe, 0x2a, 0xed, 0x7a, 0x3d, 0x32, 0x6c, 0x0e, 0xc8, 0x80, 0x34, 0xa5, + 0x62, 0x37, 0xdd, 0x93, 0x92, 0x14, 0xe4, 0x4a, 0x11, 0x38, 0xeb, 0x03, 0x42, 0x06, 0x11, 0x3e, + 0xd6, 0xe2, 0xe1, 0x10, 0x33, 0x1e, 0x0c, 0x13, 0xad, 0x70, 0xcb, 0xe0, 0x13, 0x1f, 0x6b, 0x66, + 0x1f, 0x6b, 0x32, 0x12, 0x1d, 0x60, 0xda, 0x4c, 0xba, 0x4d, 0x92, 0x30, 0xad, 0xdd, 0x9c, 0xab, + 0x1d, 0x24, 0x61, 0x93, 0x8f, 0x12, 0xcc, 0x9a, 0xdf, 0x11, 0xba, 0x8f, 0xa9, 0x02, 0xb8, 0x3f, + 0x58, 0x50, 0x7b, 0x4a, 0xd3, 0x18, 0xfb, 0xf8, 0xdb, 0x14, 0x33, 0x8e, 0xae, 0x41, 0x71, 0x2f, + 0x8c, 0x38, 0xa6, 0xb6, 0x55, 0xcf, 0x37, 0x2a, 0xbe, 0x96, 0xd0, 0x2a, 0xe4, 0x83, 0x28, 0xb2, + 0x73, 0x75, 0xab, 0x51, 0xf6, 0xc5, 0x12, 0x35, 0xa0, 0xb6, 0x8f, 0x71, 0xd2, 0x49, 0x69, 0xc0, + 0x43, 0x12, 0xdb, 0xf9, 0xba, 0xd5, 0xc8, 0xb7, 0x97, 0xde, 0x1c, 0xae, 0x5b, 0xfe, 0xc4, 0x3f, + 0xc8, 0x85, 0x8a, 0x90, 0xdb, 0x23, 0x8e, 0x99, 0xbd, 0x64, 0xa8, 0x1d, 0x6f, 0xbb, 0x37, 0x61, + 0xb5, 0x13, 0xb2, 0xfd, 0xe7, 0x2c, 0x18, 0x9c, 0x65, 0x8b, 0xfb, 0x18, 0x2e, 0x1b, 0xba, 0x2c, + 0x21, 0x31, 0xc3, 0xe8, 0x2e, 0x14, 0x29, 0xee, 0x11, 0xda, 0x97, 0xca, 0xd5, 0xd6, 0x7b, 0xde, + 0xc9, 0xdc, 
0x78, 0x1a, 0x20, 0x94, 0x7c, 0xad, 0xec, 0xfe, 0x9b, 0x83, 0xaa, 0xb1, 0x8f, 0x56, + 0x20, 0xb7, 0xdd, 0xb1, 0xad, 0xba, 0xd5, 0xa8, 0xf8, 0xb9, 0xed, 0x0e, 0xb2, 0xa1, 0xb4, 0x93, + 0xf2, 0xa0, 0x1b, 0x61, 0xed, 0x7b, 0x26, 0xa2, 0xab, 0x50, 0xd8, 0x8e, 0x9f, 0x33, 0x2c, 0x1d, + 0x2f, 0xfb, 0x4a, 0x40, 0x08, 0x96, 0x76, 0xc3, 0xef, 0xb1, 0x72, 0xd3, 0x97, 0x6b, 0xe1, 0xc7, + 0xd3, 0x80, 0xe2, 0x98, 0xdb, 0x05, 0xc9, 0xab, 0x25, 0xd4, 0x86, 0xca, 0x16, 0xc5, 0x01, 0xc7, + 0xfd, 0x07, 0xdc, 0x2e, 0xd6, 0xad, 0x46, 0xb5, 0xe5, 0x78, 0xaa, 0x20, 0xbc, 0xac, 0x20, 0xbc, + 0x67, 0x59, 0x41, 0xb4, 0xcb, 0x6f, 0x0e, 0xd7, 0x2f, 0xfd, 0xf8, 0xa7, 0x88, 0xdb, 0x18, 0x86, + 0xee, 0x03, 0x3c, 0x09, 0x18, 0x7f, 0xce, 0x24, 0x49, 0xe9, 0x4c, 0x92, 0x25, 0x49, 0x60, 0x60, + 0xd0, 0x1a, 0x80, 0x0c, 0xc0, 0x16, 0x49, 0x63, 0x6e, 0x97, 0xa5, 0xdd, 0xc6, 0x0e, 0xaa, 0x43, + 0xb5, 0x83, 0x59, 0x8f, 0x86, 0x89, 0x4c, 0x73, 0x45, 0xba, 0x60, 0x6e, 0x09, 0x06, 0x15, 0xbd, + 0x67, 0xa3, 0x04, 0xdb, 0x20, 0x15, 0x8c, 0x1d, 0xe1, 0xff, 0xee, 0xab, 0x80, 0xe2, 0xbe, 0x5d, + 0x95, 0xa1, 0xd2, 0x92, 0xfb, 0x53, 0x11, 0x6a, 0xbb, 0xa2, 0x8a, 0xb3, 0x84, 0xaf, 0x42, 0xde, + 0xc7, 0x7b, 0x3a, 0xfa, 0x62, 0x89, 0x3c, 0x80, 0x0e, 0xde, 0x0b, 0xe3, 0x50, 0x7e, 0x3b, 0x27, + 0xdd, 0x5b, 0xf1, 0x92, 0xae, 0x77, 0xbc, 0xeb, 0x1b, 0x1a, 0xc8, 0x81, 0xf2, 0xc3, 0xd7, 0x09, + 0xa1, 0xa2, 0x68, 0xf2, 0x92, 0x66, 0x2c, 0xa3, 0x17, 0xb0, 0x9c, 0xad, 0x1f, 0x70, 0x4e, 0x45, + 0x29, 0x8a, 0x42, 0xb9, 0x33, 0x5d, 0x28, 0xa6, 0x51, 0xde, 0x04, 0xe6, 0x61, 0xcc, 0xe9, 0xc8, + 0x9f, 0xe4, 0x11, 0x35, 0xb2, 0x8b, 0x19, 0x13, 0x16, 0xaa, 0x04, 0x67, 0xa2, 0x30, 0xe7, 0x0b, + 0x4a, 0x62, 0x8e, 0xe3, 0xbe, 0x4c, 0x70, 0xc5, 0x1f, 0xcb, 0xc2, 0x9c, 0x6c, 0xad, 0xcc, 0x29, + 0x9d, 0xcb, 0x9c, 0x09, 0x8c, 0x36, 0x67, 0x62, 0x0f, 0x6d, 0x42, 0x61, 0x2b, 0xe8, 0xbd, 0xc2, + 0x32, 0x97, 0xd5, 0xd6, 0xda, 0x34, 0xa1, 0xfc, 0xfb, 0x4b, 0x99, 0x3c, 0x26, 0x5b, 0xf1, 0x92, + 0xaf, 0x20, 0xe8, 0x1b, 0xa8, 0x3d, 0x8c, 0x79, 0xc8, 0x23, 0x3c, 0xc4, 0x31, 0x67, 0x76, 0x45, + 0x34, 0x5e, 0x7b, 0xf3, 0xed, 0xe1, 0xfa, 0xc7, 0x73, 0x8f, 0x96, 0x94, 0x87, 0x51, 0x13, 0x1b, + 0x28, 0xcf, 0xa0, 0xf0, 0x27, 0xf8, 0xd0, 0x4b, 0x58, 0xc9, 0x8c, 0xdd, 0x8e, 0x93, 0x94, 0x33, + 0x1b, 0xa4, 0xd7, 0xad, 0x73, 0x7a, 0xad, 0x40, 0xca, 0xed, 0x13, 0x4c, 0xce, 0x7d, 0x40, 0xd3, + 0xb9, 0x12, 0x35, 0xb5, 0x8f, 0x47, 0x59, 0x4d, 0xed, 0xe3, 0x91, 0x68, 0xdc, 0x83, 0x20, 0x4a, + 0x55, 0x43, 0x57, 0x7c, 0x25, 0x6c, 0xe6, 0xee, 0x59, 0x82, 0x61, 0x3a, 0xbc, 0x0b, 0x31, 0x7c, + 0x05, 0x57, 0x66, 0x98, 0x3a, 0x83, 0xe2, 0x86, 0x49, 0x31, 0x5d, 0xd3, 0xc7, 0x94, 0xee, 0x2f, + 0x79, 0xa8, 0x99, 0x09, 0x43, 0x1b, 0x70, 0x45, 0xf9, 0xe9, 0xe3, 0xbd, 0x0e, 0x4e, 0x28, 0xee, + 0x89, 0xb3, 0x40, 0x93, 0xcf, 0xfa, 0x0b, 0xb5, 0xe0, 0xea, 0xf6, 0x50, 0x6f, 0x33, 0x03, 0x92, + 0x93, 0xc7, 0xea, 0xcc, 0xff, 0x10, 0x81, 0xff, 0x29, 0x2a, 0x19, 0x09, 0x03, 0x94, 0x97, 0x09, + 0xfb, 0xe4, 0xf4, 0xaa, 0xf2, 0x66, 0x62, 0x55, 0xde, 0x66, 0xf3, 0xa2, 0xcf, 0xa0, 0xa4, 0xfe, + 0xc8, 0x1a, 0xf3, 0xfa, 0xe9, 0x9f, 0x50, 0x64, 0x19, 0x46, 0xc0, 0x95, 0x1f, 0xcc, 0x2e, 0x2c, + 0x00, 0xd7, 0x18, 0xe7, 0x11, 0x38, 0xf3, 0x4d, 0x5e, 0xa4, 0x04, 0xdc, 0x9f, 0x2d, 0xb8, 0x3c, + 0xf5, 0x21, 0x71, 0x2f, 0xc8, 0xd3, 0x51, 0x51, 0xc8, 0x35, 0xea, 0x40, 0x41, 0x75, 0x7e, 0x4e, + 0x1a, 0xec, 0x9d, 0xc3, 0x60, 0xcf, 0x68, 0x7b, 0x05, 0x76, 0xee, 0x01, 0x5c, 0xac, 0x58, 0xdd, + 0xdf, 0x2c, 0x58, 0xd6, 0x5d, 0xa6, 0x2f, 0xd1, 0x00, 0x56, 0xb3, 0x16, 0xca, 0xf6, 0xf4, 0x75, + 0x7a, 0x77, 0x6e, 0x83, 0x2a, 0x35, 
0xef, 0x24, 0x4e, 0xd9, 0x38, 0x45, 0xe7, 0x6c, 0x65, 0x75, + 0x75, 0x42, 0x75, 0x21, 0xcb, 0xdf, 0x87, 0xe5, 0x5d, 0x1e, 0xf0, 0x94, 0xcd, 0xbd, 0x39, 0xdc, + 0x5f, 0x2d, 0x58, 0xc9, 0x74, 0xb4, 0x77, 0x1f, 0x41, 0xf9, 0x00, 0x53, 0x8e, 0x5f, 0x63, 0xa6, + 0xbd, 0xb2, 0xa7, 0xbd, 0xfa, 0x5a, 0x6a, 0xf8, 0x63, 0x4d, 0xb4, 0x09, 0x65, 0x26, 0x79, 0x70, + 0x96, 0xa8, 0xb5, 0x79, 0x28, 0xfd, 0xbd, 0xb1, 0x3e, 0x6a, 0xc2, 0x52, 0x44, 0x06, 0x4c, 0xf7, + 0xcc, 0xff, 0xe7, 0xe1, 0x9e, 0x90, 0x81, 0x2f, 0x15, 0xdd, 0xc3, 0x1c, 0x14, 0xd5, 0x1e, 0x7a, + 0x0c, 0xc5, 0x7e, 0x38, 0xc0, 0x8c, 0x2b, 0xaf, 0xda, 0x2d, 0x71, 0x4e, 0xbf, 0x3d, 0x5c, 0xbf, + 0x69, 0x1c, 0xc4, 0x24, 0xc1, 0xb1, 0x78, 0x91, 0x06, 0x61, 0x8c, 0x29, 0x6b, 0x0e, 0xc8, 0x6d, + 0x05, 0xf1, 0x3a, 0xf2, 0xc7, 0xd7, 0x0c, 0x82, 0x2b, 0x54, 0xc7, 0xad, 0x6c, 0xf9, 0x8b, 0x71, + 0x29, 0x06, 0x51, 0xc9, 0x71, 0x30, 0xc4, 0xfa, 0x7a, 0x95, 0x6b, 0x71, 0xc3, 0xf7, 0x44, 0xa9, + 0xf6, 0xe5, 0xbb, 0xa7, 0xec, 0x6b, 0x09, 0x6d, 0x42, 0x89, 0xf1, 0x80, 0x8a, 0x63, 0xa3, 0x70, + 0xce, 0xa7, 0x49, 0x06, 0x40, 0x9f, 0x43, 0xa5, 0x47, 0x86, 0x49, 0x84, 0x05, 0xba, 0x78, 0x4e, + 0xf4, 0x31, 0x44, 0x54, 0x0f, 0xa6, 0x94, 0x50, 0xf9, 0x28, 0xaa, 0xf8, 0x4a, 0x70, 0xff, 0xc9, + 0x41, 0xcd, 0x4c, 0xd6, 0xd4, 0x83, 0xef, 0x31, 0x14, 0x55, 0xea, 0x55, 0xd5, 0x5d, 0x2c, 0x54, + 0x8a, 0x61, 0x66, 0xa8, 0x6c, 0x28, 0xf5, 0x52, 0x2a, 0x5f, 0x83, 0xea, 0x8d, 0x98, 0x89, 0xc2, + 0x60, 0x4e, 0x78, 0x10, 0xc9, 0x50, 0xe5, 0x7d, 0x25, 0x88, 0x47, 0xe2, 0x78, 0x26, 0x58, 0xec, + 0x91, 0x38, 0x86, 0x99, 0x69, 0x28, 0xbd, 0x53, 0x1a, 0xca, 0x0b, 0xa7, 0xc1, 0xfd, 0xdd, 0x82, + 0xca, 0xb8, 0xca, 0x8d, 0xe8, 0x5a, 0xef, 0x1c, 0xdd, 0x89, 0xc8, 0xe4, 0x2e, 0x16, 0x99, 0x6b, + 0x50, 0x64, 0x9c, 0xe2, 0x60, 0xa8, 0xc6, 0x17, 0x5f, 0x4b, 0xe2, 0x3c, 0x19, 0xb2, 0x81, 0xcc, + 0x50, 0xcd, 0x17, 0x4b, 0xd7, 0x85, 0x9a, 0x9c, 0x54, 0x76, 0x30, 0x13, 0x6f, 0x63, 0x91, 0xdb, + 0x7e, 0xc0, 0x03, 0xe9, 0x47, 0xcd, 0x97, 0x6b, 0xf7, 0x16, 0xa0, 0x27, 0x21, 0xe3, 0x2f, 0xe4, + 0x84, 0xc5, 0xce, 0x1a, 0x63, 0x76, 0xe1, 0xca, 0x84, 0xb6, 0x3e, 0xa5, 0x3e, 0x3d, 0x31, 0xc8, + 0xdc, 0x98, 0x3e, 0x35, 0xe4, 0x20, 0xe7, 0x29, 0xe0, 0xe4, 0x3c, 0xd3, 0xfa, 0x3b, 0x0f, 0xa5, + 0x2d, 0x35, 0xa3, 0xa2, 0x67, 0x50, 0x19, 0xcf, 0x49, 0xc8, 0x9d, 0xa6, 0x39, 0x39, 0x70, 0x39, + 0xd7, 0x4f, 0xd5, 0xd1, 0xf6, 0x3d, 0x82, 0x82, 0x9c, 0x18, 0xd1, 0x8c, 0x63, 0xd0, 0x1c, 0x25, + 0x9d, 0xd3, 0x27, 0xb0, 0x0d, 0x4b, 0x30, 0xc9, 0x3b, 0x64, 0x16, 0x93, 0xf9, 0xfa, 0x73, 0xd6, + 0xcf, 0xb8, 0x7c, 0xd0, 0x0e, 0x14, 0x75, 0x3b, 0xcf, 0x52, 0x35, 0x6f, 0x0a, 0xa7, 0x3e, 0x5f, + 0x41, 0x91, 0x6d, 0x58, 0x68, 0x67, 0xfc, 0xa0, 0x9f, 0x65, 0x9a, 0x59, 0x06, 0xce, 0x19, 0xff, + 0x37, 0xac, 0x0d, 0x0b, 0xbd, 0x84, 0xaa, 0x91, 0x68, 0x34, 0x23, 0xa1, 0xd3, 0x55, 0xe3, 0x7c, + 0x70, 0x86, 0x96, 0x32, 0xb6, 0x5d, 0x7b, 0x73, 0xb4, 0x66, 0xfd, 0x71, 0xb4, 0x66, 0xfd, 0x75, + 0xb4, 0x66, 0x75, 0x8b, 0xb2, 0xee, 0x3f, 0xfc, 0x2f, 0x00, 0x00, 0xff, 0xff, 0xfb, 0x65, 0x7c, + 0xd6, 0xa7, 0x10, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// ControlClient is the client API for Control service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
+type ControlClient interface { + DiskUsage(ctx context.Context, in *DiskUsageRequest, opts ...grpc.CallOption) (*DiskUsageResponse, error) + Prune(ctx context.Context, in *PruneRequest, opts ...grpc.CallOption) (Control_PruneClient, error) + Solve(ctx context.Context, in *SolveRequest, opts ...grpc.CallOption) (*SolveResponse, error) + Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (Control_StatusClient, error) + Session(ctx context.Context, opts ...grpc.CallOption) (Control_SessionClient, error) + ListWorkers(ctx context.Context, in *ListWorkersRequest, opts ...grpc.CallOption) (*ListWorkersResponse, error) +} + +type controlClient struct { + cc *grpc.ClientConn +} + +func NewControlClient(cc *grpc.ClientConn) ControlClient { + return &controlClient{cc} +} + +func (c *controlClient) DiskUsage(ctx context.Context, in *DiskUsageRequest, opts ...grpc.CallOption) (*DiskUsageResponse, error) { + out := new(DiskUsageResponse) + err := c.cc.Invoke(ctx, "/moby.buildkit.v1.Control/DiskUsage", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) Prune(ctx context.Context, in *PruneRequest, opts ...grpc.CallOption) (Control_PruneClient, error) { + stream, err := c.cc.NewStream(ctx, &_Control_serviceDesc.Streams[0], "/moby.buildkit.v1.Control/Prune", opts...) + if err != nil { + return nil, err + } + x := &controlPruneClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Control_PruneClient interface { + Recv() (*UsageRecord, error) + grpc.ClientStream +} + +type controlPruneClient struct { + grpc.ClientStream +} + +func (x *controlPruneClient) Recv() (*UsageRecord, error) { + m := new(UsageRecord) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *controlClient) Solve(ctx context.Context, in *SolveRequest, opts ...grpc.CallOption) (*SolveResponse, error) { + out := new(SolveResponse) + err := c.cc.Invoke(ctx, "/moby.buildkit.v1.Control/Solve", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (Control_StatusClient, error) { + stream, err := c.cc.NewStream(ctx, &_Control_serviceDesc.Streams[1], "/moby.buildkit.v1.Control/Status", opts...) + if err != nil { + return nil, err + } + x := &controlStatusClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Control_StatusClient interface { + Recv() (*StatusResponse, error) + grpc.ClientStream +} + +type controlStatusClient struct { + grpc.ClientStream +} + +func (x *controlStatusClient) Recv() (*StatusResponse, error) { + m := new(StatusResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *controlClient) Session(ctx context.Context, opts ...grpc.CallOption) (Control_SessionClient, error) { + stream, err := c.cc.NewStream(ctx, &_Control_serviceDesc.Streams[2], "/moby.buildkit.v1.Control/Session", opts...) 
+ if err != nil { + return nil, err + } + x := &controlSessionClient{stream} + return x, nil +} + +type Control_SessionClient interface { + Send(*BytesMessage) error + Recv() (*BytesMessage, error) + grpc.ClientStream +} + +type controlSessionClient struct { + grpc.ClientStream +} + +func (x *controlSessionClient) Send(m *BytesMessage) error { + return x.ClientStream.SendMsg(m) +} + +func (x *controlSessionClient) Recv() (*BytesMessage, error) { + m := new(BytesMessage) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *controlClient) ListWorkers(ctx context.Context, in *ListWorkersRequest, opts ...grpc.CallOption) (*ListWorkersResponse, error) { + out := new(ListWorkersResponse) + err := c.cc.Invoke(ctx, "/moby.buildkit.v1.Control/ListWorkers", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ControlServer is the server API for Control service. +type ControlServer interface { + DiskUsage(context.Context, *DiskUsageRequest) (*DiskUsageResponse, error) + Prune(*PruneRequest, Control_PruneServer) error + Solve(context.Context, *SolveRequest) (*SolveResponse, error) + Status(*StatusRequest, Control_StatusServer) error + Session(Control_SessionServer) error + ListWorkers(context.Context, *ListWorkersRequest) (*ListWorkersResponse, error) +} + +// UnimplementedControlServer can be embedded to have forward compatible implementations. +type UnimplementedControlServer struct { +} + +func (*UnimplementedControlServer) DiskUsage(ctx context.Context, req *DiskUsageRequest) (*DiskUsageResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DiskUsage not implemented") +} +func (*UnimplementedControlServer) Prune(req *PruneRequest, srv Control_PruneServer) error { + return status.Errorf(codes.Unimplemented, "method Prune not implemented") +} +func (*UnimplementedControlServer) Solve(ctx context.Context, req *SolveRequest) (*SolveResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Solve not implemented") +} +func (*UnimplementedControlServer) Status(req *StatusRequest, srv Control_StatusServer) error { + return status.Errorf(codes.Unimplemented, "method Status not implemented") +} +func (*UnimplementedControlServer) Session(srv Control_SessionServer) error { + return status.Errorf(codes.Unimplemented, "method Session not implemented") +} +func (*UnimplementedControlServer) ListWorkers(ctx context.Context, req *ListWorkersRequest) (*ListWorkersResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListWorkers not implemented") +} + +func RegisterControlServer(s *grpc.Server, srv ControlServer) { + s.RegisterService(&_Control_serviceDesc, srv) +} + +func _Control_DiskUsage_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DiskUsageRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).DiskUsage(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/moby.buildkit.v1.Control/DiskUsage", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).DiskUsage(ctx, req.(*DiskUsageRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_Prune_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(PruneRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return 
srv.(ControlServer).Prune(m, &controlPruneServer{stream}) +} + +type Control_PruneServer interface { + Send(*UsageRecord) error + grpc.ServerStream +} + +type controlPruneServer struct { + grpc.ServerStream +} + +func (x *controlPruneServer) Send(m *UsageRecord) error { + return x.ServerStream.SendMsg(m) +} + +func _Control_Solve_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SolveRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).Solve(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/moby.buildkit.v1.Control/Solve", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).Solve(ctx, req.(*SolveRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_Status_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(StatusRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(ControlServer).Status(m, &controlStatusServer{stream}) +} + +type Control_StatusServer interface { + Send(*StatusResponse) error + grpc.ServerStream +} + +type controlStatusServer struct { + grpc.ServerStream +} + +func (x *controlStatusServer) Send(m *StatusResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _Control_Session_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(ControlServer).Session(&controlSessionServer{stream}) +} + +type Control_SessionServer interface { + Send(*BytesMessage) error + Recv() (*BytesMessage, error) + grpc.ServerStream +} + +type controlSessionServer struct { + grpc.ServerStream +} + +func (x *controlSessionServer) Send(m *BytesMessage) error { + return x.ServerStream.SendMsg(m) +} + +func (x *controlSessionServer) Recv() (*BytesMessage, error) { + m := new(BytesMessage) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _Control_ListWorkers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListWorkersRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).ListWorkers(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/moby.buildkit.v1.Control/ListWorkers", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).ListWorkers(ctx, req.(*ListWorkersRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Control_serviceDesc = grpc.ServiceDesc{ + ServiceName: "moby.buildkit.v1.Control", + HandlerType: (*ControlServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "DiskUsage", + Handler: _Control_DiskUsage_Handler, + }, + { + MethodName: "Solve", + Handler: _Control_Solve_Handler, + }, + { + MethodName: "ListWorkers", + Handler: _Control_ListWorkers_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "Prune", + Handler: _Control_Prune_Handler, + ServerStreams: true, + }, + { + StreamName: "Status", + Handler: _Control_Status_Handler, + ServerStreams: true, + }, + { + StreamName: "Session", + Handler: _Control_Session_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "control.proto", +} + +func (m *PruneRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) 
+ n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PruneRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PruneRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.KeepBytes != 0 { + i = encodeVarintControl(dAtA, i, uint64(m.KeepBytes)) + i-- + dAtA[i] = 0x20 + } + if m.KeepDuration != 0 { + i = encodeVarintControl(dAtA, i, uint64(m.KeepDuration)) + i-- + dAtA[i] = 0x18 + } + if m.All { + i-- + if m.All { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if len(m.Filter) > 0 { + for iNdEx := len(m.Filter) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Filter[iNdEx]) + copy(dAtA[i:], m.Filter[iNdEx]) + i = encodeVarintControl(dAtA, i, uint64(len(m.Filter[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *DiskUsageRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DiskUsageRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DiskUsageRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Filter) > 0 { + for iNdEx := len(m.Filter) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Filter[iNdEx]) + copy(dAtA[i:], m.Filter[iNdEx]) + i = encodeVarintControl(dAtA, i, uint64(len(m.Filter[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *DiskUsageResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DiskUsageResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DiskUsageResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Record) > 0 { + for iNdEx := len(m.Record) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Record[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintControl(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *UsageRecord) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UsageRecord) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UsageRecord) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Shared { + i-- + if m.Shared { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x58 + } + if len(m.RecordType) > 0 { + i -= 
len(m.RecordType) + copy(dAtA[i:], m.RecordType) + i = encodeVarintControl(dAtA, i, uint64(len(m.RecordType))) + i-- + dAtA[i] = 0x52 + } + if len(m.Description) > 0 { + i -= len(m.Description) + copy(dAtA[i:], m.Description) + i = encodeVarintControl(dAtA, i, uint64(len(m.Description))) + i-- + dAtA[i] = 0x4a + } + if m.UsageCount != 0 { + i = encodeVarintControl(dAtA, i, uint64(m.UsageCount)) + i-- + dAtA[i] = 0x40 + } + if m.LastUsedAt != nil { + n1, err1 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.LastUsedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.LastUsedAt):]) + if err1 != nil { + return 0, err1 + } + i -= n1 + i = encodeVarintControl(dAtA, i, uint64(n1)) + i-- + dAtA[i] = 0x3a + } + n2, err2 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.CreatedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt):]) + if err2 != nil { + return 0, err2 + } + i -= n2 + i = encodeVarintControl(dAtA, i, uint64(n2)) + i-- + dAtA[i] = 0x32 + if len(m.Parent) > 0 { + i -= len(m.Parent) + copy(dAtA[i:], m.Parent) + i = encodeVarintControl(dAtA, i, uint64(len(m.Parent))) + i-- + dAtA[i] = 0x2a + } + if m.Size_ != 0 { + i = encodeVarintControl(dAtA, i, uint64(m.Size_)) + i-- + dAtA[i] = 0x20 + } + if m.InUse { + i-- + if m.InUse { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.Mutable { + i-- + if m.Mutable { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if len(m.ID) > 0 { + i -= len(m.ID) + copy(dAtA[i:], m.ID) + i = encodeVarintControl(dAtA, i, uint64(len(m.ID))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SolveRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SolveRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SolveRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.FrontendInputs) > 0 { + for k := range m.FrontendInputs { + v := m.FrontendInputs[k] + baseI := i + if v != nil { + { + size, err := v.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintControl(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintControl(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintControl(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x52 + } + } + if len(m.Entitlements) > 0 { + for iNdEx := len(m.Entitlements) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Entitlements[iNdEx]) + copy(dAtA[i:], m.Entitlements[iNdEx]) + i = encodeVarintControl(dAtA, i, uint64(len(m.Entitlements[iNdEx]))) + i-- + dAtA[i] = 0x4a + } + } + { + size, err := m.Cache.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintControl(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + if len(m.FrontendAttrs) > 0 { + for k := range m.FrontendAttrs { + v := m.FrontendAttrs[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintControl(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintControl(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintControl(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 
0x3a + } + } + if len(m.Frontend) > 0 { + i -= len(m.Frontend) + copy(dAtA[i:], m.Frontend) + i = encodeVarintControl(dAtA, i, uint64(len(m.Frontend))) + i-- + dAtA[i] = 0x32 + } + if len(m.Session) > 0 { + i -= len(m.Session) + copy(dAtA[i:], m.Session) + i = encodeVarintControl(dAtA, i, uint64(len(m.Session))) + i-- + dAtA[i] = 0x2a + } + if len(m.ExporterAttrs) > 0 { + for k := range m.ExporterAttrs { + v := m.ExporterAttrs[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintControl(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintControl(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintControl(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Exporter) > 0 { + i -= len(m.Exporter) + copy(dAtA[i:], m.Exporter) + i = encodeVarintControl(dAtA, i, uint64(len(m.Exporter))) + i-- + dAtA[i] = 0x1a + } + if m.Definition != nil { + { + size, err := m.Definition.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintControl(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Ref) > 0 { + i -= len(m.Ref) + copy(dAtA[i:], m.Ref) + i = encodeVarintControl(dAtA, i, uint64(len(m.Ref))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CacheOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CacheOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CacheOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Imports) > 0 { + for iNdEx := len(m.Imports) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Imports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintControl(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + if len(m.Exports) > 0 { + for iNdEx := len(m.Exports) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Exports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintControl(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.ExportAttrsDeprecated) > 0 { + for k := range m.ExportAttrsDeprecated { + v := m.ExportAttrsDeprecated[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintControl(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintControl(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintControl(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.ImportRefsDeprecated) > 0 { + for iNdEx := len(m.ImportRefsDeprecated) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ImportRefsDeprecated[iNdEx]) + copy(dAtA[i:], m.ImportRefsDeprecated[iNdEx]) + i = encodeVarintControl(dAtA, i, uint64(len(m.ImportRefsDeprecated[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.ExportRefDeprecated) > 0 { + i -= len(m.ExportRefDeprecated) + copy(dAtA[i:], m.ExportRefDeprecated) + i = encodeVarintControl(dAtA, i, uint64(len(m.ExportRefDeprecated))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CacheOptionsEntry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, 
err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CacheOptionsEntry) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CacheOptionsEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Attrs) > 0 { + for k := range m.Attrs { + v := m.Attrs[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintControl(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintControl(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintControl(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Type) > 0 { + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintControl(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SolveResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SolveResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SolveResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.ExporterResponse) > 0 { + for k := range m.ExporterResponse { + v := m.ExporterResponse[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintControl(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintControl(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintControl(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *StatusRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatusRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatusRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Ref) > 0 { + i -= len(m.Ref) + copy(dAtA[i:], m.Ref) + i = encodeVarintControl(dAtA, i, uint64(len(m.Ref))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *StatusResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatusResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatusResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Logs) > 0 { + for iNdEx := len(m.Logs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Logs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if 
err != nil { + return 0, err + } + i -= size + i = encodeVarintControl(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Statuses) > 0 { + for iNdEx := len(m.Statuses) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Statuses[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintControl(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Vertexes) > 0 { + for iNdEx := len(m.Vertexes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Vertexes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintControl(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Vertex) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Vertex) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Vertex) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Error) > 0 { + i -= len(m.Error) + copy(dAtA[i:], m.Error) + i = encodeVarintControl(dAtA, i, uint64(len(m.Error))) + i-- + dAtA[i] = 0x3a + } + if m.Completed != nil { + n6, err6 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.Completed, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.Completed):]) + if err6 != nil { + return 0, err6 + } + i -= n6 + i = encodeVarintControl(dAtA, i, uint64(n6)) + i-- + dAtA[i] = 0x32 + } + if m.Started != nil { + n7, err7 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.Started, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.Started):]) + if err7 != nil { + return 0, err7 + } + i -= n7 + i = encodeVarintControl(dAtA, i, uint64(n7)) + i-- + dAtA[i] = 0x2a + } + if m.Cached { + i-- + if m.Cached { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintControl(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + } + if len(m.Inputs) > 0 { + for iNdEx := len(m.Inputs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Inputs[iNdEx]) + copy(dAtA[i:], m.Inputs[iNdEx]) + i = encodeVarintControl(dAtA, i, uint64(len(m.Inputs[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Digest) > 0 { + i -= len(m.Digest) + copy(dAtA[i:], m.Digest) + i = encodeVarintControl(dAtA, i, uint64(len(m.Digest))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *VertexStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VertexStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VertexStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Completed != nil { + n8, err8 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.Completed, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.Completed):]) + if err8 != nil { + return 0, err8 + } + i -= n8 + i = encodeVarintControl(dAtA, i, 
uint64(n8)) + i-- + dAtA[i] = 0x42 + } + if m.Started != nil { + n9, err9 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.Started, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.Started):]) + if err9 != nil { + return 0, err9 + } + i -= n9 + i = encodeVarintControl(dAtA, i, uint64(n9)) + i-- + dAtA[i] = 0x3a + } + n10, err10 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):]) + if err10 != nil { + return 0, err10 + } + i -= n10 + i = encodeVarintControl(dAtA, i, uint64(n10)) + i-- + dAtA[i] = 0x32 + if m.Total != 0 { + i = encodeVarintControl(dAtA, i, uint64(m.Total)) + i-- + dAtA[i] = 0x28 + } + if m.Current != 0 { + i = encodeVarintControl(dAtA, i, uint64(m.Current)) + i-- + dAtA[i] = 0x20 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintControl(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + } + if len(m.Vertex) > 0 { + i -= len(m.Vertex) + copy(dAtA[i:], m.Vertex) + i = encodeVarintControl(dAtA, i, uint64(len(m.Vertex))) + i-- + dAtA[i] = 0x12 + } + if len(m.ID) > 0 { + i -= len(m.ID) + copy(dAtA[i:], m.ID) + i = encodeVarintControl(dAtA, i, uint64(len(m.ID))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *VertexLog) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VertexLog) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VertexLog) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Msg) > 0 { + i -= len(m.Msg) + copy(dAtA[i:], m.Msg) + i = encodeVarintControl(dAtA, i, uint64(len(m.Msg))) + i-- + dAtA[i] = 0x22 + } + if m.Stream != 0 { + i = encodeVarintControl(dAtA, i, uint64(m.Stream)) + i-- + dAtA[i] = 0x18 + } + n11, err11 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):]) + if err11 != nil { + return 0, err11 + } + i -= n11 + i = encodeVarintControl(dAtA, i, uint64(n11)) + i-- + dAtA[i] = 0x12 + if len(m.Vertex) > 0 { + i -= len(m.Vertex) + copy(dAtA[i:], m.Vertex) + i = encodeVarintControl(dAtA, i, uint64(len(m.Vertex))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *BytesMessage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BytesMessage) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BytesMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Data) > 0 { + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintControl(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ListWorkersRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*ListWorkersRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ListWorkersRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Filter) > 0 { + for iNdEx := len(m.Filter) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Filter[iNdEx]) + copy(dAtA[i:], m.Filter[iNdEx]) + i = encodeVarintControl(dAtA, i, uint64(len(m.Filter[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ListWorkersResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListWorkersResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ListWorkersResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Record) > 0 { + for iNdEx := len(m.Record) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Record[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintControl(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintControl(dAtA []byte, offset int, v uint64) int { + offset -= sovControl(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *PruneRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Filter) > 0 { + for _, s := range m.Filter { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if m.All { + n += 2 + } + if m.KeepDuration != 0 { + n += 1 + sovControl(uint64(m.KeepDuration)) + } + if m.KeepBytes != 0 { + n += 1 + sovControl(uint64(m.KeepBytes)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *DiskUsageRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Filter) > 0 { + for _, s := range m.Filter { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *DiskUsageResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Record) > 0 { + for _, e := range m.Record { + l = e.Size() + n += 1 + l + sovControl(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *UsageRecord) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if m.Mutable { + n += 2 + } + if m.InUse { + n += 2 + } + if m.Size_ != 0 { + n += 1 + sovControl(uint64(m.Size_)) + } + l = len(m.Parent) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt) + n += 1 + l + sovControl(uint64(l)) + if m.LastUsedAt != nil { + l = github_com_gogo_protobuf_types.SizeOfStdTime(*m.LastUsedAt) + n += 1 + l + sovControl(uint64(l)) + } + if m.UsageCount != 0 { + n += 1 + sovControl(uint64(m.UsageCount)) + } + l = len(m.Description) + if l > 0 { + n += 1 + l + 
sovControl(uint64(l)) + } + l = len(m.RecordType) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if m.Shared { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *SolveRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Ref) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if m.Definition != nil { + l = m.Definition.Size() + n += 1 + l + sovControl(uint64(l)) + } + l = len(m.Exporter) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if len(m.ExporterAttrs) > 0 { + for k, v := range m.ExporterAttrs { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) + } + } + l = len(m.Session) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + l = len(m.Frontend) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if len(m.FrontendAttrs) > 0 { + for k, v := range m.FrontendAttrs { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) + } + } + l = m.Cache.Size() + n += 1 + l + sovControl(uint64(l)) + if len(m.Entitlements) > 0 { + for _, s := range m.Entitlements { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.FrontendInputs) > 0 { + for k, v := range m.FrontendInputs { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovControl(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + l + n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *CacheOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ExportRefDeprecated) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if len(m.ImportRefsDeprecated) > 0 { + for _, s := range m.ImportRefsDeprecated { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.ExportAttrsDeprecated) > 0 { + for k, v := range m.ExportAttrsDeprecated { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) + } + } + if len(m.Exports) > 0 { + for _, e := range m.Exports { + l = e.Size() + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.Imports) > 0 { + for _, e := range m.Imports { + l = e.Size() + n += 1 + l + sovControl(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *CacheOptionsEntry) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if len(m.Attrs) > 0 { + for k, v := range m.Attrs { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *SolveResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ExporterResponse) > 0 { + for k, v := range m.ExporterResponse { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) + } + } + if m.XXX_unrecognized != nil { + 
n += len(m.XXX_unrecognized) + } + return n +} + +func (m *StatusRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Ref) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *StatusResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Vertexes) > 0 { + for _, e := range m.Vertexes { + l = e.Size() + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.Statuses) > 0 { + for _, e := range m.Statuses { + l = e.Size() + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.Logs) > 0 { + for _, e := range m.Logs { + l = e.Size() + n += 1 + l + sovControl(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Vertex) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Digest) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if len(m.Inputs) > 0 { + for _, s := range m.Inputs { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if m.Cached { + n += 2 + } + if m.Started != nil { + l = github_com_gogo_protobuf_types.SizeOfStdTime(*m.Started) + n += 1 + l + sovControl(uint64(l)) + } + if m.Completed != nil { + l = github_com_gogo_protobuf_types.SizeOfStdTime(*m.Completed) + n += 1 + l + sovControl(uint64(l)) + } + l = len(m.Error) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *VertexStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + l = len(m.Vertex) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if m.Current != 0 { + n += 1 + sovControl(uint64(m.Current)) + } + if m.Total != 0 { + n += 1 + sovControl(uint64(m.Total)) + } + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp) + n += 1 + l + sovControl(uint64(l)) + if m.Started != nil { + l = github_com_gogo_protobuf_types.SizeOfStdTime(*m.Started) + n += 1 + l + sovControl(uint64(l)) + } + if m.Completed != nil { + l = github_com_gogo_protobuf_types.SizeOfStdTime(*m.Completed) + n += 1 + l + sovControl(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *VertexLog) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Vertex) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp) + n += 1 + l + sovControl(uint64(l)) + if m.Stream != 0 { + n += 1 + sovControl(uint64(m.Stream)) + } + l = len(m.Msg) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *BytesMessage) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Data) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ListWorkersRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Filter) > 0 { + for _, s := range m.Filter { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m 
*ListWorkersResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Record) > 0 { + for _, e := range m.Record { + l = e.Size() + n += 1 + l + sovControl(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovControl(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozControl(x uint64) (n int) { + return sovControl(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *PruneRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PruneRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PruneRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Filter = append(m.Filter, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field All", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.All = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field KeepDuration", wireType) + } + m.KeepDuration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.KeepDuration |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field KeepBytes", wireType) + } + m.KeepBytes = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.KeepBytes |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DiskUsageRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DiskUsageRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DiskUsageRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Filter = append(m.Filter, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DiskUsageResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DiskUsageResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DiskUsageResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Record", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Record = append(m.Record, &UsageRecord{}) + if err := m.Record[len(m.Record)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UsageRecord) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UsageRecord: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UsageRecord: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Mutable", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Mutable = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InUse", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.InUse = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Size_", wireType) + } + m.Size_ = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Size_ |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Parent", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Parent = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.CreatedAt, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastUsedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LastUsedAt == nil { + m.LastUsedAt = new(time.Time) + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(m.LastUsedAt, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UsageCount", wireType) + } + m.UsageCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UsageCount |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Description = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RecordType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RecordType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Shared", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Shared = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) < 0 { 
+ return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SolveRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SolveRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SolveRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ref = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Definition", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Definition == nil { + m.Definition = &pb.Definition{} + } + if err := m.Definition.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exporter", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Exporter = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExporterAttrs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ExporterAttrs == nil { + m.ExporterAttrs = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthControl + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthControl + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.ExporterAttrs[mapkey] = mapvalue + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Session", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Session = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Frontend", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.Frontend = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FrontendAttrs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.FrontendAttrs == nil { + m.FrontendAttrs = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthControl + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthControl + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.FrontendAttrs[mapkey] = mapvalue + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cache", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Cache.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Entitlements", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Entitlements = append(m.Entitlements, github_com_moby_buildkit_util_entitlements.Entitlement(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FrontendInputs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.FrontendInputs == nil { + m.FrontendInputs = make(map[string]*pb.Definition) + } + var mapkey string + var mapvalue *pb.Definition + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthControl + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthControl + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthControl + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &pb.Definition{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.FrontendInputs[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { 
+ return ErrInvalidLengthControl + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CacheOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CacheOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CacheOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExportRefDeprecated", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExportRefDeprecated = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImportRefsDeprecated", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ImportRefsDeprecated = append(m.ImportRefsDeprecated, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExportAttrsDeprecated", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ExportAttrsDeprecated == nil { + m.ExportAttrsDeprecated = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var 
stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthControl + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthControl + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.ExportAttrsDeprecated[mapkey] = mapvalue + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exports", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Exports = append(m.Exports, &CacheOptionsEntry{}) + if err := m.Exports[len(m.Exports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Imports", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Imports = append(m.Imports, &CacheOptionsEntry{}) + if err := m.Imports[len(m.Imports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, 
dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CacheOptionsEntry) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CacheOptionsEntry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CacheOptionsEntry: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attrs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Attrs == nil { + m.Attrs = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthControl + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if 
intStringLenmapvalue < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthControl + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Attrs[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SolveResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SolveResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SolveResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExporterResponse", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ExporterResponse == nil { + m.ExporterResponse = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthControl + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum 
== 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthControl + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.ExporterResponse[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatusRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatusRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ref = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatusResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatusResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Vertexes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Vertexes = append(m.Vertexes, &Vertex{}) + if err := m.Vertexes[len(m.Vertexes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Statuses", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Statuses = append(m.Statuses, &VertexStatus{}) + if err := m.Statuses[len(m.Statuses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Logs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Logs = append(m.Logs, &VertexLog{}) + if err := m.Logs[len(m.Logs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Vertex) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Vertex: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Vertex: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Inputs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Inputs = append(m.Inputs, github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Cached", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Cached = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Started", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b 
:= dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Started == nil { + m.Started = new(time.Time) + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(m.Started, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Completed", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Completed == nil { + m.Completed = new(time.Time) + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(m.Completed, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Error = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VertexStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VertexStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VertexStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Vertex", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Vertex = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Current", wireType) + } + m.Current = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Current |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Total", wireType) + } + m.Total = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Total |= int64(b&0x7F) << shift 
+ if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Started", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Started == nil { + m.Started = new(time.Time) + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(m.Started, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Completed", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Completed == nil { + m.Completed = new(time.Time) + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(m.Completed, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VertexLog) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VertexLog: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VertexLog: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Vertex", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Vertex = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stream", wireType) + } + m.Stream = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Stream |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Msg", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Msg = append(m.Msg[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Msg == nil { + m.Msg = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BytesMessage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BytesMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BytesMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListWorkersRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListWorkersRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListWorkersRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Filter = append(m.Filter, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListWorkersResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListWorkersResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListWorkersResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Record", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Record = append(m.Record, &types.WorkerRecord{}) + if err := m.Record[len(m.Record)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipControl(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	depth := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowControl
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowControl
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+		case 1:
+			iNdEx += 8
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowControl
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if length < 0 {
+				return 0, ErrInvalidLengthControl
+			}
+			iNdEx += length
+		case 3:
+			depth++
+		case 4:
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupControl
+			}
+			depth--
+		case 5:
+			iNdEx += 4
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthControl
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
+	}
+	return 0, io.ErrUnexpectedEOF
+}
+
+var (
+	ErrInvalidLengthControl        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowControl          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupControl = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/vendor/github.com/moby/buildkit/api/services/control/control.proto b/vendor/github.com/moby/buildkit/api/services/control/control.proto
new file mode 100644
index 00000000..fe10b6ac
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/api/services/control/control.proto
@@ -0,0 +1,150 @@
+syntax = "proto3";
+
+package moby.buildkit.v1;
+
+// The control API is currently considered experimental and may break in a backwards
+// incompatible way.
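The generated unmarshalers above, like skipControl, all inline the same base-128 varint loop to decode field tags, lengths, and integer values. A minimal standalone sketch of that loop (illustrative only; decodeVarint is a hypothetical helper, not part of the vendored code):

package main

import (
	"errors"
	"fmt"
)

var (
	errIntOverflow   = errors.New("varint: integer overflow")
	errUnexpectedEOF = errors.New("varint: unexpected EOF")
)

// decodeVarint reads one base-128 varint from dAtA starting at iNdEx and
// returns the decoded value plus the index of the first byte after it.
// Each byte contributes its low 7 bits; a set high bit means "more bytes".
func decodeVarint(dAtA []byte, iNdEx int) (uint64, int, error) {
	var v uint64
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			// More than ten bytes would overflow a uint64, matching the
			// ErrIntOverflowControl guard in the generated code.
			return 0, 0, errIntOverflow
		}
		if iNdEx >= len(dAtA) {
			return 0, 0, errUnexpectedEOF
		}
		b := dAtA[iNdEx]
		iNdEx++
		v |= uint64(b&0x7F) << shift
		if b < 0x80 { // continuation bit clear: this was the last byte
			break
		}
	}
	return v, iNdEx, nil
}

func main() {
	// 0x96 0x01 decodes to 150: 0x16 | (0x01 << 7) = 22 + 128.
	v, next, err := decodeVarint([]byte{0x96, 0x01}, 0)
	fmt.Println(v, next, err) // prints: 150 2 <nil>
}

The generated code then splits each decoded tag into fieldNum (wire >> 3) and wireType (wire & 0x7), which is how the switch statements above dispatch on field numbers.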
+
+import "github.com/gogo/protobuf/gogoproto/gogo.proto";
+import "google/protobuf/timestamp.proto";
+import "github.com/moby/buildkit/solver/pb/ops.proto";
+import "github.com/moby/buildkit/api/types/worker.proto";
+
+option (gogoproto.sizer_all) = true;
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+
+service Control {
+	rpc DiskUsage(DiskUsageRequest) returns (DiskUsageResponse);
+	rpc Prune(PruneRequest) returns (stream UsageRecord);
+	rpc Solve(SolveRequest) returns (SolveResponse);
+	rpc Status(StatusRequest) returns (stream StatusResponse);
+	rpc Session(stream BytesMessage) returns (stream BytesMessage);
+	rpc ListWorkers(ListWorkersRequest) returns (ListWorkersResponse);
+	// rpc Info(InfoRequest) returns (InfoResponse);
+}
+
+message PruneRequest {
+	repeated string filter = 1;
+	bool all = 2;
+	int64 keepDuration = 3 [(gogoproto.nullable) = true];
+	int64 keepBytes = 4 [(gogoproto.nullable) = true];
+}
+
+message DiskUsageRequest {
+	repeated string filter = 1;
+}
+
+message DiskUsageResponse {
+	repeated UsageRecord record = 1;
+}
+
+message UsageRecord {
+	string ID = 1;
+	bool Mutable = 2;
+	bool InUse = 3;
+	int64 Size = 4;
+	string Parent = 5;
+	google.protobuf.Timestamp CreatedAt = 6 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+	google.protobuf.Timestamp LastUsedAt = 7 [(gogoproto.stdtime) = true];
+	int64 UsageCount = 8;
+	string Description = 9;
+	string RecordType = 10;
+	bool Shared = 11;
+}
+
+message SolveRequest {
+	string Ref = 1;
+	pb.Definition Definition = 2;
+	string Exporter = 3;
+	map<string, string> ExporterAttrs = 4;
+	string Session = 5;
+	string Frontend = 6;
+	map<string, string> FrontendAttrs = 7;
+	CacheOptions Cache = 8 [(gogoproto.nullable) = false];
+	repeated string Entitlements = 9 [(gogoproto.customtype) = "github.com/moby/buildkit/util/entitlements.Entitlement" ];
+	map<string, pb.Definition> FrontendInputs = 10;
+}
+
+message CacheOptions {
+	// ExportRefDeprecated is deprecated in favor of the new Exports since BuildKit v0.4.0.
+	// When ExportRefDeprecated is set, the solver appends
+	// {.Type = "registry", .Attrs = ExportAttrs.add("ref", ExportRef)}
+	// to Exports for compatibility. (planned to be removed)
+	string ExportRefDeprecated = 1;
+	// ImportRefsDeprecated is deprecated in favor of the new Imports since BuildKit v0.4.0.
+	// When ImportRefsDeprecated is set, the solver appends
+	// {.Type = "registry", .Attrs = {"ref": importRef}}
+	// for each ImportRefs entry to Imports for compatibility. (planned to be removed)
+	repeated string ImportRefsDeprecated = 2;
+	// ExportAttrsDeprecated is deprecated since BuildKit v0.4.0.
+	// See the description of ExportRefDeprecated.
+	map<string, string> ExportAttrsDeprecated = 3;
+	// Exports was introduced in BuildKit v0.4.0.
+	repeated CacheOptionsEntry Exports = 4;
+	// Imports was introduced in BuildKit v0.4.0.
+	repeated CacheOptionsEntry Imports = 5;
+}
+
+message CacheOptionsEntry {
+	// Type is like "registry" or "local"
+	string Type = 1;
+	// Attrs are like mode=(min,max), ref=example.com:5000/foo/bar.
+	// See cache importer/exporter implementations' documentation.
+	map<string, string> Attrs = 2;
+}
+
+message SolveResponse {
+	map<string, string> ExporterResponse = 1;
+}
+
+message StatusRequest {
+	string Ref = 1;
+}
+
+message StatusResponse {
+	repeated Vertex vertexes = 1;
+	repeated VertexStatus statuses = 2;
+	repeated VertexLog logs = 3;
+}
+
+message Vertex {
+	string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
+	repeated string inputs = 2 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
+	string name = 3;
+	bool cached = 4;
+	google.protobuf.Timestamp started = 5 [(gogoproto.stdtime) = true ];
+	google.protobuf.Timestamp completed = 6 [(gogoproto.stdtime) = true ];
+	string error = 7; // typed errors?
+}
+
+message VertexStatus {
+	string ID = 1;
+	string vertex = 2 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
+	string name = 3;
+	int64 current = 4;
+	int64 total = 5;
+	// TODO: add started, completed
+	google.protobuf.Timestamp timestamp = 6 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+	google.protobuf.Timestamp started = 7 [(gogoproto.stdtime) = true ];
+	google.protobuf.Timestamp completed = 8 [(gogoproto.stdtime) = true ];
+}
+
+message VertexLog {
+	string vertex = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
+	google.protobuf.Timestamp timestamp = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+	int64 stream = 3;
+	bytes msg = 4;
+}
+
+message BytesMessage {
+	bytes data = 1;
+}
+
+message ListWorkersRequest {
+	repeated string filter = 1; // containerd style
+}
+
+message ListWorkersResponse {
+	repeated moby.buildkit.v1.types.WorkerRecord record = 1;
+}
diff --git a/vendor/github.com/moby/buildkit/api/services/control/generate.go b/vendor/github.com/moby/buildkit/api/services/control/generate.go
new file mode 100644
index 00000000..1c161155
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/api/services/control/generate.go
@@ -0,0 +1,3 @@
+package moby_buildkit_v1
+
+//go:generate protoc -I=. -I=../../../vendor/ -I=../../../../../../ --gogo_out=plugins=grpc:. control.proto
diff --git a/vendor/github.com/moby/buildkit/api/types/generate.go b/vendor/github.com/moby/buildkit/api/types/generate.go
new file mode 100644
index 00000000..84007df1
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/api/types/generate.go
@@ -0,0 +1,3 @@
+package moby_buildkit_v1_types
+
+//go:generate protoc -I=. -I=../../vendor/ -I=../../../../../ --gogo_out=plugins=grpc:. worker.proto
diff --git a/vendor/github.com/moby/buildkit/api/types/worker.pb.go b/vendor/github.com/moby/buildkit/api/types/worker.pb.go
new file mode 100644
index 00000000..728cccf7
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/api/types/worker.pb.go
@@ -0,0 +1,929 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: worker.proto
+
+package moby_buildkit_v1_types
+
+import (
+	fmt "fmt"
+	_ "github.com/gogo/protobuf/gogoproto"
+	proto "github.com/gogo/protobuf/proto"
+	pb "github.com/moby/buildkit/solver/pb"
+	io "io"
+	math "math"
+	math_bits "math/bits"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type WorkerRecord struct { + ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` + Labels map[string]string `protobuf:"bytes,2,rep,name=Labels,proto3" json:"Labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Platforms []pb.Platform `protobuf:"bytes,3,rep,name=platforms,proto3" json:"platforms"` + GCPolicy []*GCPolicy `protobuf:"bytes,4,rep,name=GCPolicy,proto3" json:"GCPolicy,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WorkerRecord) Reset() { *m = WorkerRecord{} } +func (m *WorkerRecord) String() string { return proto.CompactTextString(m) } +func (*WorkerRecord) ProtoMessage() {} +func (*WorkerRecord) Descriptor() ([]byte, []int) { + return fileDescriptor_e4ff6184b07e587a, []int{0} +} +func (m *WorkerRecord) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WorkerRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_WorkerRecord.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *WorkerRecord) XXX_Merge(src proto.Message) { + xxx_messageInfo_WorkerRecord.Merge(m, src) +} +func (m *WorkerRecord) XXX_Size() int { + return m.Size() +} +func (m *WorkerRecord) XXX_DiscardUnknown() { + xxx_messageInfo_WorkerRecord.DiscardUnknown(m) +} + +var xxx_messageInfo_WorkerRecord proto.InternalMessageInfo + +func (m *WorkerRecord) GetID() string { + if m != nil { + return m.ID + } + return "" +} + +func (m *WorkerRecord) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *WorkerRecord) GetPlatforms() []pb.Platform { + if m != nil { + return m.Platforms + } + return nil +} + +func (m *WorkerRecord) GetGCPolicy() []*GCPolicy { + if m != nil { + return m.GCPolicy + } + return nil +} + +type GCPolicy struct { + All bool `protobuf:"varint,1,opt,name=all,proto3" json:"all,omitempty"` + KeepDuration int64 `protobuf:"varint,2,opt,name=keepDuration,proto3" json:"keepDuration,omitempty"` + KeepBytes int64 `protobuf:"varint,3,opt,name=keepBytes,proto3" json:"keepBytes,omitempty"` + Filters []string `protobuf:"bytes,4,rep,name=filters,proto3" json:"filters,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GCPolicy) Reset() { *m = GCPolicy{} } +func (m *GCPolicy) String() string { return proto.CompactTextString(m) } +func (*GCPolicy) ProtoMessage() {} +func (*GCPolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_e4ff6184b07e587a, []int{1} +} +func (m *GCPolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GCPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GCPolicy.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GCPolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_GCPolicy.Merge(m, src) +} +func (m *GCPolicy) XXX_Size() int { + return m.Size() +} +func (m *GCPolicy) XXX_DiscardUnknown() { + 
xxx_messageInfo_GCPolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_GCPolicy proto.InternalMessageInfo + +func (m *GCPolicy) GetAll() bool { + if m != nil { + return m.All + } + return false +} + +func (m *GCPolicy) GetKeepDuration() int64 { + if m != nil { + return m.KeepDuration + } + return 0 +} + +func (m *GCPolicy) GetKeepBytes() int64 { + if m != nil { + return m.KeepBytes + } + return 0 +} + +func (m *GCPolicy) GetFilters() []string { + if m != nil { + return m.Filters + } + return nil +} + +func init() { + proto.RegisterType((*WorkerRecord)(nil), "moby.buildkit.v1.types.WorkerRecord") + proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.types.WorkerRecord.LabelsEntry") + proto.RegisterType((*GCPolicy)(nil), "moby.buildkit.v1.types.GCPolicy") +} + +func init() { proto.RegisterFile("worker.proto", fileDescriptor_e4ff6184b07e587a) } + +var fileDescriptor_e4ff6184b07e587a = []byte{ + // 355 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0xc1, 0x4e, 0xea, 0x40, + 0x14, 0x86, 0x6f, 0x5b, 0x2e, 0x97, 0x0e, 0xcd, 0x8d, 0x99, 0x18, 0xd3, 0x10, 0x83, 0x84, 0x15, + 0x0b, 0x9d, 0xa2, 0x6e, 0xd4, 0xb8, 0x42, 0x8c, 0x92, 0xb8, 0x20, 0xb3, 0x71, 0xdd, 0x81, 0x01, + 0x9b, 0x0e, 0x9c, 0xc9, 0x74, 0x8a, 0xf6, 0x39, 0x7c, 0x29, 0x96, 0x3e, 0x81, 0x31, 0x3c, 0x89, + 0x99, 0x29, 0x08, 0x26, 0xba, 0x3b, 0xff, 0x9f, 0xff, 0xfb, 0xe7, 0x9c, 0x0c, 0x0a, 0x9e, 0x41, + 0xa5, 0x5c, 0x11, 0xa9, 0x40, 0x03, 0x3e, 0x98, 0x01, 0x2b, 0x08, 0xcb, 0x13, 0x31, 0x4e, 0x13, + 0x4d, 0x16, 0xa7, 0x44, 0x17, 0x92, 0x67, 0x8d, 0x93, 0x69, 0xa2, 0x9f, 0x72, 0x46, 0x46, 0x30, + 0x8b, 0xa6, 0x30, 0x85, 0xc8, 0xc6, 0x59, 0x3e, 0xb1, 0xca, 0x0a, 0x3b, 0x95, 0x35, 0x8d, 0xe3, + 0x9d, 0xb8, 0x69, 0x8c, 0x36, 0x8d, 0x51, 0x06, 0x62, 0xc1, 0x55, 0x24, 0x59, 0x04, 0x32, 0x2b, + 0xd3, 0xed, 0x57, 0x17, 0x05, 0x8f, 0x76, 0x0b, 0xca, 0x47, 0xa0, 0xc6, 0xf8, 0x3f, 0x72, 0x07, + 0xfd, 0xd0, 0x69, 0x39, 0x1d, 0x9f, 0xba, 0x83, 0x3e, 0xbe, 0x47, 0xd5, 0x87, 0x98, 0x71, 0x91, + 0x85, 0x6e, 0xcb, 0xeb, 0xd4, 0xcf, 0xba, 0xe4, 0xe7, 0x35, 0xc9, 0x6e, 0x0b, 0x29, 0x91, 0xdb, + 0xb9, 0x56, 0x05, 0x5d, 0xf3, 0xb8, 0x8b, 0x7c, 0x29, 0x62, 0x3d, 0x01, 0x35, 0xcb, 0x42, 0xcf, + 0x96, 0x05, 0x44, 0x32, 0x32, 0x5c, 0x9b, 0xbd, 0xca, 0xf2, 0xfd, 0xe8, 0x0f, 0xdd, 0x86, 0xf0, + 0x35, 0xaa, 0xdd, 0xdd, 0x0c, 0x41, 0x24, 0xa3, 0x22, 0xac, 0x58, 0xa0, 0xf5, 0xdb, 0xeb, 0x9b, + 0x1c, 0xfd, 0x22, 0x1a, 0x97, 0xa8, 0xbe, 0xb3, 0x06, 0xde, 0x43, 0x5e, 0xca, 0x8b, 0xf5, 0x65, + 0x66, 0xc4, 0xfb, 0xe8, 0xef, 0x22, 0x16, 0x39, 0x0f, 0x5d, 0xeb, 0x95, 0xe2, 0xca, 0xbd, 0x70, + 0xda, 0x2f, 0xdb, 0x87, 0x0d, 0x17, 0x0b, 0x61, 0xb9, 0x1a, 0x35, 0x23, 0x6e, 0xa3, 0x20, 0xe5, + 0x5c, 0xf6, 0x73, 0x15, 0xeb, 0x04, 0xe6, 0x16, 0xf7, 0xe8, 0x37, 0x0f, 0x1f, 0x22, 0xdf, 0xe8, + 0x5e, 0xa1, 0xb9, 0x39, 0xd6, 0x04, 0xb6, 0x06, 0x0e, 0xd1, 0xbf, 0x49, 0x22, 0x34, 0x57, 0x99, + 0xbd, 0xcb, 0xa7, 0x1b, 0xd9, 0x0b, 0x96, 0xab, 0xa6, 0xf3, 0xb6, 0x6a, 0x3a, 0x1f, 0xab, 0xa6, + 0xc3, 0xaa, 0xf6, 0x93, 0xce, 0x3f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xfc, 0x79, 0x52, 0x6a, 0x29, + 0x02, 0x00, 0x00, +} + +func (m *WorkerRecord) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkerRecord) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WorkerRecord) MarshalToSizedBuffer(dAtA 
[]byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.GCPolicy) > 0 { + for iNdEx := len(m.GCPolicy) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.GCPolicy[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintWorker(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.Platforms) > 0 { + for iNdEx := len(m.Platforms) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Platforms[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintWorker(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Labels) > 0 { + for k := range m.Labels { + v := m.Labels[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintWorker(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintWorker(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintWorker(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.ID) > 0 { + i -= len(m.ID) + copy(dAtA[i:], m.ID) + i = encodeVarintWorker(dAtA, i, uint64(len(m.ID))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GCPolicy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GCPolicy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GCPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Filters) > 0 { + for iNdEx := len(m.Filters) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Filters[iNdEx]) + copy(dAtA[i:], m.Filters[iNdEx]) + i = encodeVarintWorker(dAtA, i, uint64(len(m.Filters[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if m.KeepBytes != 0 { + i = encodeVarintWorker(dAtA, i, uint64(m.KeepBytes)) + i-- + dAtA[i] = 0x18 + } + if m.KeepDuration != 0 { + i = encodeVarintWorker(dAtA, i, uint64(m.KeepDuration)) + i-- + dAtA[i] = 0x10 + } + if m.All { + i-- + if m.All { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintWorker(dAtA []byte, offset int, v uint64) int { + offset -= sovWorker(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *WorkerRecord) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovWorker(uint64(l)) + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovWorker(uint64(len(k))) + 1 + len(v) + sovWorker(uint64(len(v))) + n += mapEntrySize + 1 + sovWorker(uint64(mapEntrySize)) + } + } + if len(m.Platforms) > 0 { + for _, e := range m.Platforms { + l = e.Size() + n += 1 + l + sovWorker(uint64(l)) + } + } + if len(m.GCPolicy) > 0 { + for _, e := range m.GCPolicy { + l = e.Size() + n += 1 + l + sovWorker(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *GCPolicy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.All { + n += 2 + } + if m.KeepDuration != 0 
{ + n += 1 + sovWorker(uint64(m.KeepDuration)) + } + if m.KeepBytes != 0 { + n += 1 + sovWorker(uint64(m.KeepBytes)) + } + if len(m.Filters) > 0 { + for _, s := range m.Filters { + l = len(s) + n += 1 + l + sovWorker(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovWorker(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozWorker(x uint64) (n int) { + return sovWorker(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *WorkerRecord) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WorkerRecord: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WorkerRecord: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWorker + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthWorker + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWorker + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWorker + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthWorker + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthWorker + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if 
fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthWorker + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthWorker + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipWorker(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWorker + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Platforms", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWorker + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWorker + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Platforms = append(m.Platforms, pb.Platform{}) + if err := m.Platforms[len(m.Platforms)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GCPolicy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWorker + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWorker + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.GCPolicy = append(m.GCPolicy, &GCPolicy{}) + if err := m.GCPolicy[len(m.GCPolicy)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWorker(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWorker + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWorker + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GCPolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GCPolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GCPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field All", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.All = bool(v != 0) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field KeepDuration", wireType) + } + m.KeepDuration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.KeepDuration |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field KeepBytes", wireType) + } + m.KeepBytes = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.KeepBytes |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWorker + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthWorker + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Filters = append(m.Filters, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWorker(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWorker + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWorker + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipWorker(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowWorker
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowWorker
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowWorker
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthWorker
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupWorker
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthWorker
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthWorker = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowWorker = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupWorker = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/vendor/github.com/moby/buildkit/api/types/worker.proto b/vendor/github.com/moby/buildkit/api/types/worker.proto
new file mode 100644
index 00000000..82dd7ad6
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/api/types/worker.proto
@@ -0,0 +1,24 @@
+syntax = "proto3";
+
+package moby.buildkit.v1.types;
+
+import "github.com/gogo/protobuf/gogoproto/gogo.proto";
+import "github.com/moby/buildkit/solver/pb/ops.proto";
+
+option (gogoproto.sizer_all) = true;
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+
+message WorkerRecord {
+ string ID = 1;
+ map<string, string> Labels = 2;
+ repeated pb.Platform platforms = 3 [(gogoproto.nullable) = false];
+ repeated GCPolicy GCPolicy = 4;
+}
+
+message GCPolicy {
+ bool all = 1;
+ int64 keepDuration = 2;
+ int64 keepBytes = 3;
+ repeated string filters = 4;
+}
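Reviewer note (illustrative sketch, not part of the vendored patch): the hand-rolled sizer and back-to-front marshaler above round-trip cleanly through Unmarshal. The label key and GC values below are hypothetical examples.

package main

import (
	"fmt"

	apitypes "github.com/moby/buildkit/api/types"
)

func main() {
	in := &apitypes.WorkerRecord{
		ID:     "worker-0",
		Labels: map[string]string{"example/executor": "oci"}, // hypothetical label
		GCPolicy: []*apitypes.GCPolicy{
			{All: true, KeepBytes: 1 << 30}, // keep at most 1 GiB
		},
	}

	// Marshal sizes the message first, then fills the buffer from the end,
	// exactly as MarshalToSizedBuffer above does.
	data, err := in.Marshal()
	if err != nil {
		panic(err)
	}

	var out apitypes.WorkerRecord
	if err := out.Unmarshal(data); err != nil {
		panic(err)
	}
	fmt.Println(out.ID, out.Labels, out.GCPolicy[0].KeepBytes)
}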
diff --git a/vendor/github.com/moby/buildkit/cache/blobs/blobs.go b/vendor/github.com/moby/buildkit/cache/blobs/blobs.go
new file mode 100644
index 00000000..55640f64
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/cache/blobs/blobs.go
@@ -0,0 +1,194 @@
+package blobs
+
+import (
+ "context"
+
+ "github.com/containerd/containerd/content"
+ "github.com/containerd/containerd/diff"
+ "github.com/containerd/containerd/leases"
+ "github.com/containerd/containerd/mount"
+ "github.com/moby/buildkit/cache"
+ "github.com/moby/buildkit/util/flightcontrol"
+ "github.com/moby/buildkit/util/winlayers"
+ digest "github.com/opencontainers/go-digest"
+ ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/pkg/errors"
+ "golang.org/x/sync/errgroup"
+)
+
+var g flightcontrol.Group
+
+const containerdUncompressed = "containerd.io/uncompressed"
+
+type DiffPair struct {
+ DiffID digest.Digest
+ Blobsum digest.Digest
+}
+
+type CompareWithParent interface {
+ CompareWithParent(ctx context.Context, ref string, opts ...diff.Opt) (ocispec.Descriptor, error)
+}
+
+var ErrNoBlobs = errors.Errorf("no blobs for snapshot")
+
+// GetDiffPairs returns the DiffID/Blobsum pairs for a given reference and saves them.
+// Caller must hold a lease when calling this function.
+func GetDiffPairs(ctx context.Context, contentStore content.Store, differ diff.Comparer, ref cache.ImmutableRef, createBlobs bool, compression CompressionType) ([]DiffPair, error) {
+ if ref == nil {
+ return nil, nil
+ }
+
+ if _, ok := leases.FromContext(ctx); !ok {
+ return nil, errors.Errorf("missing lease requirement for GetDiffPairs")
+ }
+
+ if err := ref.Finalize(ctx, true); err != nil {
+ return nil, err
+ }
+
+ if isTypeWindows(ref) {
+ ctx = winlayers.UseWindowsLayerMode(ctx)
+ }
+
+ return getDiffPairs(ctx, contentStore, differ, ref, createBlobs, compression)
+}
+
+func getDiffPairs(ctx context.Context, contentStore content.Store, differ diff.Comparer, ref cache.ImmutableRef, createBlobs bool, compression CompressionType) ([]DiffPair, error) {
+ if ref == nil {
+ return nil, nil
+ }
+
+ baseCtx := ctx
+ eg, ctx := errgroup.WithContext(ctx)
+ var diffPairs []DiffPair
+ var currentDescr ocispec.Descriptor
+ parent := ref.Parent()
+ if parent != nil {
+ defer parent.Release(context.TODO())
+ eg.Go(func() error {
+ dp, err := getDiffPairs(ctx, contentStore, differ, parent, createBlobs, compression)
+ if err != nil {
+ return err
+ }
+ diffPairs = dp
+ return nil
+ })
+ }
+ eg.Go(func() error {
+ dp, err := g.Do(ctx, ref.ID(), func(ctx context.Context) (interface{}, error) {
+ refInfo := ref.Info()
+ if refInfo.Blob != "" {
+ return nil, nil
+ } else if !createBlobs {
+ return nil, errors.WithStack(ErrNoBlobs)
+ }
+
+ var mediaType string
+ var descr ocispec.Descriptor
+ var err error
+
+ switch compression {
+ case Uncompressed:
+ mediaType = ocispec.MediaTypeImageLayer
+ case Gzip:
+ mediaType = ocispec.MediaTypeImageLayerGzip
+ default:
+ return nil, errors.Errorf("unknown layer compression type")
+ }
+
+ if pc, ok := differ.(CompareWithParent); ok {
+ descr, err = pc.CompareWithParent(ctx, ref.ID(), diff.WithMediaType(mediaType))
+ if err != nil {
+ return nil, err
+ }
+ }
+ if descr.Digest == "" {
+ // reference needs to be committed
+ parent := ref.Parent()
+ var lower []mount.Mount
+ var release func() error
+ if parent != nil {
+ defer parent.Release(context.TODO())
+ m, err := parent.Mount(ctx, true)
+ if err != nil {
+ return nil, err
+ }
+ lower, release, err = m.Mount()
+ if err != nil {
+ return nil, err
+ }
+ if release != nil {
+ defer release()
+ }
+ }
+ m, err := ref.Mount(ctx, true)
+ if err != nil {
+ return nil, err
+ }
+ upper, release, err := m.Mount()
+ if err != nil {
+ return nil, err
+ }
+ if release != nil {
+ defer release()
+ }
+ descr, err = differ.Compare(ctx, lower, upper,
+ diff.WithMediaType(mediaType),
+ diff.WithReference(ref.ID()),
+ )
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if descr.Annotations == nil {
+ descr.Annotations = map[string]string{}
+ }
+
+ info, err := contentStore.Info(ctx, descr.Digest)
+ if err != nil {
+ return nil, err
+ }
+
+ if diffID, ok := info.Labels[containerdUncompressed]; ok {
+ descr.Annotations[containerdUncompressed] = diffID
+ } else if compression == Uncompressed {
+ descr.Annotations[containerdUncompressed] = descr.Digest.String()
+ } else {
+ return nil, errors.Errorf("unknown layer compression type")
+ }
+ return descr, nil
+
+ })
+ if err != nil {
+ return err
+ }
+
+ if dp != nil {
+ currentDescr = dp.(ocispec.Descriptor)
+ }
+ return nil
+ })
+ err := eg.Wait()
+ if err != nil {
+ return nil, err
+ }
+ if currentDescr.Digest != "" {
+ if err := ref.SetBlob(baseCtx, currentDescr); err != nil {
+ return nil, err
+ }
+ }
+ refInfo := ref.Info()
+ return append(diffPairs, DiffPair{DiffID: refInfo.DiffID, Blobsum: refInfo.Blob}), nil
+}
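Reviewer note (illustrative sketch, not part of the vendored patch): GetDiffPairs refuses to run without a lease on the context. Satisfying that requirement with the containerd leases package could look like the function below; the leases.Manager wiring, the lease options, and the cs/differ/ref parameters are all assumed to come from the caller's worker setup.

package example

import (
	"context"

	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/diff"
	"github.com/containerd/containerd/leases"
	"github.com/moby/buildkit/cache"
	"github.com/moby/buildkit/cache/blobs"
)

// diffPairsUnderLease shows the lease dance required by GetDiffPairs.
func diffPairsUnderLease(ctx context.Context, lm leases.Manager, cs content.Store, differ diff.Comparer, ref cache.ImmutableRef) ([]blobs.DiffPair, error) {
	lease, err := lm.Create(ctx, leases.WithRandomID())
	if err != nil {
		return nil, err
	}
	defer lm.Delete(context.TODO(), lease) // drop the lease once the blobs are referenced elsewhere

	// GetDiffPairs checks leases.FromContext(ctx) before doing any work.
	ctx = leases.WithLease(ctx, lease.ID)
	return blobs.GetDiffPairs(ctx, cs, differ, ref, true, blobs.DefaultCompression)
}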
+
+func isTypeWindows(ref cache.ImmutableRef) bool {
+ if cache.GetLayerType(ref) == "windows" {
+ return true
+ }
+ if parent := ref.Parent(); parent != nil {
+ defer parent.Release(context.TODO())
+ return isTypeWindows(parent)
+ }
+ return false
+}
diff --git a/vendor/github.com/moby/buildkit/cache/blobs/compression.go b/vendor/github.com/moby/buildkit/cache/blobs/compression.go
new file mode 100644
index 00000000..6bb7afea
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/cache/blobs/compression.go
@@ -0,0 +1,122 @@
+package blobs
+
+import (
+ "bytes"
+ "context"
+ "io"
+
+ "github.com/containerd/containerd/content"
+ "github.com/containerd/containerd/images"
+ "github.com/moby/buildkit/cache"
+ digest "github.com/opencontainers/go-digest"
+ ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/pkg/errors"
+)
+
+// CompressionType represents compression type for blob data.
+type CompressionType int
+
+const (
+ // Uncompressed indicates no compression.
+ Uncompressed CompressionType = iota
+
+ // Gzip is used for blob data.
+ Gzip
+
+ // UnknownCompression means not supported yet.
+ UnknownCompression CompressionType = -1
+)
+
+var DefaultCompression = Gzip
+
+func (ct CompressionType) String() string {
+ switch ct {
+ case Uncompressed:
+ return "uncompressed"
+ case Gzip:
+ return "gzip"
+ default:
+ return "unknown"
+ }
+}
+
+// DetectLayerMediaType returns the media type of an existing blob, detected from its data.
+func DetectLayerMediaType(ctx context.Context, cs content.Store, id digest.Digest, oci bool) (string, error) {
+ ra, err := cs.ReaderAt(ctx, ocispec.Descriptor{Digest: id})
+ if err != nil {
+ return "", err
+ }
+ defer ra.Close()
+
+ ct, err := detectCompressionType(content.NewReader(ra))
+ if err != nil {
+ return "", err
+ }
+
+ switch ct {
+ case Uncompressed:
+ if oci {
+ return ocispec.MediaTypeImageLayer, nil
+ } else {
+ return images.MediaTypeDockerSchema2Layer, nil
+ }
+ case Gzip:
+ if oci {
+ return ocispec.MediaTypeImageLayerGzip, nil
+ } else {
+ return images.MediaTypeDockerSchema2LayerGzip, nil
+ }
+ default:
+ return "", errors.Errorf("failed to detect layer %v compression type", id)
+ }
+}
+
+// detectCompressionType detects compression type from real blob data.
+func detectCompressionType(cr io.Reader) (CompressionType, error) {
+ var buf [10]byte
+ var n int
+ var err error
+
+ if n, err = cr.Read(buf[:]); err != nil && err != io.EOF {
+ // Note: we'll ignore any io.EOF error because there are some
+ // odd cases where the layer.tar file will be empty (zero bytes)
+ // and we'll just treat it as a non-compressed stream and that
+ // means just create an empty layer.
+ //
+ // See issue docker/docker#18170
+ return UnknownCompression, err
+ }
+
+ for c, m := range map[CompressionType][]byte{
+ Gzip: {0x1F, 0x8B, 0x08},
+ } {
+ if n < len(m) {
+ continue
+ }
+ if bytes.Equal(m, buf[:len(m)]) {
+ return c, nil
+ }
+ }
+ return Uncompressed, nil
+}
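Reviewer note (illustrative sketch, not part of the vendored patch): to make the magic-byte check above concrete, a hypothetical in-package test could exercise it like this.

package blobs

import (
	"bytes"
	"testing"
)

func TestDetectCompressionType(t *testing.T) {
	// Gzip streams begin with 0x1F 0x8B followed by the deflate method
	// byte 0x08, the exact prefix held by the map in detectCompressionType.
	gz := []byte{0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00}
	if ct, err := detectCompressionType(bytes.NewReader(gz)); err != nil || ct != Gzip {
		t.Fatalf("expected Gzip, got %v (err=%v)", ct, err)
	}

	// Anything without a known prefix (plain tar data included) falls
	// through to Uncompressed.
	if ct, err := detectCompressionType(bytes.NewReader([]byte("plain tar data"))); err != nil || ct != Uncompressed {
		t.Fatalf("expected Uncompressed, got %v (err=%v)", ct, err)
	}
}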
+
+// GetMediaTypeForLayers retrieves the media type of each layer from ref information.
+func GetMediaTypeForLayers(diffPairs []DiffPair, ref cache.ImmutableRef) []string {
+ tref := ref
+
+ layerTypes := make([]string, 0, len(diffPairs))
+ for _, dp := range diffPairs {
+ if tref == nil {
+ return nil
+ }
+
+ info := tref.Info()
+ if !(info.DiffID == dp.DiffID && info.Blob == dp.Blobsum) {
+ return nil
+ }
+
+ layerTypes = append(layerTypes, info.MediaType)
+ tref = tref.Parent()
+ }
+ return layerTypes
+}
diff --git a/vendor/github.com/moby/buildkit/cache/contenthash/checksum.go b/vendor/github.com/moby/buildkit/cache/contenthash/checksum.go
new file mode 100644
index 00000000..5d42d041
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/cache/contenthash/checksum.go
@@ -0,0 +1,909 @@
+package contenthash
+
+import (
+ "bytes"
+ "context"
+ "crypto/sha256"
+ "io"
+ "os"
+ "path"
+ "path/filepath"
+ "sync"
+
+ "github.com/docker/docker/pkg/idtools"
+ "github.com/docker/docker/pkg/locker"
+ iradix "github.com/hashicorp/go-immutable-radix"
+ "github.com/hashicorp/golang-lru/simplelru"
+ "github.com/moby/buildkit/cache"
+ "github.com/moby/buildkit/cache/metadata"
+ "github.com/moby/buildkit/snapshot"
+ digest "github.com/opencontainers/go-digest"
+ "github.com/pkg/errors"
+ "github.com/tonistiigi/fsutil"
+ fstypes "github.com/tonistiigi/fsutil/types"
+)
+
+var errNotFound = errors.Errorf("not found")
+
+var defaultManager *cacheManager
+var defaultManagerOnce sync.Once
+
+const keyContentHash = "buildkit.contenthash.v0"
+
+func getDefaultManager() *cacheManager {
+ defaultManagerOnce.Do(func() {
+ lru, _ := simplelru.NewLRU(20, nil) // error is impossible on positive size
+ defaultManager = &cacheManager{lru: lru, locker: locker.New()}
+ })
+ return defaultManager
+}
+
+// Layout in the radix tree: every path is stored under its cleaned absolute
+// unix path. Directories have two records: one holds the digest of the
+// directory header, the other the recursive digest of the directory contents.
+// "/dir/" is the record for the header, "/dir" for the contents. For the
+// root node, "" (the empty string) is the key for the contents and "/" for
+// the root header.
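Reviewer note (self-contained sketch, not part of the vendored patch): to visualize that key layout, convertPathToKey is copied from the bottom of this file so the snippet runs on its own.

package main

import (
	"bytes"
	"fmt"
)

// Copy of the helper defined later in checksum.go: "/" becomes a 0x00
// separator so radix-tree prefixes line up with the path hierarchy.
func convertPathToKey(p []byte) []byte {
	return bytes.Replace(p, []byte("/"), []byte{0}, -1)
}

func main() {
	fmt.Printf("%q\n", convertPathToKey([]byte("/dir")))            // "\x00dir": directory contents record
	fmt.Printf("%q\n", append(convertPathToKey([]byte("/dir")), 0)) // "\x00dir\x00": directory header record
	fmt.Printf("%q\n", convertPathToKey([]byte("/dir/file")))       // "\x00dir\x00file"
	fmt.Printf("%q\n", convertPathToKey([]byte("")))                // "": root contents record
	fmt.Printf("%q\n", []byte{0})                                   // "\x00": root header record
}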
+
+func Checksum(ctx context.Context, ref cache.ImmutableRef, path string, followLinks bool) (digest.Digest, error) {
+ return getDefaultManager().Checksum(ctx, ref, path, followLinks)
+}
+
+func ChecksumWildcard(ctx context.Context, ref cache.ImmutableRef, path string, followLinks bool) (digest.Digest, error) {
+ return getDefaultManager().ChecksumWildcard(ctx, ref, path, followLinks)
+}
+
+func GetCacheContext(ctx context.Context, md *metadata.StorageItem, idmap *idtools.IdentityMapping) (CacheContext, error) {
+ return getDefaultManager().GetCacheContext(ctx, md, idmap)
+}
+
+func SetCacheContext(ctx context.Context, md *metadata.StorageItem, cc CacheContext) error {
+ return getDefaultManager().SetCacheContext(ctx, md, cc)
+}
+
+func ClearCacheContext(md *metadata.StorageItem) {
+ getDefaultManager().clearCacheContext(md.ID())
+}
+
+type CacheContext interface {
+ Checksum(ctx context.Context, ref cache.Mountable, p string, followLinks bool) (digest.Digest, error)
+ ChecksumWildcard(ctx context.Context, ref cache.Mountable, p string, followLinks bool) (digest.Digest, error)
+ HandleChange(kind fsutil.ChangeKind, p string, fi os.FileInfo, err error) error
+}
+
+type Hashed interface {
+ Digest() digest.Digest
+}
+
+type Wildcard struct {
+ Path string
+ Record *CacheRecord
+}
+
+type cacheManager struct {
+ locker *locker.Locker
+ lru *simplelru.LRU
+ lruMu sync.Mutex
+}
+
+func (cm *cacheManager) Checksum(ctx context.Context, ref cache.ImmutableRef, p string, followLinks bool) (digest.Digest, error) {
+ cc, err := cm.GetCacheContext(ctx, ensureOriginMetadata(ref.Metadata()), ref.IdentityMapping())
+ if err != nil {
+ return "", err
+ }
+ return cc.Checksum(ctx, ref, p, followLinks)
+}
+
+func (cm *cacheManager) ChecksumWildcard(ctx context.Context, ref cache.ImmutableRef, p string, followLinks bool) (digest.Digest, error) {
+ cc, err := cm.GetCacheContext(ctx, ensureOriginMetadata(ref.Metadata()), ref.IdentityMapping())
+ if err != nil {
+ return "", err
+ }
+ return cc.ChecksumWildcard(ctx, ref, p, followLinks)
+}
+
+func (cm *cacheManager) GetCacheContext(ctx context.Context, md *metadata.StorageItem, idmap *idtools.IdentityMapping) (CacheContext, error) {
+ cm.locker.Lock(md.ID())
+ cm.lruMu.Lock()
+ v, ok := cm.lru.Get(md.ID())
+ cm.lruMu.Unlock()
+ if ok {
+ cm.locker.Unlock(md.ID())
+ v.(*cacheContext).linkMap = map[string][][]byte{}
+ return v.(*cacheContext), nil
+ }
+ cc, err := newCacheContext(md, idmap)
+ if err != nil {
+ cm.locker.Unlock(md.ID())
+ return nil, err
+ }
+ cm.lruMu.Lock()
+ cm.lru.Add(md.ID(), cc)
+ cm.lruMu.Unlock()
+ cm.locker.Unlock(md.ID())
+ return cc, nil
+}
+
+func (cm *cacheManager) SetCacheContext(ctx context.Context, md *metadata.StorageItem, cci CacheContext) error {
+ cc, ok := cci.(*cacheContext)
+ if !ok {
+ return errors.Errorf("invalid cachecontext: %T", cc)
+ }
+ if md.ID() != cc.md.ID() {
+ cc = &cacheContext{
+ md: md,
+ tree: cci.(*cacheContext).tree,
+ dirtyMap: map[string]struct{}{},
+ linkMap: map[string][][]byte{},
+ }
+ } else {
+ if err := cc.save(); err != nil {
+ return err
+ }
+ }
+ cm.lruMu.Lock()
+ cm.lru.Add(md.ID(), cc)
+ cm.lruMu.Unlock()
+ return nil
+}
+
+func (cm *cacheManager) clearCacheContext(id string) {
+ cm.lruMu.Lock()
+ cm.lru.Remove(id)
+ cm.lruMu.Unlock()
+}
+
+type cacheContext struct {
+ mu sync.RWMutex
+ md *metadata.StorageItem
+ tree *iradix.Tree
+ dirty bool // needs to be persisted to disk
+
+ // used in HandleChange
+ txn 
*iradix.Txn + node *iradix.Node + dirtyMap map[string]struct{} + linkMap map[string][][]byte + idmap *idtools.IdentityMapping +} + +type mount struct { + mountable cache.Mountable + mountPath string + unmount func() error +} + +func (m *mount) mount(ctx context.Context) (string, error) { + if m.mountPath != "" { + return m.mountPath, nil + } + mounts, err := m.mountable.Mount(ctx, true) + if err != nil { + return "", err + } + + lm := snapshot.LocalMounter(mounts) + + mp, err := lm.Mount() + if err != nil { + return "", err + } + + m.mountPath = mp + m.unmount = lm.Unmount + return mp, nil +} + +func (m *mount) clean() error { + if m.mountPath != "" { + if err := m.unmount(); err != nil { + return err + } + m.mountPath = "" + } + return nil +} + +func newCacheContext(md *metadata.StorageItem, idmap *idtools.IdentityMapping) (*cacheContext, error) { + cc := &cacheContext{ + md: md, + tree: iradix.New(), + dirtyMap: map[string]struct{}{}, + linkMap: map[string][][]byte{}, + idmap: idmap, + } + if err := cc.load(); err != nil { + return nil, err + } + return cc, nil +} + +func (cc *cacheContext) load() error { + dt, err := cc.md.GetExternal(keyContentHash) + if err != nil { + return nil + } + + var l CacheRecords + if err := l.Unmarshal(dt); err != nil { + return err + } + + txn := cc.tree.Txn() + for _, p := range l.Paths { + txn.Insert([]byte(p.Path), p.Record) + } + cc.tree = txn.Commit() + return nil +} + +func (cc *cacheContext) save() error { + cc.mu.Lock() + defer cc.mu.Unlock() + + if cc.txn != nil { + cc.commitActiveTransaction() + } + + var l CacheRecords + node := cc.tree.Root() + node.Walk(func(k []byte, v interface{}) bool { + l.Paths = append(l.Paths, &CacheRecordWithPath{ + Path: string(k), + Record: v.(*CacheRecord), + }) + return false + }) + + dt, err := l.Marshal() + if err != nil { + return err + } + + return cc.md.SetExternal(keyContentHash, dt) +} + +// HandleChange notifies the source about a modification operation +func (cc *cacheContext) HandleChange(kind fsutil.ChangeKind, p string, fi os.FileInfo, err error) (retErr error) { + p = path.Join("/", filepath.ToSlash(p)) + if p == "/" { + p = "" + } + k := convertPathToKey([]byte(p)) + + deleteDir := func(cr *CacheRecord) { + if cr.Type == CacheRecordTypeDir { + cc.node.WalkPrefix(append(k, 0), func(k []byte, v interface{}) bool { + cc.txn.Delete(k) + return false + }) + } + } + + cc.mu.Lock() + defer cc.mu.Unlock() + if cc.txn == nil { + cc.txn = cc.tree.Txn() + cc.node = cc.tree.Root() + + // root is not called by HandleChange. 
need to fake it
+ if _, ok := cc.node.Get([]byte{0}); !ok {
+ cc.txn.Insert([]byte{0}, &CacheRecord{
+ Type: CacheRecordTypeDirHeader,
+ Digest: digest.FromBytes(nil),
+ })
+ cc.txn.Insert([]byte(""), &CacheRecord{
+ Type: CacheRecordTypeDir,
+ })
+ }
+ }
+
+ if kind == fsutil.ChangeKindDelete {
+ v, ok := cc.txn.Delete(k)
+ if ok {
+ deleteDir(v.(*CacheRecord))
+ }
+ d := path.Dir(p)
+ if d == "/" {
+ d = ""
+ }
+ cc.dirtyMap[d] = struct{}{}
+ return
+ }
+
+ stat, ok := fi.Sys().(*fstypes.Stat)
+ if !ok {
+ return errors.Errorf("%s invalid change without stat information", p)
+ }
+
+ h, ok := fi.(Hashed)
+ if !ok {
+ return errors.Errorf("invalid fileinfo: %s", p)
+ }
+
+ v, ok := cc.node.Get(k)
+ if ok {
+ deleteDir(v.(*CacheRecord))
+ }
+
+ cr := &CacheRecord{
+ Type: CacheRecordTypeFile,
+ }
+ if fi.Mode()&os.ModeSymlink != 0 {
+ cr.Type = CacheRecordTypeSymlink
+ cr.Linkname = filepath.ToSlash(stat.Linkname)
+ }
+ if fi.IsDir() {
+ cr.Type = CacheRecordTypeDirHeader
+ cr2 := &CacheRecord{
+ Type: CacheRecordTypeDir,
+ }
+ cc.txn.Insert(k, cr2)
+ k = append(k, 0)
+ p += "/"
+ }
+ cr.Digest = h.Digest()
+
+ // if we receive a hardlink just use the digest of the source
+ // note that the source may be called later because data writing is async
+ if fi.Mode()&os.ModeSymlink == 0 && stat.Linkname != "" {
+ ln := path.Join("/", filepath.ToSlash(stat.Linkname))
+ v, ok := cc.txn.Get(convertPathToKey([]byte(ln)))
+ if ok {
+ cp := *v.(*CacheRecord)
+ cr = &cp
+ }
+ cc.linkMap[ln] = append(cc.linkMap[ln], k)
+ }
+
+ cc.txn.Insert(k, cr)
+ if !fi.IsDir() {
+ if links, ok := cc.linkMap[p]; ok {
+ for _, l := range links {
+ pp := convertKeyToPath(l)
+ cc.txn.Insert(l, cr)
+ d := path.Dir(string(pp))
+ if d == "/" {
+ d = ""
+ }
+ cc.dirtyMap[d] = struct{}{}
+ }
+ delete(cc.linkMap, p)
+ }
+ }
+
+ d := path.Dir(p)
+ if d == "/" {
+ d = ""
+ }
+ cc.dirtyMap[d] = struct{}{}
+
+ return nil
+}
+
+func (cc *cacheContext) ChecksumWildcard(ctx context.Context, mountable cache.Mountable, p string, followLinks bool) (digest.Digest, error) {
+ m := &mount{mountable: mountable}
+ defer m.clean()
+
+ wildcards, err := cc.wildcards(ctx, m, p)
+ if err != nil {
+ return "", err
+ }
+
+ if followLinks {
+ for i, w := range wildcards {
+ if w.Record.Type == CacheRecordTypeSymlink {
+ dgst, err := cc.checksumFollow(ctx, m, w.Path, followLinks)
+ if err != nil {
+ return "", err
+ }
+ wildcards[i].Record = &CacheRecord{Digest: dgst}
+ }
+ }
+ }
+ if len(wildcards) == 0 {
+ return digest.FromBytes([]byte{}), nil
+ }
+
+ if len(wildcards) > 1 {
+ digester := digest.Canonical.Digester()
+ for i, w := range wildcards {
+ if i != 0 {
+ digester.Hash().Write([]byte{0})
+ }
+ digester.Hash().Write([]byte(w.Record.Digest))
+ }
+ return digester.Digest(), nil
+ } else {
+ return wildcards[0].Record.Digest, nil
+ }
+}
+
+func (cc *cacheContext) Checksum(ctx context.Context, mountable cache.Mountable, p string, followLinks bool) (digest.Digest, error) {
+ m := &mount{mountable: mountable}
+ defer m.clean()
+
+ return cc.checksumFollow(ctx, m, p, followLinks)
+}
+
+func (cc *cacheContext) checksumFollow(ctx context.Context, m *mount, p string, follow bool) (digest.Digest, error) {
+ const maxSymlinkLimit = 255
+ i := 0
+ for {
+ if i > maxSymlinkLimit {
+ return "", errors.Errorf("too many symlinks: %s", p)
+ }
+ cr, err := cc.checksumNoFollow(ctx, m, p)
+ if err != nil {
+ return "", err
+ }
+ if cr.Type == CacheRecordTypeSymlink && follow {
+ link := cr.Linkname
+ if !path.IsAbs(cr.Linkname) {
+ link = 
path.Join(path.Dir(p), link) + } + i++ + p = link + } else { + return cr.Digest, nil + } + } +} + +func (cc *cacheContext) wildcards(ctx context.Context, m *mount, p string) ([]*Wildcard, error) { + cc.mu.Lock() + defer cc.mu.Unlock() + + if cc.txn != nil { + cc.commitActiveTransaction() + } + + root := cc.tree.Root() + scan, err := cc.needsScan(root, "") + if err != nil { + return nil, err + } + if scan { + if err := cc.scanPath(ctx, m, ""); err != nil { + return nil, err + } + } + + defer func() { + if cc.dirty { + go cc.save() + cc.dirty = false + } + }() + + p = path.Join("/", filepath.ToSlash(p)) + if p == "/" { + p = "" + } + + wildcards := make([]*Wildcard, 0, 2) + + txn := cc.tree.Txn() + root = txn.Root() + var updated bool + + iter := root.Seek([]byte{}) + for { + k, _, ok := iter.Next() + if !ok { + break + } + if len(k) > 0 && k[len(k)-1] == byte(0) { + continue + } + fn := convertKeyToPath(k) + b, err := path.Match(p, string(fn)) + if err != nil { + return nil, err + } + if !b { + continue + } + + cr, upt, err := cc.checksum(ctx, root, txn, m, k, false) + if err != nil { + return nil, err + } + if upt { + updated = true + } + + wildcards = append(wildcards, &Wildcard{Path: string(fn), Record: cr}) + + if cr.Type == CacheRecordTypeDir { + iter = root.Seek(append(k, 0, 0xff)) + } + } + + cc.tree = txn.Commit() + cc.dirty = updated + + return wildcards, nil +} + +func (cc *cacheContext) checksumNoFollow(ctx context.Context, m *mount, p string) (*CacheRecord, error) { + p = path.Join("/", filepath.ToSlash(p)) + if p == "/" { + p = "" + } + + cc.mu.RLock() + if cc.txn == nil { + root := cc.tree.Root() + cc.mu.RUnlock() + v, ok := root.Get(convertPathToKey([]byte(p))) + if ok { + cr := v.(*CacheRecord) + if cr.Digest != "" { + return cr, nil + } + } + } else { + cc.mu.RUnlock() + } + + cc.mu.Lock() + defer cc.mu.Unlock() + + if cc.txn != nil { + cc.commitActiveTransaction() + } + + defer func() { + if cc.dirty { + go cc.save() + cc.dirty = false + } + }() + + return cc.lazyChecksum(ctx, m, p) +} + +func (cc *cacheContext) commitActiveTransaction() { + for d := range cc.dirtyMap { + addParentToMap(d, cc.dirtyMap) + } + for d := range cc.dirtyMap { + k := convertPathToKey([]byte(d)) + if _, ok := cc.txn.Get(k); ok { + cc.txn.Insert(k, &CacheRecord{Type: CacheRecordTypeDir}) + } + } + cc.tree = cc.txn.Commit() + cc.node = nil + cc.dirtyMap = map[string]struct{}{} + cc.txn = nil +} + +func (cc *cacheContext) lazyChecksum(ctx context.Context, m *mount, p string) (*CacheRecord, error) { + root := cc.tree.Root() + scan, err := cc.needsScan(root, p) + if err != nil { + return nil, err + } + if scan { + if err := cc.scanPath(ctx, m, p); err != nil { + return nil, err + } + } + k := convertPathToKey([]byte(p)) + txn := cc.tree.Txn() + root = txn.Root() + cr, updated, err := cc.checksum(ctx, root, txn, m, k, true) + if err != nil { + return nil, err + } + cc.tree = txn.Commit() + cc.dirty = updated + return cr, err +} + +func (cc *cacheContext) checksum(ctx context.Context, root *iradix.Node, txn *iradix.Txn, m *mount, k []byte, follow bool) (*CacheRecord, bool, error) { + origk := k + k, cr, err := getFollowLinks(root, k, follow) + if err != nil { + return nil, false, err + } + if cr == nil { + return nil, false, errors.Wrapf(errNotFound, "%q not found", convertKeyToPath(origk)) + } + if cr.Digest != "" { + return cr, false, nil + } + var dgst digest.Digest + + switch cr.Type { + case CacheRecordTypeDir: + h := sha256.New() + next := append(k, 0) + iter := root.Seek(next) + subk := next + ok 
:= true + for { + if !ok || !bytes.HasPrefix(subk, next) { + break + } + h.Write(bytes.TrimPrefix(subk, k)) + + subcr, _, err := cc.checksum(ctx, root, txn, m, subk, true) + if err != nil { + return nil, false, err + } + + h.Write([]byte(subcr.Digest)) + + if subcr.Type == CacheRecordTypeDir { // skip subfiles + next := append(subk, 0, 0xff) + iter = root.Seek(next) + } + subk, _, ok = iter.Next() + } + dgst = digest.NewDigest(digest.SHA256, h) + + default: + p := string(convertKeyToPath(bytes.TrimSuffix(k, []byte{0}))) + + target, err := m.mount(ctx) + if err != nil { + return nil, false, err + } + + // no FollowSymlinkInScope because invalid paths should not be inserted + fp := filepath.Join(target, filepath.FromSlash(p)) + + fi, err := os.Lstat(fp) + if err != nil { + return nil, false, err + } + + dgst, err = prepareDigest(fp, p, fi) + if err != nil { + return nil, false, err + } + } + + cr2 := &CacheRecord{ + Digest: dgst, + Type: cr.Type, + Linkname: cr.Linkname, + } + + txn.Insert(k, cr2) + + return cr2, true, nil +} + +// needsScan returns false if path is in the tree or a parent path is in tree +// and subpath is missing +func (cc *cacheContext) needsScan(root *iradix.Node, p string) (bool, error) { + var linksWalked int + return cc.needsScanFollow(root, p, &linksWalked) +} + +func (cc *cacheContext) needsScanFollow(root *iradix.Node, p string, linksWalked *int) (bool, error) { + if p == "/" { + p = "" + } + if v, ok := root.Get(convertPathToKey([]byte(p))); !ok { + if p == "" { + return true, nil + } + return cc.needsScanFollow(root, path.Clean(path.Dir(p)), linksWalked) + } else { + cr := v.(*CacheRecord) + if cr.Type == CacheRecordTypeSymlink { + if *linksWalked > 255 { + return false, errTooManyLinks + } + *linksWalked++ + link := path.Clean(cr.Linkname) + if !path.IsAbs(cr.Linkname) { + link = path.Join("/", path.Dir(p), link) + } + return cc.needsScanFollow(root, link, linksWalked) + } + } + return false, nil +} + +func (cc *cacheContext) scanPath(ctx context.Context, m *mount, p string) (retErr error) { + p = path.Join("/", p) + d, _ := path.Split(p) + + mp, err := m.mount(ctx) + if err != nil { + return err + } + + n := cc.tree.Root() + txn := cc.tree.Txn() + + parentPath, err := rootPath(mp, filepath.FromSlash(d), func(p, link string) error { + cr := &CacheRecord{ + Type: CacheRecordTypeSymlink, + Linkname: filepath.ToSlash(link), + } + k := []byte(filepath.Join("/", filepath.ToSlash(p))) + k = convertPathToKey(k) + txn.Insert(k, cr) + return nil + }) + if err != nil { + return err + } + + err = filepath.Walk(parentPath, func(path string, fi os.FileInfo, err error) error { + if err != nil { + return errors.Wrapf(err, "failed to walk %s", path) + } + rel, err := filepath.Rel(mp, path) + if err != nil { + return err + } + k := []byte(filepath.Join("/", filepath.ToSlash(rel))) + if string(k) == "/" { + k = []byte{} + } + k = convertPathToKey(k) + if _, ok := n.Get(k); !ok { + cr := &CacheRecord{ + Type: CacheRecordTypeFile, + } + if fi.Mode()&os.ModeSymlink != 0 { + cr.Type = CacheRecordTypeSymlink + link, err := os.Readlink(path) + if err != nil { + return err + } + cr.Linkname = filepath.ToSlash(link) + } + if fi.IsDir() { + cr.Type = CacheRecordTypeDirHeader + cr2 := &CacheRecord{ + Type: CacheRecordTypeDir, + } + txn.Insert(k, cr2) + k = append(k, 0) + } + txn.Insert(k, cr) + } + return nil + }) + if err != nil { + return err + } + + cc.tree = txn.Commit() + return nil +} + +func getFollowLinks(root *iradix.Node, k []byte, follow bool) ([]byte, *CacheRecord, error) { + 
var linksWalked int + return getFollowLinksWalk(root, k, follow, &linksWalked) +} + +func getFollowLinksWalk(root *iradix.Node, k []byte, follow bool, linksWalked *int) ([]byte, *CacheRecord, error) { + v, ok := root.Get(k) + if ok { + return k, v.(*CacheRecord), nil + } + if !follow || len(k) == 0 { + return k, nil, nil + } + + dir, file := splitKey(k) + + k, parent, err := getFollowLinksWalk(root, dir, follow, linksWalked) + if err != nil { + return nil, nil, err + } + if parent != nil { + if parent.Type == CacheRecordTypeSymlink { + *linksWalked++ + if *linksWalked > 255 { + return nil, nil, errors.Errorf("too many links") + } + dirPath := path.Clean(string(convertKeyToPath(dir))) + if dirPath == "." || dirPath == "/" { + dirPath = "" + } + link := path.Clean(parent.Linkname) + if !path.IsAbs(link) { + link = path.Join("/", path.Join(path.Dir(dirPath), link)) + } + return getFollowLinksWalk(root, append(convertPathToKey([]byte(link)), file...), follow, linksWalked) + } + } + k = append(k, file...) + v, ok = root.Get(k) + if ok { + return k, v.(*CacheRecord), nil + } + return k, nil, nil +} + +func prepareDigest(fp, p string, fi os.FileInfo) (digest.Digest, error) { + h, err := NewFileHash(fp, fi) + if err != nil { + return "", errors.Wrapf(err, "failed to create hash for %s", p) + } + if fi.Mode().IsRegular() && fi.Size() > 0 { + // TODO: would be nice to put the contents to separate hash first + // so it can be cached for hardlinks + f, err := os.Open(fp) + if err != nil { + return "", errors.Wrapf(err, "failed to open %s", p) + } + defer f.Close() + if _, err := poolsCopy(h, f); err != nil { + return "", errors.Wrapf(err, "failed to copy file data for %s", p) + } + } + return digest.NewDigest(digest.SHA256, h), nil +} + +func addParentToMap(d string, m map[string]struct{}) { + if d == "" { + return + } + d = path.Dir(d) + if d == "/" { + d = "" + } + m[d] = struct{}{} + addParentToMap(d, m) +} + +func ensureOriginMetadata(md *metadata.StorageItem) *metadata.StorageItem { + v := md.Get("cache.equalMutable") // TODO: const + if v == nil { + return md + } + var mutable string + if err := v.Unmarshal(&mutable); err != nil { + return md + } + si, ok := md.Storage().Get(mutable) + if ok { + return si + } + return md +} + +var pool32K = sync.Pool{ + New: func() interface{} { return make([]byte, 32*1024) }, // 32K +} + +func poolsCopy(dst io.Writer, src io.Reader) (written int64, err error) { + buf := pool32K.Get().([]byte) + written, err = io.CopyBuffer(dst, src, buf) + pool32K.Put(buf) + return +} + +func convertPathToKey(p []byte) []byte { + return bytes.Replace([]byte(p), []byte("/"), []byte{0}, -1) +} + +func convertKeyToPath(p []byte) []byte { + return bytes.Replace([]byte(p), []byte{0}, []byte("/"), -1) +} + +func splitKey(k []byte) ([]byte, []byte) { + foundBytes := false + i := len(k) - 1 + for { + if i <= 0 || foundBytes && k[i] == 0 { + break + } + if k[i] != 0 { + foundBytes = true + } + i-- + } + return append([]byte{}, k[:i]...), k[i:] +} diff --git a/vendor/github.com/moby/buildkit/cache/contenthash/checksum.pb.go b/vendor/github.com/moby/buildkit/cache/contenthash/checksum.pb.go new file mode 100644 index 00000000..9ac2de45 --- /dev/null +++ b/vendor/github.com/moby/buildkit/cache/contenthash/checksum.pb.go @@ -0,0 +1,864 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: checksum.proto + +package contenthash + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + github_com_opencontainers_go_digest "github.com/opencontainers/go-digest" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type CacheRecordType int32 + +const ( + CacheRecordTypeFile CacheRecordType = 0 + CacheRecordTypeDir CacheRecordType = 1 + CacheRecordTypeDirHeader CacheRecordType = 2 + CacheRecordTypeSymlink CacheRecordType = 3 +) + +var CacheRecordType_name = map[int32]string{ + 0: "FILE", + 1: "DIR", + 2: "DIR_HEADER", + 3: "SYMLINK", +} + +var CacheRecordType_value = map[string]int32{ + "FILE": 0, + "DIR": 1, + "DIR_HEADER": 2, + "SYMLINK": 3, +} + +func (x CacheRecordType) String() string { + return proto.EnumName(CacheRecordType_name, int32(x)) +} + +func (CacheRecordType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_843938c28b799986, []int{0} +} + +type CacheRecord struct { + Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,1,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"` + Type CacheRecordType `protobuf:"varint,2,opt,name=type,proto3,enum=contenthash.CacheRecordType" json:"type,omitempty"` + Linkname string `protobuf:"bytes,3,opt,name=linkname,proto3" json:"linkname,omitempty"` +} + +func (m *CacheRecord) Reset() { *m = CacheRecord{} } +func (m *CacheRecord) String() string { return proto.CompactTextString(m) } +func (*CacheRecord) ProtoMessage() {} +func (*CacheRecord) Descriptor() ([]byte, []int) { + return fileDescriptor_843938c28b799986, []int{0} +} +func (m *CacheRecord) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CacheRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CacheRecord.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CacheRecord) XXX_Merge(src proto.Message) { + xxx_messageInfo_CacheRecord.Merge(m, src) +} +func (m *CacheRecord) XXX_Size() int { + return m.Size() +} +func (m *CacheRecord) XXX_DiscardUnknown() { + xxx_messageInfo_CacheRecord.DiscardUnknown(m) +} + +var xxx_messageInfo_CacheRecord proto.InternalMessageInfo + +func (m *CacheRecord) GetType() CacheRecordType { + if m != nil { + return m.Type + } + return CacheRecordTypeFile +} + +func (m *CacheRecord) GetLinkname() string { + if m != nil { + return m.Linkname + } + return "" +} + +type CacheRecordWithPath struct { + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + Record *CacheRecord `protobuf:"bytes,2,opt,name=record,proto3" json:"record,omitempty"` +} + +func (m *CacheRecordWithPath) Reset() { *m = CacheRecordWithPath{} } +func (m *CacheRecordWithPath) String() string { return proto.CompactTextString(m) } +func (*CacheRecordWithPath) ProtoMessage() {} +func (*CacheRecordWithPath) Descriptor() ([]byte, []int) { + return fileDescriptor_843938c28b799986, []int{1} +} 
+func (m *CacheRecordWithPath) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CacheRecordWithPath) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CacheRecordWithPath.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CacheRecordWithPath) XXX_Merge(src proto.Message) { + xxx_messageInfo_CacheRecordWithPath.Merge(m, src) +} +func (m *CacheRecordWithPath) XXX_Size() int { + return m.Size() +} +func (m *CacheRecordWithPath) XXX_DiscardUnknown() { + xxx_messageInfo_CacheRecordWithPath.DiscardUnknown(m) +} + +var xxx_messageInfo_CacheRecordWithPath proto.InternalMessageInfo + +func (m *CacheRecordWithPath) GetPath() string { + if m != nil { + return m.Path + } + return "" +} + +func (m *CacheRecordWithPath) GetRecord() *CacheRecord { + if m != nil { + return m.Record + } + return nil +} + +type CacheRecords struct { + Paths []*CacheRecordWithPath `protobuf:"bytes,1,rep,name=paths,proto3" json:"paths,omitempty"` +} + +func (m *CacheRecords) Reset() { *m = CacheRecords{} } +func (m *CacheRecords) String() string { return proto.CompactTextString(m) } +func (*CacheRecords) ProtoMessage() {} +func (*CacheRecords) Descriptor() ([]byte, []int) { + return fileDescriptor_843938c28b799986, []int{2} +} +func (m *CacheRecords) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CacheRecords) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CacheRecords.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CacheRecords) XXX_Merge(src proto.Message) { + xxx_messageInfo_CacheRecords.Merge(m, src) +} +func (m *CacheRecords) XXX_Size() int { + return m.Size() +} +func (m *CacheRecords) XXX_DiscardUnknown() { + xxx_messageInfo_CacheRecords.DiscardUnknown(m) +} + +var xxx_messageInfo_CacheRecords proto.InternalMessageInfo + +func (m *CacheRecords) GetPaths() []*CacheRecordWithPath { + if m != nil { + return m.Paths + } + return nil +} + +func init() { + proto.RegisterEnum("contenthash.CacheRecordType", CacheRecordType_name, CacheRecordType_value) + proto.RegisterType((*CacheRecord)(nil), "contenthash.CacheRecord") + proto.RegisterType((*CacheRecordWithPath)(nil), "contenthash.CacheRecordWithPath") + proto.RegisterType((*CacheRecords)(nil), "contenthash.CacheRecords") +} + +func init() { proto.RegisterFile("checksum.proto", fileDescriptor_843938c28b799986) } + +var fileDescriptor_843938c28b799986 = []byte{ + // 426 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0xc1, 0x6a, 0x13, 0x41, + 0x18, 0xc7, 0x77, 0x9a, 0x18, 0xf5, 0x8b, 0xd4, 0x30, 0x85, 0x76, 0x19, 0xca, 0x64, 0xcc, 0xc5, + 0x50, 0xec, 0xa6, 0x44, 0xf0, 0x6e, 0xdd, 0x84, 0x46, 0xab, 0xc8, 0x54, 0x10, 0xf1, 0x20, 0x9b, + 0xcd, 0xb8, 0xb3, 0xb4, 0xd9, 0x59, 0x76, 0x27, 0x87, 0xbc, 0x81, 0xec, 0xc9, 0x17, 0xd8, 0x93, + 0x82, 0xef, 0xe0, 0x5d, 0xe8, 0xb1, 0x47, 0xf1, 0x50, 0x24, 0x79, 0x11, 0xd9, 0xd9, 0x2a, 0xcb, + 0x4a, 0x4e, 0xf3, 0x7d, 0x33, 0xbf, 0xef, 0xff, 0xff, 0xcf, 0x30, 0xb0, 0xed, 0x4b, 0xe1, 0x9f, + 0xa7, 0x8b, 0xb9, 0x13, 0x27, 0x4a, 0x2b, 0xdc, 0xf6, 0x55, 0xa4, 0x45, 0xa4, 0xa5, 0x97, 0x4a, + 0x72, 0x18, 0x84, 0x5a, 0x2e, 0xa6, 0x8e, 0xaf, 0xe6, 0x83, 0x40, 0x05, 0x6a, 0x60, 0x98, 0xe9, + 
0xe2, 0xa3, 0xe9, 0x4c, 0x63, 0xaa, 0x72, 0xb6, 0xf7, 0x0d, 0x41, 0xfb, 0x99, 0xe7, 0x4b, 0xc1, + 0x85, 0xaf, 0x92, 0x19, 0x7e, 0x0e, 0xad, 0x59, 0x18, 0x88, 0x54, 0xdb, 0x88, 0xa1, 0xfe, 0xdd, + 0xe3, 0xe1, 0xe5, 0x75, 0xd7, 0xfa, 0x75, 0xdd, 0x3d, 0xa8, 0xc8, 0xaa, 0x58, 0x44, 0x85, 0xa5, + 0x17, 0x46, 0x22, 0x49, 0x07, 0x81, 0x3a, 0x2c, 0x47, 0x1c, 0xd7, 0x2c, 0xfc, 0x46, 0x01, 0x1f, + 0x41, 0x53, 0x2f, 0x63, 0x61, 0x6f, 0x31, 0xd4, 0xdf, 0x1e, 0xee, 0x3b, 0x95, 0x98, 0x4e, 0xc5, + 0xf3, 0xcd, 0x32, 0x16, 0xdc, 0x90, 0x98, 0xc0, 0x9d, 0x8b, 0x30, 0x3a, 0x8f, 0xbc, 0xb9, 0xb0, + 0x1b, 0x85, 0x3f, 0xff, 0xd7, 0xf7, 0xde, 0xc3, 0x4e, 0x65, 0xe8, 0x6d, 0xa8, 0xe5, 0x6b, 0x4f, + 0x4b, 0x8c, 0xa1, 0x19, 0x7b, 0x5a, 0x96, 0x71, 0xb9, 0xa9, 0xf1, 0x11, 0xb4, 0x12, 0x43, 0x19, + 0xeb, 0xf6, 0xd0, 0xde, 0x64, 0xcd, 0x6f, 0xb8, 0xde, 0x18, 0xee, 0x55, 0xb6, 0x53, 0xfc, 0x04, + 0x6e, 0x15, 0x4a, 0xa9, 0x8d, 0x58, 0xa3, 0xdf, 0x1e, 0xb2, 0x4d, 0x02, 0x7f, 0x63, 0xf0, 0x12, + 0x3f, 0xf8, 0x81, 0xe0, 0x7e, 0xed, 0x6a, 0xf8, 0x01, 0x34, 0xc7, 0x93, 0xd3, 0x51, 0xc7, 0x22, + 0x7b, 0x59, 0xce, 0x76, 0x6a, 0xc7, 0xe3, 0xf0, 0x42, 0xe0, 0x2e, 0x34, 0xdc, 0x09, 0xef, 0x20, + 0xb2, 0x9b, 0xe5, 0x0c, 0xd7, 0x08, 0x37, 0x4c, 0xf0, 0x23, 0x00, 0x77, 0xc2, 0x3f, 0x9c, 0x8c, + 0x9e, 0xba, 0x23, 0xde, 0xd9, 0x22, 0xfb, 0x59, 0xce, 0xec, 0xff, 0xb9, 0x13, 0xe1, 0xcd, 0x44, + 0x82, 0x1f, 0xc2, 0xed, 0xb3, 0x77, 0x2f, 0x4f, 0x27, 0xaf, 0x5e, 0x74, 0x1a, 0x84, 0x64, 0x39, + 0xdb, 0xad, 0xa1, 0x67, 0xcb, 0x79, 0xf1, 0xae, 0x64, 0xef, 0xd3, 0x17, 0x6a, 0x7d, 0xff, 0x4a, + 0xeb, 0x99, 0x8f, 0xed, 0xcb, 0x15, 0x45, 0x57, 0x2b, 0x8a, 0x7e, 0xaf, 0x28, 0xfa, 0xbc, 0xa6, + 0xd6, 0xd5, 0x9a, 0x5a, 0x3f, 0xd7, 0xd4, 0x9a, 0xb6, 0xcc, 0xbf, 0x79, 0xfc, 0x27, 0x00, 0x00, + 0xff, 0xff, 0xfd, 0xd7, 0xd8, 0x37, 0x85, 0x02, 0x00, 0x00, +} + +func (m *CacheRecord) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CacheRecord) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CacheRecord) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Linkname) > 0 { + i -= len(m.Linkname) + copy(dAtA[i:], m.Linkname) + i = encodeVarintChecksum(dAtA, i, uint64(len(m.Linkname))) + i-- + dAtA[i] = 0x1a + } + if m.Type != 0 { + i = encodeVarintChecksum(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x10 + } + if len(m.Digest) > 0 { + i -= len(m.Digest) + copy(dAtA[i:], m.Digest) + i = encodeVarintChecksum(dAtA, i, uint64(len(m.Digest))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CacheRecordWithPath) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CacheRecordWithPath) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CacheRecordWithPath) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Record != nil { + { + size, err := m.Record.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintChecksum(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = 
encodeVarintChecksum(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CacheRecords) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CacheRecords) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CacheRecords) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Paths) > 0 { + for iNdEx := len(m.Paths) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Paths[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintChecksum(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintChecksum(dAtA []byte, offset int, v uint64) int { + offset -= sovChecksum(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *CacheRecord) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Digest) + if l > 0 { + n += 1 + l + sovChecksum(uint64(l)) + } + if m.Type != 0 { + n += 1 + sovChecksum(uint64(m.Type)) + } + l = len(m.Linkname) + if l > 0 { + n += 1 + l + sovChecksum(uint64(l)) + } + return n +} + +func (m *CacheRecordWithPath) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + if l > 0 { + n += 1 + l + sovChecksum(uint64(l)) + } + if m.Record != nil { + l = m.Record.Size() + n += 1 + l + sovChecksum(uint64(l)) + } + return n +} + +func (m *CacheRecords) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Paths) > 0 { + for _, e := range m.Paths { + l = e.Size() + n += 1 + l + sovChecksum(uint64(l)) + } + } + return n +} + +func sovChecksum(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozChecksum(x uint64) (n int) { + return sovChecksum(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *CacheRecord) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowChecksum + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CacheRecord: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CacheRecord: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowChecksum + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthChecksum + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthChecksum + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if 
wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowChecksum + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= CacheRecordType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Linkname", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowChecksum + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthChecksum + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthChecksum + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Linkname = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipChecksum(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthChecksum + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthChecksum + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CacheRecordWithPath) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowChecksum + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CacheRecordWithPath: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CacheRecordWithPath: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowChecksum + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthChecksum + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthChecksum + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Record", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowChecksum + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthChecksum + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthChecksum + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Record == nil { + m.Record = &CacheRecord{} + } + if err := m.Record.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + 
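// Unknown field numbers fall through to skipChecksum below, which
+// bounds-checks and skips them, so records written by a newer version
+// of the schema still unmarshal without error.
+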
default: + iNdEx = preIndex + skippy, err := skipChecksum(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthChecksum + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthChecksum + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CacheRecords) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowChecksum + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CacheRecords: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CacheRecords: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowChecksum + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthChecksum + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthChecksum + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Paths = append(m.Paths, &CacheRecordWithPath{}) + if err := m.Paths[len(m.Paths)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipChecksum(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthChecksum + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthChecksum + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipChecksum(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowChecksum + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowChecksum + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowChecksum + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthChecksum + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupChecksum + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthChecksum + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF 
+} + +var ( + ErrInvalidLengthChecksum = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowChecksum = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupChecksum = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/moby/buildkit/cache/contenthash/checksum.proto b/vendor/github.com/moby/buildkit/cache/contenthash/checksum.proto new file mode 100644 index 00000000..d6e524ea --- /dev/null +++ b/vendor/github.com/moby/buildkit/cache/contenthash/checksum.proto @@ -0,0 +1,30 @@ +syntax = "proto3"; + +package contenthash; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + +enum CacheRecordType { + option (gogoproto.goproto_enum_prefix) = false; + option (gogoproto.enum_customname) = "CacheRecordType"; + + FILE = 0 [(gogoproto.enumvalue_customname) = "CacheRecordTypeFile"]; + DIR = 1 [(gogoproto.enumvalue_customname) = "CacheRecordTypeDir"]; + DIR_HEADER = 2 [(gogoproto.enumvalue_customname) = "CacheRecordTypeDirHeader"]; + SYMLINK = 3 [(gogoproto.enumvalue_customname) = "CacheRecordTypeSymlink"]; +} + +message CacheRecord { + string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false]; + CacheRecordType type = 2; + string linkname = 3; +} + +message CacheRecordWithPath { + string path = 1; + CacheRecord record = 2; +} + +message CacheRecords { + repeated CacheRecordWithPath paths = 1; +} \ No newline at end of file diff --git a/vendor/github.com/moby/buildkit/cache/contenthash/filehash.go b/vendor/github.com/moby/buildkit/cache/contenthash/filehash.go new file mode 100644 index 00000000..27b6570b --- /dev/null +++ b/vendor/github.com/moby/buildkit/cache/contenthash/filehash.go @@ -0,0 +1,101 @@ +package contenthash + +import ( + "archive/tar" + "crypto/sha256" + "hash" + "os" + "path/filepath" + "time" + + fstypes "github.com/tonistiigi/fsutil/types" +) + +// NewFileHash returns a new hash that is used for the builder cache keys +func NewFileHash(path string, fi os.FileInfo) (hash.Hash, error) { + var link string + if fi.Mode()&os.ModeSymlink != 0 { + var err error + link, err = os.Readlink(path) + if err != nil { + return nil, err + } + } + + stat := &fstypes.Stat{ + Mode: uint32(fi.Mode()), + Size_: fi.Size(), + ModTime: fi.ModTime().UnixNano(), + Linkname: link, + } + + if fi.Mode()&os.ModeSymlink != 0 { + stat.Mode = stat.Mode | 0777 + } + + if err := setUnixOpt(path, fi, stat); err != nil { + return nil, err + } + return NewFromStat(stat) +} + +func NewFromStat(stat *fstypes.Stat) (hash.Hash, error) { + // Clear the socket bit since archive/tar.FileInfoHeader does not handle it + stat.Mode &^= uint32(os.ModeSocket) + + fi := &statInfo{stat} + hdr, err := tar.FileInfoHeader(fi, stat.Linkname) + if err != nil { + return nil, err + } + hdr.Name = "" // note: empty name is different from current hash in docker build. Name is added on recursive directory scan instead + hdr.Mode = int64(chmodWindowsTarEntry(os.FileMode(hdr.Mode))) + hdr.Devmajor = stat.Devmajor + hdr.Devminor = stat.Devminor + + if len(stat.Xattrs) > 0 { + hdr.Xattrs = make(map[string]string, len(stat.Xattrs)) + for k, v := range stat.Xattrs { + hdr.Xattrs[k] = string(v) + } + } + // fmt.Printf("hdr: %#v\n", hdr) + tsh := &tarsumHash{hdr: hdr, Hash: sha256.New()} + tsh.Reset() // initialize header + return tsh, nil +} + +type tarsumHash struct { + hash.Hash + hdr *tar.Header +} + +// Reset resets the Hash to its initial state.
+func (tsh *tarsumHash) Reset() { + // comply with hash.Hash and reset to the state hash had before any writes + tsh.Hash.Reset() + WriteV1TarsumHeaders(tsh.hdr, tsh.Hash) +} + +type statInfo struct { + *fstypes.Stat +} + +func (s *statInfo) Name() string { + return filepath.Base(s.Stat.Path) +} +func (s *statInfo) Size() int64 { + return s.Stat.Size_ +} +func (s *statInfo) Mode() os.FileMode { + return os.FileMode(s.Stat.Mode) +} +func (s *statInfo) ModTime() time.Time { + return time.Unix(s.Stat.ModTime/1e9, s.Stat.ModTime%1e9) +} +func (s *statInfo) IsDir() bool { + return s.Mode().IsDir() +} +func (s *statInfo) Sys() interface{} { + return s.Stat +} diff --git a/vendor/github.com/moby/buildkit/cache/contenthash/filehash_unix.go b/vendor/github.com/moby/buildkit/cache/contenthash/filehash_unix.go new file mode 100644 index 00000000..4f610d77 --- /dev/null +++ b/vendor/github.com/moby/buildkit/cache/contenthash/filehash_unix.go @@ -0,0 +1,47 @@ +// +build !windows + +package contenthash + +import ( + "os" + "syscall" + + "github.com/containerd/continuity/sysx" + fstypes "github.com/tonistiigi/fsutil/types" + + "golang.org/x/sys/unix" +) + +func chmodWindowsTarEntry(perm os.FileMode) os.FileMode { + return perm +} + +func setUnixOpt(path string, fi os.FileInfo, stat *fstypes.Stat) error { + s := fi.Sys().(*syscall.Stat_t) + + stat.Uid = s.Uid + stat.Gid = s.Gid + + if !fi.IsDir() { + if s.Mode&syscall.S_IFBLK != 0 || + s.Mode&syscall.S_IFCHR != 0 { + stat.Devmajor = int64(unix.Major(uint64(s.Rdev))) + stat.Devminor = int64(unix.Minor(uint64(s.Rdev))) + } + } + + attrs, err := sysx.LListxattr(path) + if err != nil { + return err + } + if len(attrs) > 0 { + stat.Xattrs = map[string][]byte{} + for _, attr := range attrs { + v, err := sysx.LGetxattr(path, attr) + if err == nil { + stat.Xattrs[attr] = v + } + } + } + return nil +} diff --git a/vendor/github.com/moby/buildkit/cache/contenthash/filehash_windows.go b/vendor/github.com/moby/buildkit/cache/contenthash/filehash_windows.go new file mode 100644 index 00000000..e15bf1e5 --- /dev/null +++ b/vendor/github.com/moby/buildkit/cache/contenthash/filehash_windows.go @@ -0,0 +1,23 @@ +// +build windows + +package contenthash + +import ( + "os" + + fstypes "github.com/tonistiigi/fsutil/types" +) + +// chmodWindowsTarEntry adjusts the file permissions used in the tar +// header based on the platform the archiving is done on. +func chmodWindowsTarEntry(perm os.FileMode) os.FileMode { + perm &= 0755 + // Add the x bit: make everything +x from windows + perm |= 0111 + + return perm +} + +func setUnixOpt(path string, fi os.FileInfo, stat *fstypes.Stat) error { + return nil +} diff --git a/vendor/github.com/moby/buildkit/cache/contenthash/generate.go b/vendor/github.com/moby/buildkit/cache/contenthash/generate.go new file mode 100644 index 00000000..e4bd2c50 --- /dev/null +++ b/vendor/github.com/moby/buildkit/cache/contenthash/generate.go @@ -0,0 +1,3 @@ +package contenthash + +//go:generate protoc -I=. -I=../../vendor/ --gogofaster_out=.
checksum.proto diff --git a/vendor/github.com/moby/buildkit/cache/contenthash/path.go b/vendor/github.com/moby/buildkit/cache/contenthash/path.go new file mode 100644 index 00000000..1084da08 --- /dev/null +++ b/vendor/github.com/moby/buildkit/cache/contenthash/path.go @@ -0,0 +1,107 @@ +package contenthash + +import ( + "errors" + "os" + "path/filepath" +) + +var ( + errTooManyLinks = errors.New("too many links") +) + +type onSymlinkFunc func(string, string) error + +// rootPath joins a path with a root, evaluating and bounding any +// symlink to the root directory. +// This is containerd/continuity/fs RootPath implementation with a callback on +// resolving the symlink. +func rootPath(root, path string, cb onSymlinkFunc) (string, error) { + if path == "" { + return root, nil + } + var linksWalked int // to protect against cycles + for { + i := linksWalked + newpath, err := walkLinks(root, path, &linksWalked, cb) + if err != nil { + return "", err + } + path = newpath + if i == linksWalked { + newpath = filepath.Join("/", newpath) + if path == newpath { + return filepath.Join(root, newpath), nil + } + path = newpath + } + } +} + +func walkLink(root, path string, linksWalked *int, cb onSymlinkFunc) (newpath string, islink bool, err error) { + if *linksWalked > 255 { + return "", false, errTooManyLinks + } + + path = filepath.Join("/", path) + if path == "/" { + return path, false, nil + } + realPath := filepath.Join(root, path) + + fi, err := os.Lstat(realPath) + if err != nil { + // If path does not yet exist, treat as non-symlink + if os.IsNotExist(err) { + return path, false, nil + } + return "", false, err + } + if fi.Mode()&os.ModeSymlink == 0 { + return path, false, nil + } + newpath, err = os.Readlink(realPath) + if err != nil { + return "", false, err + } + if cb != nil { + if err := cb(path, newpath); err != nil { + return "", false, err + } + } + *linksWalked++ + return newpath, true, nil +} + +func walkLinks(root, path string, linksWalked *int, cb onSymlinkFunc) (string, error) { + switch dir, file := filepath.Split(path); { + case dir == "": + newpath, _, err := walkLink(root, file, linksWalked, cb) + return newpath, err + case file == "": + if os.IsPathSeparator(dir[len(dir)-1]) { + if dir == "/" { + return dir, nil + } + return walkLinks(root, dir[:len(dir)-1], linksWalked, cb) + } + newpath, _, err := walkLink(root, dir, linksWalked, cb) + return newpath, err + default: + newdir, err := walkLinks(root, dir, linksWalked, cb) + if err != nil { + return "", err + } + newpath, islink, err := walkLink(root, filepath.Join(newdir, file), linksWalked, cb) + if err != nil { + return "", err + } + if !islink { + return newpath, nil + } + if filepath.IsAbs(newpath) { + return newpath, nil + } + return filepath.Join(newdir, newpath), nil + } +} diff --git a/vendor/github.com/moby/buildkit/cache/contenthash/tarsum.go b/vendor/github.com/moby/buildkit/cache/contenthash/tarsum.go new file mode 100644 index 00000000..de72d6cd --- /dev/null +++ b/vendor/github.com/moby/buildkit/cache/contenthash/tarsum.go @@ -0,0 +1,63 @@ +package contenthash + +import ( + "archive/tar" + "io" + "sort" + "strconv" + "strings" +) + +// WriteV1TarsumHeaders writes a tar header to a writer in V1 tarsum format. +func WriteV1TarsumHeaders(h *tar.Header, w io.Writer) { + for _, elem := range v1TarHeaderSelect(h) { + w.Write([]byte(elem[0] + elem[1])) + } +} + +// Functions below are from docker legacy tarsum implementation. +// There is no valid technical reason to continue using them. 
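+//
+// (Editor's note, a worked example of the V1 serialization implemented by
+// the selectors below: each selected (key, value) pair is written to the
+// hash as the bare concatenation key+value, with no separators. For a
+// regular file header with Mode 0644, Uid/Gid 0, Size 3 and Typeflag '0',
+// WriteV1TarsumHeaders feeds the hash:
+//
+//	"name" + "" + "mode" + "420" + "uid" + "0" + "gid" + "0" +
+//	"size" + "3" + "typeflag" + "0" + "linkname" + "" + "uname" + "" +
+//	"gname" + "" + "devmajor" + "0" + "devminor" + "0"
+//
+// Mode prints as 420 because strconv.FormatInt renders the octal 0644 in
+// decimal, and mtime is the one v0 header that the v1 selector drops.)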
+ +func v0TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) { + return [][2]string{ + {"name", h.Name}, + {"mode", strconv.FormatInt(h.Mode, 10)}, + {"uid", strconv.Itoa(h.Uid)}, + {"gid", strconv.Itoa(h.Gid)}, + {"size", strconv.FormatInt(h.Size, 10)}, + {"mtime", strconv.FormatInt(h.ModTime.UTC().Unix(), 10)}, + {"typeflag", string([]byte{h.Typeflag})}, + {"linkname", h.Linkname}, + {"uname", h.Uname}, + {"gname", h.Gname}, + {"devmajor", strconv.FormatInt(h.Devmajor, 10)}, + {"devminor", strconv.FormatInt(h.Devminor, 10)}, + } +} + +func v1TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) { + // Get extended attributes. + xAttrKeys := make([]string, len(h.Xattrs)) + for k := range h.Xattrs { + if k == "security.capability" || !strings.HasPrefix(k, "security.") && !strings.HasPrefix(k, "system.") { + xAttrKeys = append(xAttrKeys, k) + } + } + sort.Strings(xAttrKeys) + + // Make the slice with enough capacity to hold the 11 basic headers + // we want from the v0 selector plus however many xattrs we have. + orderedHeaders = make([][2]string, 0, 11+len(xAttrKeys)) + + // Copy all headers from v0 excluding the 'mtime' header (the 5th element). + v0headers := v0TarHeaderSelect(h) + orderedHeaders = append(orderedHeaders, v0headers[0:5]...) + orderedHeaders = append(orderedHeaders, v0headers[6:]...) + + // Finally, append the sorted xattrs. + for _, k := range xAttrKeys { + orderedHeaders = append(orderedHeaders, [2]string{k, h.Xattrs[k]}) + } + + return +} diff --git a/vendor/github.com/moby/buildkit/cache/manager.go b/vendor/github.com/moby/buildkit/cache/manager.go new file mode 100644 index 00000000..c5ff035b --- /dev/null +++ b/vendor/github.com/moby/buildkit/cache/manager.go @@ -0,0 +1,1092 @@ +package cache + +import ( + "context" + "sort" + "sync" + "time" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/diff" + "github.com/containerd/containerd/filters" + "github.com/containerd/containerd/gc" + "github.com/containerd/containerd/leases" + "github.com/docker/docker/pkg/idtools" + "github.com/moby/buildkit/cache/metadata" + "github.com/moby/buildkit/client" + "github.com/moby/buildkit/identity" + "github.com/moby/buildkit/snapshot" + "github.com/opencontainers/go-digest" + imagespecidentity "github.com/opencontainers/image-spec/identity" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/sync/errgroup" +) + +var ( + ErrLocked = errors.New("locked") + errNotFound = errors.New("not found") + errInvalid = errors.New("invalid") +) + +type ManagerOpt struct { + Snapshotter snapshot.Snapshotter + MetadataStore *metadata.Store + ContentStore content.Store + LeaseManager leases.Manager + PruneRefChecker ExternalRefCheckerFunc + GarbageCollect func(ctx context.Context) (gc.Stats, error) + Applier diff.Applier +} + +type Accessor interface { + GetByBlob(ctx context.Context, desc ocispec.Descriptor, parent ImmutableRef, opts ...RefOption) (ImmutableRef, error) + Get(ctx context.Context, id string, opts ...RefOption) (ImmutableRef, error) + + New(ctx context.Context, parent ImmutableRef, opts ...RefOption) (MutableRef, error) + GetMutable(ctx context.Context, id string) (MutableRef, error) // Rebase? 
+ IdentityMapping() *idtools.IdentityMapping + Metadata(string) *metadata.StorageItem +} + +type Controller interface { + DiskUsage(ctx context.Context, info client.DiskUsageInfo) ([]*client.UsageInfo, error) + Prune(ctx context.Context, ch chan client.UsageInfo, info ...client.PruneInfo) error +} + +type Manager interface { + Accessor + Controller + Close() error +} + +type ExternalRefCheckerFunc func() (ExternalRefChecker, error) + +type ExternalRefChecker interface { + Exists(string, []digest.Digest) bool +} + +type cacheManager struct { + records map[string]*cacheRecord + mu sync.Mutex + ManagerOpt + md *metadata.Store + + muPrune sync.Mutex // make sure parallel prune is not allowed so there will not be inconsistent results +} + +func NewManager(opt ManagerOpt) (Manager, error) { + cm := &cacheManager{ + ManagerOpt: opt, + md: opt.MetadataStore, + records: make(map[string]*cacheRecord), + } + + if err := cm.init(context.TODO()); err != nil { + return nil, err + } + + // cm.scheduleGC(5 * time.Minute) + + return cm, nil +} + +func (cm *cacheManager) GetByBlob(ctx context.Context, desc ocispec.Descriptor, parent ImmutableRef, opts ...RefOption) (ir ImmutableRef, err error) { + diffID, err := diffIDFromDescriptor(desc) + if err != nil { + return nil, err + } + chainID := diffID + blobChainID := imagespecidentity.ChainID([]digest.Digest{desc.Digest, diffID}) + + if desc.Digest != "" { + if _, err := cm.ContentStore.Info(ctx, desc.Digest); err != nil { + return nil, errors.Wrapf(err, "failed to get blob %s", desc.Digest) + } + } + + var p *immutableRef + var parentID string + if parent != nil { + pInfo := parent.Info() + if pInfo.ChainID == "" || pInfo.BlobChainID == "" { + return nil, errors.Errorf("failed to get ref by blob on non-addressable parent") + } + chainID = imagespecidentity.ChainID([]digest.Digest{pInfo.ChainID, chainID}) + blobChainID = imagespecidentity.ChainID([]digest.Digest{pInfo.BlobChainID, blobChainID}) + p2, err := cm.Get(ctx, parent.ID(), NoUpdateLastUsed) + if err != nil { + return nil, err + } + if err := p2.Finalize(ctx, true); err != nil { + return nil, err + } + parentID = p2.ID() + p = p2.(*immutableRef) + } + + releaseParent := false + defer func() { + if releaseParent || err != nil && p != nil { + p.Release(context.TODO()) + } + }() + + cm.mu.Lock() + defer cm.mu.Unlock() + + sis, err := cm.MetadataStore.Search("blobchainid:" + blobChainID.String()) + if err != nil { + return nil, err + } + + for _, si := range sis { + ref, err := cm.get(ctx, si.ID(), opts...) + if err != nil && errors.Cause(err) != errNotFound { + return nil, errors.Wrapf(err, "failed to get record %s by blobchainid", si.ID()) + } + if p != nil { + releaseParent = true + } + return ref, nil + } + + sis, err = cm.MetadataStore.Search("chainid:" + chainID.String()) + if err != nil { + return nil, err + } + + var link ImmutableRef + for _, si := range sis { + ref, err := cm.get(ctx, si.ID(), opts...) 
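+// Records matching this chainID differ at most in blob compression, so
+// any hit is kept as "link" and its snapshot ID and blob-only flag are
+// reused below. A stale entry surfaces as errNotFound, which the check
+// that follows deliberately tolerates.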
+ if err != nil && errors.Cause(err) != errNotFound { + return nil, errors.Wrapf(err, "failed to get record %s by chainid", si.ID()) + } + link = ref + break + } + + id := identity.NewID() + snapshotID := chainID.String() + blobOnly := true + if link != nil { + snapshotID = getSnapshotID(link.Metadata()) + blobOnly = getBlobOnly(link.Metadata()) + go link.Release(context.TODO()) + } + + l, err := cm.ManagerOpt.LeaseManager.Create(ctx, func(l *leases.Lease) error { + l.ID = id + l.Labels = map[string]string{ + "containerd.io/gc.flat": time.Now().UTC().Format(time.RFC3339Nano), + } + return nil + }) + if err != nil { + return nil, errors.Wrap(err, "failed to create lease") + } + + defer func() { + if err != nil { + if err := cm.ManagerOpt.LeaseManager.Delete(context.TODO(), leases.Lease{ + ID: l.ID, + }); err != nil { + logrus.Errorf("failed to remove lease: %+v", err) + } + } + }() + + if err := cm.ManagerOpt.LeaseManager.AddResource(ctx, l, leases.Resource{ + ID: snapshotID, + Type: "snapshots/" + cm.ManagerOpt.Snapshotter.Name(), + }); err != nil { + return nil, errors.Wrapf(err, "failed to add snapshot %s to lease", id) + } + + if desc.Digest != "" { + if err := cm.ManagerOpt.LeaseManager.AddResource(ctx, leases.Lease{ID: id}, leases.Resource{ + ID: desc.Digest.String(), + Type: "content", + }); err != nil { + return nil, errors.Wrapf(err, "failed to add blob %s to lease", id) + } + } + + md, _ := cm.md.Get(id) + + rec := &cacheRecord{ + mu: &sync.Mutex{}, + cm: cm, + refs: make(map[ref]struct{}), + parent: p, + md: md, + } + + if err := initializeMetadata(rec, parentID, opts...); err != nil { + return nil, err + } + + queueDiffID(rec.md, diffID.String()) + queueBlob(rec.md, desc.Digest.String()) + queueChainID(rec.md, chainID.String()) + queueBlobChainID(rec.md, blobChainID.String()) + queueSnapshotID(rec.md, snapshotID) + queueBlobOnly(rec.md, blobOnly) + queueMediaType(rec.md, desc.MediaType) + queueCommitted(rec.md) + + if err := rec.md.Commit(); err != nil { + return nil, err + } + + cm.records[id] = rec + + return rec.ref(true), nil +} + +// init loads all snapshots from metadata state and tries to load the records +// from the snapshotter. If a snapshot can't be found, the metadata is deleted as well. +func (cm *cacheManager) init(ctx context.Context) error { + items, err := cm.md.All() + if err != nil { + return err + } + + for _, si := range items { + if _, err := cm.getRecord(ctx, si.ID()); err != nil { + logrus.Debugf("could not load snapshot %s: %+v", si.ID(), err) + cm.md.Clear(si.ID()) + cm.LeaseManager.Delete(ctx, leases.Lease{ID: si.ID()}) + } + } + return nil +} + +// IdentityMapping returns the userns remapping used for refs +func (cm *cacheManager) IdentityMapping() *idtools.IdentityMapping { + return cm.Snapshotter.IdentityMapping() +} + +// Close closes the manager and releases the metadata database lock. No other +// method should be called after Close. +func (cm *cacheManager) Close() error { + // TODO: allocate internal context and cancel it here + return cm.md.Close() +} + +// Get returns an immutable snapshot reference for ID +func (cm *cacheManager) Get(ctx context.Context, id string, opts ...RefOption) (ImmutableRef, error) { + cm.mu.Lock() + defer cm.mu.Unlock() + return cm.get(ctx, id, opts...)
+} + +func (cm *cacheManager) Metadata(id string) *metadata.StorageItem { + cm.mu.Lock() + defer cm.mu.Unlock() + r, ok := cm.records[id] + if !ok { + return nil + } + return r.Metadata() +} + +// get requires manager lock to be taken +func (cm *cacheManager) get(ctx context.Context, id string, opts ...RefOption) (*immutableRef, error) { + rec, err := cm.getRecord(ctx, id, opts...) + if err != nil { + return nil, err + } + rec.mu.Lock() + defer rec.mu.Unlock() + + triggerUpdate := true + for _, o := range opts { + if o == NoUpdateLastUsed { + triggerUpdate = false + } + } + + if rec.mutable { + if len(rec.refs) != 0 { + return nil, errors.Wrapf(ErrLocked, "%s is locked", id) + } + if rec.equalImmutable != nil { + return rec.equalImmutable.ref(triggerUpdate), nil + } + return rec.mref(triggerUpdate).commit(ctx) + } + + return rec.ref(triggerUpdate), nil +} + +// getRecord returns record for id. Requires manager lock. +func (cm *cacheManager) getRecord(ctx context.Context, id string, opts ...RefOption) (cr *cacheRecord, retErr error) { + if rec, ok := cm.records[id]; ok { + if rec.isDead() { + return nil, errors.Wrapf(errNotFound, "failed to get dead record %s", id) + } + return rec, nil + } + + md, ok := cm.md.Get(id) + if !ok { + return nil, errors.Wrapf(errNotFound, "%s not found", id) + } + if mutableID := getEqualMutable(md); mutableID != "" { + mutable, err := cm.getRecord(ctx, mutableID) + if err != nil { + // check loading mutable deleted record from disk + if errors.Cause(err) == errNotFound { + cm.md.Clear(id) + } + return nil, err + } + rec := &cacheRecord{ + mu: &sync.Mutex{}, + cm: cm, + refs: make(map[ref]struct{}), + parent: mutable.parentRef(false), + md: md, + equalMutable: &mutableRef{cacheRecord: mutable}, + } + mutable.equalImmutable = &immutableRef{cacheRecord: rec} + cm.records[id] = rec + return rec, nil + } + + var parent *immutableRef + if parentID := getParent(md); parentID != "" { + var err error + parent, err = cm.get(ctx, parentID, append(opts, NoUpdateLastUsed)...) 
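+// NoUpdateLastUsed keeps this internal parent lookup from bumping the
+// usage count and last-used timestamp that the prune policy reads.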
+ if err != nil { + return nil, err + } + defer func() { + if retErr != nil { + parent.mu.Lock() + parent.release(context.TODO()) + parent.mu.Unlock() + } + }() + } + + rec := &cacheRecord{ + mu: &sync.Mutex{}, + mutable: !getCommitted(md), + cm: cm, + refs: make(map[ref]struct{}), + parent: parent, + md: md, + } + + // the record was deleted but we crashed before data on disk was removed + if getDeleted(md) { + if err := rec.remove(ctx, true); err != nil { + return nil, err + } + return nil, errors.Wrapf(errNotFound, "failed to get deleted record %s", id) + } + + if err := initializeMetadata(rec, getParent(md), opts...); err != nil { + return nil, err + } + + cm.records[id] = rec + return rec, nil +} + +func (cm *cacheManager) New(ctx context.Context, s ImmutableRef, opts ...RefOption) (mr MutableRef, err error) { + id := identity.NewID() + + var parent *immutableRef + var parentID string + var parentSnapshotID string + if s != nil { + p, err := cm.Get(ctx, s.ID(), NoUpdateLastUsed) + if err != nil { + return nil, err + } + if err := p.Finalize(ctx, true); err != nil { + return nil, err + } + parent = p.(*immutableRef) + parentSnapshotID = getSnapshotID(parent.md) + parentID = parent.ID() + } + + defer func() { + if err != nil && parent != nil { + parent.Release(context.TODO()) + } + }() + + l, err := cm.ManagerOpt.LeaseManager.Create(ctx, func(l *leases.Lease) error { + l.ID = id + l.Labels = map[string]string{ + "containerd.io/gc.flat": time.Now().UTC().Format(time.RFC3339Nano), + } + return nil + }) + if err != nil { + return nil, errors.Wrap(err, "failed to create lease") + } + + defer func() { + if err != nil { + if err := cm.ManagerOpt.LeaseManager.Delete(context.TODO(), leases.Lease{ + ID: l.ID, + }); err != nil { + logrus.Errorf("failed to remove lease: %+v", err) + } + } + }() + + if err := cm.ManagerOpt.LeaseManager.AddResource(ctx, l, leases.Resource{ + ID: id, + Type: "snapshots/" + cm.ManagerOpt.Snapshotter.Name(), + }); err != nil { + return nil, errors.Wrapf(err, "failed to add snapshot %s to lease", id) + } + + if err := cm.Snapshotter.Prepare(ctx, id, parentSnapshotID); err != nil { + return nil, errors.Wrapf(err, "failed to prepare %s", id) + } + + md, _ := cm.md.Get(id) + + rec := &cacheRecord{ + mu: &sync.Mutex{}, + mutable: true, + cm: cm, + refs: make(map[ref]struct{}), + parent: parent, + md: md, + } + + if err := initializeMetadata(rec, parentID, opts...); err != nil { + return nil, err + } + + cm.mu.Lock() + defer cm.mu.Unlock() + + cm.records[id] = rec // TODO: save to db + + return rec.mref(true), nil +} +func (cm *cacheManager) GetMutable(ctx context.Context, id string) (MutableRef, error) { + cm.mu.Lock() + defer cm.mu.Unlock() + + rec, err := cm.getRecord(ctx, id) + if err != nil { + return nil, err + } + + rec.mu.Lock() + defer rec.mu.Unlock() + if !rec.mutable { + return nil, errors.Wrapf(errInvalid, "%s is not mutable", id) + } + + if len(rec.refs) != 0 { + return nil, errors.Wrapf(ErrLocked, "%s is locked", id) + } + + if rec.equalImmutable != nil { + if len(rec.equalImmutable.refs) != 0 { + return nil, errors.Wrapf(ErrLocked, "%s is locked", id) + } + delete(cm.records, rec.equalImmutable.ID()) + if err := rec.equalImmutable.remove(ctx, false); err != nil { + return nil, err + } + rec.equalImmutable = nil + } + + return rec.mref(true), nil +} + +func (cm *cacheManager) Prune(ctx context.Context, ch chan client.UsageInfo, opts ...client.PruneInfo) error { + cm.muPrune.Lock() + + for _, opt := range opts { + if err := cm.pruneOnce(ctx, ch, opt); err != nil 
{ + cm.muPrune.Unlock() + return err + } + } + + cm.muPrune.Unlock() + + if cm.GarbageCollect != nil { + if _, err := cm.GarbageCollect(ctx); err != nil { + return err + } + } + + return nil +} + +func (cm *cacheManager) pruneOnce(ctx context.Context, ch chan client.UsageInfo, opt client.PruneInfo) error { + filter, err := filters.ParseAll(opt.Filter...) + if err != nil { + return errors.Wrapf(err, "failed to parse prune filters %v", opt.Filter) + } + + var check ExternalRefChecker + if f := cm.PruneRefChecker; f != nil && (!opt.All || len(opt.Filter) > 0) { + c, err := f() + if err != nil { + return errors.WithStack(err) + } + check = c + } + + totalSize := int64(0) + if opt.KeepBytes != 0 { + du, err := cm.DiskUsage(ctx, client.DiskUsageInfo{}) + if err != nil { + return err + } + for _, ui := range du { + if ui.Shared { + continue + } + totalSize += ui.Size + } + } + + return cm.prune(ctx, ch, pruneOpt{ + filter: filter, + all: opt.All, + checkShared: check, + keepDuration: opt.KeepDuration, + keepBytes: opt.KeepBytes, + totalSize: totalSize, + }) +} + +func (cm *cacheManager) prune(ctx context.Context, ch chan client.UsageInfo, opt pruneOpt) error { + var toDelete []*deleteRecord + + if opt.keepBytes != 0 && opt.totalSize < opt.keepBytes { + return nil + } + + cm.mu.Lock() + + gcMode := opt.keepBytes != 0 + cutOff := time.Now().Add(-opt.keepDuration) + + locked := map[*sync.Mutex]struct{}{} + + for _, cr := range cm.records { + if _, ok := locked[cr.mu]; ok { + continue + } + cr.mu.Lock() + + // ignore duplicates that share data + if cr.equalImmutable != nil && len(cr.equalImmutable.refs) > 0 || cr.equalMutable != nil && len(cr.refs) == 0 { + cr.mu.Unlock() + continue + } + + if cr.isDead() { + cr.mu.Unlock() + continue + } + + if len(cr.refs) == 0 { + recordType := GetRecordType(cr) + if recordType == "" { + recordType = client.UsageRecordTypeRegular + } + + shared := false + if opt.checkShared != nil { + shared = opt.checkShared.Exists(cr.ID(), cr.parentChain()) + } + + if !opt.all { + if recordType == client.UsageRecordTypeInternal || recordType == client.UsageRecordTypeFrontend || shared { + cr.mu.Unlock() + continue + } + } + + c := &client.UsageInfo{ + ID: cr.ID(), + Mutable: cr.mutable, + RecordType: recordType, + Shared: shared, + } + + usageCount, lastUsedAt := getLastUsed(cr.md) + c.LastUsedAt = lastUsedAt + c.UsageCount = usageCount + + if opt.keepDuration != 0 { + if lastUsedAt != nil && lastUsedAt.After(cutOff) { + cr.mu.Unlock() + continue + } + } + + if opt.filter.Match(adaptUsageInfo(c)) { + toDelete = append(toDelete, &deleteRecord{ + cacheRecord: cr, + lastUsedAt: c.LastUsedAt, + usageCount: c.UsageCount, + }) + if !gcMode { + cr.dead = true + + // mark metadata as deleted in case we crash before cleanup finished + if err := setDeleted(cr.md); err != nil { + cr.mu.Unlock() + cm.mu.Unlock() + return err + } + } else { + locked[cr.mu] = struct{}{} + continue // leave the record locked + } + } + } + cr.mu.Unlock() + } + + if gcMode && len(toDelete) > 0 { + sortDeleteRecords(toDelete) + var err error + for i, cr := range toDelete { + // only remove single record at a time + if i == 0 { + cr.dead = true + err = setDeleted(cr.md) + } + cr.mu.Unlock() + } + if err != nil { + return err + } + toDelete = toDelete[:1] + } + + cm.mu.Unlock() + + if len(toDelete) == 0 { + return nil + } + + var err error + for _, cr := range toDelete { + cr.mu.Lock() + + usageCount, lastUsedAt := getLastUsed(cr.md) + + c := client.UsageInfo{ + ID: cr.ID(), + Mutable: cr.mutable, + InUse: 
len(cr.refs) > 0, + Size: getSize(cr.md), + CreatedAt: GetCreatedAt(cr.md), + Description: GetDescription(cr.md), + LastUsedAt: lastUsedAt, + UsageCount: usageCount, + } + + if cr.parent != nil { + c.Parent = cr.parent.ID() + } + if c.Size == sizeUnknown && cr.equalImmutable != nil { + c.Size = getSize(cr.equalImmutable.md) // benefit from DiskUsage calc + } + if c.Size == sizeUnknown { + cr.mu.Unlock() // all the non-prune modifications already protected by cr.dead + s, err := cr.Size(ctx) + if err != nil { + return err + } + c.Size = s + cr.mu.Lock() + } + + opt.totalSize -= c.Size + + if cr.equalImmutable != nil { + if err1 := cr.equalImmutable.remove(ctx, false); err == nil { + err = err1 + } + } + if err1 := cr.remove(ctx, true); err == nil { + err = err1 + } + + if err == nil && ch != nil { + ch <- c + } + cr.mu.Unlock() + } + if err != nil { + return err + } + + select { + case <-ctx.Done(): + return ctx.Err() + default: + return cm.prune(ctx, ch, opt) + } +} + +func (cm *cacheManager) markShared(m map[string]*cacheUsageInfo) error { + if cm.PruneRefChecker == nil { + return nil + } + c, err := cm.PruneRefChecker() + if err != nil { + return errors.WithStack(err) + } + + var markAllParentsShared func(string) + markAllParentsShared = func(id string) { + if v, ok := m[id]; ok { + v.shared = true + if v.parent != "" { + markAllParentsShared(v.parent) + } + } + } + + for id := range m { + if m[id].shared { + continue + } + if b := c.Exists(id, m[id].parentChain); b { + markAllParentsShared(id) + } + } + return nil +} + +type cacheUsageInfo struct { + refs int + parent string + size int64 + mutable bool + createdAt time.Time + usageCount int + lastUsedAt *time.Time + description string + doubleRef bool + recordType client.UsageRecordType + shared bool + parentChain []digest.Digest +} + +func (cm *cacheManager) DiskUsage(ctx context.Context, opt client.DiskUsageInfo) ([]*client.UsageInfo, error) { + filter, err := filters.ParseAll(opt.Filter...) 
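+// Filters use containerd's filter syntax over the fields exposed by
+// adaptUsageInfo at the bottom of this file, e.g. "id==<ref id>",
+// "parent==<ref id>", or the boolean fields "inuse", "mutable",
+// "immutable", "shared" and "private".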
+ if err != nil { + return nil, errors.Wrapf(err, "failed to parse diskusage filters %v", opt.Filter) + } + + cm.mu.Lock() + + m := make(map[string]*cacheUsageInfo, len(cm.records)) + rescan := make(map[string]struct{}, len(cm.records)) + + for id, cr := range cm.records { + cr.mu.Lock() + // ignore duplicates that share data + if cr.equalImmutable != nil && len(cr.equalImmutable.refs) > 0 || cr.equalMutable != nil && len(cr.refs) == 0 { + cr.mu.Unlock() + continue + } + + usageCount, lastUsedAt := getLastUsed(cr.md) + c := &cacheUsageInfo{ + refs: len(cr.refs), + mutable: cr.mutable, + size: getSize(cr.md), + createdAt: GetCreatedAt(cr.md), + usageCount: usageCount, + lastUsedAt: lastUsedAt, + description: GetDescription(cr.md), + doubleRef: cr.equalImmutable != nil, + recordType: GetRecordType(cr), + parentChain: cr.parentChain(), + } + if c.recordType == "" { + c.recordType = client.UsageRecordTypeRegular + } + if cr.parent != nil { + c.parent = cr.parent.ID() + } + if cr.mutable && c.refs > 0 { + c.size = 0 // size can not be determined because it is changing + } + m[id] = c + rescan[id] = struct{}{} + cr.mu.Unlock() + } + cm.mu.Unlock() + + for { + if len(rescan) == 0 { + break + } + for id := range rescan { + v := m[id] + if v.refs == 0 && v.parent != "" { + m[v.parent].refs-- + if v.doubleRef { + m[v.parent].refs-- + } + rescan[v.parent] = struct{}{} + } + delete(rescan, id) + } + } + + if err := cm.markShared(m); err != nil { + return nil, err + } + + var du []*client.UsageInfo + for id, cr := range m { + c := &client.UsageInfo{ + ID: id, + Mutable: cr.mutable, + InUse: cr.refs > 0, + Size: cr.size, + Parent: cr.parent, + CreatedAt: cr.createdAt, + Description: cr.description, + LastUsedAt: cr.lastUsedAt, + UsageCount: cr.usageCount, + RecordType: cr.recordType, + Shared: cr.shared, + } + if filter.Match(adaptUsageInfo(c)) { + du = append(du, c) + } + } + + eg, ctx := errgroup.WithContext(ctx) + + for _, d := range du { + if d.Size == sizeUnknown { + func(d *client.UsageInfo) { + eg.Go(func() error { + ref, err := cm.Get(ctx, d.ID, NoUpdateLastUsed) + if err != nil { + d.Size = 0 + return nil + } + s, err := ref.Size(ctx) + if err != nil { + return err + } + d.Size = s + return ref.Release(context.TODO()) + }) + }(d) + } + } + + if err := eg.Wait(); err != nil { + return du, err + } + + return du, nil +} + +func IsLocked(err error) bool { + return errors.Cause(err) == ErrLocked +} + +func IsNotFound(err error) bool { + return errors.Cause(err) == errNotFound +} + +type RefOption interface{} + +type cachePolicy int + +const ( + cachePolicyDefault cachePolicy = iota + cachePolicyRetain +) + +type withMetadata interface { + Metadata() *metadata.StorageItem +} + +type noUpdateLastUsed struct{} + +var NoUpdateLastUsed noUpdateLastUsed + +func HasCachePolicyRetain(m withMetadata) bool { + return getCachePolicy(m.Metadata()) == cachePolicyRetain +} + +func CachePolicyRetain(m withMetadata) error { + return queueCachePolicy(m.Metadata(), cachePolicyRetain) +} + +func CachePolicyDefault(m withMetadata) error { + return queueCachePolicy(m.Metadata(), cachePolicyDefault) +} + +func WithDescription(descr string) RefOption { + return func(m withMetadata) error { + return queueDescription(m.Metadata(), descr) + } +} + +func WithRecordType(t client.UsageRecordType) RefOption { + return func(m withMetadata) error { + return queueRecordType(m.Metadata(), t) + } +} + +func WithCreationTime(tm time.Time) RefOption { + return func(m withMetadata) error { + return queueCreatedAt(m.Metadata(), tm) + } 
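+// (Editor's note, illustrative only: RefOptions compose at the call
+// sites of New/GetByBlob, e.g.
+//
+//	mref, err := cm.New(ctx, parentRef, CachePolicyRetain,
+//		WithDescription("unpacked layer"))
+//
+// where initializeMetadata applies every option whose concrete type is
+// func(withMetadata) error.)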
+} + +func initializeMetadata(m withMetadata, parent string, opts ...RefOption) error { + md := m.Metadata() + if tm := GetCreatedAt(md); !tm.IsZero() { + return nil + } + + if err := queueParent(md, parent); err != nil { + return err + } + + if err := queueCreatedAt(md, time.Now()); err != nil { + return err + } + + for _, opt := range opts { + if fn, ok := opt.(func(withMetadata) error); ok { + if err := fn(m); err != nil { + return err + } + } + } + + return md.Commit() +} + +func adaptUsageInfo(info *client.UsageInfo) filters.Adaptor { + return filters.AdapterFunc(func(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + + switch fieldpath[0] { + case "id": + return info.ID, info.ID != "" + case "parent": + return info.Parent, info.Parent != "" + case "description": + return info.Description, info.Description != "" + case "inuse": + return "", info.InUse + case "mutable": + return "", info.Mutable + case "immutable": + return "", !info.Mutable + case "type": + return string(info.RecordType), info.RecordType != "" + case "shared": + return "", info.Shared + case "private": + return "", !info.Shared + } + + // TODO: add int/datetime/bytes support for more fields + + return "", false + }) +} + +type pruneOpt struct { + filter filters.Filter + all bool + checkShared ExternalRefChecker + keepDuration time.Duration + keepBytes int64 + totalSize int64 +} + +type deleteRecord struct { + *cacheRecord + lastUsedAt *time.Time + usageCount int + lastUsedAtIndex int + usageCountIndex int +} + +func sortDeleteRecords(toDelete []*deleteRecord) { + sort.Slice(toDelete, func(i, j int) bool { + if toDelete[i].lastUsedAt == nil { + return true + } + if toDelete[j].lastUsedAt == nil { + return false + } + return toDelete[i].lastUsedAt.Before(*toDelete[j].lastUsedAt) + }) + + maxLastUsedIndex := 0 + var val time.Time + for _, v := range toDelete { + if v.lastUsedAt != nil && v.lastUsedAt.After(val) { + val = *v.lastUsedAt + maxLastUsedIndex++ + } + v.lastUsedAtIndex = maxLastUsedIndex + } + + sort.Slice(toDelete, func(i, j int) bool { + return toDelete[i].usageCount < toDelete[j].usageCount + }) + + maxUsageCountIndex := 0 + var count int + for _, v := range toDelete { + if v.usageCount != count { + count = v.usageCount + maxUsageCountIndex++ + } + v.usageCountIndex = maxUsageCountIndex + } + + sort.Slice(toDelete, func(i, j int) bool { + return float64(toDelete[i].lastUsedAtIndex)/float64(maxLastUsedIndex)+ + float64(toDelete[i].usageCountIndex)/float64(maxUsageCountIndex) < + float64(toDelete[j].lastUsedAtIndex)/float64(maxLastUsedIndex)+ + float64(toDelete[j].usageCountIndex)/float64(maxUsageCountIndex) + }) +} + +func diffIDFromDescriptor(desc ocispec.Descriptor) (digest.Digest, error) { + diffIDStr, ok := desc.Annotations["containerd.io/uncompressed"] + if !ok { + return "", errors.Errorf("missing uncompressed annotation for %s", desc.Digest) + } + diffID, err := digest.Parse(diffIDStr) + if err != nil { + return "", errors.Wrapf(err, "failed to parse diffID %q for %s", diffIDStr, desc.Digest) + } + return diffID, nil +} diff --git a/vendor/github.com/moby/buildkit/cache/metadata.go b/vendor/github.com/moby/buildkit/cache/metadata.go new file mode 100644 index 00000000..bf4041e0 --- /dev/null +++ b/vendor/github.com/moby/buildkit/cache/metadata.go @@ -0,0 +1,501 @@ +package cache + +import ( + "time" + + "github.com/moby/buildkit/cache/metadata" + "github.com/moby/buildkit/client" + "github.com/pkg/errors" + bolt "go.etcd.io/bbolt" +) + +const sizeUnknown int64 = -1 
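+
+// (Editor's note, illustrative sketch of the pattern used throughout this
+// file: setters either Queue a write for the item's next Commit or Update
+// it immediately in its own transaction, and getters return zero values
+// when a key is missing or unreadable. For example, with a hypothetical
+// store of type *metadata.Store:
+//
+//	si, _ := store.Get(id)
+//	_ = queueDescription(si, "layer from frontend") // staged
+//	_ = si.Commit()                                 // flushed atomically
+//
+// manager.go batches its queue* calls the same way before rec.md.Commit.)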
+const keySize = "snapshot.size" +const keyEqualMutable = "cache.equalMutable" +const keyCachePolicy = "cache.cachePolicy" +const keyDescription = "cache.description" +const keyCreatedAt = "cache.createdAt" +const keyLastUsedAt = "cache.lastUsedAt" +const keyUsageCount = "cache.usageCount" +const keyLayerType = "cache.layerType" +const keyRecordType = "cache.recordType" +const keyCommitted = "snapshot.committed" +const keyParent = "cache.parent" +const keyDiffID = "cache.diffID" +const keyChainID = "cache.chainID" +const keyBlobChainID = "cache.blobChainID" +const keyBlob = "cache.blob" +const keySnapshot = "cache.snapshot" +const keyBlobOnly = "cache.blobonly" +const keyMediaType = "cache.mediatype" + +const keyDeleted = "cache.deleted" + +func queueDiffID(si *metadata.StorageItem, str string) error { + if str == "" { + return nil + } + v, err := metadata.NewValue(str) + if err != nil { + return errors.Wrap(err, "failed to create diffID value") + } + si.Update(func(b *bolt.Bucket) error { + return si.SetValue(b, keyDiffID, v) + }) + return nil +} + +func getMediaType(si *metadata.StorageItem) string { + v := si.Get(keyMediaType) + if v == nil { + return si.ID() + } + var str string + if err := v.Unmarshal(&str); err != nil { + return "" + } + return str +} + +func queueMediaType(si *metadata.StorageItem, str string) error { + if str == "" { + return nil + } + v, err := metadata.NewValue(str) + if err != nil { + return errors.Wrap(err, "failed to create mediaType value") + } + si.Queue(func(b *bolt.Bucket) error { + return si.SetValue(b, keyMediaType, v) + }) + return nil +} + +func getSnapshotID(si *metadata.StorageItem) string { + v := si.Get(keySnapshot) + if v == nil { + return si.ID() + } + var str string + if err := v.Unmarshal(&str); err != nil { + return "" + } + return str +} + +func queueSnapshotID(si *metadata.StorageItem, str string) error { + if str == "" { + return nil + } + v, err := metadata.NewValue(str) + if err != nil { + return errors.Wrap(err, "failed to create chainID value") + } + si.Queue(func(b *bolt.Bucket) error { + return si.SetValue(b, keySnapshot, v) + }) + return nil +} + +func getDiffID(si *metadata.StorageItem) string { + v := si.Get(keyDiffID) + if v == nil { + return "" + } + var str string + if err := v.Unmarshal(&str); err != nil { + return "" + } + return str +} + +func queueChainID(si *metadata.StorageItem, str string) error { + if str == "" { + return nil + } + v, err := metadata.NewValue(str) + if err != nil { + return errors.Wrap(err, "failed to create chainID value") + } + v.Index = "chainid:" + str + si.Update(func(b *bolt.Bucket) error { + return si.SetValue(b, keyChainID, v) + }) + return nil +} + +func getBlobChainID(si *metadata.StorageItem) string { + v := si.Get(keyBlobChainID) + if v == nil { + return "" + } + var str string + if err := v.Unmarshal(&str); err != nil { + return "" + } + return str +} + +func queueBlobChainID(si *metadata.StorageItem, str string) error { + if str == "" { + return nil + } + v, err := metadata.NewValue(str) + if err != nil { + return errors.Wrap(err, "failed to create chainID value") + } + v.Index = "blobchainid:" + str + si.Update(func(b *bolt.Bucket) error { + return si.SetValue(b, keyBlobChainID, v) + }) + return nil +} + +func getChainID(si *metadata.StorageItem) string { + v := si.Get(keyChainID) + if v == nil { + return "" + } + var str string + if err := v.Unmarshal(&str); err != nil { + return "" + } + return str +} + +func queueBlob(si *metadata.StorageItem, str string) error { + if str == "" { + 
return nil + } + v, err := metadata.NewValue(str) + if err != nil { + return errors.Wrap(err, "failed to create blob value") + } + si.Update(func(b *bolt.Bucket) error { + return si.SetValue(b, keyBlob, v) + }) + return nil +} + +func getBlob(si *metadata.StorageItem) string { + v := si.Get(keyBlob) + if v == nil { + return "" + } + var str string + if err := v.Unmarshal(&str); err != nil { + return "" + } + return str +} + +func queueBlobOnly(si *metadata.StorageItem, b bool) error { + v, err := metadata.NewValue(b) + if err != nil { + return errors.Wrap(err, "failed to create blobonly value") + } + si.Queue(func(b *bolt.Bucket) error { + return si.SetValue(b, keyBlobOnly, v) + }) + return nil +} + +func getBlobOnly(si *metadata.StorageItem) bool { + v := si.Get(keyBlobOnly) + if v == nil { + return false + } + var blobOnly bool + if err := v.Unmarshal(&blobOnly); err != nil { + return false + } + return blobOnly +} + +func setDeleted(si *metadata.StorageItem) error { + v, err := metadata.NewValue(true) + if err != nil { + return errors.Wrap(err, "failed to create deleted value") + } + si.Update(func(b *bolt.Bucket) error { + return si.SetValue(b, keyDeleted, v) + }) + return nil +} + +func getDeleted(si *metadata.StorageItem) bool { + v := si.Get(keyDeleted) + if v == nil { + return false + } + var deleted bool + if err := v.Unmarshal(&deleted); err != nil { + return false + } + return deleted +} + +func queueCommitted(si *metadata.StorageItem) error { + v, err := metadata.NewValue(true) + if err != nil { + return errors.Wrap(err, "failed to create committed value") + } + si.Queue(func(b *bolt.Bucket) error { + return si.SetValue(b, keyCommitted, v) + }) + return nil +} + +func getCommitted(si *metadata.StorageItem) bool { + v := si.Get(keyCommitted) + if v == nil { + return false + } + var committed bool + if err := v.Unmarshal(&committed); err != nil { + return false + } + return committed +} + +func queueParent(si *metadata.StorageItem, parent string) error { + if parent == "" { + return nil + } + v, err := metadata.NewValue(parent) + if err != nil { + return errors.Wrap(err, "failed to create parent value") + } + si.Update(func(b *bolt.Bucket) error { + return si.SetValue(b, keyParent, v) + }) + return nil +} + +func getParent(si *metadata.StorageItem) string { + v := si.Get(keyParent) + if v == nil { + return "" + } + var parent string + if err := v.Unmarshal(&parent); err != nil { + return "" + } + return parent +} + +func setSize(si *metadata.StorageItem, s int64) error { + v, err := metadata.NewValue(s) + if err != nil { + return errors.Wrap(err, "failed to create size value") + } + si.Queue(func(b *bolt.Bucket) error { + return si.SetValue(b, keySize, v) + }) + return nil +} + +func getSize(si *metadata.StorageItem) int64 { + v := si.Get(keySize) + if v == nil { + return sizeUnknown + } + var size int64 + if err := v.Unmarshal(&size); err != nil { + return sizeUnknown + } + return size +} + +func getEqualMutable(si *metadata.StorageItem) string { + v := si.Get(keyEqualMutable) + if v == nil { + return "" + } + var str string + if err := v.Unmarshal(&str); err != nil { + return "" + } + return str +} + +func setEqualMutable(si *metadata.StorageItem, s string) error { + v, err := metadata.NewValue(s) + if err != nil { + return errors.Wrapf(err, "failed to create %s meta value", keyEqualMutable) + } + si.Queue(func(b *bolt.Bucket) error { + return si.SetValue(b, keyEqualMutable, v) + }) + return nil +} + +func clearEqualMutable(si *metadata.StorageItem) error { + si.Queue(func(b 
*bolt.Bucket) error { + return si.SetValue(b, keyEqualMutable, nil) + }) + return nil +} + +func queueCachePolicy(si *metadata.StorageItem, p cachePolicy) error { + v, err := metadata.NewValue(p) + if err != nil { + return errors.Wrap(err, "failed to create cachePolicy value") + } + si.Queue(func(b *bolt.Bucket) error { + return si.SetValue(b, keyCachePolicy, v) + }) + return nil +} + +func getCachePolicy(si *metadata.StorageItem) cachePolicy { + v := si.Get(keyCachePolicy) + if v == nil { + return cachePolicyDefault + } + var p cachePolicy + if err := v.Unmarshal(&p); err != nil { + return cachePolicyDefault + } + return p +} + +func queueDescription(si *metadata.StorageItem, descr string) error { + v, err := metadata.NewValue(descr) + if err != nil { + return errors.Wrap(err, "failed to create description value") + } + si.Queue(func(b *bolt.Bucket) error { + return si.SetValue(b, keyDescription, v) + }) + return nil +} + +func GetDescription(si *metadata.StorageItem) string { + v := si.Get(keyDescription) + if v == nil { + return "" + } + var str string + if err := v.Unmarshal(&str); err != nil { + return "" + } + return str +} + +func queueCreatedAt(si *metadata.StorageItem, tm time.Time) error { + v, err := metadata.NewValue(tm.UnixNano()) + if err != nil { + return errors.Wrap(err, "failed to create createdAt value") + } + si.Queue(func(b *bolt.Bucket) error { + return si.SetValue(b, keyCreatedAt, v) + }) + return nil +} + +func GetCreatedAt(si *metadata.StorageItem) time.Time { + v := si.Get(keyCreatedAt) + if v == nil { + return time.Time{} + } + var tm int64 + if err := v.Unmarshal(&tm); err != nil { + return time.Time{} + } + return time.Unix(tm/1e9, tm%1e9) +} + +func getLastUsed(si *metadata.StorageItem) (int, *time.Time) { + v := si.Get(keyUsageCount) + if v == nil { + return 0, nil + } + var usageCount int + if err := v.Unmarshal(&usageCount); err != nil { + return 0, nil + } + v = si.Get(keyLastUsedAt) + if v == nil { + return usageCount, nil + } + var lastUsedTs int64 + if err := v.Unmarshal(&lastUsedTs); err != nil || lastUsedTs == 0 { + return usageCount, nil + } + tm := time.Unix(lastUsedTs/1e9, lastUsedTs%1e9) + return usageCount, &tm +} + +func updateLastUsed(si *metadata.StorageItem) error { + count, _ := getLastUsed(si) + count++ + + v, err := metadata.NewValue(count) + if err != nil { + return errors.Wrap(err, "failed to create usageCount value") + } + v2, err := metadata.NewValue(time.Now().UnixNano()) + if err != nil { + return errors.Wrap(err, "failed to create lastUsedAt value") + } + return si.Update(func(b *bolt.Bucket) error { + if err := si.SetValue(b, keyUsageCount, v); err != nil { + return err + } + return si.SetValue(b, keyLastUsedAt, v2) + }) +} + +func SetLayerType(m withMetadata, value string) error { + v, err := metadata.NewValue(value) + if err != nil { + return errors.Wrap(err, "failed to create layertype value") + } + m.Metadata().Queue(func(b *bolt.Bucket) error { + return m.Metadata().SetValue(b, keyLayerType, v) + }) + return m.Metadata().Commit() +} + +func GetLayerType(m withMetadata) string { + v := m.Metadata().Get(keyLayerType) + if v == nil { + return "" + } + var str string + if err := v.Unmarshal(&str); err != nil { + return "" + } + return str +} + +func GetRecordType(m withMetadata) client.UsageRecordType { + v := m.Metadata().Get(keyRecordType) + if v == nil { + return "" + } + var str string + if err := v.Unmarshal(&str); err != nil { + return "" + } + return client.UsageRecordType(str) +} + +func SetRecordType(m withMetadata, value 
client.UsageRecordType) error { + if err := queueRecordType(m.Metadata(), value); err != nil { + return err + } + return m.Metadata().Commit() +} + +func queueRecordType(si *metadata.StorageItem, value client.UsageRecordType) error { + v, err := metadata.NewValue(value) + if err != nil { + return errors.Wrap(err, "failed to create recordtype value") + } + si.Queue(func(b *bolt.Bucket) error { + return si.SetValue(b, keyRecordType, v) + }) + return nil +} diff --git a/vendor/github.com/moby/buildkit/cache/metadata/metadata.go b/vendor/github.com/moby/buildkit/cache/metadata/metadata.go new file mode 100644 index 00000000..42e8cb40 --- /dev/null +++ b/vendor/github.com/moby/buildkit/cache/metadata/metadata.go @@ -0,0 +1,394 @@ +package metadata + +import ( + "bytes" + "encoding/json" + "strings" + "sync" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + bolt "go.etcd.io/bbolt" +) + +const ( + mainBucket = "_main" + indexBucket = "_index" + externalBucket = "_external" +) + +var errNotFound = errors.Errorf("not found") + +type Store struct { + db *bolt.DB +} + +func NewStore(dbPath string) (*Store, error) { + db, err := bolt.Open(dbPath, 0600, nil) + if err != nil { + return nil, errors.Wrapf(err, "failed to open database file %s", dbPath) + } + return &Store{db: db}, nil +} + +func (s *Store) DB() *bolt.DB { + return s.db +} + +func (s *Store) All() ([]*StorageItem, error) { + var out []*StorageItem + err := s.db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte(mainBucket)) + if b == nil { + return nil + } + return b.ForEach(func(key, _ []byte) error { + b := b.Bucket(key) + if b == nil { + return nil + } + si, err := newStorageItem(string(key), b, s) + if err != nil { + return err + } + out = append(out, si) + return nil + }) + }) + return out, errors.WithStack(err) +} + +func (s *Store) Probe(index string) (bool, error) { + var exists bool + err := s.db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte(indexBucket)) + if b == nil { + return nil + } + main := tx.Bucket([]byte(mainBucket)) + if main == nil { + return nil + } + search := []byte(indexKey(index, "")) + c := b.Cursor() + k, _ := c.Seek(search) + if k != nil && bytes.HasPrefix(k, search) { + exists = true + } + return nil + }) + return exists, errors.WithStack(err) +} + +func (s *Store) Search(index string) ([]*StorageItem, error) { + var out []*StorageItem + err := s.db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte(indexBucket)) + if b == nil { + return nil + } + main := tx.Bucket([]byte(mainBucket)) + if main == nil { + return nil + } + index = indexKey(index, "") + c := b.Cursor() + k, _ := c.Seek([]byte(index)) + for { + if k != nil && strings.HasPrefix(string(k), index) { + itemID := strings.TrimPrefix(string(k), index) + k, _ = c.Next() + b := main.Bucket([]byte(itemID)) + if b == nil { + logrus.Errorf("index pointing to missing record %s", itemID) + continue + } + si, err := newStorageItem(itemID, b, s) + if err != nil { + return err + } + out = append(out, si) + } else { + break + } + } + return nil + }) + return out, errors.WithStack(err) +} + +func (s *Store) View(id string, fn func(b *bolt.Bucket) error) error { + return s.db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte(mainBucket)) + if b == nil { + return errors.WithStack(errNotFound) + } + b = b.Bucket([]byte(id)) + if b == nil { + return errors.WithStack(errNotFound) + } + return fn(b) + }) +} + +func (s *Store) Clear(id string) error { + return errors.WithStack(s.db.Update(func(tx *bolt.Tx) error { + external := 
tx.Bucket([]byte(externalBucket)) + if external != nil { + external.DeleteBucket([]byte(id)) + } + main := tx.Bucket([]byte(mainBucket)) + if main == nil { + return nil + } + b := main.Bucket([]byte(id)) + if b == nil { + return nil + } + si, err := newStorageItem(id, b, s) + if err != nil { + return err + } + if indexes := si.Indexes(); len(indexes) > 0 { + b := tx.Bucket([]byte(indexBucket)) + if b != nil { + for _, index := range indexes { + if err := b.Delete([]byte(indexKey(index, id))); err != nil { + return err + } + } + } + } + return main.DeleteBucket([]byte(id)) + })) +} + +func (s *Store) Update(id string, fn func(b *bolt.Bucket) error) error { + return errors.WithStack(s.db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucketIfNotExists([]byte(mainBucket)) + if err != nil { + return errors.WithStack(err) + } + b, err = b.CreateBucketIfNotExists([]byte(id)) + if err != nil { + return errors.WithStack(err) + } + return fn(b) + })) +} + +func (s *Store) Get(id string) (*StorageItem, bool) { + empty := func() *StorageItem { + si, _ := newStorageItem(id, nil, s) + return si + } + tx, err := s.db.Begin(false) + if err != nil { + return empty(), false + } + defer tx.Rollback() + b := tx.Bucket([]byte(mainBucket)) + if b == nil { + return empty(), false + } + b = b.Bucket([]byte(id)) + if b == nil { + return empty(), false + } + si, _ := newStorageItem(id, b, s) + return si, true +} + +func (s *Store) Close() error { + return errors.WithStack(s.db.Close()) +} + +type StorageItem struct { + id string + values map[string]*Value + queue []func(*bolt.Bucket) error + storage *Store + mu sync.RWMutex +} + +func newStorageItem(id string, b *bolt.Bucket, s *Store) (*StorageItem, error) { + si := &StorageItem{ + id: id, + storage: s, + values: make(map[string]*Value), + } + if b != nil { + if err := b.ForEach(func(k, v []byte) error { + var sv Value + if len(v) > 0 { + if err := json.Unmarshal(v, &sv); err != nil { + return errors.WithStack(err) + } + si.values[string(k)] = &sv + } + return nil + }); err != nil { + return si, errors.WithStack(err) + } + } + return si, nil +} + +func (s *StorageItem) Storage() *Store { // TODO: used in local source. how to remove this? 
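+	// A minimal usage sketch of this metadata store (illustrative only, not
+	// part of the vendored source; assumes a throwaway DB path):
+	//
+	//	s, _ := NewStore("/tmp/metadata.db")
+	//	si, _ := s.Get("my-record")           // empty item if the ID is unknown
+	//	v, _ := NewValue("some-string")       // values are stored JSON-encoded
+	//	si.Queue(func(b *bolt.Bucket) error { // stage a write...
+	//		return si.SetValue(b, "my-key", v)
+	//	})
+	//	_ = si.Commit()                       // ...and flush the queue in one transaction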
+ return s.storage +} + +func (s *StorageItem) ID() string { + return s.id +} + +func (s *StorageItem) View(fn func(b *bolt.Bucket) error) error { + return s.storage.View(s.id, fn) +} + +func (s *StorageItem) Update(fn func(b *bolt.Bucket) error) error { + return s.storage.Update(s.id, fn) +} + +func (s *StorageItem) Metadata() *StorageItem { + return s +} + +func (s *StorageItem) Keys() []string { + keys := make([]string, 0, len(s.values)) + for k := range s.values { + keys = append(keys, k) + } + return keys +} + +func (s *StorageItem) Get(k string) *Value { + s.mu.RLock() + v := s.values[k] + s.mu.RUnlock() + return v +} + +func (s *StorageItem) GetExternal(k string) ([]byte, error) { + var dt []byte + err := s.storage.db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte(externalBucket)) + if b == nil { + return errors.WithStack(errNotFound) + } + b = b.Bucket([]byte(s.id)) + if b == nil { + return errors.WithStack(errNotFound) + } + dt = b.Get([]byte(k)) + if dt == nil { + return errors.WithStack(errNotFound) + } + return nil + }) + if err != nil { + return nil, errors.WithStack(err) + } + return dt, nil +} + +func (s *StorageItem) SetExternal(k string, dt []byte) error { + return errors.WithStack(s.storage.db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucketIfNotExists([]byte(externalBucket)) + if err != nil { + return errors.WithStack(err) + } + b, err = b.CreateBucketIfNotExists([]byte(s.id)) + if err != nil { + return errors.WithStack(err) + } + return b.Put([]byte(k), dt) + })) +} + +func (s *StorageItem) Queue(fn func(b *bolt.Bucket) error) { + s.mu.Lock() + defer s.mu.Unlock() + s.queue = append(s.queue, fn) +} + +func (s *StorageItem) Commit() error { + s.mu.Lock() + defer s.mu.Unlock() + return errors.WithStack(s.Update(func(b *bolt.Bucket) error { + for _, fn := range s.queue { + if err := fn(b); err != nil { + return errors.WithStack(err) + } + } + s.queue = s.queue[:0] + return nil + })) +} + +func (s *StorageItem) Indexes() (out []string) { + for _, v := range s.values { + if v.Index != "" { + out = append(out, v.Index) + } + } + return +} + +func (s *StorageItem) SetValue(b *bolt.Bucket, key string, v *Value) error { + if v == nil { + if old, ok := s.values[key]; ok { + if old.Index != "" { + b, err := b.Tx().CreateBucketIfNotExists([]byte(indexBucket)) + if err != nil { + return errors.WithStack(err) + } + b.Delete([]byte(indexKey(old.Index, s.ID()))) // ignore error + } + } + if err := b.Put([]byte(key), nil); err != nil { + return err + } + delete(s.values, key) + return nil + } + dt, err := json.Marshal(v) + if err != nil { + return errors.WithStack(err) + } + if err := b.Put([]byte(key), dt); err != nil { + return errors.WithStack(err) + } + if v.Index != "" { + b, err := b.Tx().CreateBucketIfNotExists([]byte(indexBucket)) + if err != nil { + return errors.WithStack(err) + } + if err := b.Put([]byte(indexKey(v.Index, s.ID())), []byte{}); err != nil { + return errors.WithStack(err) + } + } + s.values[key] = v + return nil +} + +type Value struct { + Value json.RawMessage `json:"value,omitempty"` + Index string `json:"index,omitempty"` +} + +func NewValue(v interface{}) (*Value, error) { + dt, err := json.Marshal(v) + if err != nil { + return nil, errors.WithStack(err) + } + return &Value{Value: json.RawMessage(dt)}, nil +} + +func (v *Value) Unmarshal(target interface{}) error { + return errors.WithStack(json.Unmarshal(v.Value, target)) +} + +func indexKey(index, target string) string { + return index + "::" + target +} diff --git 
a/vendor/github.com/moby/buildkit/cache/migrate_v2.go b/vendor/github.com/moby/buildkit/cache/migrate_v2.go new file mode 100644 index 00000000..9671c282 --- /dev/null +++ b/vendor/github.com/moby/buildkit/cache/migrate_v2.go @@ -0,0 +1,257 @@ +package cache + +import ( + "context" + "io" + "os" + "time" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/leases" + "github.com/containerd/containerd/snapshots" + "github.com/moby/buildkit/cache/metadata" + "github.com/moby/buildkit/snapshot" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +func migrateChainID(si *metadata.StorageItem, all map[string]*metadata.StorageItem) (digest.Digest, digest.Digest, error) { + diffID := digest.Digest(getDiffID(si)) + if diffID == "" { + return "", "", nil + } + blobID := digest.Digest(getBlob(si)) + if blobID == "" { + return "", "", nil + } + chainID := digest.Digest(getChainID(si)) + blobChainID := digest.Digest(getBlobChainID(si)) + + if chainID != "" && blobChainID != "" { + return chainID, blobChainID, nil + } + + chainID = diffID + blobChainID = digest.FromBytes([]byte(blobID + " " + diffID)) + + parent := getParent(si) + if parent != "" { + pChainID, pBlobChainID, err := migrateChainID(all[parent], all) + if err != nil { + return "", "", err + } + chainID = digest.FromBytes([]byte(pChainID + " " + chainID)) + blobChainID = digest.FromBytes([]byte(pBlobChainID + " " + blobChainID)) + } + + queueChainID(si, chainID.String()) + queueBlobChainID(si, blobChainID.String()) + + return chainID, blobChainID, si.Commit() +} + +func MigrateV2(ctx context.Context, from, to string, cs content.Store, s snapshot.Snapshotter, lm leases.Manager) error { + _, err := os.Stat(to) + if err != nil { + if !os.IsNotExist(errors.Cause(err)) { + return errors.WithStack(err) + } + } else { + return nil + } + + _, err = os.Stat(from) + if err != nil { + if !os.IsNotExist(errors.Cause(err)) { + return errors.WithStack(err) + } + return nil + } + tmpPath := to + ".tmp" + tmpFile, err := os.Create(tmpPath) + if err != nil { + return errors.WithStack(err) + } + src, err := os.Open(from) + if err != nil { + tmpFile.Close() + return errors.WithStack(err) + } + if _, err = io.Copy(tmpFile, src); err != nil { + tmpFile.Close() + src.Close() + return errors.Wrapf(err, "failed to copy db for migration") + } + src.Close() + tmpFile.Close() + + md, err := metadata.NewStore(tmpPath) + if err != nil { + return err + } + + items, err := md.All() + if err != nil { + return err + } + + byID := map[string]*metadata.StorageItem{} + for _, item := range items { + byID[item.ID()] = item + } + + // add committed, parent, snapshot + for id, item := range byID { + em := getEqualMutable(item) + if em == "" { + info, err := s.Stat(ctx, id) + if err != nil { + return err + } + if info.Kind == snapshots.KindCommitted { + queueCommitted(item) + } + if info.Parent != "" { + queueParent(item, info.Parent) + } + } else { + queueCommitted(item) + } + queueSnapshotID(item, id) + item.Commit() + } + + for _, item := range byID { + em := getEqualMutable(item) + if em != "" { + if getParent(item) == "" { + queueParent(item, getParent(byID[em])) + item.Commit() + } + } + } + + type diffPair struct { + Blobsum string + DiffID string + } + // move diffID, blobsum to new location + for _, item := range byID { + v := item.Get("blobmapping.blob") + if v == nil { + continue + } + var blob diffPair 
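+		// the legacy layout stored the blobsum/diffID pair as one JSON value
+		// under "blobmapping.blob"; unpack it so the fields can be re-queued
+		// under the new per-key names below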
+ if err := v.Unmarshal(&blob); err != nil { + return errors.WithStack(err) + } + if _, err := cs.Info(ctx, digest.Digest(blob.Blobsum)); err != nil { + continue + } + queueDiffID(item, blob.DiffID) + queueBlob(item, blob.Blobsum) + queueMediaType(item, images.MediaTypeDockerSchema2LayerGzip) + if err := item.Commit(); err != nil { + return err + } + + } + + // calculate new chainid/blobsumid + for _, item := range byID { + if _, _, err := migrateChainID(item, byID); err != nil { + return err + } + } + + ctx = context.TODO() // no cancellation allowed pass this point + + // add new leases + for _, item := range byID { + l, err := lm.Create(ctx, func(l *leases.Lease) error { + l.ID = item.ID() + l.Labels = map[string]string{ + "containerd.io/gc.flat": time.Now().UTC().Format(time.RFC3339Nano), + } + return nil + }) + if err != nil { + // if we are running the migration twice + if errdefs.IsAlreadyExists(err) { + continue + } + return errors.Wrap(err, "failed to create lease") + } + + if err := lm.AddResource(ctx, l, leases.Resource{ + ID: getSnapshotID(item), + Type: "snapshots/" + s.Name(), + }); err != nil { + return errors.Wrapf(err, "failed to add snapshot %s to lease", item.ID()) + } + + if blobID := getBlob(item); blobID != "" { + if err := lm.AddResource(ctx, l, leases.Resource{ + ID: blobID, + Type: "content", + }); err != nil { + return errors.Wrapf(err, "failed to add blob %s to lease", item.ID()) + } + } + } + + // remove old root labels + for _, item := range byID { + if _, err := s.Update(ctx, snapshots.Info{ + Name: getSnapshotID(item), + }, "labels.containerd.io/gc.root"); err != nil { + if !errdefs.IsNotFound(errors.Cause(err)) { + return err + } + } + + if blob := getBlob(item); blob != "" { + if _, err := cs.Update(ctx, content.Info{ + Digest: digest.Digest(blob), + }, "labels.containerd.io/gc.root"); err != nil { + return err + } + } + } + + // previous implementation can leak views, just clean up all views + err = s.Walk(ctx, func(ctx context.Context, info snapshots.Info) error { + if info.Kind == snapshots.KindView { + if _, err := s.Update(ctx, snapshots.Info{ + Name: info.Name, + }, "labels.containerd.io/gc.root"); err != nil { + if !errdefs.IsNotFound(errors.Cause(err)) { + return err + } + } + } + return nil + }) + if err != nil { + return err + } + + // switch to new DB + if err := md.Close(); err != nil { + return err + } + + if err := os.Rename(tmpPath, to); err != nil { + return err + } + + for _, item := range byID { + logrus.Infof("migrated %s parent:%q snapshot:%v committed:%v blob:%v diffid:%v chainID:%v blobChainID:%v", + item.ID(), getParent(item), getSnapshotID(item), getCommitted(item), getBlob(item), getDiffID(item), getChainID(item), getBlobChainID(item)) + } + + return nil +} diff --git a/vendor/github.com/moby/buildkit/cache/refs.go b/vendor/github.com/moby/buildkit/cache/refs.go new file mode 100644 index 00000000..5990edb5 --- /dev/null +++ b/vendor/github.com/moby/buildkit/cache/refs.go @@ -0,0 +1,693 @@ +package cache + +import ( + "context" + "fmt" + "strings" + "sync" + "time" + + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/leases" + "github.com/containerd/containerd/mount" + "github.com/containerd/containerd/snapshots" + "github.com/docker/docker/pkg/idtools" + "github.com/moby/buildkit/cache/metadata" + "github.com/moby/buildkit/identity" + "github.com/moby/buildkit/snapshot" + "github.com/moby/buildkit/util/flightcontrol" + "github.com/moby/buildkit/util/leaseutil" + "github.com/opencontainers/go-digest" + 
imagespecidentity "github.com/opencontainers/image-spec/identity" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// Ref is a reference to cacheable objects. +type Ref interface { + Mountable + ID() string + Release(context.Context) error + Size(ctx context.Context) (int64, error) + Metadata() *metadata.StorageItem + IdentityMapping() *idtools.IdentityMapping +} + +type ImmutableRef interface { + Ref + Parent() ImmutableRef + Finalize(ctx context.Context, commit bool) error // Make sure reference is flushed to driver + Clone() ImmutableRef + + Info() RefInfo + SetBlob(ctx context.Context, desc ocispec.Descriptor) error + Extract(ctx context.Context) error // +progress +} + +type RefInfo struct { + SnapshotID string + ChainID digest.Digest + BlobChainID digest.Digest + DiffID digest.Digest + Blob digest.Digest + MediaType string + Extracted bool +} + +type MutableRef interface { + Ref + Commit(context.Context) (ImmutableRef, error) +} + +type Mountable interface { + Mount(ctx context.Context, readonly bool) (snapshot.Mountable, error) +} + +type ref interface { + updateLastUsed() bool +} + +type cacheRecord struct { + cm *cacheManager + mu *sync.Mutex // the mutex is shared by records sharing data + + mutable bool + refs map[ref]struct{} + parent *immutableRef + md *metadata.StorageItem + + // dead means record is marked as deleted + dead bool + + view string + viewMount snapshot.Mountable + + sizeG flightcontrol.Group + + // these are filled if multiple refs point to same data + equalMutable *mutableRef + equalImmutable *immutableRef + + parentChainCache []digest.Digest +} + +// hold ref lock before calling +func (cr *cacheRecord) ref(triggerLastUsed bool) *immutableRef { + ref := &immutableRef{cacheRecord: cr, triggerLastUsed: triggerLastUsed} + cr.refs[ref] = struct{}{} + return ref +} + +// hold ref lock before calling +func (cr *cacheRecord) mref(triggerLastUsed bool) *mutableRef { + ref := &mutableRef{cacheRecord: cr, triggerLastUsed: triggerLastUsed} + cr.refs[ref] = struct{}{} + return ref +} + +func (cr *cacheRecord) parentChain() []digest.Digest { + if cr.parentChainCache != nil { + return cr.parentChainCache + } + blob := getBlob(cr.md) + if blob == "" { + return nil + } + + var parent []digest.Digest + if cr.parent != nil { + parent = cr.parent.parentChain() + } + pcc := make([]digest.Digest, len(parent)+1) + copy(pcc, parent) + pcc[len(parent)] = digest.Digest(blob) + cr.parentChainCache = pcc + return pcc +} + +// hold ref lock before calling +func (cr *cacheRecord) isDead() bool { + return cr.dead || (cr.equalImmutable != nil && cr.equalImmutable.dead) || (cr.equalMutable != nil && cr.equalMutable.dead) +} + +func (cr *cacheRecord) IdentityMapping() *idtools.IdentityMapping { + return cr.cm.IdentityMapping() +} + +func (cr *cacheRecord) Size(ctx context.Context) (int64, error) { + // this expects that usage() is implemented lazily + s, err := cr.sizeG.Do(ctx, cr.ID(), func(ctx context.Context) (interface{}, error) { + cr.mu.Lock() + s := getSize(cr.md) + if s != sizeUnknown { + cr.mu.Unlock() + return s, nil + } + driverID := getSnapshotID(cr.md) + if cr.equalMutable != nil { + driverID = getSnapshotID(cr.equalMutable.md) + } + cr.mu.Unlock() + var usage snapshots.Usage + if !getBlobOnly(cr.md) { + var err error + usage, err = cr.cm.ManagerOpt.Snapshotter.Usage(ctx, driverID) + if err != nil { + cr.mu.Lock() + isDead := cr.isDead() + cr.mu.Unlock() + if isDead { + return int64(0), nil + } + if 
!errdefs.IsNotFound(err) { + return s, errors.Wrapf(err, "failed to get usage for %s", cr.ID()) + } + } + } + if dgst := getBlob(cr.md); dgst != "" { + info, err := cr.cm.ContentStore.Info(ctx, digest.Digest(dgst)) + if err == nil { + usage.Size += info.Size + } + } + cr.mu.Lock() + setSize(cr.md, usage.Size) + if err := cr.md.Commit(); err != nil { + cr.mu.Unlock() + return s, err + } + cr.mu.Unlock() + return usage.Size, nil + }) + if err != nil { + return 0, err + } + return s.(int64), nil +} + +func (cr *cacheRecord) Parent() ImmutableRef { + if p := cr.parentRef(true); p != nil { // avoid returning typed nil pointer + return p + } + return nil +} + +func (cr *cacheRecord) parentRef(hidden bool) *immutableRef { + p := cr.parent + if p == nil { + return nil + } + p.mu.Lock() + defer p.mu.Unlock() + return p.ref(hidden) +} + +func (cr *cacheRecord) Mount(ctx context.Context, readonly bool) (snapshot.Mountable, error) { + cr.mu.Lock() + defer cr.mu.Unlock() + + if cr.mutable { + m, err := cr.cm.Snapshotter.Mounts(ctx, getSnapshotID(cr.md)) + if err != nil { + return nil, errors.Wrapf(err, "failed to mount %s", cr.ID()) + } + if readonly { + m = setReadonly(m) + } + return m, nil + } + + if cr.equalMutable != nil && readonly { + m, err := cr.cm.Snapshotter.Mounts(ctx, getSnapshotID(cr.equalMutable.md)) + if err != nil { + return nil, errors.Wrapf(err, "failed to mount %s", cr.equalMutable.ID()) + } + return setReadonly(m), nil + } + + if err := cr.finalize(ctx, true); err != nil { + return nil, err + } + if cr.viewMount == nil { // TODO: handle this better + view := identity.NewID() + l, err := cr.cm.LeaseManager.Create(ctx, func(l *leases.Lease) error { + l.ID = view + l.Labels = map[string]string{ + "containerd.io/gc.flat": time.Now().UTC().Format(time.RFC3339Nano), + } + return nil + }, leaseutil.MakeTemporary) + if err != nil { + return nil, err + } + ctx = leases.WithLease(ctx, l.ID) + m, err := cr.cm.Snapshotter.View(ctx, view, getSnapshotID(cr.md)) + if err != nil { + cr.cm.LeaseManager.Delete(context.TODO(), leases.Lease{ID: l.ID}) + return nil, errors.Wrapf(err, "failed to mount %s", cr.ID()) + } + cr.view = view + cr.viewMount = m + } + return cr.viewMount, nil +} + +// call when holding the manager lock +func (cr *cacheRecord) remove(ctx context.Context, removeSnapshot bool) error { + delete(cr.cm.records, cr.ID()) + if cr.parent != nil { + cr.parent.mu.Lock() + err := cr.parent.release(ctx) + cr.parent.mu.Unlock() + if err != nil { + return err + } + } + if removeSnapshot { + if err := cr.cm.LeaseManager.Delete(context.TODO(), leases.Lease{ID: cr.ID()}); err != nil { + return errors.Wrapf(err, "failed to remove %s", cr.ID()) + } + } + if err := cr.cm.md.Clear(cr.ID()); err != nil { + return err + } + return nil +} + +func (cr *cacheRecord) ID() string { + return cr.md.ID() +} + +type immutableRef struct { + *cacheRecord + triggerLastUsed bool +} + +type mutableRef struct { + *cacheRecord + triggerLastUsed bool +} + +func (sr *immutableRef) Clone() ImmutableRef { + sr.mu.Lock() + ref := sr.ref(false) + sr.mu.Unlock() + return ref +} + +func (sr *immutableRef) Info() RefInfo { + return RefInfo{ + ChainID: digest.Digest(getChainID(sr.md)), + DiffID: digest.Digest(getDiffID(sr.md)), + Blob: digest.Digest(getBlob(sr.md)), + MediaType: getMediaType(sr.md), + BlobChainID: digest.Digest(getBlobChainID(sr.md)), + SnapshotID: getSnapshotID(sr.md), + Extracted: !getBlobOnly(sr.md), + } +} + +func (sr *immutableRef) Extract(ctx context.Context) error { + _, err := sr.sizeG.Do(ctx, 
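+	// sizeG doubles as a per-record singleflight here: concurrent Extract
+	// calls for the same ref are collapsed into a single apply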
sr.ID()+"-extract", func(ctx context.Context) (interface{}, error) { + snapshotID := getSnapshotID(sr.md) + if _, err := sr.cm.Snapshotter.Stat(ctx, snapshotID); err == nil { + queueBlobOnly(sr.md, false) + return nil, sr.md.Commit() + } + + parentID := "" + if sr.parent != nil { + if err := sr.parent.Extract(ctx); err != nil { + return nil, err + } + parentID = getSnapshotID(sr.parent.md) + } + info := sr.Info() + key := fmt.Sprintf("extract-%s %s", identity.NewID(), info.ChainID) + + err := sr.cm.Snapshotter.Prepare(ctx, key, parentID) + if err != nil { + return nil, err + } + + mountable, err := sr.cm.Snapshotter.Mounts(ctx, key) + if err != nil { + return nil, err + } + mounts, unmount, err := mountable.Mount() + if err != nil { + return nil, err + } + _, err = sr.cm.Applier.Apply(ctx, ocispec.Descriptor{ + Digest: info.Blob, + MediaType: info.MediaType, + }, mounts) + if err != nil { + unmount() + return nil, err + } + + if err := unmount(); err != nil { + return nil, err + } + if err := sr.cm.Snapshotter.Commit(ctx, getSnapshotID(sr.md), key); err != nil { + if !errdefs.IsAlreadyExists(err) { + return nil, err + } + } + queueBlobOnly(sr.md, false) + if err := sr.md.Commit(); err != nil { + return nil, err + } + return nil, nil + }) + return err +} + +// SetBlob associates a blob with the cache record. +// A lease must be held for the blob when calling this function +// Caller should call Info() for knowing what current values are actually set +func (sr *immutableRef) SetBlob(ctx context.Context, desc ocispec.Descriptor) error { + diffID, err := diffIDFromDescriptor(desc) + if err != nil { + return err + } + if _, err := sr.cm.ContentStore.Info(ctx, desc.Digest); err != nil { + return err + } + + sr.mu.Lock() + defer sr.mu.Unlock() + + if getChainID(sr.md) != "" { + return nil + } + + if err := sr.finalize(ctx, true); err != nil { + return err + } + + p := sr.parent + var parentChainID digest.Digest + var parentBlobChainID digest.Digest + if p != nil { + pInfo := p.Info() + if pInfo.ChainID == "" || pInfo.BlobChainID == "" { + return errors.Errorf("failed to set blob for reference with non-addressable parent") + } + parentChainID = pInfo.ChainID + parentBlobChainID = pInfo.BlobChainID + } + + if err := sr.cm.LeaseManager.AddResource(ctx, leases.Lease{ID: sr.ID()}, leases.Resource{ + ID: desc.Digest.String(), + Type: "content", + }); err != nil { + return err + } + + queueDiffID(sr.md, diffID.String()) + queueBlob(sr.md, desc.Digest.String()) + chainID := diffID + blobChainID := imagespecidentity.ChainID([]digest.Digest{desc.Digest, diffID}) + if parentChainID != "" { + chainID = imagespecidentity.ChainID([]digest.Digest{parentChainID, chainID}) + blobChainID = imagespecidentity.ChainID([]digest.Digest{parentBlobChainID, blobChainID}) + } + queueChainID(sr.md, chainID.String()) + queueBlobChainID(sr.md, blobChainID.String()) + queueMediaType(sr.md, desc.MediaType) + if err := sr.md.Commit(); err != nil { + return err + } + return nil +} + +func (sr *immutableRef) Release(ctx context.Context) error { + sr.cm.mu.Lock() + defer sr.cm.mu.Unlock() + + sr.mu.Lock() + defer sr.mu.Unlock() + + return sr.release(ctx) +} + +func (sr *immutableRef) updateLastUsed() bool { + return sr.triggerLastUsed +} + +func (sr *immutableRef) updateLastUsedNow() bool { + if !sr.triggerLastUsed { + return false + } + for r := range sr.refs { + if r.updateLastUsed() { + return false + } + } + return true +} + +func (sr *immutableRef) release(ctx context.Context) error { + delete(sr.refs, sr) + + if 
sr.updateLastUsedNow() { + updateLastUsed(sr.md) + if sr.equalMutable != nil { + sr.equalMutable.triggerLastUsed = true + } + } + + if len(sr.refs) == 0 { + if sr.viewMount != nil { // TODO: release viewMount earlier if possible + if err := sr.cm.LeaseManager.Delete(ctx, leases.Lease{ID: sr.view}); err != nil { + return errors.Wrapf(err, "failed to remove view lease %s", sr.view) + } + sr.view = "" + sr.viewMount = nil + } + + if sr.equalMutable != nil { + sr.equalMutable.release(ctx) + } + } + + return nil +} + +func (sr *immutableRef) Finalize(ctx context.Context, b bool) error { + sr.mu.Lock() + defer sr.mu.Unlock() + + return sr.finalize(ctx, b) +} + +func (cr *cacheRecord) Metadata() *metadata.StorageItem { + return cr.md +} + +func (cr *cacheRecord) finalize(ctx context.Context, commit bool) error { + mutable := cr.equalMutable + if mutable == nil { + return nil + } + if !commit { + if HasCachePolicyRetain(mutable) { + CachePolicyRetain(mutable) + return mutable.Metadata().Commit() + } + return nil + } + + _, err := cr.cm.ManagerOpt.LeaseManager.Create(ctx, func(l *leases.Lease) error { + l.ID = cr.ID() + l.Labels = map[string]string{ + "containerd.io/gc.flat": time.Now().UTC().Format(time.RFC3339Nano), + } + return nil + }) + if err != nil { + if !errdefs.IsAlreadyExists(err) { // migrator adds leases for everything + return errors.Wrap(err, "failed to create lease") + } + } + + if err := cr.cm.ManagerOpt.LeaseManager.AddResource(ctx, leases.Lease{ID: cr.ID()}, leases.Resource{ + ID: cr.ID(), + Type: "snapshots/" + cr.cm.ManagerOpt.Snapshotter.Name(), + }); err != nil { + cr.cm.LeaseManager.Delete(context.TODO(), leases.Lease{ID: cr.ID()}) + return errors.Wrapf(err, "failed to add snapshot %s to lease", cr.ID()) + } + + err = cr.cm.Snapshotter.Commit(ctx, cr.ID(), mutable.ID()) + if err != nil { + cr.cm.LeaseManager.Delete(context.TODO(), leases.Lease{ID: cr.ID()}) + return errors.Wrapf(err, "failed to commit %s", mutable.ID()) + } + mutable.dead = true + go func() { + cr.cm.mu.Lock() + defer cr.cm.mu.Unlock() + if err := mutable.remove(context.TODO(), true); err != nil { + logrus.Error(err) + } + }() + + cr.equalMutable = nil + clearEqualMutable(cr.md) + return cr.md.Commit() +} + +func (sr *mutableRef) updateLastUsed() bool { + return sr.triggerLastUsed +} + +func (sr *mutableRef) commit(ctx context.Context) (*immutableRef, error) { + if !sr.mutable || len(sr.refs) == 0 { + return nil, errors.Wrapf(errInvalid, "invalid mutable ref %p", sr) + } + + id := identity.NewID() + md, _ := sr.cm.md.Get(id) + rec := &cacheRecord{ + mu: sr.mu, + cm: sr.cm, + parent: sr.parentRef(false), + equalMutable: sr, + refs: make(map[ref]struct{}), + md: md, + } + + if descr := GetDescription(sr.md); descr != "" { + if err := queueDescription(md, descr); err != nil { + return nil, err + } + } + + parentID := "" + if rec.parent != nil { + parentID = rec.parent.ID() + } + if err := initializeMetadata(rec, parentID); err != nil { + return nil, err + } + + sr.cm.records[id] = rec + + if err := sr.md.Commit(); err != nil { + return nil, err + } + + queueCommitted(md) + setSize(md, sizeUnknown) + setEqualMutable(md, sr.ID()) + if err := md.Commit(); err != nil { + return nil, err + } + + ref := rec.ref(true) + sr.equalImmutable = ref + return ref, nil +} + +func (sr *mutableRef) updatesLastUsed() bool { + return sr.triggerLastUsed +} + +func (sr *mutableRef) Commit(ctx context.Context) (ImmutableRef, error) { + sr.cm.mu.Lock() + defer sr.cm.mu.Unlock() + + sr.mu.Lock() + defer sr.mu.Unlock() + + return 
sr.commit(ctx)
+}
+
+func (sr *mutableRef) Release(ctx context.Context) error {
+	sr.cm.mu.Lock()
+	defer sr.cm.mu.Unlock()
+
+	sr.mu.Lock()
+	defer sr.mu.Unlock()
+
+	return sr.release(ctx)
+}
+
+func (sr *mutableRef) release(ctx context.Context) error {
+	delete(sr.refs, sr)
+	if getCachePolicy(sr.md) != cachePolicyRetain {
+		if sr.equalImmutable != nil {
+			if getCachePolicy(sr.equalImmutable.md) == cachePolicyRetain {
+				if sr.updateLastUsed() {
+					updateLastUsed(sr.md)
+					sr.triggerLastUsed = false
+				}
+				return nil
+			}
+			if err := sr.equalImmutable.remove(ctx, false); err != nil {
+				return err
+			}
+		}
+		return sr.remove(ctx, true)
+	} else {
+		if sr.updateLastUsed() {
+			updateLastUsed(sr.md)
+			sr.triggerLastUsed = false
+		}
+	}
+	return nil
+}
+
+func setReadonly(mounts snapshot.Mountable) snapshot.Mountable {
+	return &readOnlyMounter{mounts}
+}
+
+type readOnlyMounter struct {
+	snapshot.Mountable
+}
+
+func (m *readOnlyMounter) Mount() ([]mount.Mount, func() error, error) {
+	mounts, release, err := m.Mountable.Mount()
+	if err != nil {
+		return nil, nil, err
+	}
+	for i, m := range mounts {
+		if m.Type == "overlay" {
+			mounts[i].Options = readonlyOverlay(m.Options)
+			continue
+		}
+		opts := make([]string, 0, len(m.Options))
+		for _, opt := range m.Options {
+			if opt != "rw" {
+				opts = append(opts, opt)
+			}
+		}
+		opts = append(opts, "ro")
+		mounts[i].Options = opts
+	}
+	return mounts, release, nil
+}
+
+func readonlyOverlay(opt []string) []string {
+	out := make([]string, 0, len(opt))
+	upper := ""
+	for _, o := range opt {
+		if strings.HasPrefix(o, "upperdir=") {
+			upper = strings.TrimPrefix(o, "upperdir=")
+		} else if !strings.HasPrefix(o, "workdir=") {
+			out = append(out, o)
+		}
+	}
+	if upper != "" {
+		for i, o := range out {
+			if strings.HasPrefix(o, "lowerdir=") {
+				out[i] = "lowerdir=" + upper + ":" + strings.TrimPrefix(o, "lowerdir=")
+			}
+		}
+	}
+	return out
+}
diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/export.go b/vendor/github.com/moby/buildkit/cache/remotecache/export.go
new file mode 100644
index 00000000..32adf7b3
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/cache/remotecache/export.go
@@ -0,0 +1,142 @@
+package remotecache
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"fmt"
+	"time"
+
+	"github.com/containerd/containerd/content"
+	"github.com/containerd/containerd/images"
+	v1 "github.com/moby/buildkit/cache/remotecache/v1"
+	"github.com/moby/buildkit/solver"
+	"github.com/moby/buildkit/util/contentutil"
+	"github.com/moby/buildkit/util/progress"
+	digest "github.com/opencontainers/go-digest"
+	specs "github.com/opencontainers/image-spec/specs-go"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+)
+
+type ResolveCacheExporterFunc func(ctx context.Context, attrs map[string]string) (Exporter, error)
+
+func oneOffProgress(ctx context.Context, id string) func(err error) error {
+	pw, _, _ := progress.FromContext(ctx)
+	now := time.Now()
+	st := progress.Status{
+		Started: &now,
+	}
+	pw.Write(id, st)
+	return func(err error) error {
+		now := time.Now()
+		st.Completed = &now
+		pw.Write(id, st)
+		pw.Close()
+		return err
+	}
+}
+
+type Exporter interface {
+	solver.CacheExporterTarget
+	// Finalize finalizes and returns metadata that is returned to the client,
+	// e.g. ExporterResponseManifestDesc
+	Finalize(ctx context.Context) (map[string]string, error)
+}
+
+const (
+	// ExporterResponseManifestDesc is a key for the map returned from Exporter.Finalize.
+	// The map value is a JSON string of an OCI descriptor of a manifest.
+	ExporterResponseManifestDesc = "cache.manifest"
+)
+
+type contentCacheExporter struct {
+	solver.CacheExporterTarget
+	chains   *v1.CacheChains
+	ingester content.Ingester
+}
+
+func NewExporter(ingester content.Ingester) Exporter {
+	cc := v1.NewCacheChains()
+	return &contentCacheExporter{CacheExporterTarget: cc, chains: cc, ingester: ingester}
+}
+
+func (ce *contentCacheExporter) Finalize(ctx context.Context) (map[string]string, error) {
+	return export(ctx, ce.ingester, ce.chains)
+}
+
+func export(ctx context.Context, ingester content.Ingester, cc *v1.CacheChains) (map[string]string, error) {
+	res := make(map[string]string)
+	config, descs, err := cc.Marshal()
+	if err != nil {
+		return nil, err
+	}
+
+	// own type because oci type can't be pushed and docker type doesn't have annotations
+	type manifestList struct {
+		specs.Versioned
+
+		MediaType string `json:"mediaType,omitempty"`
+
+		// Manifests references platform specific manifests.
+		Manifests []ocispec.Descriptor `json:"manifests"`
+	}
+
+	var mfst manifestList
+	mfst.SchemaVersion = 2
+	mfst.MediaType = images.MediaTypeDockerSchema2ManifestList
+
+	for _, l := range config.Layers {
+		dgstPair, ok := descs[l.Blob]
+		if !ok {
+			return nil, errors.Errorf("missing blob %s", l.Blob)
+		}
+		layerDone := oneOffProgress(ctx, fmt.Sprintf("writing layer %s", l.Blob))
+		if err := contentutil.Copy(ctx, ingester, dgstPair.Provider, dgstPair.Descriptor); err != nil {
+			return nil, layerDone(errors.Wrap(err, "error writing layer blob"))
+		}
+		layerDone(nil)
+		mfst.Manifests = append(mfst.Manifests, dgstPair.Descriptor)
+	}
+
+	dt, err := json.Marshal(config)
+	if err != nil {
+		return nil, err
+	}
+	dgst := digest.FromBytes(dt)
+	desc := ocispec.Descriptor{
+		Digest:    dgst,
+		Size:      int64(len(dt)),
+		MediaType: v1.CacheConfigMediaTypeV0,
+	}
+	configDone := oneOffProgress(ctx, fmt.Sprintf("writing config %s", dgst))
+	if err := content.WriteBlob(ctx, ingester, dgst.String(), bytes.NewReader(dt), desc); err != nil {
+		return nil, configDone(errors.Wrap(err, "error writing config blob"))
+	}
+	configDone(nil)
+
+	mfst.Manifests = append(mfst.Manifests, desc)
+
+	dt, err = json.Marshal(mfst)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to marshal manifest")
+	}
+	dgst = digest.FromBytes(dt)
+
+	desc = ocispec.Descriptor{
+		Digest:    dgst,
+		Size:      int64(len(dt)),
+		MediaType: mfst.MediaType,
+	}
+	mfstDone := oneOffProgress(ctx, fmt.Sprintf("writing manifest %s", dgst))
+	if err := content.WriteBlob(ctx, ingester, dgst.String(), bytes.NewReader(dt), desc); err != nil {
+		return nil, mfstDone(errors.Wrap(err, "error writing manifest blob"))
+	}
+	descJSON, err := json.Marshal(desc)
+	if err != nil {
+		return nil, err
+	}
+	res[ExporterResponseManifestDesc] = string(descJSON)
+	mfstDone(nil)
+	return res, nil
+}
diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/import.go b/vendor/github.com/moby/buildkit/cache/remotecache/import.go
new file mode 100644
index 00000000..b19ad880
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/cache/remotecache/import.go
@@ -0,0 +1,299 @@
+package remotecache
+
+import (
+	"context"
+	"encoding/json"
+	"io"
+	"sync"
+	"time"
+
+	"github.com/containerd/containerd/content"
+	"github.com/containerd/containerd/images"
+	v1 "github.com/moby/buildkit/cache/remotecache/v1"
+	"github.com/moby/buildkit/solver"
+	"github.com/moby/buildkit/util/imageutil"
+	"github.com/moby/buildkit/worker"
+	digest
"github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/sync/errgroup" +) + +// ResolveCacheImporterFunc returns importer and descriptor. +type ResolveCacheImporterFunc func(ctx context.Context, attrs map[string]string) (Importer, ocispec.Descriptor, error) + +type Importer interface { + Resolve(ctx context.Context, desc ocispec.Descriptor, id string, w worker.Worker) (solver.CacheManager, error) +} + +type DistributionSourceLabelSetter interface { + SetDistributionSourceLabel(context.Context, digest.Digest) error + SetDistributionSourceAnnotation(desc ocispec.Descriptor) ocispec.Descriptor +} + +func NewImporter(provider content.Provider) Importer { + return &contentCacheImporter{provider: provider} +} + +type contentCacheImporter struct { + provider content.Provider +} + +func (ci *contentCacheImporter) Resolve(ctx context.Context, desc ocispec.Descriptor, id string, w worker.Worker) (solver.CacheManager, error) { + dt, err := readBlob(ctx, ci.provider, desc) + if err != nil { + return nil, err + } + + var mfst ocispec.Index + if err := json.Unmarshal(dt, &mfst); err != nil { + return nil, err + } + + allLayers := v1.DescriptorProvider{} + + var configDesc ocispec.Descriptor + + for _, m := range mfst.Manifests { + if m.MediaType == v1.CacheConfigMediaTypeV0 { + configDesc = m + continue + } + allLayers[m.Digest] = v1.DescriptorProviderPair{ + Descriptor: m, + Provider: ci.provider, + } + } + + if dsls, ok := ci.provider.(DistributionSourceLabelSetter); ok { + for dgst, l := range allLayers { + err := dsls.SetDistributionSourceLabel(ctx, dgst) + _ = err // error ignored because layer may not exist + l.Descriptor = dsls.SetDistributionSourceAnnotation(l.Descriptor) + allLayers[dgst] = l + } + } + + if configDesc.Digest == "" { + return ci.importInlineCache(ctx, dt, id, w) + } + + dt, err = readBlob(ctx, ci.provider, configDesc) + if err != nil { + return nil, err + } + + cc := v1.NewCacheChains() + if err := v1.Parse(dt, allLayers, cc); err != nil { + return nil, err + } + + keysStorage, resultStorage, err := v1.NewCacheKeyStorage(cc, w) + if err != nil { + return nil, err + } + return solver.NewCacheManager(id, keysStorage, resultStorage), nil +} + +func readBlob(ctx context.Context, provider content.Provider, desc ocispec.Descriptor) ([]byte, error) { + maxBlobSize := int64(1 << 20) + if desc.Size > maxBlobSize { + return nil, errors.Errorf("blob %s is too large (%d > %d)", desc.Digest, desc.Size, maxBlobSize) + } + dt, err := content.ReadBlob(ctx, provider, desc) + if err != nil { + // NOTE: even if err == EOF, we might have got expected dt here. + // For instance, http.Response.Body is known to return non-zero bytes with EOF. 
+ if err == io.EOF { + if dtDigest := desc.Digest.Algorithm().FromBytes(dt); dtDigest != desc.Digest { + err = errors.Wrapf(err, "got EOF, expected %s (%d bytes), got %s (%d bytes)", + desc.Digest, desc.Size, dtDigest, len(dt)) + } else { + err = nil + } + } + } + return dt, errors.WithStack(err) +} + +func (ci *contentCacheImporter) importInlineCache(ctx context.Context, dt []byte, id string, w worker.Worker) (solver.CacheManager, error) { + m := map[digest.Digest][]byte{} + + if err := ci.allDistributionManifests(ctx, dt, m); err != nil { + return nil, err + } + + var mu sync.Mutex + var cMap = map[digest.Digest]*v1.CacheChains{} + + eg, ctx := errgroup.WithContext(ctx) + for dgst, dt := range m { + func(dgst digest.Digest, dt []byte) { + eg.Go(func() error { + var m ocispec.Manifest + + if err := json.Unmarshal(dt, &m); err != nil { + return errors.WithStack(err) + } + + if m.Config.Digest == "" || len(m.Layers) == 0 { + return nil + } + + if dsls, ok := ci.provider.(DistributionSourceLabelSetter); ok { + for i, l := range m.Layers { + err := dsls.SetDistributionSourceLabel(ctx, l.Digest) + _ = err // error ignored because layer may not exist + m.Layers[i] = dsls.SetDistributionSourceAnnotation(l) + } + } + + p, err := content.ReadBlob(ctx, ci.provider, m.Config) + if err != nil { + return errors.WithStack(err) + } + + var img image + + if err := json.Unmarshal(p, &img); err != nil { + return errors.WithStack(err) + } + + if len(img.Rootfs.DiffIDs) != len(m.Layers) { + logrus.Warnf("invalid image with mismatching manifest and config") + return nil + } + + if img.Cache == nil { + return nil + } + + var config v1.CacheConfig + if err := json.Unmarshal(img.Cache, &config.Records); err != nil { + return errors.WithStack(err) + } + + createdDates, createdMsg, err := parseCreatedLayerInfo(img) + if err != nil { + return err + } + + layers := v1.DescriptorProvider{} + for i, m := range m.Layers { + if m.Annotations == nil { + m.Annotations = map[string]string{} + } + if createdAt := createdDates[i]; createdAt != "" { + m.Annotations["buildkit/createdat"] = createdAt + } + if createdBy := createdMsg[i]; createdBy != "" { + m.Annotations["buildkit/description"] = createdBy + } + m.Annotations["containerd.io/uncompressed"] = img.Rootfs.DiffIDs[i].String() + layers[m.Digest] = v1.DescriptorProviderPair{ + Descriptor: m, + Provider: ci.provider, + } + config.Layers = append(config.Layers, v1.CacheLayer{ + Blob: m.Digest, + ParentIndex: i - 1, + }) + } + + dt, err = json.Marshal(config) + if err != nil { + return errors.WithStack(err) + } + cc := v1.NewCacheChains() + if err := v1.ParseConfig(config, layers, cc); err != nil { + return err + } + mu.Lock() + cMap[dgst] = cc + mu.Unlock() + return nil + }) + }(dgst, dt) + } + + if err := eg.Wait(); err != nil { + return nil, err + } + + cms := make([]solver.CacheManager, 0, len(cMap)) + + for _, cc := range cMap { + keysStorage, resultStorage, err := v1.NewCacheKeyStorage(cc, w) + if err != nil { + return nil, err + } + cms = append(cms, solver.NewCacheManager(id, keysStorage, resultStorage)) + } + + return solver.NewCombinedCacheManager(cms, nil), nil +} + +func (ci *contentCacheImporter) allDistributionManifests(ctx context.Context, dt []byte, m map[digest.Digest][]byte) error { + mt, err := imageutil.DetectManifestBlobMediaType(dt) + if err != nil { + return err + } + + switch mt { + case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest: + m[digest.FromBytes(dt)] = dt + case images.MediaTypeDockerSchema2ManifestList, 
ocispec.MediaTypeImageIndex: + var index ocispec.Index + if err := json.Unmarshal(dt, &index); err != nil { + return errors.WithStack(err) + } + + for _, d := range index.Manifests { + if _, ok := m[d.Digest]; ok { + continue + } + p, err := content.ReadBlob(ctx, ci.provider, d) + if err != nil { + return errors.WithStack(err) + } + if err := ci.allDistributionManifests(ctx, p, m); err != nil { + return err + } + } + } + + return nil +} + +type image struct { + Rootfs struct { + DiffIDs []digest.Digest `json:"diff_ids"` + } `json:"rootfs"` + Cache []byte `json:"moby.buildkit.cache.v0"` + History []struct { + Created *time.Time `json:"created,omitempty"` + CreatedBy string `json:"created_by,omitempty"` + EmptyLayer bool `json:"empty_layer,omitempty"` + } `json:"history,omitempty"` +} + +func parseCreatedLayerInfo(img image) ([]string, []string, error) { + dates := make([]string, 0, len(img.Rootfs.DiffIDs)) + createdBy := make([]string, 0, len(img.Rootfs.DiffIDs)) + for _, h := range img.History { + if !h.EmptyLayer { + str := "" + if h.Created != nil { + dt, err := h.Created.MarshalText() + if err != nil { + return nil, nil, err + } + str = string(dt) + } + dates = append(dates, str) + createdBy = append(createdBy, h.CreatedBy) + } + } + return dates, createdBy, nil +} diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/inline/inline.go b/vendor/github.com/moby/buildkit/cache/remotecache/inline/inline.go new file mode 100644 index 00000000..3cddb13d --- /dev/null +++ b/vendor/github.com/moby/buildkit/cache/remotecache/inline/inline.go @@ -0,0 +1,106 @@ +package registry + +import ( + "context" + "encoding/json" + + "github.com/moby/buildkit/cache/remotecache" + v1 "github.com/moby/buildkit/cache/remotecache/v1" + "github.com/moby/buildkit/solver" + digest "github.com/opencontainers/go-digest" + "github.com/sirupsen/logrus" +) + +func ResolveCacheExporterFunc() remotecache.ResolveCacheExporterFunc { + return func(ctx context.Context, _ map[string]string) (remotecache.Exporter, error) { + return NewExporter(), nil + } +} + +func NewExporter() remotecache.Exporter { + cc := v1.NewCacheChains() + return &exporter{CacheExporterTarget: cc, chains: cc} +} + +type exporter struct { + solver.CacheExporterTarget + chains *v1.CacheChains +} + +func (ce *exporter) Finalize(ctx context.Context) (map[string]string, error) { + return nil, nil +} + +func (ce *exporter) reset() { + cc := v1.NewCacheChains() + ce.CacheExporterTarget = cc + ce.chains = cc +} + +func (ce *exporter) ExportForLayers(layers []digest.Digest) ([]byte, error) { + config, descs, err := ce.chains.Marshal() + if err != nil { + return nil, err + } + + descs2 := map[digest.Digest]v1.DescriptorProviderPair{} + for _, k := range layers { + if v, ok := descs[k]; ok { + descs2[k] = v + continue + } + // fallback for uncompressed digests + for _, v := range descs { + if uc := v.Descriptor.Annotations["containerd.io/uncompressed"]; uc == string(k) { + descs2[v.Descriptor.Digest] = v + } + } + } + + cc := v1.NewCacheChains() + if err := v1.ParseConfig(*config, descs2, cc); err != nil { + return nil, err + } + + cfg, _, err := cc.Marshal() + if err != nil { + return nil, err + } + + if len(cfg.Layers) == 0 { + logrus.Warn("failed to match any cache with layers") + return nil, nil + } + + cache := map[int]int{} + + // reorder layers based on the order in the image + for i, r := range cfg.Records { + for j, rr := range r.Results { + n := getSortedLayerIndex(rr.LayerIndex, cfg.Layers, cache) + rr.LayerIndex = n + r.Results[j] = rr + 
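+			// getSortedLayerIndex (below) memoizes depth along the parent
+			// chain, so each result's layer index becomes its position in
+			// the depth-ordered chain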
cfg.Records[i] = r + } + } + + dt, err := json.Marshal(cfg.Records) + if err != nil { + return nil, err + } + ce.reset() + + return dt, nil +} + +func getSortedLayerIndex(idx int, layers []v1.CacheLayer, cache map[int]int) int { + if idx == -1 { + return -1 + } + l := layers[idx] + if i, ok := cache[idx]; ok { + return i + } + cache[idx] = getSortedLayerIndex(l.ParentIndex, layers, cache) + 1 + return cache[idx] +} diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/local/local.go b/vendor/github.com/moby/buildkit/cache/remotecache/local/local.go new file mode 100644 index 00000000..f66d5b4a --- /dev/null +++ b/vendor/github.com/moby/buildkit/cache/remotecache/local/local.go @@ -0,0 +1,83 @@ +package local + +import ( + "context" + "time" + + "github.com/containerd/containerd/content" + "github.com/moby/buildkit/cache/remotecache" + "github.com/moby/buildkit/session" + sessioncontent "github.com/moby/buildkit/session/content" + digest "github.com/opencontainers/go-digest" + specs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +const ( + attrDigest = "digest" + attrSrc = "src" + attrDest = "dest" + contentStoreIDPrefix = "local:" +) + +// ResolveCacheExporterFunc for "local" cache exporter. +func ResolveCacheExporterFunc(sm *session.Manager) remotecache.ResolveCacheExporterFunc { + return func(ctx context.Context, attrs map[string]string) (remotecache.Exporter, error) { + store := attrs[attrDest] + if store == "" { + return nil, errors.New("local cache exporter requires dest") + } + csID := contentStoreIDPrefix + store + cs, err := getContentStore(ctx, sm, csID) + if err != nil { + return nil, err + } + return remotecache.NewExporter(cs), nil + } +} + +// ResolveCacheImporterFunc for "local" cache importer. 
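+// It requires the "digest" and "src" attributes (attrDigest/attrSrc) and
+// resolves the content store over the client session before constructing
+// the importer.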
+func ResolveCacheImporterFunc(sm *session.Manager) remotecache.ResolveCacheImporterFunc { + return func(ctx context.Context, attrs map[string]string) (remotecache.Importer, specs.Descriptor, error) { + dgstStr := attrs[attrDigest] + if dgstStr == "" { + return nil, specs.Descriptor{}, errors.New("local cache importer requires explicit digest") + } + dgst := digest.Digest(dgstStr) + store := attrs[attrSrc] + if store == "" { + return nil, specs.Descriptor{}, errors.New("local cache importer requires src") + } + csID := contentStoreIDPrefix + store + cs, err := getContentStore(ctx, sm, csID) + if err != nil { + return nil, specs.Descriptor{}, err + } + info, err := cs.Info(ctx, dgst) + if err != nil { + return nil, specs.Descriptor{}, err + } + desc := specs.Descriptor{ + // MediaType is typically MediaTypeDockerSchema2ManifestList, + // but we leave it empty until we get correct support for local index.json + Digest: dgst, + Size: info.Size, + } + return remotecache.NewImporter(cs), desc, nil + } +} + +func getContentStore(ctx context.Context, sm *session.Manager, storeID string) (content.Store, error) { + sessionID := session.FromContext(ctx) + if sessionID == "" { + return nil, errors.New("local cache exporter/importer requires session") + } + timeoutCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + caller, err := sm.Get(timeoutCtx, sessionID) + if err != nil { + return nil, err + } + return sessioncontent.NewCallerStore(caller, storeID), nil +} diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/registry/registry.go b/vendor/github.com/moby/buildkit/cache/remotecache/registry/registry.go new file mode 100644 index 00000000..a172917a --- /dev/null +++ b/vendor/github.com/moby/buildkit/cache/remotecache/registry/registry.go @@ -0,0 +1,96 @@ +package registry + +import ( + "context" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/remotes/docker" + "github.com/docker/distribution/reference" + "github.com/moby/buildkit/cache/remotecache" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/util/contentutil" + "github.com/moby/buildkit/util/resolver" + "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + specs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +func canonicalizeRef(rawRef string) (string, error) { + if rawRef == "" { + return "", errors.New("missing ref") + } + parsed, err := reference.ParseNormalizedNamed(rawRef) + if err != nil { + return "", err + } + return reference.TagNameOnly(parsed).String(), nil +} + +const ( + attrRef = "ref" +) + +func ResolveCacheExporterFunc(sm *session.Manager, hosts docker.RegistryHosts) remotecache.ResolveCacheExporterFunc { + return func(ctx context.Context, attrs map[string]string) (remotecache.Exporter, error) { + ref, err := canonicalizeRef(attrs[attrRef]) + if err != nil { + return nil, err + } + remote := resolver.New(ctx, hosts, sm) + pusher, err := remote.Pusher(ctx, ref) + if err != nil { + return nil, err + } + return remotecache.NewExporter(contentutil.FromPusher(pusher)), nil + } +} + +func ResolveCacheImporterFunc(sm *session.Manager, cs content.Store, hosts docker.RegistryHosts) remotecache.ResolveCacheImporterFunc { + return func(ctx context.Context, attrs map[string]string) (remotecache.Importer, specs.Descriptor, error) { + ref, err := canonicalizeRef(attrs[attrRef]) + if err != nil { + return nil, specs.Descriptor{}, err + } + remote := 
resolver.New(ctx, hosts, sm) + xref, desc, err := remote.Resolve(ctx, ref) + if err != nil { + return nil, specs.Descriptor{}, err + } + fetcher, err := remote.Fetcher(ctx, xref) + if err != nil { + return nil, specs.Descriptor{}, err + } + src := &withDistributionSourceLabel{ + Provider: contentutil.FromFetcher(fetcher), + ref: ref, + source: cs, + } + return remotecache.NewImporter(src), desc, nil + } +} + +type withDistributionSourceLabel struct { + content.Provider + ref string + source content.Manager +} + +var _ remotecache.DistributionSourceLabelSetter = &withDistributionSourceLabel{} + +func (dsl *withDistributionSourceLabel) SetDistributionSourceLabel(ctx context.Context, dgst digest.Digest) error { + hf, err := docker.AppendDistributionSourceLabel(dsl.source, dsl.ref) + if err != nil { + return err + } + _, err = hf(ctx, ocispec.Descriptor{Digest: dgst}) + return err +} + +func (dsl *withDistributionSourceLabel) SetDistributionSourceAnnotation(desc ocispec.Descriptor) ocispec.Descriptor { + if desc.Annotations == nil { + desc.Annotations = map[string]string{} + } + desc.Annotations["containerd.io/distribution.source.ref"] = dsl.ref + return desc +} diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/v1/cachestorage.go b/vendor/github.com/moby/buildkit/cache/remotecache/v1/cachestorage.go new file mode 100644 index 00000000..728b85a2 --- /dev/null +++ b/vendor/github.com/moby/buildkit/cache/remotecache/v1/cachestorage.go @@ -0,0 +1,297 @@ +package cacheimport + +import ( + "context" + "time" + + "github.com/moby/buildkit/identity" + "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/worker" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +func NewCacheKeyStorage(cc *CacheChains, w worker.Worker) (solver.CacheKeyStorage, solver.CacheResultStorage, error) { + storage := &cacheKeyStorage{ + byID: map[string]*itemWithOutgoingLinks{}, + byItem: map[*item]string{}, + byResult: map[string]map[string]struct{}{}, + } + + for _, it := range cc.items { + if _, err := addItemToStorage(storage, it); err != nil { + return nil, nil, err + } + } + + results := &cacheResultStorage{ + w: w, + byID: storage.byID, + byItem: storage.byItem, + byResult: storage.byResult, + } + + return storage, results, nil +} + +func addItemToStorage(k *cacheKeyStorage, it *item) (*itemWithOutgoingLinks, error) { + if id, ok := k.byItem[it]; ok { + if id == "" { + return nil, errors.Errorf("invalid loop") + } + return k.byID[id], nil + } + + var id string + if len(it.links) == 0 { + id = it.dgst.String() + } else { + id = identity.NewID() + } + + k.byItem[it] = "" + + for i, m := range it.links { + for l := range m { + src, err := addItemToStorage(k, l.src) + if err != nil { + return nil, err + } + cl := nlink{ + input: i, + dgst: it.dgst, + selector: l.selector, + } + src.links[cl] = append(src.links[cl], id) + } + } + + k.byItem[it] = id + + itl := &itemWithOutgoingLinks{ + item: it, + links: map[nlink][]string{}, + } + + k.byID[id] = itl + + if res := it.result; res != nil { + resultID := remoteID(res) + ids, ok := k.byResult[resultID] + if !ok { + ids = map[string]struct{}{} + k.byResult[resultID] = ids + } + ids[id] = struct{}{} + } + return itl, nil +} + +type cacheKeyStorage struct { + byID map[string]*itemWithOutgoingLinks + byItem map[*item]string + byResult map[string]map[string]struct{} +} + +type itemWithOutgoingLinks struct { + *item + links map[nlink][]string +} + +func (cs *cacheKeyStorage) Exists(id string) bool { + _, ok := cs.byID[id] + return ok +} 
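+
+// Illustrative sketch (not upstream code): how a solver link is resolved
+// against the link tables in this storage. A CacheInfoLink is reduced to a
+// composite nlink key, and the outgoing-link map yields the target IDs:
+//
+//	key := nlink{
+//		dgst:     outputKey(link.Digest, int(link.Output)),
+//		input:    int(link.Input),
+//		selector: link.Selector.String(),
+//	}
+//	targets := it.links[key] // []string of linked cache key IDs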
+ +func (cs *cacheKeyStorage) Walk(func(id string) error) error { + return nil +} + +func (cs *cacheKeyStorage) WalkResults(id string, fn func(solver.CacheResult) error) error { + it, ok := cs.byID[id] + if !ok { + return nil + } + if res := it.result; res != nil { + return fn(solver.CacheResult{ID: remoteID(res), CreatedAt: it.resultTime}) + } + return nil +} + +func (cs *cacheKeyStorage) Load(id string, resultID string) (solver.CacheResult, error) { + it, ok := cs.byID[id] + if !ok { + return solver.CacheResult{}, nil + } + if res := it.result; res != nil { + return solver.CacheResult{ID: remoteID(res), CreatedAt: it.resultTime}, nil + } + return solver.CacheResult{}, nil +} + +func (cs *cacheKeyStorage) AddResult(id string, res solver.CacheResult) error { + return nil +} + +func (cs *cacheKeyStorage) Release(resultID string) error { + return nil +} +func (cs *cacheKeyStorage) AddLink(id string, link solver.CacheInfoLink, target string) error { + return nil +} +func (cs *cacheKeyStorage) WalkLinks(id string, link solver.CacheInfoLink, fn func(id string) error) error { + it, ok := cs.byID[id] + if !ok { + return nil + } + for _, id := range it.links[nlink{ + dgst: outputKey(link.Digest, int(link.Output)), + input: int(link.Input), + selector: link.Selector.String(), + }] { + if err := fn(id); err != nil { + return err + } + } + return nil +} + +func (cs *cacheKeyStorage) WalkBacklinks(id string, fn func(id string, link solver.CacheInfoLink) error) error { + for k, it := range cs.byID { + for nl, ids := range it.links { + for _, id2 := range ids { + if id == id2 { + if err := fn(k, solver.CacheInfoLink{ + Input: solver.Index(nl.input), + Selector: digest.Digest(nl.selector), + Digest: nl.dgst, + }); err != nil { + return err + } + } + } + } + } + return nil +} + +func (cs *cacheKeyStorage) WalkIDsByResult(id string, fn func(id string) error) error { + ids := cs.byResult[id] + for id := range ids { + if err := fn(id); err != nil { + return err + } + } + return nil +} + +func (cs *cacheKeyStorage) HasLink(id string, link solver.CacheInfoLink, target string) bool { + l := nlink{ + dgst: outputKey(link.Digest, int(link.Output)), + input: int(link.Input), + selector: link.Selector.String(), + } + if it, ok := cs.byID[id]; ok { + for _, id := range it.links[l] { + if id == target { + return true + } + } + } + return false +} + +type cacheResultStorage struct { + w worker.Worker + byID map[string]*itemWithOutgoingLinks + byResult map[string]map[string]struct{} + byItem map[*item]string +} + +func (cs *cacheResultStorage) Save(res solver.Result, createdAt time.Time) (solver.CacheResult, error) { + return solver.CacheResult{}, errors.Errorf("importer is immutable") +} + +func (cs *cacheResultStorage) LoadWithParents(ctx context.Context, res solver.CacheResult) (map[string]solver.Result, error) { + v := cs.byResultID(res.ID) + if v == nil || v.result == nil { + return nil, errors.WithStack(solver.ErrNotFound) + } + + m := map[string]solver.Result{} + + visited := make(map[*item]struct{}) + if err := v.walkAllResults(func(i *item) error { + if i.result == nil { + return nil + } + id, ok := cs.byItem[i] + if !ok { + return nil + } + if isSubRemote(*i.result, *v.result) { + ref, err := cs.w.FromRemote(ctx, i.result) + if err != nil { + return err + } + m[id] = worker.NewWorkerRefResult(ref, cs.w) + } + return nil + }, visited); err != nil { + for _, v := range m { + v.Release(context.TODO()) + } + return nil, err + } + + return m, nil +} + +func (cs *cacheResultStorage) Load(ctx context.Context, res 
solver.CacheResult) (solver.Result, error) { + item := cs.byResultID(res.ID) + if item == nil || item.result == nil { + return nil, errors.WithStack(solver.ErrNotFound) + } + + ref, err := cs.w.FromRemote(ctx, item.result) + if err != nil { + return nil, errors.Wrap(err, "failed to load result from remote") + } + return worker.NewWorkerRefResult(ref, cs.w), nil +} + +func (cs *cacheResultStorage) LoadRemote(ctx context.Context, res solver.CacheResult) (*solver.Remote, error) { + if r := cs.byResultID(res.ID); r != nil && r.result != nil { + return r.result, nil + } + return nil, errors.WithStack(solver.ErrNotFound) +} + +func (cs *cacheResultStorage) Exists(id string) bool { + return cs.byResultID(id) != nil +} + +func (cs *cacheResultStorage) byResultID(resultID string) *itemWithOutgoingLinks { + m, ok := cs.byResult[resultID] + if !ok || len(m) == 0 { + return nil + } + + for id := range m { + it, ok := cs.byID[id] + if ok { + return it + } + } + + return nil +} + +// unique ID per remote. this ID is not stable. +func remoteID(r *solver.Remote) string { + dgstr := digest.Canonical.Digester() + for _, desc := range r.Descriptors { + dgstr.Hash().Write([]byte(desc.Digest)) + } + return dgstr.Digest().String() +} diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/v1/chains.go b/vendor/github.com/moby/buildkit/cache/remotecache/v1/chains.go new file mode 100644 index 00000000..a9b11c3e --- /dev/null +++ b/vendor/github.com/moby/buildkit/cache/remotecache/v1/chains.go @@ -0,0 +1,158 @@ +package cacheimport + +import ( + "strings" + "time" + + "github.com/containerd/containerd/content" + "github.com/moby/buildkit/solver" + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +func NewCacheChains() *CacheChains { + return &CacheChains{visited: map[interface{}]struct{}{}} +} + +type CacheChains struct { + items []*item + visited map[interface{}]struct{} +} + +func (c *CacheChains) Add(dgst digest.Digest) solver.CacheExporterRecord { + if strings.HasPrefix(dgst.String(), "random:") { + return &nopRecord{} + } + it := &item{c: c, dgst: dgst} + c.items = append(c.items, it) + return it +} + +func (c *CacheChains) Visit(v interface{}) { + c.visited[v] = struct{}{} +} + +func (c *CacheChains) Visited(v interface{}) bool { + _, ok := c.visited[v] + return ok +} + +func (c *CacheChains) normalize() error { + st := &normalizeState{ + added: map[*item]*item{}, + links: map[*item]map[nlink]map[digest.Digest]struct{}{}, + byKey: map[digest.Digest]*item{}, + } + + for _, it := range c.items { + _, err := normalizeItem(it, st) + if err != nil { + return err + } + } + + items := make([]*item, 0, len(st.byKey)) + for _, it := range st.byKey { + items = append(items, it) + } + c.items = items + return nil +} + +func (c *CacheChains) Marshal() (*CacheConfig, DescriptorProvider, error) { + if err := c.normalize(); err != nil { + return nil, nil, err + } + + st := &marshalState{ + chainsByID: map[string]int{}, + descriptors: DescriptorProvider{}, + recordsByItem: map[*item]int{}, + } + + for _, it := range c.items { + if err := marshalItem(it, st); err != nil { + return nil, nil, err + } + } + + cc := CacheConfig{ + Layers: st.layers, + Records: st.records, + } + sortConfig(&cc) + + return &cc, st.descriptors, nil +} + +type DescriptorProvider map[digest.Digest]DescriptorProviderPair + +type DescriptorProviderPair struct { + Descriptor ocispec.Descriptor + Provider content.Provider +} + +type item struct { + c *CacheChains + dgst digest.Digest + 
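+ // result, if set, is the exportable layer chain attached via AddResult
+ // below; links[i] holds the set of links recorded by LinkFrom for input
+ // index i.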
+ result *solver.Remote
+ resultTime time.Time
+
+ links []map[link]struct{}
+}
+
+type link struct {
+ src *item
+ selector string
+}
+
+func (c *item) AddResult(createdAt time.Time, result *solver.Remote) {
+ c.resultTime = createdAt
+ c.result = result
+}
+
+func (c *item) LinkFrom(rec solver.CacheExporterRecord, index int, selector string) {
+ src, ok := rec.(*item)
+ if !ok {
+ return
+ }
+
+ for {
+ if index < len(c.links) {
+ break
+ }
+ c.links = append(c.links, map[link]struct{}{})
+ }
+
+ c.links[index][link{src: src, selector: selector}] = struct{}{}
+}
+
+func (c *item) walkAllResults(fn func(i *item) error, visited map[*item]struct{}) error {
+ if _, ok := visited[c]; ok {
+ return nil
+ }
+ visited[c] = struct{}{}
+ if err := fn(c); err != nil {
+ return err
+ }
+ for _, links := range c.links {
+ for l := range links {
+ if err := l.src.walkAllResults(fn, visited); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+type nopRecord struct {
+}
+
+func (c *nopRecord) AddResult(createdAt time.Time, result *solver.Remote) {
+}
+
+func (c *nopRecord) LinkFrom(rec solver.CacheExporterRecord, index int, selector string) {
+}
+
+var _ solver.CacheExporterTarget = &CacheChains{}
diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/v1/doc.go b/vendor/github.com/moby/buildkit/cache/remotecache/v1/doc.go
new file mode 100644
index 00000000..97d21a45
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/cache/remotecache/v1/doc.go
@@ -0,0 +1,50 @@
+package cacheimport
+
+// Distributable build cache
+//
+// Main manifest is an OCI image index
+// https://github.com/opencontainers/image-spec/blob/master/image-index.md .
+// Manifests array contains descriptors to the cache layers and one instance of
+// build cache config with media type application/vnd.buildkit.cacheconfig.v0 .
+// The cache layer descriptors need to have an annotation with uncompressed digest
+// to allow deduplication on extraction and optionally "buildkit/createdat"
+// annotation to support maintaining original timestamps.
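+//
+// As a rough illustration (the gzip layer media type here is an assumption,
+// digests are elided), such an index might look like:
+//
+//{
+// "mediaType": "application/vnd.oci.image.index.v1+json",
+// "manifests": [
+// {"mediaType": "application/vnd.oci.image.layer.v1.tar+gzip", "digest": "sha256:..."},
+// {"mediaType": "application/vnd.buildkit.cacheconfig.v0", "digest": "sha256:..."}
+// ]
+//}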
+//
+// Cache config file layout:
+//
+//{
+// "layers": [
+// {
+// "blob": "sha256:deadbeef", <- digest of layer blob in index
+// "parent": -1 <- index of parent layer, -1 if no parent
+// },
+// {
+// "blob": "sha256:deadbeef",
+// "parent": 0
+// }
+// ],
+//
+// "records": [
+// {
+// "digest": "sha256:deadbeef", <- base digest for the record
+// },
+// {
+// "digest": "sha256:deadbeef",
+// "output": 1, <- optional output index
+// "layers": [ <- optional array of layer chains
+// {
+// "createdat": "",
+// "layer": 1, <- index to the layer
+// }
+// ],
+// "inputs": [ <- dependent records
+// [ <- index of the dependency (0)
+// {
+// "selector": "sel", <- optional selector
+// "link": 0, <- index to the dependent record
+// }
+// ]
+// ]
+// }
+// ]
+// }
diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/v1/parse.go b/vendor/github.com/moby/buildkit/cache/remotecache/v1/parse.go
new file mode 100644
index 00000000..79adf014
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/cache/remotecache/v1/parse.go
@@ -0,0 +1,110 @@
+package cacheimport
+
+import (
+ "encoding/json"
+
+ "github.com/moby/buildkit/solver"
+ "github.com/moby/buildkit/util/contentutil"
+ ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/pkg/errors"
+)
+
+func Parse(configJSON []byte, provider DescriptorProvider, t solver.CacheExporterTarget) error {
+ var config CacheConfig
+ if err := json.Unmarshal(configJSON, &config); err != nil {
+ return errors.WithStack(err)
+ }
+
+ return ParseConfig(config, provider, t)
+}
+
+func ParseConfig(config CacheConfig, provider DescriptorProvider, t solver.CacheExporterTarget) error {
+ cache := map[int]solver.CacheExporterRecord{}
+
+ for i := range config.Records {
+ if _, err := parseRecord(config, i, provider, t, cache); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func parseRecord(cc CacheConfig, idx int, provider DescriptorProvider, t solver.CacheExporterTarget, cache map[int]solver.CacheExporterRecord) (solver.CacheExporterRecord, error) {
+ if r, ok := cache[idx]; ok {
+ if r == nil {
+ return nil, errors.Errorf("invalid looping record")
+ }
+ return r, nil
+ }
+
+ if idx < 0 || idx >= len(cc.Records) {
+ return nil, errors.Errorf("invalid record ID: %d", idx)
+ }
+ rec := cc.Records[idx]
+
+ r := t.Add(rec.Digest)
+ cache[idx] = nil
+ for i, inputs := range rec.Inputs {
+ for _, inp := range inputs {
+ src, err := parseRecord(cc, inp.LinkIndex, provider, t, cache)
+ if err != nil {
+ return nil, err
+ }
+ r.LinkFrom(src, i, inp.Selector)
+ }
+ }
+
+ for _, res := range rec.Results {
+ visited := map[int]struct{}{}
+ remote, err := getRemoteChain(cc.Layers, res.LayerIndex, provider, visited)
+ if err != nil {
+ return nil, err
+ }
+ if remote != nil {
+ r.AddResult(res.CreatedAt, remote)
+ }
+ }
+
+ cache[idx] = r
+ return r, nil
+}
+
+func getRemoteChain(layers []CacheLayer, idx int, provider DescriptorProvider, visited map[int]struct{}) (*solver.Remote, error) {
+ if _, ok := visited[idx]; ok {
+ return nil, errors.Errorf("invalid looping layer")
+ }
+ visited[idx] = struct{}{}
+
+ if idx < 0 || idx >= len(layers) {
+ return nil, errors.Errorf("invalid layer index %d", idx)
+ }
+
+ l := layers[idx]
+
+ descPair, ok := provider[l.Blob]
+ if !ok {
+ return nil, nil
+ }
+
+ var r *solver.Remote
+ if l.ParentIndex != -1 {
+ var err error
+ r, err = getRemoteChain(layers, l.ParentIndex, provider, visited)
+ if err != nil {
+ return nil, err
+ }
+ if r == nil {
+ return nil, nil
+ }
+ r.Descriptors = append(r.Descriptors,
descPair.Descriptor) + mp := contentutil.NewMultiProvider(r.Provider) + mp.Add(descPair.Descriptor.Digest, descPair.Provider) + r.Provider = mp + return r, nil + } + return &solver.Remote{ + Descriptors: []ocispec.Descriptor{descPair.Descriptor}, + Provider: descPair.Provider, + }, nil + +} diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/v1/spec.go b/vendor/github.com/moby/buildkit/cache/remotecache/v1/spec.go new file mode 100644 index 00000000..4c6bc0bb --- /dev/null +++ b/vendor/github.com/moby/buildkit/cache/remotecache/v1/spec.go @@ -0,0 +1,35 @@ +package cacheimport + +import ( + "time" + + digest "github.com/opencontainers/go-digest" +) + +const CacheConfigMediaTypeV0 = "application/vnd.buildkit.cacheconfig.v0" + +type CacheConfig struct { + Layers []CacheLayer `json:"layers,omitempty"` + Records []CacheRecord `json:"records,omitempty"` +} + +type CacheLayer struct { + Blob digest.Digest `json:"blob,omitempty"` + ParentIndex int `json:"parent,omitempty"` +} + +type CacheRecord struct { + Results []CacheResult `json:"layers,omitempty"` + Digest digest.Digest `json:"digest,omitempty"` + Inputs [][]CacheInput `json:"inputs,omitempty"` +} + +type CacheResult struct { + LayerIndex int `json:"layer"` + CreatedAt time.Time `json:"createdAt,omitempty"` +} + +type CacheInput struct { + Selector string `json:"selector,omitempty"` + LinkIndex int `json:"link"` +} diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/v1/utils.go b/vendor/github.com/moby/buildkit/cache/remotecache/v1/utils.go new file mode 100644 index 00000000..dfc7fab3 --- /dev/null +++ b/vendor/github.com/moby/buildkit/cache/remotecache/v1/utils.go @@ -0,0 +1,322 @@ +package cacheimport + +import ( + "fmt" + "sort" + + "github.com/containerd/containerd/content" + "github.com/moby/buildkit/solver" + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +// sortConfig sorts the config structure to make sure it is deterministic +func sortConfig(cc *CacheConfig) { + type indexedLayer struct { + oldIndex int + newIndex int + l CacheLayer + } + + unsortedLayers := make([]*indexedLayer, len(cc.Layers)) + sortedLayers := make([]*indexedLayer, len(cc.Layers)) + + for i, l := range cc.Layers { + il := &indexedLayer{oldIndex: i, l: l} + unsortedLayers[i] = il + sortedLayers[i] = il + } + sort.Slice(sortedLayers, func(i, j int) bool { + li := sortedLayers[i].l + lj := sortedLayers[j].l + if li.Blob == lj.Blob { + return li.ParentIndex < lj.ParentIndex + } + return li.Blob < lj.Blob + }) + for i, l := range sortedLayers { + l.newIndex = i + } + + layers := make([]CacheLayer, len(sortedLayers)) + for i, l := range sortedLayers { + if pID := l.l.ParentIndex; pID != -1 { + l.l.ParentIndex = unsortedLayers[pID].newIndex + } + layers[i] = l.l + } + + type indexedRecord struct { + oldIndex int + newIndex int + r CacheRecord + } + + unsortedRecords := make([]*indexedRecord, len(cc.Records)) + sortedRecords := make([]*indexedRecord, len(cc.Records)) + + for i, r := range cc.Records { + ir := &indexedRecord{oldIndex: i, r: r} + unsortedRecords[i] = ir + sortedRecords[i] = ir + } + sort.Slice(sortedRecords, func(i, j int) bool { + ri := sortedRecords[i].r + rj := sortedRecords[j].r + if ri.Digest != rj.Digest { + return ri.Digest < rj.Digest + } + if len(ri.Inputs) != len(rj.Inputs) { + return len(ri.Inputs) < len(rj.Inputs) + } + for i, inputs := range ri.Inputs { + if len(ri.Inputs[i]) != len(rj.Inputs[i]) { + return len(ri.Inputs[i]) < 
len(rj.Inputs[i]) + } + for j := range inputs { + if ri.Inputs[i][j].Selector != rj.Inputs[i][j].Selector { + return ri.Inputs[i][j].Selector < rj.Inputs[i][j].Selector + } + inputDigesti := cc.Records[ri.Inputs[i][j].LinkIndex].Digest + inputDigestj := cc.Records[rj.Inputs[i][j].LinkIndex].Digest + if inputDigesti != inputDigestj { + return inputDigesti < inputDigestj + } + } + } + return false + }) + for i, l := range sortedRecords { + l.newIndex = i + } + + records := make([]CacheRecord, len(sortedRecords)) + for i, r := range sortedRecords { + for j := range r.r.Results { + r.r.Results[j].LayerIndex = unsortedLayers[r.r.Results[j].LayerIndex].newIndex + } + for j, inputs := range r.r.Inputs { + for k := range inputs { + r.r.Inputs[j][k].LinkIndex = unsortedRecords[r.r.Inputs[j][k].LinkIndex].newIndex + } + sort.Slice(inputs, func(i, j int) bool { + return inputs[i].LinkIndex < inputs[j].LinkIndex + }) + } + records[i] = r.r + } + + cc.Layers = layers + cc.Records = records +} + +func outputKey(dgst digest.Digest, idx int) digest.Digest { + return digest.FromBytes([]byte(fmt.Sprintf("%s@%d", dgst, idx))) +} + +type nlink struct { + dgst digest.Digest + input int + selector string +} +type normalizeState struct { + added map[*item]*item + links map[*item]map[nlink]map[digest.Digest]struct{} + byKey map[digest.Digest]*item + next int +} + +func normalizeItem(it *item, state *normalizeState) (*item, error) { + if it2, ok := state.added[it]; ok { + return it2, nil + } + + if len(it.links) == 0 { + id := it.dgst + if it2, ok := state.byKey[id]; ok { + state.added[it] = it2 + return it2, nil + } + state.byKey[id] = it + state.added[it] = it + return nil, nil + } + + matches := map[digest.Digest]struct{}{} + + // check if there is already a matching record + for i, m := range it.links { + if len(m) == 0 { + return nil, errors.Errorf("invalid incomplete links") + } + for l := range m { + nl := nlink{dgst: it.dgst, input: i, selector: l.selector} + it2, err := normalizeItem(l.src, state) + if err != nil { + return nil, err + } + links := state.links[it2][nl] + if i == 0 { + for id := range links { + matches[id] = struct{}{} + } + } else { + for id := range matches { + if _, ok := links[id]; !ok { + delete(matches, id) + } + } + } + } + } + + var id digest.Digest + + links := it.links + + if len(matches) > 0 { + for m := range matches { + if id == "" || id > m { + id = m + } + } + } else { + // keep tmp IDs deterministic + state.next++ + id = digest.FromBytes([]byte(fmt.Sprintf("%d", state.next))) + state.byKey[id] = it + it.links = make([]map[link]struct{}, len(it.links)) + for i := range it.links { + it.links[i] = map[link]struct{}{} + } + } + + it2 := state.byKey[id] + state.added[it] = it2 + + for i, m := range links { + for l := range m { + subIt, err := normalizeItem(l.src, state) + if err != nil { + return nil, err + } + it2.links[i][link{src: subIt, selector: l.selector}] = struct{}{} + + nl := nlink{dgst: it.dgst, input: i, selector: l.selector} + if _, ok := state.links[subIt]; !ok { + state.links[subIt] = map[nlink]map[digest.Digest]struct{}{} + } + if _, ok := state.links[subIt][nl]; !ok { + state.links[subIt][nl] = map[digest.Digest]struct{}{} + } + state.links[subIt][nl][id] = struct{}{} + } + } + + return it2, nil +} + +type marshalState struct { + layers []CacheLayer + chainsByID map[string]int + descriptors DescriptorProvider + + records []CacheRecord + recordsByItem map[*item]int +} + +func marshalRemote(r *solver.Remote, state *marshalState) string { + if len(r.Descriptors) == 
0 { + return "" + } + type Remote struct { + Descriptors []ocispec.Descriptor + Provider content.Provider + } + var parentID string + if len(r.Descriptors) > 1 { + r2 := &solver.Remote{ + Descriptors: r.Descriptors[:len(r.Descriptors)-1], + Provider: r.Provider, + } + parentID = marshalRemote(r2, state) + } + desc := r.Descriptors[len(r.Descriptors)-1] + + state.descriptors[desc.Digest] = DescriptorProviderPair{ + Descriptor: desc, + Provider: r.Provider, + } + + id := desc.Digest.String() + parentID + + if _, ok := state.chainsByID[id]; ok { + return id + } + + state.chainsByID[id] = len(state.layers) + l := CacheLayer{ + Blob: desc.Digest, + ParentIndex: -1, + } + if parentID != "" { + l.ParentIndex = state.chainsByID[parentID] + } + state.layers = append(state.layers, l) + return id +} + +func marshalItem(it *item, state *marshalState) error { + if _, ok := state.recordsByItem[it]; ok { + return nil + } + + rec := CacheRecord{ + Digest: it.dgst, + Inputs: make([][]CacheInput, len(it.links)), + } + + for i, m := range it.links { + for l := range m { + if err := marshalItem(l.src, state); err != nil { + return err + } + idx, ok := state.recordsByItem[l.src] + if !ok { + return errors.Errorf("invalid source record: %v", l.src) + } + rec.Inputs[i] = append(rec.Inputs[i], CacheInput{ + Selector: l.selector, + LinkIndex: idx, + }) + } + } + + if it.result != nil { + id := marshalRemote(it.result, state) + if id != "" { + idx, ok := state.chainsByID[id] + if !ok { + return errors.Errorf("parent chainid not found") + } + rec.Results = append(rec.Results, CacheResult{LayerIndex: idx, CreatedAt: it.resultTime}) + } + } + + state.recordsByItem[it] = len(state.records) + state.records = append(state.records, rec) + return nil +} + +func isSubRemote(sub, main solver.Remote) bool { + if len(sub.Descriptors) > len(main.Descriptors) { + return false + } + for i := range sub.Descriptors { + if sub.Descriptors[i].Digest != main.Descriptors[i].Digest { + return false + } + } + return true +} diff --git a/vendor/github.com/moby/buildkit/cache/util/fsutil.go b/vendor/github.com/moby/buildkit/cache/util/fsutil.go new file mode 100644 index 00000000..41e5465f --- /dev/null +++ b/vendor/github.com/moby/buildkit/cache/util/fsutil.go @@ -0,0 +1,139 @@ +package util + +import ( + "context" + "io" + "io/ioutil" + "os" + "path/filepath" + + "github.com/containerd/continuity/fs" + "github.com/moby/buildkit/cache" + "github.com/moby/buildkit/snapshot" + "github.com/pkg/errors" + "github.com/tonistiigi/fsutil" + fstypes "github.com/tonistiigi/fsutil/types" +) + +type ReadRequest struct { + Filename string + Range *FileRange +} + +type FileRange struct { + Offset int + Length int +} + +func withMount(ctx context.Context, ref cache.ImmutableRef, cb func(string) error) error { + mount, err := ref.Mount(ctx, true) + if err != nil { + return err + } + + lm := snapshot.LocalMounter(mount) + + root, err := lm.Mount() + if err != nil { + return err + } + + defer func() { + if lm != nil { + lm.Unmount() + } + }() + + if err := cb(root); err != nil { + return err + } + + if err := lm.Unmount(); err != nil { + return err + } + lm = nil + return nil +} + +func ReadFile(ctx context.Context, ref cache.ImmutableRef, req ReadRequest) ([]byte, error) { + var dt []byte + + err := withMount(ctx, ref, func(root string) error { + fp, err := fs.RootPath(root, req.Filename) + if err != nil { + return errors.WithStack(err) + } + + if req.Range == nil { + dt, err = ioutil.ReadFile(fp) + if err != nil { + return errors.WithStack(err) + } + } 
else { + f, err := os.Open(fp) + if err != nil { + return errors.WithStack(err) + } + dt, err = ioutil.ReadAll(io.NewSectionReader(f, int64(req.Range.Offset), int64(req.Range.Length))) + f.Close() + if err != nil { + return errors.WithStack(err) + } + } + return nil + }) + return dt, err +} + +type ReadDirRequest struct { + Path string + IncludePattern string +} + +func ReadDir(ctx context.Context, ref cache.ImmutableRef, req ReadDirRequest) ([]*fstypes.Stat, error) { + var ( + rd []*fstypes.Stat + wo fsutil.WalkOpt + ) + if req.IncludePattern != "" { + wo.IncludePatterns = append(wo.IncludePatterns, req.IncludePattern) + } + err := withMount(ctx, ref, func(root string) error { + fp, err := fs.RootPath(root, req.Path) + if err != nil { + return errors.WithStack(err) + } + return fsutil.Walk(ctx, fp, &wo, func(path string, info os.FileInfo, err error) error { + if err != nil { + return errors.Wrapf(err, "walking %q", root) + } + stat, ok := info.Sys().(*fstypes.Stat) + if !ok { + // This "can't happen(tm)". + return errors.Errorf("expected a *fsutil.Stat but got %T", info.Sys()) + } + rd = append(rd, stat) + + if info.IsDir() { + return filepath.SkipDir + } + return nil + }) + }) + return rd, err +} + +func StatFile(ctx context.Context, ref cache.ImmutableRef, path string) (*fstypes.Stat, error) { + var st *fstypes.Stat + err := withMount(ctx, ref, func(root string) error { + fp, err := fs.RootPath(root, path) + if err != nil { + return errors.WithStack(err) + } + if st, err = fsutil.Stat(fp); err != nil { + return errors.WithStack(err) + } + return nil + }) + return st, err +} diff --git a/vendor/github.com/moby/buildkit/client/build.go b/vendor/github.com/moby/buildkit/client/build.go new file mode 100644 index 00000000..2518cd7c --- /dev/null +++ b/vendor/github.com/moby/buildkit/client/build.go @@ -0,0 +1,110 @@ +package client + +import ( + "context" + + "github.com/moby/buildkit/client/buildid" + gateway "github.com/moby/buildkit/frontend/gateway/client" + "github.com/moby/buildkit/frontend/gateway/grpcclient" + gatewayapi "github.com/moby/buildkit/frontend/gateway/pb" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/util/apicaps" + "github.com/pkg/errors" + "google.golang.org/grpc" +) + +func (c *Client) Build(ctx context.Context, opt SolveOpt, product string, buildFunc gateway.BuildFunc, statusChan chan *SolveStatus) (*SolveResponse, error) { + defer func() { + if statusChan != nil { + close(statusChan) + } + }() + + if opt.Frontend != "" { + return nil, errors.New("invalid SolveOpt, Build interface cannot use Frontend") + } + + if product == "" { + product = apicaps.ExportedProduct + } + + feOpts := opt.FrontendAttrs + opt.FrontendAttrs = nil + + workers, err := c.ListWorkers(ctx) + if err != nil { + return nil, errors.Wrap(err, "listing workers for Build") + } + var gworkers []gateway.WorkerInfo + for _, w := range workers { + gworkers = append(gworkers, gateway.WorkerInfo{ + ID: w.ID, + Labels: w.Labels, + Platforms: w.Platforms, + }) + } + + cb := func(ref string, s *session.Session) error { + g, err := grpcclient.New(ctx, feOpts, s.ID(), product, c.gatewayClientForBuild(ref), gworkers) + if err != nil { + return err + } + + if err := g.Run(ctx, buildFunc); err != nil { + return errors.Wrap(err, "failed to run Build function") + } + return nil + } + + return c.solve(ctx, nil, cb, opt, statusChan) +} + +func (c *Client) gatewayClientForBuild(buildid string) gatewayapi.LLBBridgeClient { + g := gatewayapi.NewLLBBridgeClient(c.conn) + return &gatewayClientForBuild{g, 
buildid} +} + +type gatewayClientForBuild struct { + gateway gatewayapi.LLBBridgeClient + buildID string +} + +func (g *gatewayClientForBuild) ResolveImageConfig(ctx context.Context, in *gatewayapi.ResolveImageConfigRequest, opts ...grpc.CallOption) (*gatewayapi.ResolveImageConfigResponse, error) { + ctx = buildid.AppendToOutgoingContext(ctx, g.buildID) + return g.gateway.ResolveImageConfig(ctx, in, opts...) +} + +func (g *gatewayClientForBuild) Solve(ctx context.Context, in *gatewayapi.SolveRequest, opts ...grpc.CallOption) (*gatewayapi.SolveResponse, error) { + ctx = buildid.AppendToOutgoingContext(ctx, g.buildID) + return g.gateway.Solve(ctx, in, opts...) +} + +func (g *gatewayClientForBuild) ReadFile(ctx context.Context, in *gatewayapi.ReadFileRequest, opts ...grpc.CallOption) (*gatewayapi.ReadFileResponse, error) { + ctx = buildid.AppendToOutgoingContext(ctx, g.buildID) + return g.gateway.ReadFile(ctx, in, opts...) +} + +func (g *gatewayClientForBuild) ReadDir(ctx context.Context, in *gatewayapi.ReadDirRequest, opts ...grpc.CallOption) (*gatewayapi.ReadDirResponse, error) { + ctx = buildid.AppendToOutgoingContext(ctx, g.buildID) + return g.gateway.ReadDir(ctx, in, opts...) +} + +func (g *gatewayClientForBuild) StatFile(ctx context.Context, in *gatewayapi.StatFileRequest, opts ...grpc.CallOption) (*gatewayapi.StatFileResponse, error) { + ctx = buildid.AppendToOutgoingContext(ctx, g.buildID) + return g.gateway.StatFile(ctx, in, opts...) +} + +func (g *gatewayClientForBuild) Ping(ctx context.Context, in *gatewayapi.PingRequest, opts ...grpc.CallOption) (*gatewayapi.PongResponse, error) { + ctx = buildid.AppendToOutgoingContext(ctx, g.buildID) + return g.gateway.Ping(ctx, in, opts...) +} + +func (g *gatewayClientForBuild) Return(ctx context.Context, in *gatewayapi.ReturnRequest, opts ...grpc.CallOption) (*gatewayapi.ReturnResponse, error) { + ctx = buildid.AppendToOutgoingContext(ctx, g.buildID) + return g.gateway.Return(ctx, in, opts...) +} + +func (g *gatewayClientForBuild) Inputs(ctx context.Context, in *gatewayapi.InputsRequest, opts ...grpc.CallOption) (*gatewayapi.InputsResponse, error) { + ctx = buildid.AppendToOutgoingContext(ctx, g.buildID) + return g.gateway.Inputs(ctx, in, opts...) 
+} diff --git a/vendor/github.com/moby/buildkit/client/buildid/metadata.go b/vendor/github.com/moby/buildkit/client/buildid/metadata.go new file mode 100644 index 00000000..bb169b8f --- /dev/null +++ b/vendor/github.com/moby/buildkit/client/buildid/metadata.go @@ -0,0 +1,29 @@ +package buildid + +import ( + "context" + + "google.golang.org/grpc/metadata" +) + +var metadataKey = "buildkit-controlapi-buildid" + +func AppendToOutgoingContext(ctx context.Context, id string) context.Context { + if id != "" { + return metadata.AppendToOutgoingContext(ctx, metadataKey, id) + } + return ctx +} + +func FromIncomingContext(ctx context.Context) string { + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + return "" + } + + if ids := md.Get(metadataKey); len(ids) == 1 { + return ids[0] + } + + return "" +} diff --git a/vendor/github.com/moby/buildkit/client/client.go b/vendor/github.com/moby/buildkit/client/client.go new file mode 100644 index 00000000..6b824094 --- /dev/null +++ b/vendor/github.com/moby/buildkit/client/client.go @@ -0,0 +1,169 @@ +package client + +import ( + "context" + "crypto/tls" + "crypto/x509" + "io/ioutil" + "net" + "time" + + "github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc" + controlapi "github.com/moby/buildkit/api/services/control" + "github.com/moby/buildkit/client/connhelper" + "github.com/moby/buildkit/util/appdefaults" + opentracing "github.com/opentracing/opentracing-go" + "github.com/pkg/errors" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" +) + +type Client struct { + conn *grpc.ClientConn +} + +type ClientOpt interface{} + +// New returns a new buildkit client. Address can be empty for the system-default address. +func New(ctx context.Context, address string, opts ...ClientOpt) (*Client, error) { + gopts := []grpc.DialOption{} + needDialer := true + needWithInsecure := true + for _, o := range opts { + if _, ok := o.(*withFailFast); ok { + gopts = append(gopts, grpc.FailOnNonTempDialError(true)) + } + if credInfo, ok := o.(*withCredentials); ok { + opt, err := loadCredentials(credInfo) + if err != nil { + return nil, err + } + gopts = append(gopts, opt) + needWithInsecure = false + } + if wt, ok := o.(*withTracer); ok { + gopts = append(gopts, + grpc.WithUnaryInterceptor(otgrpc.OpenTracingClientInterceptor(wt.tracer, otgrpc.LogPayloads())), + grpc.WithStreamInterceptor(otgrpc.OpenTracingStreamClientInterceptor(wt.tracer))) + } + if wd, ok := o.(*withDialer); ok { + gopts = append(gopts, grpc.WithDialer(wd.dialer)) + needDialer = false + } + } + if needDialer { + dialFn, err := resolveDialer(address) + if err != nil { + return nil, err + } + // TODO(AkihiroSuda): use WithContextDialer (requires grpc 1.19) + // https://github.com/grpc/grpc-go/commit/40cb5618f475e7b9d61aa7920ae4b04ef9bbaf89 + gopts = append(gopts, grpc.WithDialer(dialFn)) + } + if needWithInsecure { + gopts = append(gopts, grpc.WithInsecure()) + } + if address == "" { + address = appdefaults.Address + } + conn, err := grpc.DialContext(ctx, address, gopts...) + if err != nil { + return nil, errors.Wrapf(err, "failed to dial %q . 
make sure buildkitd is running", address) + } + c := &Client{ + conn: conn, + } + return c, nil +} + +func (c *Client) controlClient() controlapi.ControlClient { + return controlapi.NewControlClient(c.conn) +} + +func (c *Client) Close() error { + return c.conn.Close() +} + +type withFailFast struct{} + +func WithFailFast() ClientOpt { + return &withFailFast{} +} + +type withDialer struct { + dialer func(string, time.Duration) (net.Conn, error) +} + +func WithDialer(df func(string, time.Duration) (net.Conn, error)) ClientOpt { + return &withDialer{dialer: df} +} + +type withCredentials struct { + ServerName string + CACert string + Cert string + Key string +} + +// WithCredentials configures the TLS parameters of the client. +// Arguments: +// * serverName: specifies the name of the target server +// * ca: specifies the filepath of the CA certificate to use for verification +// * cert: specifies the filepath of the client certificate +// * key: specifies the filepath of the client key +func WithCredentials(serverName, ca, cert, key string) ClientOpt { + return &withCredentials{serverName, ca, cert, key} +} + +func loadCredentials(opts *withCredentials) (grpc.DialOption, error) { + ca, err := ioutil.ReadFile(opts.CACert) + if err != nil { + return nil, errors.Wrap(err, "could not read ca certificate") + } + + certPool := x509.NewCertPool() + if ok := certPool.AppendCertsFromPEM(ca); !ok { + return nil, errors.New("failed to append ca certs") + } + + cfg := &tls.Config{ + ServerName: opts.ServerName, + RootCAs: certPool, + } + + // we will produce an error if the user forgot about either cert or key if at least one is specified + if opts.Cert != "" || opts.Key != "" { + cert, err := tls.LoadX509KeyPair(opts.Cert, opts.Key) + if err != nil { + return nil, errors.Wrap(err, "could not read certificate/key") + } + cfg.Certificates = []tls.Certificate{cert} + cfg.BuildNameToCertificate() + } + + return grpc.WithTransportCredentials(credentials.NewTLS(cfg)), nil +} + +func WithTracer(t opentracing.Tracer) ClientOpt { + return &withTracer{t} +} + +type withTracer struct { + tracer opentracing.Tracer +} + +func resolveDialer(address string) (func(string, time.Duration) (net.Conn, error), error) { + ch, err := connhelper.GetConnectionHelper(address) + if err != nil { + return nil, err + } + if ch != nil { + f := func(a string, _ time.Duration) (net.Conn, error) { + ctx := context.Background() + return ch.ContextDialer(ctx, a) + } + return f, nil + } + // basic dialer + return dialer, nil +} diff --git a/vendor/github.com/moby/buildkit/client/client_unix.go b/vendor/github.com/moby/buildkit/client/client_unix.go new file mode 100644 index 00000000..93afb956 --- /dev/null +++ b/vendor/github.com/moby/buildkit/client/client_unix.go @@ -0,0 +1,19 @@ +// +build !windows + +package client + +import ( + "net" + "strings" + "time" + + "github.com/pkg/errors" +) + +func dialer(address string, timeout time.Duration) (net.Conn, error) { + addrParts := strings.SplitN(address, "://", 2) + if len(addrParts) != 2 { + return nil, errors.Errorf("invalid address %s", address) + } + return net.DialTimeout(addrParts[0], addrParts[1], timeout) +} diff --git a/vendor/github.com/moby/buildkit/client/client_windows.go b/vendor/github.com/moby/buildkit/client/client_windows.go new file mode 100644 index 00000000..d0d8a1b4 --- /dev/null +++ b/vendor/github.com/moby/buildkit/client/client_windows.go @@ -0,0 +1,24 @@ +package client + +import ( + "net" + "strings" + "time" + + winio "github.com/Microsoft/go-winio" + 
"github.com/pkg/errors" +) + +func dialer(address string, timeout time.Duration) (net.Conn, error) { + addrParts := strings.SplitN(address, "://", 2) + if len(addrParts) != 2 { + return nil, errors.Errorf("invalid address %s", address) + } + switch addrParts[0] { + case "npipe": + address = strings.Replace(addrParts[1], "/", "\\", -1) + return winio.DialPipe(address, &timeout) + default: + return net.DialTimeout(addrParts[0], addrParts[1], timeout) + } +} diff --git a/vendor/github.com/moby/buildkit/client/connhelper/connhelper.go b/vendor/github.com/moby/buildkit/client/connhelper/connhelper.go new file mode 100644 index 00000000..e634bb5c --- /dev/null +++ b/vendor/github.com/moby/buildkit/client/connhelper/connhelper.go @@ -0,0 +1,37 @@ +// Package connhelper provides helpers for connecting to a remote daemon host with custom logic. +package connhelper + +import ( + "context" + "net" + "net/url" +) + +var helpers = map[string]func(*url.URL) (*ConnectionHelper, error){} + +// ConnectionHelper allows to connect to a remote host with custom stream provider binary. +type ConnectionHelper struct { + // ContextDialer can be passed to grpc.WithContextDialer + ContextDialer func(ctx context.Context, addr string) (net.Conn, error) +} + +// GetConnectionHelper returns BuildKit-specific connection helper for the given URL. +// GetConnectionHelper returns nil without error when no helper is registered for the scheme. +func GetConnectionHelper(daemonURL string) (*ConnectionHelper, error) { + u, err := url.Parse(daemonURL) + if err != nil { + return nil, err + } + + fn, ok := helpers[u.Scheme] + if !ok { + return nil, nil + } + + return fn(u) +} + +// Register registers new connectionhelper for scheme +func Register(scheme string, fn func(*url.URL) (*ConnectionHelper, error)) { + helpers[scheme] = fn +} diff --git a/vendor/github.com/moby/buildkit/client/diskusage.go b/vendor/github.com/moby/buildkit/client/diskusage.go new file mode 100644 index 00000000..8034f977 --- /dev/null +++ b/vendor/github.com/moby/buildkit/client/diskusage.go @@ -0,0 +1,84 @@ +package client + +import ( + "context" + "sort" + "time" + + controlapi "github.com/moby/buildkit/api/services/control" + "github.com/pkg/errors" +) + +type UsageInfo struct { + ID string + Mutable bool + InUse bool + Size int64 + + CreatedAt time.Time + LastUsedAt *time.Time + UsageCount int + Parent string + Description string + RecordType UsageRecordType + Shared bool +} + +func (c *Client) DiskUsage(ctx context.Context, opts ...DiskUsageOption) ([]*UsageInfo, error) { + info := &DiskUsageInfo{} + for _, o := range opts { + o.SetDiskUsageOption(info) + } + + req := &controlapi.DiskUsageRequest{Filter: info.Filter} + resp, err := c.controlClient().DiskUsage(ctx, req) + if err != nil { + return nil, errors.Wrap(err, "failed to call diskusage") + } + + var du []*UsageInfo + + for _, d := range resp.Record { + du = append(du, &UsageInfo{ + ID: d.ID, + Mutable: d.Mutable, + InUse: d.InUse, + Size: d.Size_, + Parent: d.Parent, + CreatedAt: d.CreatedAt, + Description: d.Description, + UsageCount: int(d.UsageCount), + LastUsedAt: d.LastUsedAt, + RecordType: UsageRecordType(d.RecordType), + Shared: d.Shared, + }) + } + + sort.Slice(du, func(i, j int) bool { + if du[i].Size == du[j].Size { + return du[i].ID > du[j].ID + } + return du[i].Size > du[j].Size + }) + + return du, nil +} + +type DiskUsageOption interface { + SetDiskUsageOption(*DiskUsageInfo) +} + +type DiskUsageInfo struct { + Filter []string +} + +type UsageRecordType string + +const ( + 
UsageRecordTypeInternal UsageRecordType = "internal" + UsageRecordTypeFrontend UsageRecordType = "frontend" + UsageRecordTypeLocalSource UsageRecordType = "source.local" + UsageRecordTypeGitCheckout UsageRecordType = "source.git.checkout" + UsageRecordTypeCacheMount UsageRecordType = "exec.cachemount" + UsageRecordTypeRegular UsageRecordType = "regular" +) diff --git a/vendor/github.com/moby/buildkit/client/exporters.go b/vendor/github.com/moby/buildkit/client/exporters.go new file mode 100644 index 00000000..0f70d59c --- /dev/null +++ b/vendor/github.com/moby/buildkit/client/exporters.go @@ -0,0 +1,9 @@ +package client + +const ( + ExporterImage = "image" + ExporterLocal = "local" + ExporterTar = "tar" + ExporterOCI = "oci" + ExporterDocker = "docker" +) diff --git a/vendor/github.com/moby/buildkit/client/filter.go b/vendor/github.com/moby/buildkit/client/filter.go new file mode 100644 index 00000000..b05fe59d --- /dev/null +++ b/vendor/github.com/moby/buildkit/client/filter.go @@ -0,0 +1,19 @@ +package client + +func WithFilter(f []string) Filter { + return Filter(f) +} + +type Filter []string + +func (f Filter) SetDiskUsageOption(di *DiskUsageInfo) { + di.Filter = f +} + +func (f Filter) SetPruneOption(pi *PruneInfo) { + pi.Filter = f +} + +func (f Filter) SetListWorkersOption(lwi *ListWorkersInfo) { + lwi.Filter = f +} diff --git a/vendor/github.com/moby/buildkit/client/graph.go b/vendor/github.com/moby/buildkit/client/graph.go new file mode 100644 index 00000000..bcfa8b83 --- /dev/null +++ b/vendor/github.com/moby/buildkit/client/graph.go @@ -0,0 +1,46 @@ +package client + +import ( + "time" + + digest "github.com/opencontainers/go-digest" +) + +type Vertex struct { + Digest digest.Digest + Inputs []digest.Digest + Name string + Started *time.Time + Completed *time.Time + Cached bool + Error string +} + +type VertexStatus struct { + ID string + Vertex digest.Digest + Name string + Total int64 + Current int64 + Timestamp time.Time + Started *time.Time + Completed *time.Time +} + +type VertexLog struct { + Vertex digest.Digest + Stream int + Data []byte + Timestamp time.Time +} + +type SolveStatus struct { + Vertexes []*Vertex + Statuses []*VertexStatus + Logs []*VertexLog +} + +type SolveResponse struct { + // ExporterResponse is also used for CacheExporter + ExporterResponse map[string]string +} diff --git a/vendor/github.com/moby/buildkit/client/llb/definition.go b/vendor/github.com/moby/buildkit/client/llb/definition.go new file mode 100644 index 00000000..f21d2a59 --- /dev/null +++ b/vendor/github.com/moby/buildkit/client/llb/definition.go @@ -0,0 +1,161 @@ +package llb + +import ( + "github.com/moby/buildkit/solver/pb" + digest "github.com/opencontainers/go-digest" + specs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +// DefinitionOp implements llb.Vertex using a marshalled definition. +// +// For example, after marshalling a LLB state and sending over the wire, the +// LLB state can be reconstructed from the definition. +type DefinitionOp struct { + MarshalCache + ops map[digest.Digest]*pb.Op + defs map[digest.Digest][]byte + metas map[digest.Digest]pb.OpMetadata + platforms map[digest.Digest]*specs.Platform + dgst digest.Digest + index pb.OutputIndex +} + +// NewDefinitionOp returns a new operation from a marshalled definition. 
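+//
+// A minimal round-trip sketch (assuming def is a *pb.Definition produced by
+// marshalling an llb.State elsewhere):
+//
+// op, err := NewDefinitionOp(def)
+// if err != nil {
+// return nil, err
+// }
+// st := NewState(op.Output()) // st mirrors the marshalled state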
+func NewDefinitionOp(def *pb.Definition) (*DefinitionOp, error) { + ops := make(map[digest.Digest]*pb.Op) + defs := make(map[digest.Digest][]byte) + + var dgst digest.Digest + for _, dt := range def.Def { + var op pb.Op + if err := (&op).Unmarshal(dt); err != nil { + return nil, errors.Wrap(err, "failed to parse llb proto op") + } + dgst = digest.FromBytes(dt) + ops[dgst] = &op + defs[dgst] = dt + } + + var index pb.OutputIndex + if dgst != "" { + index = ops[dgst].Inputs[0].Index + dgst = ops[dgst].Inputs[0].Digest + } + + return &DefinitionOp{ + ops: ops, + defs: defs, + metas: def.Metadata, + platforms: make(map[digest.Digest]*specs.Platform), + dgst: dgst, + index: index, + }, nil +} + +func (d *DefinitionOp) ToInput(c *Constraints) (*pb.Input, error) { + return d.Output().ToInput(c) +} + +func (d *DefinitionOp) Vertex() Vertex { + return d +} + +func (d *DefinitionOp) Validate() error { + // Scratch state has no digest, ops or metas. + if d.dgst == "" { + return nil + } + + if len(d.ops) == 0 || len(d.defs) == 0 || len(d.metas) == 0 { + return errors.Errorf("invalid definition op with no ops %d %d", len(d.ops), len(d.metas)) + } + + _, ok := d.ops[d.dgst] + if !ok { + return errors.Errorf("invalid definition op with unknown op %q", d.dgst) + } + + _, ok = d.defs[d.dgst] + if !ok { + return errors.Errorf("invalid definition op with unknown def %q", d.dgst) + } + + _, ok = d.metas[d.dgst] + if !ok { + return errors.Errorf("invalid definition op with unknown metas %q", d.dgst) + } + + // It is possible for d.index >= len(d.ops[d.dgst]) when depending on scratch + // images. + if d.index < 0 { + return errors.Errorf("invalid definition op with invalid index") + } + + return nil +} + +func (d *DefinitionOp) Marshal(c *Constraints) (digest.Digest, []byte, *pb.OpMetadata, error) { + if d.dgst == "" { + return "", nil, nil, errors.Errorf("cannot marshal empty definition op") + } + + if err := d.Validate(); err != nil { + return "", nil, nil, err + } + + meta := d.metas[d.dgst] + return d.dgst, d.defs[d.dgst], &meta, nil + +} + +func (d *DefinitionOp) Output() Output { + if d.dgst == "" { + return nil + } + + return &output{vertex: d, platform: d.platform(), getIndex: func() (pb.OutputIndex, error) { + return d.index, nil + }} +} + +func (d *DefinitionOp) Inputs() []Output { + if d.dgst == "" { + return nil + } + + var inputs []Output + + op := d.ops[d.dgst] + for _, input := range op.Inputs { + vtx := &DefinitionOp{ + ops: d.ops, + defs: d.defs, + metas: d.metas, + platforms: d.platforms, + dgst: input.Digest, + index: input.Index, + } + inputs = append(inputs, &output{vertex: vtx, platform: d.platform(), getIndex: func() (pb.OutputIndex, error) { + return pb.OutputIndex(vtx.index), nil + }}) + } + + return inputs +} + +func (d *DefinitionOp) platform() *specs.Platform { + platform, ok := d.platforms[d.dgst] + if ok { + return platform + } + + op := d.ops[d.dgst] + if op.Platform != nil { + spec := op.Platform.Spec() + platform = &spec + } + + d.platforms[d.dgst] = platform + return platform +} diff --git a/vendor/github.com/moby/buildkit/client/llb/exec.go b/vendor/github.com/moby/buildkit/client/llb/exec.go new file mode 100644 index 00000000..ade99278 --- /dev/null +++ b/vendor/github.com/moby/buildkit/client/llb/exec.go @@ -0,0 +1,654 @@ +package llb + +import ( + _ "crypto/sha256" + "fmt" + "net" + "sort" + + "github.com/moby/buildkit/solver/pb" + "github.com/moby/buildkit/util/system" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +type Meta struct { + 
Args []string + Env EnvList + Cwd string + User string + ProxyEnv *ProxyEnv + ExtraHosts []HostIP + Network pb.NetMode + Security pb.SecurityMode +} + +func NewExecOp(root Output, meta Meta, readOnly bool, c Constraints) *ExecOp { + e := &ExecOp{meta: meta, constraints: c} + rootMount := &mount{ + target: pb.RootMount, + source: root, + readonly: readOnly, + } + e.mounts = append(e.mounts, rootMount) + if readOnly { + e.root = root + } else { + o := &output{vertex: e, getIndex: e.getMountIndexFn(rootMount)} + if p := c.Platform; p != nil { + o.platform = p + } + e.root = o + } + rootMount.output = e.root + return e +} + +type mount struct { + target string + readonly bool + source Output + output Output + selector string + cacheID string + tmpfs bool + cacheSharing CacheMountSharingMode + noOutput bool +} + +type ExecOp struct { + MarshalCache + root Output + mounts []*mount + meta Meta + constraints Constraints + isValidated bool + secrets []SecretInfo + ssh []SSHInfo +} + +func (e *ExecOp) AddMount(target string, source Output, opt ...MountOption) Output { + m := &mount{ + target: target, + source: source, + } + for _, o := range opt { + o(m) + } + e.mounts = append(e.mounts, m) + if m.readonly { + m.output = source + } else if m.tmpfs { + m.output = &output{vertex: e, err: errors.Errorf("tmpfs mount for %s can't be used as a parent", target)} + } else if m.noOutput { + m.output = &output{vertex: e, err: errors.Errorf("mount marked no-output and %s can't be used as a parent", target)} + } else { + o := &output{vertex: e, getIndex: e.getMountIndexFn(m)} + if p := e.constraints.Platform; p != nil { + o.platform = p + } + m.output = o + } + e.Store(nil, nil, nil) + e.isValidated = false + return m.output +} + +func (e *ExecOp) GetMount(target string) Output { + for _, m := range e.mounts { + if m.target == target { + return m.output + } + } + return nil +} + +func (e *ExecOp) Validate() error { + if e.isValidated { + return nil + } + if len(e.meta.Args) == 0 { + return errors.Errorf("arguments are required") + } + if e.meta.Cwd == "" { + return errors.Errorf("working directory is required") + } + for _, m := range e.mounts { + if m.source != nil { + if err := m.source.Vertex().Validate(); err != nil { + return err + } + } + } + e.isValidated = true + return nil +} + +func (e *ExecOp) Marshal(c *Constraints) (digest.Digest, []byte, *pb.OpMetadata, error) { + if e.Cached(c) { + return e.Load() + } + if err := e.Validate(); err != nil { + return "", nil, nil, err + } + // make sure mounts are sorted + sort.Slice(e.mounts, func(i, j int) bool { + return e.mounts[i].target < e.mounts[j].target + }) + + if len(e.ssh) > 0 { + for i, s := range e.ssh { + if s.Target == "" { + e.ssh[i].Target = fmt.Sprintf("/run/buildkit/ssh_agent.%d", i) + } + } + if _, ok := e.meta.Env.Get("SSH_AUTH_SOCK"); !ok { + e.meta.Env = e.meta.Env.AddOrReplace("SSH_AUTH_SOCK", e.ssh[0].Target) + } + } + if c.Caps != nil { + if err := c.Caps.Supports(pb.CapExecMetaSetsDefaultPath); err != nil { + e.meta.Env = e.meta.Env.SetDefault("PATH", system.DefaultPathEnv) + } else { + addCap(&e.constraints, pb.CapExecMetaSetsDefaultPath) + } + } + + meta := &pb.Meta{ + Args: e.meta.Args, + Env: e.meta.Env.ToArray(), + Cwd: e.meta.Cwd, + User: e.meta.User, + } + if len(e.meta.ExtraHosts) > 0 { + hosts := make([]*pb.HostIP, len(e.meta.ExtraHosts)) + for i, h := range e.meta.ExtraHosts { + hosts[i] = &pb.HostIP{Host: h.Host, IP: h.IP.String()} + } + meta.ExtraHosts = hosts + } + + peo := &pb.ExecOp{ + Meta: meta, + Network: 
e.meta.Network, + Security: e.meta.Security, + } + if e.meta.Network != NetModeSandbox { + addCap(&e.constraints, pb.CapExecMetaNetwork) + } + + if e.meta.Security != SecurityModeSandbox { + addCap(&e.constraints, pb.CapExecMetaSecurity) + } + + if p := e.meta.ProxyEnv; p != nil { + peo.Meta.ProxyEnv = &pb.ProxyEnv{ + HttpProxy: p.HttpProxy, + HttpsProxy: p.HttpsProxy, + FtpProxy: p.FtpProxy, + NoProxy: p.NoProxy, + } + addCap(&e.constraints, pb.CapExecMetaProxy) + } + + addCap(&e.constraints, pb.CapExecMetaBase) + + for _, m := range e.mounts { + if m.selector != "" { + addCap(&e.constraints, pb.CapExecMountSelector) + } + if m.cacheID != "" { + addCap(&e.constraints, pb.CapExecMountCache) + addCap(&e.constraints, pb.CapExecMountCacheSharing) + } else if m.tmpfs { + addCap(&e.constraints, pb.CapExecMountTmpfs) + } else if m.source != nil { + addCap(&e.constraints, pb.CapExecMountBind) + } + } + + if len(e.secrets) > 0 { + addCap(&e.constraints, pb.CapExecMountSecret) + } + + if len(e.ssh) > 0 { + addCap(&e.constraints, pb.CapExecMountSSH) + } + + pop, md := MarshalConstraints(c, &e.constraints) + pop.Op = &pb.Op_Exec{ + Exec: peo, + } + + outIndex := 0 + for _, m := range e.mounts { + inputIndex := pb.InputIndex(len(pop.Inputs)) + if m.source != nil { + if m.tmpfs { + return "", nil, nil, errors.Errorf("tmpfs mounts must use scratch") + } + inp, err := m.source.ToInput(c) + if err != nil { + return "", nil, nil, err + } + + newInput := true + + for i, inp2 := range pop.Inputs { + if *inp == *inp2 { + inputIndex = pb.InputIndex(i) + newInput = false + break + } + } + + if newInput { + pop.Inputs = append(pop.Inputs, inp) + } + } else { + inputIndex = pb.Empty + } + + outputIndex := pb.OutputIndex(-1) + if !m.noOutput && !m.readonly && m.cacheID == "" && !m.tmpfs { + outputIndex = pb.OutputIndex(outIndex) + outIndex++ + } + + pm := &pb.Mount{ + Input: inputIndex, + Dest: m.target, + Readonly: m.readonly, + Output: outputIndex, + Selector: m.selector, + } + if m.cacheID != "" { + pm.MountType = pb.MountType_CACHE + pm.CacheOpt = &pb.CacheOpt{ + ID: m.cacheID, + } + switch m.cacheSharing { + case CacheMountShared: + pm.CacheOpt.Sharing = pb.CacheSharingOpt_SHARED + case CacheMountPrivate: + pm.CacheOpt.Sharing = pb.CacheSharingOpt_PRIVATE + case CacheMountLocked: + pm.CacheOpt.Sharing = pb.CacheSharingOpt_LOCKED + } + } + if m.tmpfs { + pm.MountType = pb.MountType_TMPFS + } + peo.Mounts = append(peo.Mounts, pm) + } + + for _, s := range e.secrets { + pm := &pb.Mount{ + Dest: s.Target, + MountType: pb.MountType_SECRET, + SecretOpt: &pb.SecretOpt{ + ID: s.ID, + Uid: uint32(s.UID), + Gid: uint32(s.GID), + Optional: s.Optional, + Mode: uint32(s.Mode), + }, + } + peo.Mounts = append(peo.Mounts, pm) + } + + for _, s := range e.ssh { + pm := &pb.Mount{ + Dest: s.Target, + MountType: pb.MountType_SSH, + SSHOpt: &pb.SSHOpt{ + ID: s.ID, + Uid: uint32(s.UID), + Gid: uint32(s.GID), + Mode: uint32(s.Mode), + Optional: s.Optional, + }, + } + peo.Mounts = append(peo.Mounts, pm) + } + + dt, err := pop.Marshal() + if err != nil { + return "", nil, nil, err + } + e.Store(dt, md, c) + return e.Load() +} + +func (e *ExecOp) Output() Output { + return e.root +} + +func (e *ExecOp) Inputs() (inputs []Output) { + mm := map[Output]struct{}{} + for _, m := range e.mounts { + if m.source != nil { + mm[m.source] = struct{}{} + } + } + for o := range mm { + inputs = append(inputs, o) + } + return +} + +func (e *ExecOp) getMountIndexFn(m *mount) func() (pb.OutputIndex, error) { + return func() (pb.OutputIndex, error) { + 
// make sure mounts are sorted + sort.Slice(e.mounts, func(i, j int) bool { + return e.mounts[i].target < e.mounts[j].target + }) + + i := 0 + for _, m2 := range e.mounts { + if m2.noOutput || m2.readonly || m2.cacheID != "" { + continue + } + if m == m2 { + return pb.OutputIndex(i), nil + } + i++ + } + return pb.OutputIndex(0), errors.Errorf("invalid mount: %s", m.target) + } +} + +type ExecState struct { + State + exec *ExecOp +} + +func (e ExecState) AddMount(target string, source State, opt ...MountOption) State { + return source.WithOutput(e.exec.AddMount(target, source.Output(), opt...)) +} + +func (e ExecState) GetMount(target string) State { + return NewState(e.exec.GetMount(target)) +} + +func (e ExecState) Root() State { + return e.State +} + +type MountOption func(*mount) + +func Readonly(m *mount) { + m.readonly = true +} + +func SourcePath(src string) MountOption { + return func(m *mount) { + m.selector = src + } +} + +func ForceNoOutput(m *mount) { + m.noOutput = true +} + +func AsPersistentCacheDir(id string, sharing CacheMountSharingMode) MountOption { + return func(m *mount) { + m.cacheID = id + m.cacheSharing = sharing + } +} + +func Tmpfs() MountOption { + return func(m *mount) { + m.tmpfs = true + } +} + +type RunOption interface { + SetRunOption(es *ExecInfo) +} + +type runOptionFunc func(*ExecInfo) + +func (fn runOptionFunc) SetRunOption(ei *ExecInfo) { + fn(ei) +} + +func Network(n pb.NetMode) RunOption { + return runOptionFunc(func(ei *ExecInfo) { + ei.State = network(n)(ei.State) + }) +} + +func Security(s pb.SecurityMode) RunOption { + return runOptionFunc(func(ei *ExecInfo) { + ei.State = security(s)(ei.State) + }) +} + +func Shlex(str string) RunOption { + return runOptionFunc(func(ei *ExecInfo) { + ei.State = shlexf(str, false)(ei.State) + }) +} +func Shlexf(str string, v ...interface{}) RunOption { + return runOptionFunc(func(ei *ExecInfo) { + ei.State = shlexf(str, true, v...)(ei.State) + }) +} + +func Args(a []string) RunOption { + return runOptionFunc(func(ei *ExecInfo) { + ei.State = args(a...)(ei.State) + }) +} + +func AddEnv(key, value string) RunOption { + return runOptionFunc(func(ei *ExecInfo) { + ei.State = ei.State.AddEnv(key, value) + }) +} + +func AddEnvf(key, value string, v ...interface{}) RunOption { + return runOptionFunc(func(ei *ExecInfo) { + ei.State = ei.State.AddEnvf(key, value, v...) + }) +} + +func User(str string) RunOption { + return runOptionFunc(func(ei *ExecInfo) { + ei.State = ei.State.User(str) + }) +} + +func Dir(str string) RunOption { + return runOptionFunc(func(ei *ExecInfo) { + ei.State = ei.State.Dir(str) + }) +} +func Dirf(str string, v ...interface{}) RunOption { + return runOptionFunc(func(ei *ExecInfo) { + ei.State = ei.State.Dirf(str, v...) + }) +} + +func AddExtraHost(host string, ip net.IP) RunOption { + return runOptionFunc(func(ei *ExecInfo) { + ei.State = ei.State.AddExtraHost(host, ip) + }) +} + +func Reset(s State) RunOption { + return runOptionFunc(func(ei *ExecInfo) { + ei.State = ei.State.Reset(s) + }) +} + +func With(so ...StateOption) RunOption { + return runOptionFunc(func(ei *ExecInfo) { + ei.State = ei.State.With(so...) 
+ }) +} + +func AddMount(dest string, mountState State, opts ...MountOption) RunOption { + return runOptionFunc(func(ei *ExecInfo) { + ei.Mounts = append(ei.Mounts, MountInfo{dest, mountState.Output(), opts}) + }) +} + +func AddSSHSocket(opts ...SSHOption) RunOption { + return runOptionFunc(func(ei *ExecInfo) { + s := &SSHInfo{ + Mode: 0600, + } + for _, opt := range opts { + opt.SetSSHOption(s) + } + ei.SSH = append(ei.SSH, *s) + }) +} + +type SSHOption interface { + SetSSHOption(*SSHInfo) +} + +type sshOptionFunc func(*SSHInfo) + +func (fn sshOptionFunc) SetSSHOption(si *SSHInfo) { + fn(si) +} + +func SSHID(id string) SSHOption { + return sshOptionFunc(func(si *SSHInfo) { + si.ID = id + }) +} + +func SSHSocketTarget(target string) SSHOption { + return sshOptionFunc(func(si *SSHInfo) { + si.Target = target + }) +} + +func SSHSocketOpt(target string, uid, gid, mode int) SSHOption { + return sshOptionFunc(func(si *SSHInfo) { + si.Target = target + si.UID = uid + si.GID = gid + si.Mode = mode + }) +} + +var SSHOptional = sshOptionFunc(func(si *SSHInfo) { + si.Optional = true +}) + +type SSHInfo struct { + ID string + Target string + Mode int + UID int + GID int + Optional bool +} + +func AddSecret(dest string, opts ...SecretOption) RunOption { + return runOptionFunc(func(ei *ExecInfo) { + s := &SecretInfo{ID: dest, Target: dest, Mode: 0400} + for _, opt := range opts { + opt.SetSecretOption(s) + } + ei.Secrets = append(ei.Secrets, *s) + }) +} + +type SecretOption interface { + SetSecretOption(*SecretInfo) +} + +type secretOptionFunc func(*SecretInfo) + +func (fn secretOptionFunc) SetSecretOption(si *SecretInfo) { + fn(si) +} + +type SecretInfo struct { + ID string + Target string + Mode int + UID int + GID int + Optional bool +} + +var SecretOptional = secretOptionFunc(func(si *SecretInfo) { + si.Optional = true +}) + +func SecretID(id string) SecretOption { + return secretOptionFunc(func(si *SecretInfo) { + si.ID = id + }) +} + +func SecretFileOpt(uid, gid, mode int) SecretOption { + return secretOptionFunc(func(si *SecretInfo) { + si.UID = uid + si.GID = gid + si.Mode = mode + }) +} + +func ReadonlyRootFS() RunOption { + return runOptionFunc(func(ei *ExecInfo) { + ei.ReadonlyRootFS = true + }) +} + +func WithProxy(ps ProxyEnv) RunOption { + return runOptionFunc(func(ei *ExecInfo) { + ei.ProxyEnv = &ps + }) +} + +type ExecInfo struct { + constraintsWrapper + State State + Mounts []MountInfo + ReadonlyRootFS bool + ProxyEnv *ProxyEnv + Secrets []SecretInfo + SSH []SSHInfo +} + +type MountInfo struct { + Target string + Source Output + Opts []MountOption +} + +type ProxyEnv struct { + HttpProxy string + HttpsProxy string + FtpProxy string + NoProxy string +} + +type CacheMountSharingMode int + +const ( + CacheMountShared CacheMountSharingMode = iota + CacheMountPrivate + CacheMountLocked +) + +const ( + NetModeSandbox = pb.NetMode_UNSET + NetModeHost = pb.NetMode_HOST + NetModeNone = pb.NetMode_NONE +) + +const ( + SecurityModeInsecure = pb.SecurityMode_INSECURE + SecurityModeSandbox = pb.SecurityMode_SANDBOX +) diff --git a/vendor/github.com/moby/buildkit/client/llb/fileop.go b/vendor/github.com/moby/buildkit/client/llb/fileop.go new file mode 100644 index 00000000..c95a1d65 --- /dev/null +++ b/vendor/github.com/moby/buildkit/client/llb/fileop.go @@ -0,0 +1,727 @@ +package llb + +import ( + _ "crypto/sha256" + "os" + "path" + "strconv" + "strings" + "time" + + "github.com/moby/buildkit/solver/pb" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +// Examples: +// 
local := llb.Local(...) +// llb.Image().Dir("/abc").File(Mkdir("./foo").Mkfile("/abc/foo/bar", []byte("data"))) +// llb.Image().File(Mkdir("/foo").Mkfile("/foo/bar", []byte("data"))) +// llb.Image().File(Copy(local, "/foo", "/bar")).File(Copy(local, "/foo2", "/bar2")) +// +// a := Mkdir("./foo") // *FileAction /ced/foo +// b := Mkdir("./bar") // /abc/bar +// c := b.Copy(a.WithState(llb.Scratch().Dir("/ced")), "./foo", "./baz") // /abc/baz +// llb.Image().Dir("/abc").File(c) +// +// In future this can be extended to multiple outputs with: +// a := Mkdir("./foo") +// b, id := a.GetSelector() +// c := b.Mkdir("./bar") +// filestate = state.File(c) +// filestate.GetOutput(id).Exec() + +func NewFileOp(s State, action *FileAction, c Constraints) *FileOp { + action = action.bind(s) + + f := &FileOp{ + action: action, + constraints: c, + } + + f.output = &output{vertex: f, getIndex: func() (pb.OutputIndex, error) { + return pb.OutputIndex(0), nil + }} + + return f +} + +// CopyInput is either llb.State or *FileActionWithState +type CopyInput interface { + isFileOpCopyInput() +} + +type subAction interface { + toProtoAction(string, pb.InputIndex) pb.IsFileAction +} + +type FileAction struct { + state *State + prev *FileAction + action subAction + err error +} + +func (fa *FileAction) Mkdir(p string, m os.FileMode, opt ...MkdirOption) *FileAction { + a := Mkdir(p, m, opt...) + a.prev = fa + return a +} + +func (fa *FileAction) Mkfile(p string, m os.FileMode, dt []byte, opt ...MkfileOption) *FileAction { + a := Mkfile(p, m, dt, opt...) + a.prev = fa + return a +} + +func (fa *FileAction) Rm(p string, opt ...RmOption) *FileAction { + a := Rm(p, opt...) + a.prev = fa + return a +} + +func (fa *FileAction) Copy(input CopyInput, src, dest string, opt ...CopyOption) *FileAction { + a := Copy(input, src, dest, opt...) 
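+ // chain the new copy action onto the current one; marshalState later walks
+ // these prev pointers to emit the sub-actions in order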
+ a.prev = fa + return a +} + +func (fa *FileAction) allOutputs(m map[Output]struct{}) { + if fa == nil { + return + } + if fa.state != nil && fa.state.Output() != nil { + m[fa.state.Output()] = struct{}{} + } + + if a, ok := fa.action.(*fileActionCopy); ok { + if a.state != nil { + if out := a.state.Output(); out != nil { + m[out] = struct{}{} + } + } else if a.fas != nil { + a.fas.allOutputs(m) + } + } + fa.prev.allOutputs(m) +} + +func (fa *FileAction) bind(s State) *FileAction { + if fa == nil { + return nil + } + fa2 := *fa + fa2.prev = fa.prev.bind(s) + fa2.state = &s + return &fa2 +} + +func (fa *FileAction) WithState(s State) CopyInput { + return &fileActionWithState{FileAction: fa.bind(s)} +} + +type fileActionWithState struct { + *FileAction +} + +func (fas *fileActionWithState) isFileOpCopyInput() {} + +func Mkdir(p string, m os.FileMode, opt ...MkdirOption) *FileAction { + var mi MkdirInfo + for _, o := range opt { + o.SetMkdirOption(&mi) + } + return &FileAction{ + action: &fileActionMkdir{ + file: p, + mode: m, + info: mi, + }, + } +} + +type fileActionMkdir struct { + file string + mode os.FileMode + info MkdirInfo +} + +func (a *fileActionMkdir) toProtoAction(parent string, base pb.InputIndex) pb.IsFileAction { + return &pb.FileAction_Mkdir{ + Mkdir: &pb.FileActionMkDir{ + Path: normalizePath(parent, a.file, false), + Mode: int32(a.mode & 0777), + MakeParents: a.info.MakeParents, + Owner: a.info.ChownOpt.marshal(base), + Timestamp: marshalTime(a.info.CreatedTime), + }, + } +} + +type MkdirOption interface { + SetMkdirOption(*MkdirInfo) +} + +type ChownOption interface { + MkdirOption + MkfileOption + CopyOption +} + +type mkdirOptionFunc func(*MkdirInfo) + +func (fn mkdirOptionFunc) SetMkdirOption(mi *MkdirInfo) { + fn(mi) +} + +var _ MkdirOption = &MkdirInfo{} + +func WithParents(b bool) MkdirOption { + return mkdirOptionFunc(func(mi *MkdirInfo) { + mi.MakeParents = b + }) +} + +type MkdirInfo struct { + MakeParents bool + ChownOpt *ChownOpt + CreatedTime *time.Time +} + +func (mi *MkdirInfo) SetMkdirOption(mi2 *MkdirInfo) { + *mi2 = *mi +} + +func WithUser(name string) ChownOption { + opt := ChownOpt{} + + parts := strings.SplitN(name, ":", 2) + for i, v := range parts { + switch i { + case 0: + uid, err := parseUID(v) + if err != nil { + opt.User = &UserOpt{Name: v} + } else { + opt.User = &UserOpt{UID: uid} + } + case 1: + gid, err := parseUID(v) + if err != nil { + opt.Group = &UserOpt{Name: v} + } else { + opt.Group = &UserOpt{UID: gid} + } + } + } + + return opt +} + +func parseUID(str string) (int, error) { + if str == "root" { + return 0, nil + } + uid, err := strconv.ParseInt(str, 10, 32) + if err != nil { + return 0, err + } + return int(uid), nil +} + +func WithUIDGID(uid, gid int) ChownOption { + return ChownOpt{ + User: &UserOpt{UID: uid}, + Group: &UserOpt{UID: gid}, + } +} + +type ChownOpt struct { + User *UserOpt + Group *UserOpt +} + +func (co ChownOpt) SetMkdirOption(mi *MkdirInfo) { + mi.ChownOpt = &co +} +func (co ChownOpt) SetMkfileOption(mi *MkfileInfo) { + mi.ChownOpt = &co +} +func (co ChownOpt) SetCopyOption(mi *CopyInfo) { + mi.ChownOpt = &co +} + +func (cp *ChownOpt) marshal(base pb.InputIndex) *pb.ChownOpt { + if cp == nil { + return nil + } + return &pb.ChownOpt{ + User: cp.User.marshal(base), + Group: cp.Group.marshal(base), + } +} + +type UserOpt struct { + UID int + Name string +} + +func (up *UserOpt) marshal(base pb.InputIndex) *pb.UserOpt { + if up == nil { + return nil + } + if up.Name != "" { + return &pb.UserOpt{User: 
&pb.UserOpt_ByName{ByName: &pb.NamedUserOpt{ + Name: up.Name, Input: base}}} + } + return &pb.UserOpt{User: &pb.UserOpt_ByID{ByID: uint32(up.UID)}} +} + +func Mkfile(p string, m os.FileMode, dt []byte, opts ...MkfileOption) *FileAction { + var mi MkfileInfo + for _, o := range opts { + o.SetMkfileOption(&mi) + } + + return &FileAction{ + action: &fileActionMkfile{ + file: p, + mode: m, + dt: dt, + info: mi, + }, + } +} + +type MkfileOption interface { + SetMkfileOption(*MkfileInfo) +} + +type MkfileInfo struct { + ChownOpt *ChownOpt + CreatedTime *time.Time +} + +func (mi *MkfileInfo) SetMkfileOption(mi2 *MkfileInfo) { + *mi2 = *mi +} + +var _ MkfileOption = &MkfileInfo{} + +type fileActionMkfile struct { + file string + mode os.FileMode + dt []byte + info MkfileInfo +} + +func (a *fileActionMkfile) toProtoAction(parent string, base pb.InputIndex) pb.IsFileAction { + return &pb.FileAction_Mkfile{ + Mkfile: &pb.FileActionMkFile{ + Path: normalizePath(parent, a.file, false), + Mode: int32(a.mode & 0777), + Data: a.dt, + Owner: a.info.ChownOpt.marshal(base), + Timestamp: marshalTime(a.info.CreatedTime), + }, + } +} + +func Rm(p string, opts ...RmOption) *FileAction { + var mi RmInfo + for _, o := range opts { + o.SetRmOption(&mi) + } + + return &FileAction{ + action: &fileActionRm{ + file: p, + info: mi, + }, + } +} + +type RmOption interface { + SetRmOption(*RmInfo) +} + +type rmOptionFunc func(*RmInfo) + +func (fn rmOptionFunc) SetRmOption(mi *RmInfo) { + fn(mi) +} + +type RmInfo struct { + AllowNotFound bool + AllowWildcard bool +} + +func (mi *RmInfo) SetRmOption(mi2 *RmInfo) { + *mi2 = *mi +} + +var _ RmOption = &RmInfo{} + +func WithAllowNotFound(b bool) RmOption { + return rmOptionFunc(func(mi *RmInfo) { + mi.AllowNotFound = b + }) +} + +func WithAllowWildcard(b bool) RmOption { + return rmOptionFunc(func(mi *RmInfo) { + mi.AllowWildcard = b + }) +} + +type fileActionRm struct { + file string + info RmInfo +} + +func (a *fileActionRm) toProtoAction(parent string, base pb.InputIndex) pb.IsFileAction { + return &pb.FileAction_Rm{ + Rm: &pb.FileActionRm{ + Path: normalizePath(parent, a.file, false), + AllowNotFound: a.info.AllowNotFound, + AllowWildcard: a.info.AllowWildcard, + }, + } +} + +func Copy(input CopyInput, src, dest string, opts ...CopyOption) *FileAction { + var state *State + var fas *fileActionWithState + var err error + if st, ok := input.(State); ok { + state = &st + } else if v, ok := input.(*fileActionWithState); ok { + fas = v + } else { + err = errors.Errorf("invalid input type %T for copy", input) + } + + var mi CopyInfo + for _, o := range opts { + o.SetCopyOption(&mi) + } + + return &FileAction{ + action: &fileActionCopy{ + state: state, + fas: fas, + src: src, + dest: dest, + info: mi, + }, + err: err, + } +} + +type CopyOption interface { + SetCopyOption(*CopyInfo) +} + +type CopyInfo struct { + Mode *os.FileMode + FollowSymlinks bool + CopyDirContentsOnly bool + AttemptUnpack bool + CreateDestPath bool + AllowWildcard bool + AllowEmptyWildcard bool + ChownOpt *ChownOpt + CreatedTime *time.Time +} + +func (mi *CopyInfo) SetCopyOption(mi2 *CopyInfo) { + *mi2 = *mi +} + +var _ CopyOption = &CopyInfo{} + +type fileActionCopy struct { + state *State + fas *fileActionWithState + src string + dest string + info CopyInfo +} + +func (a *fileActionCopy) toProtoAction(parent string, base pb.InputIndex) pb.IsFileAction { + c := &pb.FileActionCopy{ + Src: a.sourcePath(), + Dest: normalizePath(parent, a.dest, true), + Owner: a.info.ChownOpt.marshal(base), + AllowWildcard: 
a.info.AllowWildcard, + AllowEmptyWildcard: a.info.AllowEmptyWildcard, + FollowSymlink: a.info.FollowSymlinks, + DirCopyContents: a.info.CopyDirContentsOnly, + AttemptUnpackDockerCompatibility: a.info.AttemptUnpack, + CreateDestPath: a.info.CreateDestPath, + Timestamp: marshalTime(a.info.CreatedTime), + } + if a.info.Mode != nil { + c.Mode = int32(*a.info.Mode) + } else { + c.Mode = -1 + } + return &pb.FileAction_Copy{ + Copy: c, + } +} + +func (c *fileActionCopy) sourcePath() string { + p := path.Clean(c.src) + if !path.IsAbs(p) { + if c.state != nil { + p = path.Join("/", c.state.GetDir(), p) + } else if c.fas != nil { + p = path.Join("/", c.fas.state.GetDir(), p) + } + } + return p +} + +type CreatedTime time.Time + +func WithCreatedTime(t time.Time) CreatedTime { + return CreatedTime(t) +} + +func (c CreatedTime) SetMkdirOption(mi *MkdirInfo) { + mi.CreatedTime = (*time.Time)(&c) +} + +func (c CreatedTime) SetMkfileOption(mi *MkfileInfo) { + mi.CreatedTime = (*time.Time)(&c) +} + +func (c CreatedTime) SetCopyOption(mi *CopyInfo) { + mi.CreatedTime = (*time.Time)(&c) +} + +func marshalTime(t *time.Time) int64 { + if t == nil { + return -1 + } + return t.UnixNano() +} + +type FileOp struct { + MarshalCache + action *FileAction + output Output + + constraints Constraints + isValidated bool +} + +func (f *FileOp) Validate() error { + if f.isValidated { + return nil + } + if f.action == nil { + return errors.Errorf("action is required") + } + f.isValidated = true + return nil +} + +type marshalState struct { + visited map[*FileAction]*fileActionState + inputs []*pb.Input + actions []*fileActionState +} + +func newMarshalState() *marshalState { + return &marshalState{ + visited: map[*FileAction]*fileActionState{}, + } +} + +type fileActionState struct { + base pb.InputIndex + input pb.InputIndex + inputRelative *int + input2 pb.InputIndex + input2Relative *int + target int + action subAction + fa *FileAction +} + +func (ms *marshalState) addInput(st *fileActionState, c *Constraints, o Output) (pb.InputIndex, error) { + inp, err := o.ToInput(c) + if err != nil { + return 0, err + } + for i, inp2 := range ms.inputs { + if *inp == *inp2 { + return pb.InputIndex(i), nil + } + } + i := pb.InputIndex(len(ms.inputs)) + ms.inputs = append(ms.inputs, inp) + return i, nil +} + +func (ms *marshalState) add(fa *FileAction, c *Constraints) (*fileActionState, error) { + if st, ok := ms.visited[fa]; ok { + return st, nil + } + + if fa.err != nil { + return nil, fa.err + } + + var prevState *fileActionState + if parent := fa.prev; parent != nil { + var err error + prevState, err = ms.add(parent, c) + if err != nil { + return nil, err + } + } + + st := &fileActionState{ + action: fa.action, + input: -1, + input2: -1, + base: -1, + fa: fa, + } + + if source := fa.state.Output(); source != nil { + inp, err := ms.addInput(st, c, source) + if err != nil { + return nil, err + } + st.base = inp + } + + if fa.prev == nil { + st.input = st.base + } else { + st.inputRelative = &prevState.target + } + + if a, ok := fa.action.(*fileActionCopy); ok { + if a.state != nil { + if out := a.state.Output(); out != nil { + inp, err := ms.addInput(st, c, out) + if err != nil { + return nil, err + } + st.input2 = inp + } + } else if a.fas != nil { + src, err := ms.add(a.fas.FileAction, c) + if err != nil { + return nil, err + } + st.input2Relative = &src.target + } else { + return nil, errors.Errorf("invalid empty source for copy") + } + } + + st.target = len(ms.actions) + + ms.visited[fa] = st + ms.actions = append(ms.actions, 
st) + + return st, nil +} + +func (f *FileOp) Marshal(c *Constraints) (digest.Digest, []byte, *pb.OpMetadata, error) { + if f.Cached(c) { + return f.Load() + } + if err := f.Validate(); err != nil { + return "", nil, nil, err + } + + addCap(&f.constraints, pb.CapFileBase) + + pfo := &pb.FileOp{} + + pop, md := MarshalConstraints(c, &f.constraints) + pop.Op = &pb.Op_File{ + File: pfo, + } + + state := newMarshalState() + _, err := state.add(f.action, c) + if err != nil { + return "", nil, nil, err + } + pop.Inputs = state.inputs + + for i, st := range state.actions { + output := pb.OutputIndex(-1) + if i+1 == len(state.actions) { + output = 0 + } + + var parent string + if st.fa.state != nil { + parent = st.fa.state.GetDir() + } + + pfo.Actions = append(pfo.Actions, &pb.FileAction{ + Input: getIndex(st.input, len(state.inputs), st.inputRelative), + SecondaryInput: getIndex(st.input2, len(state.inputs), st.input2Relative), + Output: output, + Action: st.action.toProtoAction(parent, st.base), + }) + } + + dt, err := pop.Marshal() + if err != nil { + return "", nil, nil, err + } + f.Store(dt, md, c) + return f.Load() +} + +func normalizePath(parent, p string, keepSlash bool) string { + origPath := p + p = path.Clean(p) + if !path.IsAbs(p) { + p = path.Join("/", parent, p) + } + if keepSlash { + if strings.HasSuffix(origPath, "/") && !strings.HasSuffix(p, "/") { + p += "/" + } else if strings.HasSuffix(origPath, "/.") { + if p != "/" { + p += "/" + } + p += "." + } + } + return p +} + +func (f *FileOp) Output() Output { + return f.output +} + +func (f *FileOp) Inputs() (inputs []Output) { + mm := map[Output]struct{}{} + + f.action.allOutputs(mm) + + for o := range mm { + inputs = append(inputs, o) + } + return inputs +} + +func getIndex(input pb.InputIndex, len int, relative *int) pb.InputIndex { + if relative != nil { + return pb.InputIndex(len + *relative) + } + return input +} diff --git a/vendor/github.com/moby/buildkit/client/llb/imagemetaresolver/resolver.go b/vendor/github.com/moby/buildkit/client/llb/imagemetaresolver/resolver.go new file mode 100644 index 00000000..0dbd4737 --- /dev/null +++ b/vendor/github.com/moby/buildkit/client/llb/imagemetaresolver/resolver.go @@ -0,0 +1,109 @@ +package imagemetaresolver + +import ( + "context" + "net/http" + "sync" + + "github.com/containerd/containerd/platforms" + "github.com/containerd/containerd/remotes" + "github.com/containerd/containerd/remotes/docker" + "github.com/docker/docker/pkg/locker" + "github.com/moby/buildkit/client/llb" + "github.com/moby/buildkit/util/contentutil" + "github.com/moby/buildkit/util/imageutil" + digest "github.com/opencontainers/go-digest" + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +var defaultImageMetaResolver llb.ImageMetaResolver +var defaultImageMetaResolverOnce sync.Once + +var WithDefault = imageOptionFunc(func(ii *llb.ImageInfo) { + llb.WithMetaResolver(Default()).SetImageOption(ii) +}) + +type imageMetaResolverOpts struct { + platform *specs.Platform +} + +type ImageMetaResolverOpt func(o *imageMetaResolverOpts) + +func WithDefaultPlatform(p *specs.Platform) ImageMetaResolverOpt { + return func(o *imageMetaResolverOpts) { + o.platform = p + } +} + +func New(with ...ImageMetaResolverOpt) llb.ImageMetaResolver { + var opts imageMetaResolverOpts + for _, f := range with { + f(&opts) + } + return &imageMetaResolver{ + resolver: docker.NewResolver(docker.ResolverOptions{ + Client: http.DefaultClient, + }), + platform: opts.platform, + buffer: contentutil.NewBuffer(), + cache: 
map[string]resolveResult{}, + locker: locker.New(), + } +} + +func Default() llb.ImageMetaResolver { + defaultImageMetaResolverOnce.Do(func() { + defaultImageMetaResolver = New() + }) + return defaultImageMetaResolver +} + +type imageMetaResolver struct { + resolver remotes.Resolver + buffer contentutil.Buffer + platform *specs.Platform + locker *locker.Locker + cache map[string]resolveResult +} + +type resolveResult struct { + config []byte + dgst digest.Digest +} + +func (imr *imageMetaResolver) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt) (digest.Digest, []byte, error) { + imr.locker.Lock(ref) + defer imr.locker.Unlock(ref) + + platform := opt.Platform + if platform == nil { + platform = imr.platform + } + + k := imr.key(ref, platform) + + if res, ok := imr.cache[k]; ok { + return res.dgst, res.config, nil + } + + dgst, config, err := imageutil.Config(ctx, ref, imr.resolver, imr.buffer, nil, platform) + if err != nil { + return "", nil, err + } + + imr.cache[k] = resolveResult{dgst: dgst, config: config} + return dgst, config, nil +} + +func (imr *imageMetaResolver) key(ref string, platform *specs.Platform) string { + if platform != nil { + ref += platforms.Format(*platform) + } + return ref +} + +type imageOptionFunc func(*llb.ImageInfo) + +func (fn imageOptionFunc) SetImageOption(ii *llb.ImageInfo) { + fn(ii) +} diff --git a/vendor/github.com/moby/buildkit/client/llb/marshal.go b/vendor/github.com/moby/buildkit/client/llb/marshal.go new file mode 100644 index 00000000..65a352fa --- /dev/null +++ b/vendor/github.com/moby/buildkit/client/llb/marshal.go @@ -0,0 +1,112 @@ +package llb + +import ( + "io" + "io/ioutil" + + "github.com/containerd/containerd/platforms" + "github.com/moby/buildkit/solver/pb" + digest "github.com/opencontainers/go-digest" +) + +// Definition is the LLB definition structure with per-vertex metadata entries +// Corresponds to the Definition structure defined in solver/pb.Definition. +type Definition struct { + Def [][]byte + Metadata map[digest.Digest]pb.OpMetadata +} + +func (def *Definition) ToPB() *pb.Definition { + md := make(map[digest.Digest]pb.OpMetadata) + for k, v := range def.Metadata { + md[k] = v + } + return &pb.Definition{ + Def: def.Def, + Metadata: md, + } +} + +func (def *Definition) FromPB(x *pb.Definition) { + def.Def = x.Def + def.Metadata = make(map[digest.Digest]pb.OpMetadata) + for k, v := range x.Metadata { + def.Metadata[k] = v + } +} + +func WriteTo(def *Definition, w io.Writer) error { + b, err := def.ToPB().Marshal() + if err != nil { + return err + } + _, err = w.Write(b) + return err +} + +func ReadFrom(r io.Reader) (*Definition, error) { + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, err + } + var pbDef pb.Definition + if err := pbDef.Unmarshal(b); err != nil { + return nil, err + } + var def Definition + def.FromPB(&pbDef) + return &def, nil +} + +func MarshalConstraints(base, override *Constraints) (*pb.Op, *pb.OpMetadata) { + c := *base + c.WorkerConstraints = append([]string{}, c.WorkerConstraints...) 
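+ // the worker-constraint slice is copied above so that appending the
+ // override's constraints below never mutates the caller's base Constraints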
+ + if p := override.Platform; p != nil { + c.Platform = p + } + + for _, wc := range override.WorkerConstraints { + c.WorkerConstraints = append(c.WorkerConstraints, wc) + } + + c.Metadata = mergeMetadata(c.Metadata, override.Metadata) + + if c.Platform == nil { + defaultPlatform := platforms.Normalize(platforms.DefaultSpec()) + c.Platform = &defaultPlatform + } + + return &pb.Op{ + Platform: &pb.Platform{ + OS: c.Platform.OS, + Architecture: c.Platform.Architecture, + Variant: c.Platform.Variant, + OSVersion: c.Platform.OSVersion, + OSFeatures: c.Platform.OSFeatures, + }, + Constraints: &pb.WorkerConstraints{ + Filter: c.WorkerConstraints, + }, + }, &c.Metadata +} + +type MarshalCache struct { + digest digest.Digest + dt []byte + md *pb.OpMetadata + constraints *Constraints +} + +func (mc *MarshalCache) Cached(c *Constraints) bool { + return mc.dt != nil && mc.constraints == c +} +func (mc *MarshalCache) Load() (digest.Digest, []byte, *pb.OpMetadata, error) { + return mc.digest, mc.dt, mc.md, nil +} +func (mc *MarshalCache) Store(dt []byte, md *pb.OpMetadata, c *Constraints) { + mc.digest = digest.FromBytes(dt) + mc.dt = dt + mc.md = md + mc.constraints = c +} diff --git a/vendor/github.com/moby/buildkit/client/llb/meta.go b/vendor/github.com/moby/buildkit/client/llb/meta.go new file mode 100644 index 00000000..54b14c4c --- /dev/null +++ b/vendor/github.com/moby/buildkit/client/llb/meta.go @@ -0,0 +1,233 @@ +package llb + +import ( + "fmt" + "net" + "path" + + "github.com/containerd/containerd/platforms" + "github.com/google/shlex" + "github.com/moby/buildkit/solver/pb" + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +type contextKeyT string + +var ( + keyArgs = contextKeyT("llb.exec.args") + keyDir = contextKeyT("llb.exec.dir") + keyEnv = contextKeyT("llb.exec.env") + keyUser = contextKeyT("llb.exec.user") + keyExtraHost = contextKeyT("llb.exec.extrahost") + keyPlatform = contextKeyT("llb.platform") + keyNetwork = contextKeyT("llb.network") + keySecurity = contextKeyT("llb.security") +) + +func addEnvf(key, value string, replace bool, v ...interface{}) StateOption { + if replace { + value = fmt.Sprintf(value, v...) + } + return func(s State) State { + return s.WithValue(keyEnv, getEnv(s).AddOrReplace(key, value)) + } +} + +func dir(str string) StateOption { + return dirf(str, false) +} + +func dirf(value string, replace bool, v ...interface{}) StateOption { + if replace { + value = fmt.Sprintf(value, v...) 
+ } + return func(s State) State { + if !path.IsAbs(value) { + prev := getDir(s) + if prev == "" { + prev = "/" + } + value = path.Join(prev, value) + } + return s.WithValue(keyDir, value) + } +} + +func user(str string) StateOption { + return func(s State) State { + return s.WithValue(keyUser, str) + } +} + +func reset(s_ State) StateOption { + return func(s State) State { + s = NewState(s.Output()) + s.ctx = s_.ctx + return s + } +} + +func getEnv(s State) EnvList { + v := s.Value(keyEnv) + if v != nil { + return v.(EnvList) + } + return EnvList{} +} + +func getDir(s State) string { + v := s.Value(keyDir) + if v != nil { + return v.(string) + } + return "" +} + +func getArgs(s State) []string { + v := s.Value(keyArgs) + if v != nil { + return v.([]string) + } + return nil +} + +func getUser(s State) string { + v := s.Value(keyUser) + if v != nil { + return v.(string) + } + return "" +} + +func args(args ...string) StateOption { + return func(s State) State { + return s.WithValue(keyArgs, args) + } +} + +func shlexf(str string, replace bool, v ...interface{}) StateOption { + if replace { + str = fmt.Sprintf(str, v...) + } + return func(s State) State { + arg, err := shlex.Split(str) + if err != nil { + // TODO: handle error + } + return args(arg...)(s) + } +} + +func platform(p specs.Platform) StateOption { + return func(s State) State { + return s.WithValue(keyPlatform, platforms.Normalize(p)) + } +} + +func getPlatform(s State) *specs.Platform { + v := s.Value(keyPlatform) + if v != nil { + p := v.(specs.Platform) + return &p + } + return nil +} + +func extraHost(host string, ip net.IP) StateOption { + return func(s State) State { + return s.WithValue(keyExtraHost, append(getExtraHosts(s), HostIP{Host: host, IP: ip})) + } +} + +func getExtraHosts(s State) []HostIP { + v := s.Value(keyExtraHost) + if v != nil { + return v.([]HostIP) + } + return nil +} + +type HostIP struct { + Host string + IP net.IP +} + +func network(v pb.NetMode) StateOption { + return func(s State) State { + return s.WithValue(keyNetwork, v) + } +} +func getNetwork(s State) pb.NetMode { + v := s.Value(keyNetwork) + if v != nil { + n := v.(pb.NetMode) + return n + } + return NetModeSandbox +} + +func security(v pb.SecurityMode) StateOption { + return func(s State) State { + return s.WithValue(keySecurity, v) + } +} +func getSecurity(s State) pb.SecurityMode { + v := s.Value(keySecurity) + if v != nil { + n := v.(pb.SecurityMode) + return n + } + return SecurityModeSandbox +} + +type EnvList []KeyValue + +type KeyValue struct { + key string + value string +} + +func (e EnvList) AddOrReplace(k, v string) EnvList { + e = e.Delete(k) + e = append(e, KeyValue{key: k, value: v}) + return e +} + +func (e EnvList) SetDefault(k, v string) EnvList { + if _, ok := e.Get(k); !ok { + e = append(e, KeyValue{key: k, value: v}) + } + return e +} + +func (e EnvList) Delete(k string) EnvList { + e = append([]KeyValue(nil), e...) + if i, ok := e.Index(k); ok { + return append(e[:i], e[i+1:]...) 
+ } + return e +} + +func (e EnvList) Get(k string) (string, bool) { + if index, ok := e.Index(k); ok { + return e[index].value, true + } + return "", false +} + +func (e EnvList) Index(k string) (int, bool) { + for i, kv := range e { + if kv.key == k { + return i, true + } + } + return -1, false +} + +func (e EnvList) ToArray() []string { + out := make([]string, 0, len(e)) + for _, kv := range e { + out = append(out, kv.key+"="+kv.value) + } + return out +} diff --git a/vendor/github.com/moby/buildkit/client/llb/resolver.go b/vendor/github.com/moby/buildkit/client/llb/resolver.go new file mode 100644 index 00000000..73d89e19 --- /dev/null +++ b/vendor/github.com/moby/buildkit/client/llb/resolver.go @@ -0,0 +1,26 @@ +package llb + +import ( + "context" + + digest "github.com/opencontainers/go-digest" + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +// WithMetaResolver adds a metadata resolver to an image +func WithMetaResolver(mr ImageMetaResolver) ImageOption { + return imageOptionFunc(func(ii *ImageInfo) { + ii.metaResolver = mr + }) +} + +// ImageMetaResolver can resolve image config metadata from a reference +type ImageMetaResolver interface { + ResolveImageConfig(ctx context.Context, ref string, opt ResolveImageConfigOpt) (digest.Digest, []byte, error) +} + +type ResolveImageConfigOpt struct { + Platform *specs.Platform + ResolveMode string + LogName string +} diff --git a/vendor/github.com/moby/buildkit/client/llb/source.go b/vendor/github.com/moby/buildkit/client/llb/source.go new file mode 100644 index 00000000..662966e3 --- /dev/null +++ b/vendor/github.com/moby/buildkit/client/llb/source.go @@ -0,0 +1,428 @@ +package llb + +import ( + "context" + _ "crypto/sha256" + "encoding/json" + "os" + "strconv" + "strings" + + "github.com/docker/distribution/reference" + "github.com/moby/buildkit/solver/pb" + "github.com/moby/buildkit/util/apicaps" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +type SourceOp struct { + MarshalCache + id string + attrs map[string]string + output Output + constraints Constraints + err error +} + +func NewSource(id string, attrs map[string]string, c Constraints) *SourceOp { + s := &SourceOp{ + id: id, + attrs: attrs, + constraints: c, + } + s.output = &output{vertex: s, platform: c.Platform} + return s +} + +func (s *SourceOp) Validate() error { + if s.err != nil { + return s.err + } + if s.id == "" { + return errors.Errorf("source identifier can't be empty") + } + return nil +} + +func (s *SourceOp) Marshal(constraints *Constraints) (digest.Digest, []byte, *pb.OpMetadata, error) { + if s.Cached(constraints) { + return s.Load() + } + if err := s.Validate(); err != nil { + return "", nil, nil, err + } + + if strings.HasPrefix(s.id, "local://") { + if _, hasSession := s.attrs[pb.AttrLocalSessionID]; !hasSession { + uid := s.constraints.LocalUniqueID + if uid == "" { + uid = constraints.LocalUniqueID + } + s.attrs[pb.AttrLocalUniqueID] = uid + addCap(&s.constraints, pb.CapSourceLocalUnique) + } + } + proto, md := MarshalConstraints(constraints, &s.constraints) + + proto.Op = &pb.Op_Source{ + Source: &pb.SourceOp{Identifier: s.id, Attrs: s.attrs}, + } + + if !platformSpecificSource(s.id) { + proto.Platform = nil + } + + dt, err := proto.Marshal() + if err != nil { + return "", nil, nil, err + } + + s.Store(dt, md, constraints) + return s.Load() +} + +func (s *SourceOp) Output() Output { + return s.output +} + +func (s *SourceOp) Inputs() []Output { + return nil +} + +func Image(ref string, opts ...ImageOption) State 
{ + r, err := reference.ParseNormalizedNamed(ref) + if err == nil { + ref = reference.TagNameOnly(r).String() + } + var info ImageInfo + for _, opt := range opts { + opt.SetImageOption(&info) + } + + addCap(&info.Constraints, pb.CapSourceImage) + + attrs := map[string]string{} + if info.resolveMode != 0 { + attrs[pb.AttrImageResolveMode] = info.resolveMode.String() + if info.resolveMode == ResolveModeForcePull { + addCap(&info.Constraints, pb.CapSourceImageResolveMode) // only require cap for security enforced mode + } + } + + if info.RecordType != "" { + attrs[pb.AttrImageRecordType] = info.RecordType + } + + src := NewSource("docker-image://"+ref, attrs, info.Constraints) // controversial + if err != nil { + src.err = err + } + if info.metaResolver != nil { + _, dt, err := info.metaResolver.ResolveImageConfig(context.TODO(), ref, ResolveImageConfigOpt{ + Platform: info.Constraints.Platform, + ResolveMode: info.resolveMode.String(), + }) + if err != nil { + src.err = err + } else { + st, err := NewState(src.Output()).WithImageConfig(dt) + if err == nil { + return st + } + src.err = err + } + } + return NewState(src.Output()) +} + +type ImageOption interface { + SetImageOption(*ImageInfo) +} + +type imageOptionFunc func(*ImageInfo) + +func (fn imageOptionFunc) SetImageOption(ii *ImageInfo) { + fn(ii) +} + +var MarkImageInternal = imageOptionFunc(func(ii *ImageInfo) { + ii.RecordType = "internal" +}) + +type ResolveMode int + +const ( + ResolveModeDefault ResolveMode = iota + ResolveModeForcePull + ResolveModePreferLocal +) + +func (r ResolveMode) SetImageOption(ii *ImageInfo) { + ii.resolveMode = r +} + +func (r ResolveMode) String() string { + switch r { + case ResolveModeDefault: + return pb.AttrImageResolveModeDefault + case ResolveModeForcePull: + return pb.AttrImageResolveModeForcePull + case ResolveModePreferLocal: + return pb.AttrImageResolveModePreferLocal + default: + return "" + } +} + +type ImageInfo struct { + constraintsWrapper + metaResolver ImageMetaResolver + resolveMode ResolveMode + RecordType string +} + +func Git(remote, ref string, opts ...GitOption) State { + url := "" + + for _, prefix := range []string{ + "http://", "https://", "git://", "git@", + } { + if strings.HasPrefix(remote, prefix) { + url = strings.Split(remote, "#")[0] + remote = strings.TrimPrefix(remote, prefix) + } + } + + id := remote + + if ref != "" { + id += "#" + ref + } + + gi := &GitInfo{} + for _, o := range opts { + o.SetGitOption(gi) + } + attrs := map[string]string{} + if gi.KeepGitDir { + attrs[pb.AttrKeepGitDir] = "true" + addCap(&gi.Constraints, pb.CapSourceGitKeepDir) + } + if url != "" { + attrs[pb.AttrFullRemoteURL] = url + addCap(&gi.Constraints, pb.CapSourceGitFullURL) + } + + addCap(&gi.Constraints, pb.CapSourceGit) + + source := NewSource("git://"+id, attrs, gi.Constraints) + return NewState(source.Output()) +} + +type GitOption interface { + SetGitOption(*GitInfo) +} +type gitOptionFunc func(*GitInfo) + +func (fn gitOptionFunc) SetGitOption(gi *GitInfo) { + fn(gi) +} + +type GitInfo struct { + constraintsWrapper + KeepGitDir bool +} + +func KeepGitDir() GitOption { + return gitOptionFunc(func(gi *GitInfo) { + gi.KeepGitDir = true + }) +} + +func Scratch() State { + return NewState(nil) +} + +func Local(name string, opts ...LocalOption) State { + gi := &LocalInfo{} + + for _, o := range opts { + o.SetLocalOption(gi) + } + attrs := map[string]string{} + if gi.SessionID != "" { + attrs[pb.AttrLocalSessionID] = gi.SessionID + addCap(&gi.Constraints, pb.CapSourceLocalSessionID) + } + if 
gi.IncludePatterns != "" { + attrs[pb.AttrIncludePatterns] = gi.IncludePatterns + addCap(&gi.Constraints, pb.CapSourceLocalIncludePatterns) + } + if gi.FollowPaths != "" { + attrs[pb.AttrFollowPaths] = gi.FollowPaths + addCap(&gi.Constraints, pb.CapSourceLocalFollowPaths) + } + if gi.ExcludePatterns != "" { + attrs[pb.AttrExcludePatterns] = gi.ExcludePatterns + addCap(&gi.Constraints, pb.CapSourceLocalExcludePatterns) + } + if gi.SharedKeyHint != "" { + attrs[pb.AttrSharedKeyHint] = gi.SharedKeyHint + addCap(&gi.Constraints, pb.CapSourceLocalSharedKeyHint) + } + + addCap(&gi.Constraints, pb.CapSourceLocal) + + source := NewSource("local://"+name, attrs, gi.Constraints) + return NewState(source.Output()) +} + +type LocalOption interface { + SetLocalOption(*LocalInfo) +} + +type localOptionFunc func(*LocalInfo) + +func (fn localOptionFunc) SetLocalOption(li *LocalInfo) { + fn(li) +} + +func SessionID(id string) LocalOption { + return localOptionFunc(func(li *LocalInfo) { + li.SessionID = id + }) +} + +func IncludePatterns(p []string) LocalOption { + return localOptionFunc(func(li *LocalInfo) { + if len(p) == 0 { + li.IncludePatterns = "" + return + } + dt, _ := json.Marshal(p) // empty on error + li.IncludePatterns = string(dt) + }) +} + +func FollowPaths(p []string) LocalOption { + return localOptionFunc(func(li *LocalInfo) { + if len(p) == 0 { + li.FollowPaths = "" + return + } + dt, _ := json.Marshal(p) // empty on error + li.FollowPaths = string(dt) + }) +} + +func ExcludePatterns(p []string) LocalOption { + return localOptionFunc(func(li *LocalInfo) { + if len(p) == 0 { + li.ExcludePatterns = "" + return + } + dt, _ := json.Marshal(p) // empty on error + li.ExcludePatterns = string(dt) + }) +} + +func SharedKeyHint(h string) LocalOption { + return localOptionFunc(func(li *LocalInfo) { + li.SharedKeyHint = h + }) +} + +type LocalInfo struct { + constraintsWrapper + SessionID string + IncludePatterns string + ExcludePatterns string + FollowPaths string + SharedKeyHint string +} + +func HTTP(url string, opts ...HTTPOption) State { + hi := &HTTPInfo{} + for _, o := range opts { + o.SetHTTPOption(hi) + } + attrs := map[string]string{} + if hi.Checksum != "" { + attrs[pb.AttrHTTPChecksum] = hi.Checksum.String() + addCap(&hi.Constraints, pb.CapSourceHTTPChecksum) + } + if hi.Filename != "" { + attrs[pb.AttrHTTPFilename] = hi.Filename + } + if hi.Perm != 0 { + attrs[pb.AttrHTTPPerm] = "0" + strconv.FormatInt(int64(hi.Perm), 8) + addCap(&hi.Constraints, pb.CapSourceHTTPPerm) + } + if hi.UID != 0 { + attrs[pb.AttrHTTPUID] = strconv.Itoa(hi.UID) + addCap(&hi.Constraints, pb.CapSourceHTTPUIDGID) + } + if hi.GID != 0 { + attrs[pb.AttrHTTPGID] = strconv.Itoa(hi.GID) + addCap(&hi.Constraints, pb.CapSourceHTTPUIDGID) + } + + addCap(&hi.Constraints, pb.CapSourceHTTP) + source := NewSource(url, attrs, hi.Constraints) + return NewState(source.Output()) +} + +type HTTPInfo struct { + constraintsWrapper + Checksum digest.Digest + Filename string + Perm int + UID int + GID int +} + +type HTTPOption interface { + SetHTTPOption(*HTTPInfo) +} + +type httpOptionFunc func(*HTTPInfo) + +func (fn httpOptionFunc) SetHTTPOption(hi *HTTPInfo) { + fn(hi) +} + +func Checksum(dgst digest.Digest) HTTPOption { + return httpOptionFunc(func(hi *HTTPInfo) { + hi.Checksum = dgst + }) +} + +func Chmod(perm os.FileMode) HTTPOption { + return httpOptionFunc(func(hi *HTTPInfo) { + hi.Perm = int(perm) & 0777 + }) +} + +func Filename(name string) HTTPOption { + return httpOptionFunc(func(hi *HTTPInfo) { + hi.Filename = name + }) +} 
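+
+// An illustrative sketch, not part of upstream buildkit (the URL and digest
+// are placeholders): the HTTP source options above combine like
+//
+//	st := HTTP("https://example.com/app.tar.gz",
+//		Filename("app.tar.gz"),
+//		Chmod(0644),
+//		Checksum("sha256:<digest>"))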
+ +func Chown(uid, gid int) HTTPOption { + return httpOptionFunc(func(hi *HTTPInfo) { + hi.UID = uid + hi.GID = gid + }) +} + +func platformSpecificSource(id string) bool { + return strings.HasPrefix(id, "docker-image://") +} + +func addCap(c *Constraints, id apicaps.CapID) { + if c.Metadata.Caps == nil { + c.Metadata.Caps = make(map[apicaps.CapID]bool) + } + c.Metadata.Caps[id] = true +} diff --git a/vendor/github.com/moby/buildkit/client/llb/state.go b/vendor/github.com/moby/buildkit/client/llb/state.go new file mode 100644 index 00000000..ba8845e0 --- /dev/null +++ b/vendor/github.com/moby/buildkit/client/llb/state.go @@ -0,0 +1,515 @@ +package llb + +import ( + "context" + "encoding/json" + "fmt" + "net" + "strings" + + "github.com/containerd/containerd/platforms" + "github.com/moby/buildkit/identity" + "github.com/moby/buildkit/solver/pb" + "github.com/moby/buildkit/util/apicaps" + digest "github.com/opencontainers/go-digest" + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +type StateOption func(State) State + +type Output interface { + ToInput(*Constraints) (*pb.Input, error) + Vertex() Vertex +} + +type Vertex interface { + Validate() error + Marshal(*Constraints) (digest.Digest, []byte, *pb.OpMetadata, error) + Output() Output + Inputs() []Output +} + +func NewState(o Output) State { + s := State{ + out: o, + ctx: context.Background(), + } + s = dir("/")(s) + s = s.ensurePlatform() + return s +} + +type State struct { + out Output + ctx context.Context + opts []ConstraintsOpt +} + +func (s State) ensurePlatform() State { + if o, ok := s.out.(interface { + Platform() *specs.Platform + }); ok { + if p := o.Platform(); p != nil { + s = platform(*p)(s) + } + } + return s +} + +func (s State) WithValue(k, v interface{}) State { + return State{ + out: s.out, + ctx: context.WithValue(s.ctx, k, v), + } +} + +func (s State) Value(k interface{}) interface{} { + return s.ctx.Value(k) +} + +func (s State) SetMarshalDefaults(co ...ConstraintsOpt) State { + s.opts = co + return s +} + +func (s State) Marshal(co ...ConstraintsOpt) (*Definition, error) { + def := &Definition{ + Metadata: make(map[digest.Digest]pb.OpMetadata, 0), + } + if s.Output() == nil { + return def, nil + } + + defaultPlatform := platforms.Normalize(platforms.DefaultSpec()) + c := &Constraints{ + Platform: &defaultPlatform, + LocalUniqueID: identity.NewID(), + } + for _, o := range append(s.opts, co...) 
{ + o.SetConstraintsOption(c) + } + + def, err := marshal(s.Output().Vertex(), def, map[digest.Digest]struct{}{}, map[Vertex]struct{}{}, c) + if err != nil { + return def, err + } + inp, err := s.Output().ToInput(c) + if err != nil { + return def, err + } + proto := &pb.Op{Inputs: []*pb.Input{inp}} + dt, err := proto.Marshal() + if err != nil { + return def, err + } + def.Def = append(def.Def, dt) + + dgst := digest.FromBytes(dt) + md := def.Metadata[dgst] + md.Caps = map[apicaps.CapID]bool{ + pb.CapConstraints: true, + pb.CapPlatform: true, + } + + for _, m := range def.Metadata { + if m.IgnoreCache { + md.Caps[pb.CapMetaIgnoreCache] = true + } + if m.Description != nil { + md.Caps[pb.CapMetaDescription] = true + } + if m.ExportCache != nil { + md.Caps[pb.CapMetaExportCache] = true + } + } + + def.Metadata[dgst] = md + + return def, nil +} + +func marshal(v Vertex, def *Definition, cache map[digest.Digest]struct{}, vertexCache map[Vertex]struct{}, c *Constraints) (*Definition, error) { + if _, ok := vertexCache[v]; ok { + return def, nil + } + for _, inp := range v.Inputs() { + var err error + def, err = marshal(inp.Vertex(), def, cache, vertexCache, c) + if err != nil { + return def, err + } + } + + dgst, dt, opMeta, err := v.Marshal(c) + if err != nil { + return def, err + } + vertexCache[v] = struct{}{} + if opMeta != nil { + def.Metadata[dgst] = mergeMetadata(def.Metadata[dgst], *opMeta) + } + if _, ok := cache[dgst]; ok { + return def, nil + } + def.Def = append(def.Def, dt) + cache[dgst] = struct{}{} + return def, nil +} + +func (s State) Validate() error { + return s.Output().Vertex().Validate() +} + +func (s State) Output() Output { + return s.out +} + +func (s State) WithOutput(o Output) State { + s = State{ + out: o, + ctx: s.ctx, + } + s = s.ensurePlatform() + return s +} + +func (s State) WithImageConfig(c []byte) (State, error) { + var img struct { + Config struct { + Env []string `json:"Env,omitempty"` + WorkingDir string `json:"WorkingDir,omitempty"` + User string `json:"User,omitempty"` + } `json:"config,omitempty"` + } + if err := json.Unmarshal(c, &img); err != nil { + return State{}, err + } + for _, env := range img.Config.Env { + parts := strings.SplitN(env, "=", 2) + if len(parts[0]) > 0 { + var v string + if len(parts) > 1 { + v = parts[1] + } + s = s.AddEnv(parts[0], v) + } + } + s = s.Dir(img.Config.WorkingDir) + return s, nil +} + +func (s State) Run(ro ...RunOption) ExecState { + ei := &ExecInfo{State: s} + if p := s.GetPlatform(); p != nil { + ei.Constraints.Platform = p + } + for _, o := range ro { + o.SetRunOption(ei) + } + meta := Meta{ + Args: getArgs(ei.State), + Cwd: getDir(ei.State), + Env: getEnv(ei.State), + User: getUser(ei.State), + ProxyEnv: ei.ProxyEnv, + ExtraHosts: getExtraHosts(ei.State), + Network: getNetwork(ei.State), + Security: getSecurity(ei.State), + } + + exec := NewExecOp(s.Output(), meta, ei.ReadonlyRootFS, ei.Constraints) + for _, m := range ei.Mounts { + exec.AddMount(m.Target, m.Source, m.Opts...) 
+ } + exec.secrets = ei.Secrets + exec.ssh = ei.SSH + + return ExecState{ + State: s.WithOutput(exec.Output()), + exec: exec, + } +} + +func (s State) File(a *FileAction, opts ...ConstraintsOpt) State { + var c Constraints + for _, o := range opts { + o.SetConstraintsOption(&c) + } + + return s.WithOutput(NewFileOp(s, a, c).Output()) +} + +func (s State) AddEnv(key, value string) State { + return addEnvf(key, value, false)(s) +} + +func (s State) AddEnvf(key, value string, v ...interface{}) State { + return addEnvf(key, value, true, v...)(s) +} + +func (s State) Dir(str string) State { + return dirf(str, false)(s) +} +func (s State) Dirf(str string, v ...interface{}) State { + return dirf(str, true, v...)(s) +} + +func (s State) GetEnv(key string) (string, bool) { + return getEnv(s).Get(key) +} + +func (s State) Env() []string { + return getEnv(s).ToArray() +} + +func (s State) GetDir() string { + return getDir(s) +} + +func (s State) GetArgs() []string { + return getArgs(s) +} + +func (s State) Reset(s2 State) State { + return reset(s2)(s) +} + +func (s State) User(v string) State { + return user(v)(s) +} + +func (s State) Platform(p specs.Platform) State { + return platform(p)(s) +} + +func (s State) GetPlatform() *specs.Platform { + return getPlatform(s) +} + +func (s State) Network(n pb.NetMode) State { + return network(n)(s) +} + +func (s State) GetNetwork() pb.NetMode { + return getNetwork(s) +} +func (s State) Security(n pb.SecurityMode) State { + return security(n)(s) +} + +func (s State) GetSecurity() pb.SecurityMode { + return getSecurity(s) +} + +func (s State) With(so ...StateOption) State { + for _, o := range so { + s = o(s) + } + return s +} + +func (s State) AddExtraHost(host string, ip net.IP) State { + return extraHost(host, ip)(s) +} + +func (s State) isFileOpCopyInput() {} + +type output struct { + vertex Vertex + getIndex func() (pb.OutputIndex, error) + err error + platform *specs.Platform +} + +func (o *output) ToInput(c *Constraints) (*pb.Input, error) { + if o.err != nil { + return nil, o.err + } + var index pb.OutputIndex + if o.getIndex != nil { + var err error + index, err = o.getIndex() + if err != nil { + return nil, err + } + } + dgst, _, _, err := o.vertex.Marshal(c) + if err != nil { + return nil, err + } + return &pb.Input{Digest: dgst, Index: index}, nil +} + +func (o *output) Vertex() Vertex { + return o.vertex +} + +func (o *output) Platform() *specs.Platform { + return o.platform +} + +type ConstraintsOpt interface { + SetConstraintsOption(*Constraints) + RunOption + LocalOption + HTTPOption + ImageOption + GitOption +} + +type constraintsOptFunc func(m *Constraints) + +func (fn constraintsOptFunc) SetConstraintsOption(m *Constraints) { + fn(m) +} + +func (fn constraintsOptFunc) SetRunOption(ei *ExecInfo) { + ei.applyConstraints(fn) +} + +func (fn constraintsOptFunc) SetLocalOption(li *LocalInfo) { + li.applyConstraints(fn) +} + +func (fn constraintsOptFunc) SetHTTPOption(hi *HTTPInfo) { + hi.applyConstraints(fn) +} + +func (fn constraintsOptFunc) SetImageOption(ii *ImageInfo) { + ii.applyConstraints(fn) +} + +func (fn constraintsOptFunc) SetGitOption(gi *GitInfo) { + gi.applyConstraints(fn) +} + +func mergeMetadata(m1, m2 pb.OpMetadata) pb.OpMetadata { + if m2.IgnoreCache { + m1.IgnoreCache = true + } + if len(m2.Description) > 0 { + if m1.Description == nil { + m1.Description = make(map[string]string) + } + for k, v := range m2.Description { + m1.Description[k] = v + } + } + if m2.ExportCache != nil { + m1.ExportCache = m2.ExportCache + } + + for k 
:= range m2.Caps { + if m1.Caps == nil { + m1.Caps = make(map[apicaps.CapID]bool, len(m2.Caps)) + } + m1.Caps[k] = true + } + + return m1 +} + +var IgnoreCache = constraintsOptFunc(func(c *Constraints) { + c.Metadata.IgnoreCache = true +}) + +func WithDescription(m map[string]string) ConstraintsOpt { + return constraintsOptFunc(func(c *Constraints) { + if c.Metadata.Description == nil { + c.Metadata.Description = map[string]string{} + } + for k, v := range m { + c.Metadata.Description[k] = v + } + }) +} + +func WithCustomName(name string) ConstraintsOpt { + return WithDescription(map[string]string{ + "llb.customname": name, + }) +} + +func WithCustomNamef(name string, a ...interface{}) ConstraintsOpt { + return WithCustomName(fmt.Sprintf(name, a...)) +} + +// WithExportCache forces results for this vertex to be exported with the cache +func WithExportCache() ConstraintsOpt { + return constraintsOptFunc(func(c *Constraints) { + c.Metadata.ExportCache = &pb.ExportCache{Value: true} + }) +} + +// WithoutExportCache sets results for this vertex to be not exported with +// the cache +func WithoutExportCache() ConstraintsOpt { + return constraintsOptFunc(func(c *Constraints) { + // ExportCache with value false means to disable exporting + c.Metadata.ExportCache = &pb.ExportCache{Value: false} + }) +} + +// WithoutDefaultExportCache resets the cache export for the vertex to use +// the default defined by the build configuration. +func WithoutDefaultExportCache() ConstraintsOpt { + return constraintsOptFunc(func(c *Constraints) { + // nil means no vertex based config has been set + c.Metadata.ExportCache = nil + }) +} + +// WithCaps exposes supported LLB caps to the marshaler +func WithCaps(caps apicaps.CapSet) ConstraintsOpt { + return constraintsOptFunc(func(c *Constraints) { + c.Caps = &caps + }) +} + +type constraintsWrapper struct { + Constraints +} + +func (cw *constraintsWrapper) applyConstraints(f func(c *Constraints)) { + f(&cw.Constraints) +} + +type Constraints struct { + Platform *specs.Platform + WorkerConstraints []string + Metadata pb.OpMetadata + LocalUniqueID string + Caps *apicaps.CapSet +} + +func Platform(p specs.Platform) ConstraintsOpt { + return constraintsOptFunc(func(c *Constraints) { + c.Platform = &p + }) +} + +func LocalUniqueID(v string) ConstraintsOpt { + return constraintsOptFunc(func(c *Constraints) { + c.LocalUniqueID = v + }) +} + +var ( + LinuxAmd64 = Platform(specs.Platform{OS: "linux", Architecture: "amd64"}) + LinuxArmhf = Platform(specs.Platform{OS: "linux", Architecture: "arm", Variant: "v7"}) + LinuxArm = LinuxArmhf + LinuxArmel = Platform(specs.Platform{OS: "linux", Architecture: "arm", Variant: "v6"}) + LinuxArm64 = Platform(specs.Platform{OS: "linux", Architecture: "arm64"}) + LinuxS390x = Platform(specs.Platform{OS: "linux", Architecture: "s390x"}) + LinuxPpc64le = Platform(specs.Platform{OS: "linux", Architecture: "ppc64le"}) + Darwin = Platform(specs.Platform{OS: "darwin", Architecture: "amd64"}) + Windows = Platform(specs.Platform{OS: "windows", Architecture: "amd64"}) +) + +func Require(filters ...string) ConstraintsOpt { + return constraintsOptFunc(func(c *Constraints) { + for _, f := range filters { + c.WorkerConstraints = append(c.WorkerConstraints, f) + } + }) +} diff --git a/vendor/github.com/moby/buildkit/client/ociindex/ociindex.go b/vendor/github.com/moby/buildkit/client/ociindex/ociindex.go new file mode 100644 index 00000000..13f1d507 --- /dev/null +++ b/vendor/github.com/moby/buildkit/client/ociindex/ociindex.go @@ -0,0 +1,113 @@ +package 
ociindex + +import ( + "encoding/json" + "io/ioutil" + "os" + + "github.com/gofrs/flock" + "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +const ( + // IndexJSONLockFileSuffix is the suffix of the lock file + IndexJSONLockFileSuffix = ".lock" +) + +// PutDescToIndex puts desc to index with tag. +// Existing manifests with the same tag will be removed from the index. +func PutDescToIndex(index *v1.Index, desc v1.Descriptor, tag string) error { + if index == nil { + index = &v1.Index{} + } + if index.SchemaVersion == 0 { + index.SchemaVersion = 2 + } + if tag != "" { + if desc.Annotations == nil { + desc.Annotations = make(map[string]string) + } + desc.Annotations[v1.AnnotationRefName] = tag + // remove existing manifests with the same tag + var manifests []v1.Descriptor + for _, m := range index.Manifests { + if m.Annotations[v1.AnnotationRefName] != tag { + manifests = append(manifests, m) + } + } + index.Manifests = manifests + } + index.Manifests = append(index.Manifests, desc) + return nil +} + +func PutDescToIndexJSONFileLocked(indexJSONPath string, desc v1.Descriptor, tag string) error { + lockPath := indexJSONPath + IndexJSONLockFileSuffix + lock := flock.New(lockPath) + locked, err := lock.TryLock() + if err != nil { + return errors.Wrapf(err, "could not lock %s", lockPath) + } + if !locked { + return errors.Errorf("could not lock %s", lockPath) + } + defer func() { + lock.Unlock() + os.RemoveAll(lockPath) + }() + f, err := os.OpenFile(indexJSONPath, os.O_RDWR|os.O_CREATE, 0644) + if err != nil { + return errors.Wrapf(err, "could not open %s", indexJSONPath) + } + defer f.Close() + var idx v1.Index + b, err := ioutil.ReadAll(f) + if err != nil { + return errors.Wrapf(err, "could not read %s", indexJSONPath) + } + if len(b) > 0 { + if err := json.Unmarshal(b, &idx); err != nil { + return errors.Wrapf(err, "could not unmarshal %s (%q)", indexJSONPath, string(b)) + } + } + if err = PutDescToIndex(&idx, desc, tag); err != nil { + return err + } + b, err = json.Marshal(idx) + if err != nil { + return err + } + if _, err = f.WriteAt(b, 0); err != nil { + return err + } + if err = f.Truncate(int64(len(b))); err != nil { + return err + } + return nil +} + +func ReadIndexJSONFileLocked(indexJSONPath string) (*v1.Index, error) { + lockPath := indexJSONPath + IndexJSONLockFileSuffix + lock := flock.New(lockPath) + locked, err := lock.TryRLock() + if err != nil { + return nil, errors.Wrapf(err, "could not lock %s", lockPath) + } + if !locked { + return nil, errors.Errorf("could not lock %s", lockPath) + } + defer func() { + lock.Unlock() + os.RemoveAll(lockPath) + }() + b, err := ioutil.ReadFile(indexJSONPath) + if err != nil { + return nil, errors.Wrapf(err, "could not read %s", indexJSONPath) + } + var idx v1.Index + if err := json.Unmarshal(b, &idx); err != nil { + return nil, errors.Wrapf(err, "could not unmarshal %s (%q)", indexJSONPath, string(b)) + } + return &idx, nil +} diff --git a/vendor/github.com/moby/buildkit/client/prune.go b/vendor/github.com/moby/buildkit/client/prune.go new file mode 100644 index 00000000..27fe5dd8 --- /dev/null +++ b/vendor/github.com/moby/buildkit/client/prune.go @@ -0,0 +1,83 @@ +package client + +import ( + "context" + "io" + "time" + + controlapi "github.com/moby/buildkit/api/services/control" + "github.com/pkg/errors" +) + +func (c *Client) Prune(ctx context.Context, ch chan UsageInfo, opts ...PruneOption) error { + info := &PruneInfo{} + for _, o := range opts { + o.SetPruneOption(info) + } + + req := 
&controlapi.PruneRequest{ + Filter: info.Filter, + KeepDuration: int64(info.KeepDuration), + KeepBytes: int64(info.KeepBytes), + } + if info.All { + req.All = true + } + cl, err := c.controlClient().Prune(ctx, req) + if err != nil { + return errors.Wrap(err, "failed to call prune") + } + + for { + d, err := cl.Recv() + if err != nil { + if err == io.EOF { + return nil + } + return err + } + if ch != nil { + ch <- UsageInfo{ + ID: d.ID, + Mutable: d.Mutable, + InUse: d.InUse, + Size: d.Size_, + Parent: d.Parent, + CreatedAt: d.CreatedAt, + Description: d.Description, + UsageCount: int(d.UsageCount), + LastUsedAt: d.LastUsedAt, + RecordType: UsageRecordType(d.RecordType), + Shared: d.Shared, + } + } + } +} + +type PruneOption interface { + SetPruneOption(*PruneInfo) +} + +type PruneInfo struct { + Filter []string + All bool + KeepDuration time.Duration + KeepBytes int64 +} + +type pruneOptionFunc func(*PruneInfo) + +func (f pruneOptionFunc) SetPruneOption(pi *PruneInfo) { + f(pi) +} + +var PruneAll = pruneOptionFunc(func(pi *PruneInfo) { + pi.All = true +}) + +func WithKeepOpt(duration time.Duration, bytes int64) PruneOption { + return pruneOptionFunc(func(pi *PruneInfo) { + pi.KeepDuration = duration + pi.KeepBytes = bytes + }) +} diff --git a/vendor/github.com/moby/buildkit/client/solve.go b/vendor/github.com/moby/buildkit/client/solve.go new file mode 100644 index 00000000..1df996a8 --- /dev/null +++ b/vendor/github.com/moby/buildkit/client/solve.go @@ -0,0 +1,489 @@ +package client + +import ( + "context" + "encoding/json" + "io" + "os" + "path/filepath" + "strings" + "time" + + "github.com/containerd/containerd/content" + contentlocal "github.com/containerd/containerd/content/local" + controlapi "github.com/moby/buildkit/api/services/control" + "github.com/moby/buildkit/client/llb" + "github.com/moby/buildkit/client/ociindex" + "github.com/moby/buildkit/identity" + "github.com/moby/buildkit/session" + sessioncontent "github.com/moby/buildkit/session/content" + "github.com/moby/buildkit/session/filesync" + "github.com/moby/buildkit/session/grpchijack" + "github.com/moby/buildkit/solver/pb" + "github.com/moby/buildkit/util/entitlements" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + opentracing "github.com/opentracing/opentracing-go" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + fstypes "github.com/tonistiigi/fsutil/types" + "golang.org/x/sync/errgroup" +) + +type SolveOpt struct { + Exports []ExportEntry + LocalDirs map[string]string + SharedKey string + Frontend string + FrontendAttrs map[string]string + FrontendInputs map[string]llb.State + CacheExports []CacheOptionsEntry + CacheImports []CacheOptionsEntry + Session []session.Attachable + AllowedEntitlements []entitlements.Entitlement + SharedSession *session.Session // TODO: refactor to better session syncing + SessionPreInitialized bool // TODO: refactor to better session syncing +} + +type ExportEntry struct { + Type string + Attrs map[string]string + Output func(map[string]string) (io.WriteCloser, error) // for ExporterOCI and ExporterDocker + OutputDir string // for ExporterLocal +} + +type CacheOptionsEntry struct { + Type string + Attrs map[string]string +} + +// Solve calls Solve on the controller. +// def must be nil if (and only if) opt.Frontend is set. 
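+// A minimal caller sketch (hypothetical; assumes a connected *Client and an
+// llb.State named st):
+//
+//	def, err := st.Marshal()
+//	if err != nil {
+//		return err
+//	}
+//	resp, err := c.Solve(ctx, def, SolveOpt{}, nil)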
+func (c *Client) Solve(ctx context.Context, def *llb.Definition, opt SolveOpt, statusChan chan *SolveStatus) (*SolveResponse, error) { + defer func() { + if statusChan != nil { + close(statusChan) + } + }() + + if opt.Frontend == "" && def == nil { + return nil, errors.New("invalid empty definition") + } + if opt.Frontend != "" && def != nil { + return nil, errors.Errorf("invalid definition for frontend %s", opt.Frontend) + } + + return c.solve(ctx, def, nil, opt, statusChan) +} + +type runGatewayCB func(ref string, s *session.Session) error + +func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runGatewayCB, opt SolveOpt, statusChan chan *SolveStatus) (*SolveResponse, error) { + if def != nil && runGateway != nil { + return nil, errors.New("invalid with def and cb") + } + + syncedDirs, err := prepareSyncedDirs(def, opt.LocalDirs) + if err != nil { + return nil, err + } + + ref := identity.NewID() + eg, ctx := errgroup.WithContext(ctx) + + statusContext, cancelStatus := context.WithCancel(context.Background()) + defer cancelStatus() + + if span := opentracing.SpanFromContext(ctx); span != nil { + statusContext = opentracing.ContextWithSpan(statusContext, span) + } + + s := opt.SharedSession + + if s == nil { + if opt.SessionPreInitialized { + return nil, errors.Errorf("no session provided for preinitialized option") + } + s, err = session.NewSession(statusContext, defaultSessionName(), opt.SharedKey) + if err != nil { + return nil, errors.Wrap(err, "failed to create session") + } + } + + cacheOpt, err := parseCacheOptions(opt) + if err != nil { + return nil, err + } + + var ex ExportEntry + + if !opt.SessionPreInitialized { + if len(syncedDirs) > 0 { + s.Allow(filesync.NewFSSyncProvider(syncedDirs)) + } + + for _, a := range opt.Session { + s.Allow(a) + } + + if len(opt.Exports) > 1 { + return nil, errors.New("currently only single Exports can be specified") + } + if len(opt.Exports) == 1 { + ex = opt.Exports[0] + } + + switch ex.Type { + case ExporterLocal: + if ex.Output != nil { + return nil, errors.New("output file writer is not supported by local exporter") + } + if ex.OutputDir == "" { + return nil, errors.New("output directory is required for local exporter") + } + s.Allow(filesync.NewFSSyncTargetDir(ex.OutputDir)) + case ExporterOCI, ExporterDocker, ExporterTar: + if ex.OutputDir != "" { + return nil, errors.Errorf("output directory %s is not supported by %s exporter", ex.OutputDir, ex.Type) + } + if ex.Output == nil { + return nil, errors.Errorf("output file writer is required for %s exporter", ex.Type) + } + s.Allow(filesync.NewFSSyncTarget(ex.Output)) + default: + if ex.Output != nil { + return nil, errors.Errorf("output file writer is not supported by %s exporter", ex.Type) + } + if ex.OutputDir != "" { + return nil, errors.Errorf("output directory %s is not supported by %s exporter", ex.OutputDir, ex.Type) + } + } + + if len(cacheOpt.contentStores) > 0 { + s.Allow(sessioncontent.NewAttachable(cacheOpt.contentStores)) + } + + eg.Go(func() error { + return s.Run(statusContext, grpchijack.Dialer(c.controlClient())) + }) + } + + for k, v := range cacheOpt.frontendAttrs { + opt.FrontendAttrs[k] = v + } + + solveCtx, cancelSolve := context.WithCancel(ctx) + var res *SolveResponse + eg.Go(func() error { + ctx := solveCtx + defer cancelSolve() + + defer func() { // make sure the Status ends cleanly on build errors + go func() { + <-time.After(3 * time.Second) + cancelStatus() + }() + logrus.Debugf("stopping session") + s.Close() + }() + var pbd *pb.Definition 
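+ // convert the client-side definition to its protobuf form only when one
+ // was provided; frontend-driven solves pass a nil definition instead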
+ if def != nil { + pbd = def.ToPB() + } + + frontendInputs := make(map[string]*pb.Definition) + for key, st := range opt.FrontendInputs { + def, err := st.Marshal() + if err != nil { + return err + } + frontendInputs[key] = def.ToPB() + } + + resp, err := c.controlClient().Solve(ctx, &controlapi.SolveRequest{ + Ref: ref, + Definition: pbd, + Exporter: ex.Type, + ExporterAttrs: ex.Attrs, + Session: s.ID(), + Frontend: opt.Frontend, + FrontendAttrs: opt.FrontendAttrs, + FrontendInputs: frontendInputs, + Cache: cacheOpt.options, + Entitlements: opt.AllowedEntitlements, + }) + if err != nil { + return errors.Wrap(err, "failed to solve") + } + res = &SolveResponse{ + ExporterResponse: resp.ExporterResponse, + } + return nil + }) + + if runGateway != nil { + eg.Go(func() error { + err := runGateway(ref, s) + if err == nil { + return nil + } + + // If the callback failed then the main + // `Solve` (called above) should error as + // well. However as a fallback we wait up to + // 5s for that to happen before failing this + // goroutine. + select { + case <-solveCtx.Done(): + case <-time.After(5 * time.Second): + cancelSolve() + } + + return err + }) + } + + eg.Go(func() error { + stream, err := c.controlClient().Status(statusContext, &controlapi.StatusRequest{ + Ref: ref, + }) + if err != nil { + return errors.Wrap(err, "failed to get status") + } + for { + resp, err := stream.Recv() + if err != nil { + if err == io.EOF { + return nil + } + return errors.Wrap(err, "failed to receive status") + } + s := SolveStatus{} + for _, v := range resp.Vertexes { + s.Vertexes = append(s.Vertexes, &Vertex{ + Digest: v.Digest, + Inputs: v.Inputs, + Name: v.Name, + Started: v.Started, + Completed: v.Completed, + Error: v.Error, + Cached: v.Cached, + }) + } + for _, v := range resp.Statuses { + s.Statuses = append(s.Statuses, &VertexStatus{ + ID: v.ID, + Vertex: v.Vertex, + Name: v.Name, + Total: v.Total, + Current: v.Current, + Timestamp: v.Timestamp, + Started: v.Started, + Completed: v.Completed, + }) + } + for _, v := range resp.Logs { + s.Logs = append(s.Logs, &VertexLog{ + Vertex: v.Vertex, + Stream: int(v.Stream), + Data: v.Msg, + Timestamp: v.Timestamp, + }) + } + if statusChan != nil { + statusChan <- &s + } + } + }) + + if err := eg.Wait(); err != nil { + return nil, err + } + // Update index.json of exported cache content store + // FIXME(AkihiroSuda): dedupe const definition of cache/remotecache.ExporterResponseManifestDesc = "cache.manifest" + if manifestDescJSON := res.ExporterResponse["cache.manifest"]; manifestDescJSON != "" { + var manifestDesc ocispec.Descriptor + if err = json.Unmarshal([]byte(manifestDescJSON), &manifestDesc); err != nil { + return nil, err + } + for indexJSONPath, tag := range cacheOpt.indicesToUpdate { + if err = ociindex.PutDescToIndexJSONFileLocked(indexJSONPath, manifestDesc, tag); err != nil { + return nil, err + } + } + } + return res, nil +} + +func prepareSyncedDirs(def *llb.Definition, localDirs map[string]string) ([]filesync.SyncedDir, error) { + for _, d := range localDirs { + fi, err := os.Stat(d) + if err != nil { + return nil, errors.Wrapf(err, "could not find %s", d) + } + if !fi.IsDir() { + return nil, errors.Errorf("%s not a directory", d) + } + } + resetUIDAndGID := func(p string, st *fstypes.Stat) bool { + st.Uid = 0 + st.Gid = 0 + return true + } + + dirs := make([]filesync.SyncedDir, 0, len(localDirs)) + if def == nil { + for name, d := range localDirs { + dirs = append(dirs, filesync.SyncedDir{Name: name, Dir: d, Map: resetUIDAndGID}) + } + } else { + 
for _, dt := range def.Def { + var op pb.Op + if err := (&op).Unmarshal(dt); err != nil { + return nil, errors.Wrap(err, "failed to parse llb proto op") + } + if src := op.GetSource(); src != nil { + if strings.HasPrefix(src.Identifier, "local://") { // TODO: just make a type property + name := strings.TrimPrefix(src.Identifier, "local://") + d, ok := localDirs[name] + if !ok { + return nil, errors.Errorf("local directory %s not enabled", name) + } + dirs = append(dirs, filesync.SyncedDir{Name: name, Dir: d, Map: resetUIDAndGID}) // TODO: excludes + } + } + } + } + return dirs, nil +} + +func defaultSessionName() string { + wd, err := os.Getwd() + if err != nil { + return "unknown" + } + return filepath.Base(wd) +} + +type cacheOptions struct { + options controlapi.CacheOptions + contentStores map[string]content.Store // key: ID of content store ("local:" + csDir) + indicesToUpdate map[string]string // key: index.JSON file name, value: tag + frontendAttrs map[string]string +} + +func parseCacheOptions(opt SolveOpt) (*cacheOptions, error) { + var ( + cacheExports []*controlapi.CacheOptionsEntry + cacheImports []*controlapi.CacheOptionsEntry + // legacy API is used for registry caches, because the daemon might not support the new API + legacyExportRef string + legacyImportRefs []string + ) + contentStores := make(map[string]content.Store) + indicesToUpdate := make(map[string]string) // key: index.JSON file name, value: tag + frontendAttrs := make(map[string]string) + legacyExportAttrs := make(map[string]string) + for _, ex := range opt.CacheExports { + if ex.Type == "local" { + csDir := ex.Attrs["dest"] + if csDir == "" { + return nil, errors.New("local cache exporter requires dest") + } + if err := os.MkdirAll(csDir, 0755); err != nil { + return nil, err + } + cs, err := contentlocal.NewStore(csDir) + if err != nil { + return nil, err + } + contentStores["local:"+csDir] = cs + // TODO(AkihiroSuda): support custom index JSON path and tag + indexJSONPath := filepath.Join(csDir, "index.json") + indicesToUpdate[indexJSONPath] = "latest" + } + if ex.Type == "registry" && legacyExportRef == "" { + legacyExportRef = ex.Attrs["ref"] + for k, v := range ex.Attrs { + if k != "ref" { + legacyExportAttrs[k] = v + } + } + } else { + cacheExports = append(cacheExports, &controlapi.CacheOptionsEntry{ + Type: ex.Type, + Attrs: ex.Attrs, + }) + } + } + for _, im := range opt.CacheImports { + attrs := im.Attrs + if im.Type == "local" { + csDir := im.Attrs["src"] + if csDir == "" { + return nil, errors.New("local cache importer requires src") + } + cs, err := contentlocal.NewStore(csDir) + if err != nil { + logrus.Warning("local cache import at " + csDir + " not found due to err: " + err.Error()) + continue + } + // if digest is not specified, load from "latest" tag + if attrs["digest"] == "" { + idx, err := ociindex.ReadIndexJSONFileLocked(filepath.Join(csDir, "index.json")) + if err != nil { + logrus.Warning("local cache import at " + csDir + " not found due to err: " + err.Error()) + continue + } + for _, m := range idx.Manifests { + if (m.Annotations[ocispec.AnnotationRefName] == "latest" && attrs["tag"] == "") || (attrs["tag"] != "" && m.Annotations[ocispec.AnnotationRefName] == attrs["tag"]) { + attrs["digest"] = string(m.Digest) + break + } + } + if attrs["digest"] == "" { + return nil, errors.New("local cache importer requires either explicit digest, \"latest\" tag or custom tag on index.json") + } + } + contentStores["local:"+csDir] = cs + + } + if im.Type == "registry" { + legacyImportRef := 
attrs["ref"] + legacyImportRefs = append(legacyImportRefs, legacyImportRef) + } else { + cacheImports = append(cacheImports, &controlapi.CacheOptionsEntry{ + Type: im.Type, + Attrs: attrs, + }) + } + } + if opt.Frontend != "" { + // use legacy API for registry importers, because the frontend might not support the new API + if len(legacyImportRefs) > 0 { + frontendAttrs["cache-from"] = strings.Join(legacyImportRefs, ",") + } + // use new API for other importers + if len(cacheImports) > 0 { + s, err := json.Marshal(cacheImports) + if err != nil { + return nil, err + } + frontendAttrs["cache-imports"] = string(s) + } + } + res := cacheOptions{ + options: controlapi.CacheOptions{ + // old API (for registry caches, planned to be removed in early 2019) + ExportRefDeprecated: legacyExportRef, + ExportAttrsDeprecated: legacyExportAttrs, + ImportRefsDeprecated: legacyImportRefs, + // new API + Exports: cacheExports, + Imports: cacheImports, + }, + contentStores: contentStores, + indicesToUpdate: indicesToUpdate, + frontendAttrs: frontendAttrs, + } + return &res, nil +} diff --git a/vendor/github.com/moby/buildkit/client/workers.go b/vendor/github.com/moby/buildkit/client/workers.go new file mode 100644 index 00000000..b011ee2e --- /dev/null +++ b/vendor/github.com/moby/buildkit/client/workers.go @@ -0,0 +1,70 @@ +package client + +import ( + "context" + "time" + + controlapi "github.com/moby/buildkit/api/services/control" + apitypes "github.com/moby/buildkit/api/types" + "github.com/moby/buildkit/solver/pb" + specs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +// WorkerInfo contains information about a worker +type WorkerInfo struct { + ID string + Labels map[string]string + Platforms []specs.Platform + GCPolicy []PruneInfo +} + +// ListWorkers lists all active workers +func (c *Client) ListWorkers(ctx context.Context, opts ...ListWorkersOption) ([]*WorkerInfo, error) { + info := &ListWorkersInfo{} + for _, o := range opts { + o.SetListWorkersOption(info) + } + + req := &controlapi.ListWorkersRequest{Filter: info.Filter} + resp, err := c.controlClient().ListWorkers(ctx, req) + if err != nil { + return nil, errors.Wrap(err, "failed to list workers") + } + + var wi []*WorkerInfo + + for _, w := range resp.Record { + wi = append(wi, &WorkerInfo{ + ID: w.ID, + Labels: w.Labels, + Platforms: pb.ToSpecPlatforms(w.Platforms), + GCPolicy: fromAPIGCPolicy(w.GCPolicy), + }) + } + + return wi, nil +} + +// ListWorkersOption is an option for a worker list query +type ListWorkersOption interface { + SetListWorkersOption(*ListWorkersInfo) +} + +// ListWorkersInfo is a payload for worker list query +type ListWorkersInfo struct { + Filter []string +} + +func fromAPIGCPolicy(in []*apitypes.GCPolicy) []PruneInfo { + out := make([]PruneInfo, 0, len(in)) + for _, p := range in { + out = append(out, PruneInfo{ + All: p.All, + Filter: p.Filters, + KeepDuration: time.Duration(p.KeepDuration), + KeepBytes: p.KeepBytes, + }) + } + return out +} diff --git a/vendor/github.com/moby/buildkit/cmd/buildkitd/config/config.go b/vendor/github.com/moby/buildkit/cmd/buildkitd/config/config.go new file mode 100644 index 00000000..fc2da18f --- /dev/null +++ b/vendor/github.com/moby/buildkit/cmd/buildkitd/config/config.go @@ -0,0 +1,105 @@ +package config + +// Config provides containerd configuration data for the server +type Config struct { + Debug bool `toml:"debug"` + + // Root is the path to a directory where buildkit will store persistent data + Root string `toml:"root"` + + //Entitlements 
e.g. security.insecure, network.host + Entitlements []string `toml:"insecure-entitlements"` + // GRPC configuration settings + GRPC GRPCConfig `toml:"grpc"` + + Workers struct { + OCI OCIConfig `toml:"oci"` + Containerd ContainerdConfig `toml:"containerd"` + } `toml:"worker"` + + Registries map[string]RegistryConfig `toml:"registry"` + + DNS *DNSConfig `toml:"dns"` +} + +type GRPCConfig struct { + Address []string `toml:"address"` + DebugAddress string `toml:"debugAddress"` + UID int `toml:"uid"` + GID int `toml:"gid"` + + TLS TLSConfig `toml:"tls"` + // MaxRecvMsgSize int `toml:"max_recv_message_size"` + // MaxSendMsgSize int `toml:"max_send_message_size"` +} + +type RegistryConfig struct { + Mirrors []string `toml:"mirrors"` + PlainHTTP *bool `toml:"http"` + Insecure *bool `toml:"insecure"` + RootCAs []string `toml:"ca"` + KeyPairs []TLSKeyPair `toml:"keypair"` + TLSConfigDir []string `toml:"tlsconfigdir"` +} + +type TLSKeyPair struct { + Key string `toml:"key"` + Certificate string `toml:"cert"` +} + +type TLSConfig struct { + Cert string `toml:"cert"` + Key string `toml:"key"` + CA string `toml:"ca"` +} + +type GCConfig struct { + GC *bool `toml:"gc"` + GCKeepStorage int64 `toml:"gckeepstorage"` + GCPolicy []GCPolicy `toml:"gcpolicy"` +} + +type NetworkConfig struct { + Mode string `toml:"networkMode"` + CNIConfigPath string `toml:"cniConfigPath"` + CNIBinaryPath string `toml:"cniBinaryPath"` +} + +type OCIConfig struct { + Enabled *bool `toml:"enabled"` + Labels map[string]string `toml:"labels"` + Platforms []string `toml:"platforms"` + Snapshotter string `toml:"snapshotter"` + Rootless bool `toml:"rootless"` + NoProcessSandbox bool `toml:"noProcessSandbox"` + GCConfig + NetworkConfig + // UserRemapUnsupported is unsupported key for testing. The feature is + // incomplete and the intention is to make it default without config. 
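+	// (Presumably this toggles user-namespace remapping for the OCI worker;
+	// an assumption, as the key is otherwise undocumented here.)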
+ UserRemapUnsupported string `toml:"userRemapUnsupported"` + // For use in storing the OCI worker binary name that will replace buildkit-runc + Binary string `toml:"binary"` +} + +type ContainerdConfig struct { + Address string `toml:"address"` + Enabled *bool `toml:"enabled"` + Labels map[string]string `toml:"labels"` + Platforms []string `toml:"platforms"` + Namespace string `toml:"namespace"` + GCConfig + NetworkConfig +} + +type GCPolicy struct { + All bool `toml:"all"` + KeepBytes int64 `toml:"keepBytes"` + KeepDuration int64 `toml:"keepDuration"` + Filters []string `toml:"filters"` +} + +type DNSConfig struct { + Nameservers []string `toml:"nameservers"` + Options []string `toml:"options"` + SearchDomains []string `toml:"searchDomains"` +} diff --git a/vendor/github.com/moby/buildkit/cmd/buildkitd/config/gcpolicy.go b/vendor/github.com/moby/buildkit/cmd/buildkitd/config/gcpolicy.go new file mode 100644 index 00000000..6f3f1978 --- /dev/null +++ b/vendor/github.com/moby/buildkit/cmd/buildkitd/config/gcpolicy.go @@ -0,0 +1,31 @@ +package config + +const defaultCap int64 = 2e9 // 2GB + +func DefaultGCPolicy(p string, keep int64) []GCPolicy { + if keep == 0 { + keep = DetectDefaultGCCap(p) + } + return []GCPolicy{ + // if build cache uses more than 512MB delete the most easily reproducible data after it has not been used for 2 days + { + Filters: []string{"type==source.local,type==exec.cachemount,type==source.git.checkout"}, + KeepDuration: 48 * 3600, // 48h + KeepBytes: 512 * 1e6, // 512MB + }, + // remove any data not used for 60 days + { + KeepDuration: 60 * 24 * 3600, // 60d + KeepBytes: keep, + }, + // keep the unshared build cache under cap + { + KeepBytes: keep, + }, + // if previous policies were insufficient start deleting internal data to keep build cache under cap + { + All: true, + KeepBytes: keep, + }, + } +} diff --git a/vendor/github.com/moby/buildkit/cmd/buildkitd/config/gcpolicy_unix.go b/vendor/github.com/moby/buildkit/cmd/buildkitd/config/gcpolicy_unix.go new file mode 100644 index 00000000..e022fba8 --- /dev/null +++ b/vendor/github.com/moby/buildkit/cmd/buildkitd/config/gcpolicy_unix.go @@ -0,0 +1,17 @@ +// +build !windows + +package config + +import ( + "syscall" +) + +func DetectDefaultGCCap(root string) int64 { + var st syscall.Statfs_t + if err := syscall.Statfs(root, &st); err != nil { + return defaultCap + } + diskSize := int64(st.Bsize) * int64(st.Blocks) + avail := diskSize / 10 + return (avail/(1<<30) + 1) * 1e9 // round up +} diff --git a/vendor/github.com/moby/buildkit/cmd/buildkitd/config/gcpolicy_windows.go b/vendor/github.com/moby/buildkit/cmd/buildkitd/config/gcpolicy_windows.go new file mode 100644 index 00000000..81d4b82c --- /dev/null +++ b/vendor/github.com/moby/buildkit/cmd/buildkitd/config/gcpolicy_windows.go @@ -0,0 +1,7 @@ +// +build windows + +package config + +func DetectDefaultGCCap(root string) int64 { + return defaultCap +} diff --git a/vendor/github.com/moby/buildkit/control/control.go b/vendor/github.com/moby/buildkit/control/control.go new file mode 100644 index 00000000..ff46d237 --- /dev/null +++ b/vendor/github.com/moby/buildkit/control/control.go @@ -0,0 +1,450 @@ +package control + +import ( + "context" + "sync" + "sync/atomic" + "time" + + controlapi "github.com/moby/buildkit/api/services/control" + apitypes "github.com/moby/buildkit/api/types" + "github.com/moby/buildkit/cache/remotecache" + "github.com/moby/buildkit/client" + controlgateway "github.com/moby/buildkit/control/gateway" + "github.com/moby/buildkit/exporter" + 
"github.com/moby/buildkit/frontend" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/session/grpchijack" + "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/solver/llbsolver" + "github.com/moby/buildkit/solver/pb" + "github.com/moby/buildkit/util/imageutil" + "github.com/moby/buildkit/util/throttle" + "github.com/moby/buildkit/worker" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/sync/errgroup" + "google.golang.org/grpc" +) + +type Opt struct { + SessionManager *session.Manager + WorkerController *worker.Controller + Frontends map[string]frontend.Frontend + CacheKeyStorage solver.CacheKeyStorage + ResolveCacheExporterFuncs map[string]remotecache.ResolveCacheExporterFunc + ResolveCacheImporterFuncs map[string]remotecache.ResolveCacheImporterFunc + Entitlements []string +} + +type Controller struct { // TODO: ControlService + buildCount int64 + opt Opt + solver *llbsolver.Solver + cache solver.CacheManager + gatewayForwarder *controlgateway.GatewayForwarder + throttledGC func() + gcmu sync.Mutex +} + +func NewController(opt Opt) (*Controller, error) { + cache := solver.NewCacheManager("local", opt.CacheKeyStorage, worker.NewCacheResultStorage(opt.WorkerController)) + + gatewayForwarder := controlgateway.NewGatewayForwarder() + + solver, err := llbsolver.New(opt.WorkerController, opt.Frontends, cache, opt.ResolveCacheImporterFuncs, gatewayForwarder, opt.SessionManager, opt.Entitlements) + if err != nil { + return nil, errors.Wrap(err, "failed to create solver") + } + + c := &Controller{ + opt: opt, + solver: solver, + cache: cache, + gatewayForwarder: gatewayForwarder, + } + c.throttledGC = throttle.ThrottleAfter(time.Minute, c.gc) + + defer func() { + time.AfterFunc(time.Second, c.throttledGC) + }() + + return c, nil +} + +func (c *Controller) Register(server *grpc.Server) error { + controlapi.RegisterControlServer(server, c) + c.gatewayForwarder.Register(server) + return nil +} + +func (c *Controller) DiskUsage(ctx context.Context, r *controlapi.DiskUsageRequest) (*controlapi.DiskUsageResponse, error) { + resp := &controlapi.DiskUsageResponse{} + workers, err := c.opt.WorkerController.List() + if err != nil { + return nil, err + } + for _, w := range workers { + du, err := w.DiskUsage(ctx, client.DiskUsageInfo{ + Filter: r.Filter, + }) + if err != nil { + return nil, err + } + + for _, r := range du { + resp.Record = append(resp.Record, &controlapi.UsageRecord{ + // TODO: add worker info + ID: r.ID, + Mutable: r.Mutable, + InUse: r.InUse, + Size_: r.Size, + Parent: r.Parent, + UsageCount: int64(r.UsageCount), + Description: r.Description, + CreatedAt: r.CreatedAt, + LastUsedAt: r.LastUsedAt, + RecordType: string(r.RecordType), + Shared: r.Shared, + }) + } + } + return resp, nil +} + +func (c *Controller) Prune(req *controlapi.PruneRequest, stream controlapi.Control_PruneServer) error { + if atomic.LoadInt64(&c.buildCount) == 0 { + imageutil.CancelCacheLeases() + } + + ch := make(chan client.UsageInfo) + + eg, ctx := errgroup.WithContext(stream.Context()) + workers, err := c.opt.WorkerController.List() + if err != nil { + return errors.Wrap(err, "failed to list workers for prune") + } + + didPrune := false + defer func() { + if didPrune { + if c, ok := c.cache.(interface { + ReleaseUnreferenced() error + }); ok { + if err := c.ReleaseUnreferenced(); err != nil { + logrus.Errorf("failed to release cache metadata: %+v", err) + } + } + } + }() + + for _, w := range workers { + func(w worker.Worker) { + eg.Go(func() error { + return 
w.Prune(ctx, ch, client.PruneInfo{ + Filter: req.Filter, + All: req.All, + KeepDuration: time.Duration(req.KeepDuration), + KeepBytes: req.KeepBytes, + }) + }) + }(w) + } + + eg2, _ := errgroup.WithContext(stream.Context()) + + eg2.Go(func() error { + defer close(ch) + return eg.Wait() + }) + + eg2.Go(func() error { + for r := range ch { + didPrune = true + if err := stream.Send(&controlapi.UsageRecord{ + // TODO: add worker info + ID: r.ID, + Mutable: r.Mutable, + InUse: r.InUse, + Size_: r.Size, + Parent: r.Parent, + UsageCount: int64(r.UsageCount), + Description: r.Description, + CreatedAt: r.CreatedAt, + LastUsedAt: r.LastUsedAt, + RecordType: string(r.RecordType), + Shared: r.Shared, + }); err != nil { + return err + } + } + return nil + }) + + return eg2.Wait() +} + +func translateLegacySolveRequest(req *controlapi.SolveRequest) error { + // translates ExportRef and ExportAttrs to new Exports (v0.4.0) + if legacyExportRef := req.Cache.ExportRefDeprecated; legacyExportRef != "" { + ex := &controlapi.CacheOptionsEntry{ + Type: "registry", + Attrs: req.Cache.ExportAttrsDeprecated, + } + if ex.Attrs == nil { + ex.Attrs = make(map[string]string) + } + ex.Attrs["ref"] = legacyExportRef + // FIXME(AkihiroSuda): skip append if already exists + req.Cache.Exports = append(req.Cache.Exports, ex) + req.Cache.ExportRefDeprecated = "" + req.Cache.ExportAttrsDeprecated = nil + } + // translates ImportRefs to new Imports (v0.4.0) + for _, legacyImportRef := range req.Cache.ImportRefsDeprecated { + im := &controlapi.CacheOptionsEntry{ + Type: "registry", + Attrs: map[string]string{"ref": legacyImportRef}, + } + // FIXME(AkihiroSuda): skip append if already exists + req.Cache.Imports = append(req.Cache.Imports, im) + } + req.Cache.ImportRefsDeprecated = nil + return nil +} + +func (c *Controller) Solve(ctx context.Context, req *controlapi.SolveRequest) (*controlapi.SolveResponse, error) { + atomic.AddInt64(&c.buildCount, 1) + defer atomic.AddInt64(&c.buildCount, -1) + + if err := translateLegacySolveRequest(req); err != nil { + return nil, err + } + ctx = session.NewContext(ctx, req.Session) + + defer func() { + time.AfterFunc(time.Second, c.throttledGC) + }() + + var expi exporter.ExporterInstance + // TODO: multiworker + // This is actually tricky, as the exporter should come from the worker that has the returned reference. We may need to delay this so that the solver loads this. 
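+	// Until then, the default worker resolves the exporter below.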
+ w, err := c.opt.WorkerController.GetDefault() + if err != nil { + return nil, err + } + if req.Exporter != "" { + exp, err := w.Exporter(req.Exporter, c.opt.SessionManager) + if err != nil { + return nil, err + } + expi, err = exp.Resolve(ctx, req.ExporterAttrs) + if err != nil { + return nil, err + } + } + + var ( + cacheExporter remotecache.Exporter + cacheExportMode solver.CacheExportMode + cacheImports []frontend.CacheOptionsEntry + ) + if len(req.Cache.Exports) > 1 { + // TODO(AkihiroSuda): this should be fairly easy + return nil, errors.New("specifying multiple cache exports is not supported currently") + } + + if len(req.Cache.Exports) == 1 { + e := req.Cache.Exports[0] + cacheExporterFunc, ok := c.opt.ResolveCacheExporterFuncs[e.Type] + if !ok { + return nil, errors.Errorf("unknown cache exporter: %q", e.Type) + } + cacheExporter, err = cacheExporterFunc(ctx, e.Attrs) + if err != nil { + return nil, err + } + cacheExportMode = parseCacheExportMode(e.Attrs["mode"]) + } + for _, im := range req.Cache.Imports { + cacheImports = append(cacheImports, frontend.CacheOptionsEntry{ + Type: im.Type, + Attrs: im.Attrs, + }) + } + + resp, err := c.solver.Solve(ctx, req.Ref, frontend.SolveRequest{ + Frontend: req.Frontend, + Definition: req.Definition, + FrontendOpt: req.FrontendAttrs, + FrontendInputs: req.FrontendInputs, + CacheImports: cacheImports, + }, llbsolver.ExporterRequest{ + Exporter: expi, + CacheExporter: cacheExporter, + CacheExportMode: cacheExportMode, + }, req.Entitlements) + if err != nil { + return nil, err + } + return &controlapi.SolveResponse{ + ExporterResponse: resp.ExporterResponse, + }, nil +} + +func (c *Controller) Status(req *controlapi.StatusRequest, stream controlapi.Control_StatusServer) error { + ch := make(chan *client.SolveStatus, 8) + + eg, ctx := errgroup.WithContext(stream.Context()) + eg.Go(func() error { + return c.solver.Status(ctx, req.Ref, ch) + }) + + eg.Go(func() error { + for { + ss, ok := <-ch + if !ok { + return nil + } + sr := controlapi.StatusResponse{} + for _, v := range ss.Vertexes { + sr.Vertexes = append(sr.Vertexes, &controlapi.Vertex{ + Digest: v.Digest, + Inputs: v.Inputs, + Name: v.Name, + Started: v.Started, + Completed: v.Completed, + Error: v.Error, + Cached: v.Cached, + }) + } + for _, v := range ss.Statuses { + sr.Statuses = append(sr.Statuses, &controlapi.VertexStatus{ + ID: v.ID, + Vertex: v.Vertex, + Name: v.Name, + Current: v.Current, + Total: v.Total, + Timestamp: v.Timestamp, + Started: v.Started, + Completed: v.Completed, + }) + } + for _, v := range ss.Logs { + sr.Logs = append(sr.Logs, &controlapi.VertexLog{ + Vertex: v.Vertex, + Stream: int64(v.Stream), + Msg: v.Data, + Timestamp: v.Timestamp, + }) + } + if err := stream.SendMsg(&sr); err != nil { + return err + } + } + }) + + return eg.Wait() +} + +func (c *Controller) Session(stream controlapi.Control_SessionServer) error { + logrus.Debugf("session started") + conn, closeCh, opts := grpchijack.Hijack(stream) + defer conn.Close() + + ctx, cancel := context.WithCancel(stream.Context()) + go func() { + <-closeCh + cancel() + }() + + err := c.opt.SessionManager.HandleConn(ctx, conn, opts) + logrus.Debugf("session finished: %v", err) + return err +} + +func (c *Controller) ListWorkers(ctx context.Context, r *controlapi.ListWorkersRequest) (*controlapi.ListWorkersResponse, error) { + resp := &controlapi.ListWorkersResponse{} + workers, err := c.opt.WorkerController.List(r.Filter...) 
+ if err != nil { + return nil, err + } + for _, w := range workers { + resp.Record = append(resp.Record, &apitypes.WorkerRecord{ + ID: w.ID(), + Labels: w.Labels(), + Platforms: pb.PlatformsFromSpec(w.Platforms(true)), + GCPolicy: toPBGCPolicy(w.GCPolicy()), + }) + } + return resp, nil +} + +func (c *Controller) gc() { + c.gcmu.Lock() + defer c.gcmu.Unlock() + + workers, err := c.opt.WorkerController.List() + if err != nil { + return + } + + eg, ctx := errgroup.WithContext(context.TODO()) + + var size int64 + ch := make(chan client.UsageInfo) + done := make(chan struct{}) + go func() { + for ui := range ch { + size += ui.Size + } + close(done) + }() + + for _, w := range workers { + func(w worker.Worker) { + eg.Go(func() error { + if policy := w.GCPolicy(); len(policy) > 0 { + return w.Prune(ctx, ch, policy...) + } + return nil + }) + }(w) + } + + err = eg.Wait() + close(ch) + if err != nil { + logrus.Errorf("gc error: %+v", err) + } + <-done + if size > 0 { + logrus.Debugf("gc cleaned up %d bytes", size) + } +} + +func parseCacheExportMode(mode string) solver.CacheExportMode { + switch mode { + case "min": + return solver.CacheExportModeMin + case "max": + return solver.CacheExportModeMax + case "": + default: + logrus.Debugf("skipping invalid cache export mode: %s", mode) + } + return solver.CacheExportModeMin +} + +func toPBGCPolicy(in []client.PruneInfo) []*apitypes.GCPolicy { + policy := make([]*apitypes.GCPolicy, 0, len(in)) + for _, p := range in { + policy = append(policy, &apitypes.GCPolicy{ + All: p.All, + KeepBytes: p.KeepBytes, + KeepDuration: int64(p.KeepDuration), + Filters: p.Filter, + }) + } + return policy +} diff --git a/vendor/github.com/moby/buildkit/control/gateway/gateway.go b/vendor/github.com/moby/buildkit/control/gateway/gateway.go new file mode 100644 index 00000000..2d4e8b38 --- /dev/null +++ b/vendor/github.com/moby/buildkit/control/gateway/gateway.go @@ -0,0 +1,154 @@ +package gateway + +import ( + "context" + "sync" + "time" + + "github.com/moby/buildkit/client/buildid" + "github.com/moby/buildkit/frontend/gateway" + gwapi "github.com/moby/buildkit/frontend/gateway/pb" + "github.com/pkg/errors" + "google.golang.org/grpc" +) + +type GatewayForwarder struct { + mu sync.RWMutex + updateCond *sync.Cond + builds map[string]gateway.LLBBridgeForwarder +} + +func NewGatewayForwarder() *GatewayForwarder { + gwf := &GatewayForwarder{ + builds: map[string]gateway.LLBBridgeForwarder{}, + } + gwf.updateCond = sync.NewCond(gwf.mu.RLocker()) + return gwf +} + +func (gwf *GatewayForwarder) Register(server *grpc.Server) { + gwapi.RegisterLLBBridgeServer(server, gwf) +} + +func (gwf *GatewayForwarder) RegisterBuild(ctx context.Context, id string, bridge gateway.LLBBridgeForwarder) error { + gwf.mu.Lock() + defer gwf.mu.Unlock() + + if _, ok := gwf.builds[id]; ok { + return errors.Errorf("build ID %s exists", id) + } + + gwf.builds[id] = bridge + gwf.updateCond.Broadcast() + + return nil +} + +func (gwf *GatewayForwarder) UnregisterBuild(ctx context.Context, id string) { + gwf.mu.Lock() + defer gwf.mu.Unlock() + + delete(gwf.builds, id) + gwf.updateCond.Broadcast() +} + +func (gwf *GatewayForwarder) lookupForwarder(ctx context.Context) (gateway.LLBBridgeForwarder, error) { + bid := buildid.FromIncomingContext(ctx) + if bid == "" { + return nil, errors.New("no buildid found in context") + } + + ctx, cancel := context.WithTimeout(ctx, 3*time.Second) + defer cancel() + + go func() { + <-ctx.Done() + gwf.mu.Lock() + gwf.updateCond.Broadcast() + gwf.mu.Unlock() + }() + + 
gwf.mu.RLock() + defer gwf.mu.RUnlock() + for { + select { + case <-ctx.Done(): + return nil, errors.Errorf("no such job %s", bid) + default: + } + fwd, ok := gwf.builds[bid] + if !ok { + gwf.updateCond.Wait() + continue + } + return fwd, nil + } +} + +func (gwf *GatewayForwarder) ResolveImageConfig(ctx context.Context, req *gwapi.ResolveImageConfigRequest) (*gwapi.ResolveImageConfigResponse, error) { + fwd, err := gwf.lookupForwarder(ctx) + if err != nil { + return nil, errors.Wrap(err, "forwarding ResolveImageConfig") + } + + return fwd.ResolveImageConfig(ctx, req) +} + +func (gwf *GatewayForwarder) Solve(ctx context.Context, req *gwapi.SolveRequest) (*gwapi.SolveResponse, error) { + fwd, err := gwf.lookupForwarder(ctx) + if err != nil { + return nil, errors.Wrap(err, "forwarding Solve") + } + + return fwd.Solve(ctx, req) +} + +func (gwf *GatewayForwarder) ReadFile(ctx context.Context, req *gwapi.ReadFileRequest) (*gwapi.ReadFileResponse, error) { + fwd, err := gwf.lookupForwarder(ctx) + if err != nil { + return nil, errors.Wrap(err, "forwarding ReadFile") + } + return fwd.ReadFile(ctx, req) +} + +func (gwf *GatewayForwarder) Ping(ctx context.Context, req *gwapi.PingRequest) (*gwapi.PongResponse, error) { + fwd, err := gwf.lookupForwarder(ctx) + if err != nil { + return nil, errors.Wrap(err, "forwarding Ping") + } + return fwd.Ping(ctx, req) +} + +func (gwf *GatewayForwarder) Return(ctx context.Context, req *gwapi.ReturnRequest) (*gwapi.ReturnResponse, error) { + fwd, err := gwf.lookupForwarder(ctx) + if err != nil { + return nil, errors.Wrap(err, "forwarding Return") + } + res, err := fwd.Return(ctx, req) + return res, err +} + +func (gwf *GatewayForwarder) Inputs(ctx context.Context, req *gwapi.InputsRequest) (*gwapi.InputsResponse, error) { + fwd, err := gwf.lookupForwarder(ctx) + if err != nil { + return nil, errors.Wrap(err, "forwarding Inputs") + } + res, err := fwd.Inputs(ctx, req) + return res, err +} + +func (gwf *GatewayForwarder) ReadDir(ctx context.Context, req *gwapi.ReadDirRequest) (*gwapi.ReadDirResponse, error) { + fwd, err := gwf.lookupForwarder(ctx) + if err != nil { + return nil, errors.Wrap(err, "forwarding ReadDir") + } + return fwd.ReadDir(ctx, req) +} + +func (gwf *GatewayForwarder) StatFile(ctx context.Context, req *gwapi.StatFileRequest) (*gwapi.StatFileResponse, error) { + fwd, err := gwf.lookupForwarder(ctx) + if err != nil { + return nil, errors.Wrap(err, "forwarding StatFile") + } + return fwd.StatFile(ctx, req) +} diff --git a/vendor/github.com/moby/buildkit/executor/executor.go b/vendor/github.com/moby/buildkit/executor/executor.go new file mode 100644 index 00000000..df6d7920 --- /dev/null +++ b/vendor/github.com/moby/buildkit/executor/executor.go @@ -0,0 +1,39 @@ +package executor + +import ( + "context" + "io" + "net" + + "github.com/moby/buildkit/cache" + "github.com/moby/buildkit/solver/pb" +) + +type Meta struct { + Args []string + Env []string + User string + Cwd string + Tty bool + ReadonlyRootFS bool + ExtraHosts []HostIP + NetMode pb.NetMode + SecurityMode pb.SecurityMode +} + +type Mount struct { + Src cache.Mountable + Selector string + Dest string + Readonly bool +} + +type Executor interface { + // TODO: add stdout/err + Exec(ctx context.Context, meta Meta, rootfs cache.Mountable, mounts []Mount, stdin io.ReadCloser, stdout, stderr io.WriteCloser) error +} + +type HostIP struct { + Host string + IP net.IP +} diff --git a/vendor/github.com/moby/buildkit/executor/oci/hosts.go b/vendor/github.com/moby/buildkit/executor/oci/hosts.go new file mode 
100644 index 00000000..3b3f86db --- /dev/null +++ b/vendor/github.com/moby/buildkit/executor/oci/hosts.go @@ -0,0 +1,78 @@ +package oci + +import ( + "bytes" + "context" + "fmt" + "io/ioutil" + "os" + "path/filepath" + + "github.com/docker/docker/pkg/idtools" + "github.com/moby/buildkit/executor" + "github.com/moby/buildkit/identity" +) + +const hostsContent = ` +127.0.0.1 localhost buildkitsandbox +::1 localhost ip6-localhost ip6-loopback +` + +func GetHostsFile(ctx context.Context, stateDir string, extraHosts []executor.HostIP, idmap *idtools.IdentityMapping) (string, func(), error) { + if len(extraHosts) == 0 { + _, err := g.Do(ctx, stateDir, func(ctx context.Context) (interface{}, error) { + _, _, err := makeHostsFile(stateDir, nil, idmap) + return nil, err + }) + if err != nil { + return "", nil, err + } + return filepath.Join(stateDir, "hosts"), func() {}, nil + } + return makeHostsFile(stateDir, extraHosts, idmap) +} + +func makeHostsFile(stateDir string, extraHosts []executor.HostIP, idmap *idtools.IdentityMapping) (string, func(), error) { + p := filepath.Join(stateDir, "hosts") + if len(extraHosts) != 0 { + p += "." + identity.NewID() + } + _, err := os.Stat(p) + if err == nil { + return "", func() {}, nil + } + if !os.IsNotExist(err) { + return "", nil, err + } + + b := &bytes.Buffer{} + + if _, err := b.Write([]byte(hostsContent)); err != nil { + return "", nil, err + } + + for _, h := range extraHosts { + if _, err := b.Write([]byte(fmt.Sprintf("%s\t%s\n", h.IP.String(), h.Host))); err != nil { + return "", nil, err + } + } + + tmpPath := p + ".tmp" + if err := ioutil.WriteFile(tmpPath, b.Bytes(), 0644); err != nil { + return "", nil, err + } + + if idmap != nil { + root := idmap.RootPair() + if err := os.Chown(tmpPath, root.UID, root.GID); err != nil { + return "", nil, err + } + } + + if err := os.Rename(tmpPath, p); err != nil { + return "", nil, err + } + return p, func() { + os.RemoveAll(p) + }, nil +} diff --git a/vendor/github.com/moby/buildkit/executor/oci/mounts.go b/vendor/github.com/moby/buildkit/executor/oci/mounts.go new file mode 100644 index 00000000..8d32a95f --- /dev/null +++ b/vendor/github.com/moby/buildkit/executor/oci/mounts.go @@ -0,0 +1,117 @@ +package oci + +import ( + "context" + "path/filepath" + "strings" + + specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" +) + +// MountOpts sets oci spec specific info for mount points +type MountOpts func([]specs.Mount) ([]specs.Mount, error) + +//GetMounts returns default required for buildkit +// https://github.com/moby/buildkit/issues/429 +func GetMounts(ctx context.Context, mountOpts ...MountOpts) ([]specs.Mount, error) { + mounts := []specs.Mount{ + { + Destination: "/proc", + Type: "proc", + Source: "proc", + }, + { + Destination: "/dev", + Type: "tmpfs", + Source: "tmpfs", + Options: []string{"nosuid", "strictatime", "mode=755", "size=65536k"}, + }, + { + Destination: "/dev/pts", + Type: "devpts", + Source: "devpts", + Options: []string{"nosuid", "noexec", "newinstance", "ptmxmode=0666", "mode=0620", "gid=5"}, + }, + { + Destination: "/dev/shm", + Type: "tmpfs", + Source: "shm", + Options: []string{"nosuid", "noexec", "nodev", "mode=1777", "size=65536k"}, + }, + { + Destination: "/dev/mqueue", + Type: "mqueue", + Source: "mqueue", + Options: []string{"nosuid", "noexec", "nodev"}, + }, + { + Destination: "/sys", + Type: "sysfs", + Source: "sysfs", + Options: []string{"nosuid", "noexec", "nodev", "ro"}, + }, + } + var err error + for _, o := range mountOpts { + mounts, err = 
o(mounts) + if err != nil { + return nil, err + } + } + return mounts, nil +} + +func withROBind(src, dest string) func(m []specs.Mount) ([]specs.Mount, error) { + return func(m []specs.Mount) ([]specs.Mount, error) { + m = append(m, specs.Mount{ + Destination: dest, + Type: "bind", + Source: src, + Options: []string{"rbind", "ro"}, + }) + return m, nil + } +} + +func hasPrefix(p, prefixDir string) bool { + prefixDir = filepath.Clean(prefixDir) + if prefixDir == "/" { + return true + } + p = filepath.Clean(p) + return p == prefixDir || strings.HasPrefix(p, prefixDir+"/") +} + +func removeMountsWithPrefix(mounts []specs.Mount, prefixDir string) []specs.Mount { + var ret []specs.Mount + for _, m := range mounts { + if !hasPrefix(m.Destination, prefixDir) { + ret = append(ret, m) + } + } + return ret +} + +func withProcessMode(processMode ProcessMode) func([]specs.Mount) ([]specs.Mount, error) { + return func(m []specs.Mount) ([]specs.Mount, error) { + switch processMode { + case ProcessSandbox: + // keep the default + case NoProcessSandbox: + m = removeMountsWithPrefix(m, "/proc") + procMount := specs.Mount{ + Destination: "/proc", + Type: "bind", + Source: "/proc", + // NOTE: "rbind"+"ro" does not make /proc read-only recursively. + // So we keep maskedPath and readonlyPaths (although not mandatory for rootless mode) + Options: []string{"rbind"}, + } + m = append([]specs.Mount{procMount}, m...) + default: + return nil, errors.Errorf("unknown process mode: %v", processMode) + } + return m, nil + } +} diff --git a/vendor/github.com/moby/buildkit/executor/oci/resolvconf.go b/vendor/github.com/moby/buildkit/executor/oci/resolvconf.go new file mode 100644 index 00000000..61fd36da --- /dev/null +++ b/vendor/github.com/moby/buildkit/executor/oci/resolvconf.go @@ -0,0 +1,123 @@ +package oci + +import ( + "context" + "io/ioutil" + "os" + "path/filepath" + + "github.com/docker/docker/pkg/idtools" + "github.com/docker/libnetwork/resolvconf" + "github.com/docker/libnetwork/types" + "github.com/moby/buildkit/util/flightcontrol" +) + +var g flightcontrol.Group +var notFirstRun bool +var lastNotEmpty bool + +// overridden by tests +var resolvconfGet = resolvconf.Get + +type DNSConfig struct { + Nameservers []string + Options []string + SearchDomains []string +} + +func GetResolvConf(ctx context.Context, stateDir string, idmap *idtools.IdentityMapping, dns *DNSConfig) (string, error) { + p := filepath.Join(stateDir, "resolv.conf") + _, err := g.Do(ctx, stateDir, func(ctx context.Context) (interface{}, error) { + generate := !notFirstRun + notFirstRun = true + + if !generate { + fi, err := os.Stat(p) + if err != nil { + if !os.IsNotExist(err) { + return "", err + } + generate = true + } + if !generate { + fiMain, err := os.Stat(resolvconf.Path()) + if err != nil { + if !os.IsNotExist(err) { + return nil, err + } + if lastNotEmpty { + generate = true + lastNotEmpty = false + } + } else { + if fi.ModTime().Before(fiMain.ModTime()) { + generate = true + } + } + } + } + + if !generate { + return "", nil + } + + var dt []byte + f, err := resolvconfGet() + if err != nil { + if !os.IsNotExist(err) { + return "", err + } + } else { + dt = f.Content + } + + if dns != nil { + var ( + dnsNameservers = resolvconf.GetNameservers(dt, types.IP) + dnsSearchDomains = resolvconf.GetSearchDomains(dt) + dnsOptions = resolvconf.GetOptions(dt) + ) + if len(dns.Nameservers) > 0 { + dnsNameservers = dns.Nameservers + } + if len(dns.SearchDomains) > 0 { + dnsSearchDomains = dns.SearchDomains + } + if len(dns.Options) > 0 { + 
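+				// as with nameservers and search domains above, explicitly
+				// configured options take precedence over the host's resolv.conf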
+				dnsOptions = dns.Options
+			}
+
+			f, err = resolvconf.Build(p+".tmp", dnsNameservers, dnsSearchDomains, dnsOptions)
+			if err != nil {
+				return "", err
+			}
+			dt = f.Content
+		}
+
+		f, err = resolvconf.FilterResolvDNS(dt, true)
+		if err != nil {
+			return "", err
+		}
+
+		tmpPath := p + ".tmp"
+		if err := ioutil.WriteFile(tmpPath, f.Content, 0644); err != nil {
+			return "", err
+		}
+
+		if idmap != nil {
+			root := idmap.RootPair()
+			if err := os.Chown(tmpPath, root.UID, root.GID); err != nil {
+				return "", err
+			}
+		}
+
+		if err := os.Rename(tmpPath, p); err != nil {
+			return "", err
+		}
+		return "", nil
+	})
+	if err != nil {
+		return "", err
+	}
+	return p, nil
+}
diff --git a/vendor/github.com/moby/buildkit/executor/oci/spec.go b/vendor/github.com/moby/buildkit/executor/oci/spec.go
new file mode 100644
index 00000000..9329fa90
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/executor/oci/spec.go
@@ -0,0 +1,13 @@
+package oci
+
+// ProcessMode configures PID namespaces
+type ProcessMode int
+
+const (
+	// ProcessSandbox unshares the pid namespace and mounts procfs.
+	ProcessSandbox ProcessMode = iota
+	// NoProcessSandbox uses the host pid namespace and bind-mounts procfs.
+	// Note that NoProcessSandbox allows build containers to kill (and potentially ptrace) an arbitrary process in the BuildKit host namespace.
+	// NoProcessSandbox should be enabled only when BuildKit is running in a container as an unprivileged user.
+	NoProcessSandbox
+)
diff --git a/vendor/github.com/moby/buildkit/executor/oci/spec_unix.go b/vendor/github.com/moby/buildkit/executor/oci/spec_unix.go
new file mode 100644
index 00000000..8ab4fb47
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/executor/oci/spec_unix.go
@@ -0,0 +1,254 @@
+// +build !windows
+
+package oci
+
+import (
+	"context"
+	"path"
+	"sync"
+
+	"github.com/containerd/containerd/containers"
+	"github.com/containerd/containerd/contrib/seccomp"
+	"github.com/containerd/containerd/mount"
+	"github.com/containerd/containerd/namespaces"
+	"github.com/containerd/containerd/oci"
+	"github.com/containerd/continuity/fs"
+	"github.com/docker/docker/pkg/idtools"
+	"github.com/mitchellh/hashstructure"
+	"github.com/moby/buildkit/executor"
+	"github.com/moby/buildkit/snapshot"
+	"github.com/moby/buildkit/solver/pb"
+	"github.com/moby/buildkit/util/entitlements/security"
+	"github.com/moby/buildkit/util/network"
+	"github.com/moby/buildkit/util/system"
+	specs "github.com/opencontainers/runtime-spec/specs-go"
+	"github.com/pkg/errors"
+)
+
+// Ideally we wouldn't have to import all of containerd just for the default spec
+
+// GenerateSpec generates a spec using containerd functionality.
+// opts are ignored for s.Process, s.Hostname, and s.Mounts.
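+// The returned release function unmounts any submounts created for the spec
+// and should be called once the container no longer needs them.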
+func GenerateSpec(ctx context.Context, meta executor.Meta, mounts []executor.Mount, id, resolvConf, hostsFile string, namespace network.Namespace, processMode ProcessMode, idmap *idtools.IdentityMapping, opts ...oci.SpecOpts) (*specs.Spec, func(), error) {
+	c := &containers.Container{
+		ID: id,
+	}
+	_, ok := namespaces.Namespace(ctx)
+	if !ok {
+		ctx = namespaces.WithNamespace(ctx, "buildkit")
+	}
+	if meta.SecurityMode == pb.SecurityMode_INSECURE {
+		opts = append(opts, security.WithInsecureSpec())
+	} else if system.SeccompSupported() && meta.SecurityMode == pb.SecurityMode_SANDBOX {
+		opts = append(opts, seccomp.WithDefaultProfile())
+	}
+
+	switch processMode {
+	case NoProcessSandbox:
+		// Mount for /proc is replaced in GetMounts()
+		opts = append(opts,
+			oci.WithHostNamespace(specs.PIDNamespace))
+		// TODO(AkihiroSuda): Configure seccomp to disable ptrace (and prctl?) explicitly
+	}
+
+	// Note that containerd.GenerateSpec is namespaced so as to make
+	// specs.Linux.CgroupsPath namespaced
+	s, err := oci.GenerateSpec(ctx, nil, c, opts...)
+	if err != nil {
+		return nil, nil, err
+	}
+	// set the networking information on the spec
+	namespace.Set(s)
+
+	s.Process.Args = meta.Args
+	s.Process.Env = meta.Env
+	s.Process.Cwd = meta.Cwd
+	s.Process.Rlimits = nil           // reset open files limit
+	s.Process.NoNewPrivileges = false // reset nonewprivileges
+	s.Hostname = "buildkitsandbox"
+
+	s.Mounts, err = GetMounts(ctx,
+		withProcessMode(processMode),
+		withROBind(resolvConf, "/etc/resolv.conf"),
+		withROBind(hostsFile, "/etc/hosts"),
+	)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	s.Mounts = append(s.Mounts, specs.Mount{
+		Destination: "/sys/fs/cgroup",
+		Type:        "cgroup",
+		Source:      "cgroup",
+		Options:     []string{"ro", "nosuid", "noexec", "nodev"},
+	})
+
+	if processMode == NoProcessSandbox {
+		var maskedPaths []string
+		for _, s := range s.Linux.MaskedPaths {
+			if !hasPrefix(s, "/proc") {
+				maskedPaths = append(maskedPaths, s)
+			}
+		}
+		s.Linux.MaskedPaths = maskedPaths
+		var readonlyPaths []string
+		for _, s := range s.Linux.ReadonlyPaths {
+			if !hasPrefix(s, "/proc") {
+				readonlyPaths = append(readonlyPaths, s)
+			}
+		}
+		s.Linux.ReadonlyPaths = readonlyPaths
+	}
+
+	if meta.SecurityMode == pb.SecurityMode_INSECURE {
+		if err = oci.WithWriteableCgroupfs(ctx, nil, c, s); err != nil {
+			return nil, nil, err
+		}
+		if err = oci.WithWriteableSysfs(ctx, nil, c, s); err != nil {
+			return nil, nil, err
+		}
+	}
+
+	if idmap != nil {
+		s.Linux.Namespaces = append(s.Linux.Namespaces, specs.LinuxNamespace{
+			Type: specs.UserNamespace,
+		})
+		s.Linux.UIDMappings = specMapping(idmap.UIDs())
+		s.Linux.GIDMappings = specMapping(idmap.GIDs())
+	}
+
+	sm := &submounts{}
+
+	var releasers []func() error
+	releaseAll := func() {
+		sm.cleanup()
+		for _, f := range releasers {
+			f()
+		}
+	}
+
+	for _, m := range mounts {
+		if m.Src == nil {
+			return nil, nil, errors.Errorf("mount %s has no source", m.Dest)
+		}
+		mountable, err := m.Src.Mount(ctx, m.Readonly)
+		if err != nil {
+			releaseAll()
+			return nil, nil, errors.Wrapf(err, "failed to mount %s", m.Dest)
+		}
+		mounts, release, err := mountable.Mount()
+		if err != nil {
+			releaseAll()
+			return nil, nil, errors.WithStack(err)
+		}
+		releasers = append(releasers, release)
+		for _, mount := range mounts {
+			mount, err = sm.subMount(mount, m.Selector)
+			if err != nil {
+				releaseAll()
+				return nil, nil, err
+			}
+			s.Mounts = append(s.Mounts, specs.Mount{
+				Destination: m.Dest,
+				Type:        mount.Type,
+				Source:      mount.Source,
+				Options:     mount.Options,
+			})
+		}
+	}
+
+	return s, releaseAll, nil
+}
+
+type mountRef struct {
+	mount   mount.Mount
+	unmount func() error
+}
+
+type submounts struct {
+	m map[uint64]mountRef
+}
+
+func (s *submounts) subMount(m mount.Mount, subPath string) (mount.Mount, error) {
+	if path.Join("/", subPath) == "/" {
+		return m, nil
+	}
+	if s.m == nil {
+		s.m = map[uint64]mountRef{}
+	}
+	h, err := hashstructure.Hash(m, nil)
+	if err != nil {
+		return mount.Mount{}, err
+	}
+	if mr, ok := s.m[h]; ok {
+		sm, err := sub(mr.mount, subPath)
+		if err != nil {
+			return mount.Mount{}, err
+		}
+		return sm, nil
+	}
+
+	lm := snapshot.LocalMounterWithMounts([]mount.Mount{m})
+
+	mp, err := lm.Mount()
+	if err != nil {
+		return mount.Mount{}, err
+	}
+
+	opts := []string{"rbind"}
+	for _, opt := range m.Options {
+		if opt == "ro" {
+			opts = append(opts, opt)
+		}
+	}
+
+	s.m[h] = mountRef{
+		mount: mount.Mount{
+			Source:  mp,
+			Type:    "bind",
+			Options: opts,
+		},
+		unmount: lm.Unmount,
+	}
+
+	sm, err := sub(s.m[h].mount, subPath)
+	if err != nil {
+		return mount.Mount{}, err
+	}
+	return sm, nil
+}
+
+func (s *submounts) cleanup() {
+	var wg sync.WaitGroup
+	wg.Add(len(s.m))
+	for _, m := range s.m {
+		func(m mountRef) {
+			go func() {
+				m.unmount()
+				wg.Done()
+			}()
+		}(m)
+	}
+	wg.Wait()
+}
+
+func sub(m mount.Mount, subPath string) (mount.Mount, error) {
+	src, err := fs.RootPath(m.Source, subPath)
+	if err != nil {
+		return mount.Mount{}, err
+	}
+	m.Source = src
+	return m, nil
+}
+
+func specMapping(s []idtools.IDMap) []specs.LinuxIDMapping {
+	var ids []specs.LinuxIDMapping
+	for _, item := range s {
+		ids = append(ids, specs.LinuxIDMapping{
+			HostID:      uint32(item.HostID),
+			ContainerID: uint32(item.ContainerID),
+			Size:        uint32(item.Size),
+		})
+	}
+	return ids
+}
diff --git a/vendor/github.com/moby/buildkit/executor/oci/user.go b/vendor/github.com/moby/buildkit/executor/oci/user.go
new file mode 100644
index 00000000..af64231f
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/executor/oci/user.go
@@ -0,0 +1,99 @@
+package oci
+
+import (
+	"context"
+	"errors"
+	"os"
+	"strconv"
+	"strings"
+
+	"github.com/containerd/containerd/containers"
+	containerdoci "github.com/containerd/containerd/oci"
+	"github.com/containerd/continuity/fs"
+	"github.com/opencontainers/runc/libcontainer/user"
+	"github.com/opencontainers/runtime-spec/specs-go"
+)
+
+func GetUser(ctx context.Context, root, username string) (uint32, uint32, []uint32, error) {
+	// fast path from uid/gid
+	if uid, gid, err := ParseUIDGID(username); err == nil {
+		return uid, gid, nil, nil
+	}
+
+	passwdFile, err := openUserFile(root, "/etc/passwd")
+	if err == nil {
+		defer passwdFile.Close()
+	}
+	groupFile, err := openUserFile(root, "/etc/group")
+	if err == nil {
+		defer groupFile.Close()
+	}
+
+	execUser, err := user.GetExecUser(username, nil, passwdFile, groupFile)
+	if err != nil {
+		return 0, 0, nil, err
+	}
+	var sgids []uint32
+	for _, g := range execUser.Sgids {
+		sgids = append(sgids, uint32(g))
+	}
+	return uint32(execUser.Uid), uint32(execUser.Gid), sgids, nil
+}
+
+// ParseUIDGID takes the fast path to parse UID and GID if and only if they are both provided
+func ParseUIDGID(str string) (uid uint32, gid uint32, err error) {
+	if str == "" {
+		return 0, 0, nil
+	}
+	parts := strings.SplitN(str, ":", 2)
+	if len(parts) == 1 {
+		return 0, 0, errors.New("groups ID is not provided")
+	}
+	if uid, err = parseUID(parts[0]); err != nil {
+		return 0, 0, err
+	}
+	if gid, err = parseUID(parts[1]); err != nil {
+		return 0, 0, err
+	}
+	return
+}
+
+func openUserFile(root, p string) (*os.File, error) {
+	p, err := fs.RootPath(root, p)
+	if err != nil {
+		return nil, err
+	}
+	return os.Open(p)
+}
+
+func parseUID(str string) (uint32, error) {
+	if str == "root" {
+		return 0, nil
+	}
+	uid, err := strconv.ParseUint(str, 10, 32)
+	if err != nil {
+		return 0, err
+	}
+	return uint32(uid), nil
+}
+
+// WithUIDGID allows the UID and GID for the Process to be set
+// FIXME: This is a temporary fix for the missing supplementary GIDs from containerd;
+// once the PR in containerd is merged we should remove this function.
+func WithUIDGID(uid, gid uint32, sgids []uint32) containerdoci.SpecOpts {
+	return func(_ context.Context, _ containerdoci.Client, _ *containers.Container, s *containerdoci.Spec) error {
+		setProcess(s)
+		s.Process.User.UID = uid
+		s.Process.User.GID = gid
+		s.Process.User.AdditionalGids = sgids
+		return nil
+	}
+}
+
+// setProcess sets Process to empty if unset
+// FIXME: Same as above; remove this once the containerd fix is merged.
+func setProcess(s *containerdoci.Spec) {
+	if s.Process == nil {
+		s.Process = &specs.Process{}
+	}
+}
diff --git a/vendor/github.com/moby/buildkit/executor/runcexecutor/executor.go b/vendor/github.com/moby/buildkit/executor/runcexecutor/executor.go
new file mode 100644
index 00000000..26e432e6
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/executor/runcexecutor/executor.go
@@ -0,0 +1,337 @@
+package runcexecutor
+
+import (
+	"context"
+	"encoding/json"
+	"io"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strings"
+	"syscall"
+	"time"
+
+	"github.com/containerd/containerd/mount"
+	containerdoci "github.com/containerd/containerd/oci"
+	"github.com/containerd/continuity/fs"
+	runc "github.com/containerd/go-runc"
+	"github.com/docker/docker/pkg/idtools"
+	"github.com/moby/buildkit/cache"
+	"github.com/moby/buildkit/executor"
+	"github.com/moby/buildkit/executor/oci"
+	"github.com/moby/buildkit/identity"
+	"github.com/moby/buildkit/solver/pb"
+	"github.com/moby/buildkit/util/network"
+	rootlessspecconv "github.com/moby/buildkit/util/rootless/specconv"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+type Opt struct {
+	// root directory
+	Root              string
+	CommandCandidates []string
+	// without root privileges (has nothing to do with Opt.Root directory)
+	Rootless bool
+	// DefaultCgroupParent is the cgroup-parent name for executor
+	DefaultCgroupParent string
+	// ProcessMode
+	ProcessMode     oci.ProcessMode
+	IdentityMapping *idtools.IdentityMapping
+	// runc run --no-pivot (not recommended)
+	NoPivot     bool
+	DNS         *oci.DNSConfig
+	OOMScoreAdj *int
+}
+
+var defaultCommandCandidates = []string{"buildkit-runc", "runc"}
+
+type runcExecutor struct {
+	runc             *runc.Runc
+	root             string
+	cmd              string
+	cgroupParent     string
+	rootless         bool
+	networkProviders map[pb.NetMode]network.Provider
+	processMode      oci.ProcessMode
+	idmap            *idtools.IdentityMapping
+	noPivot          bool
+	dns              *oci.DNSConfig
+	oomScoreAdj      *int
+}
+
+func New(opt Opt, networkProviders map[pb.NetMode]network.Provider) (executor.Executor, error) {
+	cmds := opt.CommandCandidates
+	if cmds == nil {
+		cmds = defaultCommandCandidates
+	}
+
+	var cmd string
+	var found bool
+	for _, cmd = range cmds {
+		if _, err := exec.LookPath(cmd); err == nil {
+			found = true
+			break
+		}
+	}
+	if !found {
+		return nil, errors.Errorf("failed to find %s binary", cmd)
+	}
+
+	root := opt.Root
+
+	if err := os.MkdirAll(root, 0711); err != nil {
+		return nil, errors.Wrapf(err, "failed to create %s", root)
+	}
+
+	root, err := filepath.Abs(root)
+	if err != nil {
+		return nil, err
+	}
+	root, err = filepath.EvalSymlinks(root)
+	if err != nil {
+		return nil, err
+	}
+
+	// clean up old hosts/resolv.conf files; ignore errors
+	os.RemoveAll(filepath.Join(root, "hosts"))
+	os.RemoveAll(filepath.Join(root, "resolv.conf"))
+
+	runtime := &runc.Runc{
+		Command:      cmd,
+		Log:          filepath.Join(root, "runc-log.json"),
+		LogFormat:    runc.JSON,
+		PdeathSignal: syscall.SIGKILL, // this can still leak the process
+		Setpgid:      true,
+		// we don't execute runc with --rootless=(true|false) explicitly,
+		// so as to support non-runc runtimes
+	}
+
+	w := &runcExecutor{
+		runc:             runtime,
+		root:             root,
+		cgroupParent:     opt.DefaultCgroupParent,
+		rootless:         opt.Rootless,
+		networkProviders: networkProviders,
+		processMode:      opt.ProcessMode,
+		idmap:            opt.IdentityMapping,
+		noPivot:          opt.NoPivot,
+		dns:              opt.DNS,
+		oomScoreAdj:      opt.OOMScoreAdj,
+	}
+	return w, nil
+}
+
+func (w *runcExecutor) Exec(ctx context.Context, meta executor.Meta, root cache.Mountable, mounts []executor.Mount, stdin io.ReadCloser, stdout, stderr io.WriteCloser) error {
+	provider, ok := w.networkProviders[meta.NetMode]
+	if !ok {
+		return errors.Errorf("unknown network mode %s", meta.NetMode)
+	}
+	namespace, err := provider.New()
+	if err != nil {
+		return err
+	}
+	defer namespace.Close()
+
+	if meta.NetMode == pb.NetMode_HOST {
+		logrus.Info("enabling HostNetworking")
+	}
+
+	resolvConf, err := oci.GetResolvConf(ctx, w.root, w.idmap, w.dns)
+	if err != nil {
+		return err
+	}
+
+	hostsFile, clean, err := oci.GetHostsFile(ctx, w.root, meta.ExtraHosts, w.idmap)
+	if err != nil {
+		return err
+	}
+	if clean != nil {
+		defer clean()
+	}
+
+	mountable, err := root.Mount(ctx, false)
+	if err != nil {
+		return err
+	}
+
+	rootMount, release, err := mountable.Mount()
+	if err != nil {
+		return err
+	}
+	if release != nil {
+		defer release()
+	}
+
+	id := identity.NewID()
+	bundle := filepath.Join(w.root, id)
+
+	if err := os.Mkdir(bundle, 0711); err != nil {
+		return err
+	}
+	defer os.RemoveAll(bundle)
+
+	identity := idtools.Identity{}
+	if w.idmap != nil {
+		identity = w.idmap.RootPair()
+	}
+
+	rootFSPath := filepath.Join(bundle, "rootfs")
+	if err := idtools.MkdirAllAndChown(rootFSPath, 0700, identity); err != nil {
+		return err
+	}
+	if err := mount.All(rootMount, rootFSPath); err != nil {
+		return err
+	}
+	defer mount.Unmount(rootFSPath, 0)
+
+	uid, gid, sgids, err := oci.GetUser(ctx, rootFSPath, meta.User)
+	if err != nil {
+		return err
+	}
+
+	f, err := os.Create(filepath.Join(bundle, "config.json"))
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	opts := []containerdoci.SpecOpts{oci.WithUIDGID(uid, gid, sgids)}
+
+	if meta.ReadonlyRootFS {
+		opts = append(opts, containerdoci.WithRootFSReadonly())
+	}
+
+	identity = idtools.Identity{
+		UID: int(uid),
+		GID: int(gid),
+	}
+	if w.idmap != nil {
+		identity, err = w.idmap.ToHost(identity)
+		if err != nil {
+			return err
+		}
+	}
+
+	if w.cgroupParent != "" {
+		var cgroupsPath string
+		lastSeparator := w.cgroupParent[len(w.cgroupParent)-1:]
+		if strings.Contains(w.cgroupParent, ".slice") && lastSeparator == ":" {
+			cgroupsPath = w.cgroupParent + id
+		} else {
+			cgroupsPath = filepath.Join("/", w.cgroupParent, "buildkit", id)
+		}
+		opts = append(opts, containerdoci.WithCgroup(cgroupsPath))
+	}
+	spec, cleanup, err := oci.GenerateSpec(ctx, meta, mounts, id, resolvConf, hostsFile, namespace, w.processMode, w.idmap, opts...)
+ if err != nil { + return err + } + defer cleanup() + + spec.Root.Path = rootFSPath + if _, ok := root.(cache.ImmutableRef); ok { // TODO: pass in with mount, not ref type + spec.Root.Readonly = true + } + + newp, err := fs.RootPath(rootFSPath, meta.Cwd) + if err != nil { + return errors.Wrapf(err, "working dir %s points to invalid target", newp) + } + if _, err := os.Stat(newp); err != nil { + if err := idtools.MkdirAllAndChown(newp, 0755, identity); err != nil { + return errors.Wrapf(err, "failed to create working directory %s", newp) + } + } + + spec.Process.OOMScoreAdj = w.oomScoreAdj + if w.rootless { + if err := rootlessspecconv.ToRootless(spec); err != nil { + return err + } + } + + if err := json.NewEncoder(f).Encode(spec); err != nil { + return err + } + + // runCtx/killCtx is used for extra check in case the kill command blocks + runCtx, cancelRun := context.WithCancel(context.Background()) + defer cancelRun() + + done := make(chan struct{}) + go func() { + for { + select { + case <-ctx.Done(): + killCtx, timeout := context.WithTimeout(context.Background(), 7*time.Second) + if err := w.runc.Kill(killCtx, id, int(syscall.SIGKILL), nil); err != nil { + logrus.Errorf("failed to kill runc %s: %+v", id, err) + select { + case <-killCtx.Done(): + timeout() + cancelRun() + return + default: + } + } + timeout() + select { + case <-time.After(50 * time.Millisecond): + case <-done: + return + } + case <-done: + return + } + } + }() + + logrus.Debugf("> creating %s %v", id, meta.Args) + status, err := w.runc.Run(runCtx, id, bundle, &runc.CreateOpts{ + IO: &forwardIO{stdin: stdin, stdout: stdout, stderr: stderr}, + NoPivot: w.noPivot, + }) + close(done) + + if status != 0 || err != nil { + if err == nil { + err = errors.Errorf("exit code: %d", status) + } + select { + case <-ctx.Done(): + return errors.Wrapf(ctx.Err(), err.Error()) + default: + return err + } + } + + return nil +} + +type forwardIO struct { + stdin io.ReadCloser + stdout, stderr io.WriteCloser +} + +func (s *forwardIO) Close() error { + return nil +} + +func (s *forwardIO) Set(cmd *exec.Cmd) { + cmd.Stdin = s.stdin + cmd.Stdout = s.stdout + cmd.Stderr = s.stderr +} + +func (s *forwardIO) Stdin() io.WriteCloser { + return nil +} + +func (s *forwardIO) Stdout() io.ReadCloser { + return nil +} + +func (s *forwardIO) Stderr() io.ReadCloser { + return nil +} diff --git a/vendor/github.com/moby/buildkit/exporter/containerimage/export.go b/vendor/github.com/moby/buildkit/exporter/containerimage/export.go new file mode 100644 index 00000000..c803c0ed --- /dev/null +++ b/vendor/github.com/moby/buildkit/exporter/containerimage/export.go @@ -0,0 +1,319 @@ +package containerimage + +import ( + "context" + "fmt" + "strconv" + "strings" + "time" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/leases" + "github.com/containerd/containerd/platforms" + "github.com/containerd/containerd/remotes/docker" + "github.com/containerd/containerd/rootfs" + "github.com/moby/buildkit/cache/blobs" + "github.com/moby/buildkit/exporter" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/snapshot" + "github.com/moby/buildkit/util/leaseutil" + "github.com/moby/buildkit/util/push" + digest "github.com/opencontainers/go-digest" + "github.com/opencontainers/image-spec/identity" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +const ( + keyImageName = "name" + keyPush = "push" + 
keyPushByDigest = "push-by-digest" + keyInsecure = "registry.insecure" + keyUnpack = "unpack" + keyDanglingPrefix = "dangling-name-prefix" + keyNameCanonical = "name-canonical" + keyLayerCompression = "compression" + ociTypes = "oci-mediatypes" +) + +type Opt struct { + SessionManager *session.Manager + ImageWriter *ImageWriter + Images images.Store + RegistryHosts docker.RegistryHosts + LeaseManager leases.Manager +} + +type imageExporter struct { + opt Opt +} + +// New returns a new containerimage exporter instance that supports exporting +// to an image store and pushing the image to registry. +// This exporter supports following values in returned kv map: +// - containerimage.digest - The digest of the root manifest for the image. +func New(opt Opt) (exporter.Exporter, error) { + im := &imageExporter{opt: opt} + return im, nil +} + +func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exporter.ExporterInstance, error) { + i := &imageExporterInstance{ + imageExporter: e, + layerCompression: blobs.DefaultCompression, + } + + for k, v := range opt { + switch k { + case keyImageName: + i.targetName = v + case keyPush: + if v == "" { + i.push = true + continue + } + b, err := strconv.ParseBool(v) + if err != nil { + return nil, errors.Wrapf(err, "non-bool value specified for %s", k) + } + i.push = b + case keyPushByDigest: + if v == "" { + i.pushByDigest = true + continue + } + b, err := strconv.ParseBool(v) + if err != nil { + return nil, errors.Wrapf(err, "non-bool value specified for %s", k) + } + i.pushByDigest = b + case keyInsecure: + if v == "" { + i.insecure = true + continue + } + b, err := strconv.ParseBool(v) + if err != nil { + return nil, errors.Wrapf(err, "non-bool value specified for %s", k) + } + i.insecure = b + case keyUnpack: + if v == "" { + i.unpack = true + continue + } + b, err := strconv.ParseBool(v) + if err != nil { + return nil, errors.Wrapf(err, "non-bool value specified for %s", k) + } + i.unpack = b + case ociTypes: + if v == "" { + i.ociTypes = true + continue + } + b, err := strconv.ParseBool(v) + if err != nil { + return nil, errors.Wrapf(err, "non-bool value specified for %s", k) + } + i.ociTypes = b + case keyDanglingPrefix: + i.danglingPrefix = v + case keyNameCanonical: + if v == "" { + i.nameCanonical = true + continue + } + b, err := strconv.ParseBool(v) + if err != nil { + return nil, errors.Wrapf(err, "non-bool value specified for %s", k) + } + i.nameCanonical = b + case keyLayerCompression: + switch v { + case "gzip": + i.layerCompression = blobs.Gzip + case "uncompressed": + i.layerCompression = blobs.Uncompressed + default: + return nil, errors.Errorf("unsupported layer compression type: %v", v) + } + default: + if i.meta == nil { + i.meta = make(map[string][]byte) + } + i.meta[k] = []byte(v) + } + } + return i, nil +} + +type imageExporterInstance struct { + *imageExporter + targetName string + push bool + pushByDigest bool + unpack bool + insecure bool + ociTypes bool + nameCanonical bool + danglingPrefix string + layerCompression blobs.CompressionType + meta map[string][]byte +} + +func (e *imageExporterInstance) Name() string { + return "exporting to image" +} + +func (e *imageExporterInstance) Export(ctx context.Context, src exporter.Source) (map[string]string, error) { + if src.Metadata == nil { + src.Metadata = make(map[string][]byte) + } + for k, v := range e.meta { + src.Metadata[k] = v + } + + ctx, done, err := leaseutil.WithLease(ctx, e.opt.LeaseManager, leaseutil.MakeTemporary) + if err != nil { + return nil, 
err + } + defer done(context.TODO()) + + desc, err := e.opt.ImageWriter.Commit(ctx, src, e.ociTypes, e.layerCompression) + if err != nil { + return nil, err + } + + defer func() { + e.opt.ImageWriter.ContentStore().Delete(context.TODO(), desc.Digest) + }() + + resp := make(map[string]string) + + if n, ok := src.Metadata["image.name"]; e.targetName == "*" && ok { + e.targetName = string(n) + } + + nameCanonical := e.nameCanonical + if e.targetName == "" && e.danglingPrefix != "" { + e.targetName = e.danglingPrefix + "@" + desc.Digest.String() + nameCanonical = false + } + + if e.targetName != "" { + targetNames := strings.Split(e.targetName, ",") + for _, targetName := range targetNames { + if e.opt.Images != nil { + tagDone := oneOffProgress(ctx, "naming to "+targetName) + img := images.Image{ + Target: *desc, + CreatedAt: time.Now(), + } + sfx := []string{""} + if nameCanonical { + sfx = append(sfx, "@"+desc.Digest.String()) + } + for _, sfx := range sfx { + img.Name = targetName + sfx + if _, err := e.opt.Images.Update(ctx, img); err != nil { + if !errdefs.IsNotFound(err) { + return nil, tagDone(err) + } + + if _, err := e.opt.Images.Create(ctx, img); err != nil { + return nil, tagDone(err) + } + } + } + tagDone(nil) + + if e.unpack { + if err := e.unpackImage(ctx, img); err != nil { + return nil, err + } + } + } + if e.push { + if err := push.Push(ctx, e.opt.SessionManager, e.opt.ImageWriter.ContentStore(), desc.Digest, targetName, e.insecure, e.opt.RegistryHosts, e.pushByDigest); err != nil { + return nil, err + } + } + } + resp["image.name"] = e.targetName + } + + resp["containerimage.digest"] = desc.Digest.String() + return resp, nil +} + +func (e *imageExporterInstance) unpackImage(ctx context.Context, img images.Image) (err0 error) { + unpackDone := oneOffProgress(ctx, "unpacking to "+img.Name) + defer func() { + unpackDone(err0) + }() + + var ( + contentStore = e.opt.ImageWriter.ContentStore() + applier = e.opt.ImageWriter.Applier() + snapshotter = e.opt.ImageWriter.Snapshotter() + ) + + // fetch manifest by default platform + manifest, err := images.Manifest(ctx, contentStore, img.Target, platforms.Default()) + if err != nil { + return err + } + + layers, err := getLayers(ctx, contentStore, manifest) + if err != nil { + return err + } + + // get containerd snapshotter + ctrdSnapshotter, release := snapshot.NewContainerdSnapshotter(snapshotter) + defer release() + + var chain []digest.Digest + for _, layer := range layers { + if _, err := rootfs.ApplyLayer(ctx, layer, chain, ctrdSnapshotter, applier); err != nil { + return err + } + chain = append(chain, layer.Diff.Digest) + } + + var ( + keyGCLabel = fmt.Sprintf("containerd.io/gc.ref.snapshot.%s", snapshotter.Name()) + valueGCLabel = identity.ChainID(chain).String() + ) + + cinfo := content.Info{ + Digest: manifest.Config.Digest, + Labels: map[string]string{keyGCLabel: valueGCLabel}, + } + _, err = contentStore.Update(ctx, cinfo, fmt.Sprintf("labels.%s", keyGCLabel)) + return err +} + +func getLayers(ctx context.Context, contentStore content.Store, manifest ocispec.Manifest) ([]rootfs.Layer, error) { + diffIDs, err := images.RootFS(ctx, contentStore, manifest.Config) + if err != nil { + return nil, errors.Wrap(err, "failed to resolve rootfs") + } + + if len(diffIDs) != len(manifest.Layers) { + return nil, errors.Errorf("mismatched image rootfs and manifest layers") + } + + layers := make([]rootfs.Layer, len(diffIDs)) + for i := range diffIDs { + layers[i].Diff = ocispec.Descriptor{ + MediaType: ocispec.MediaTypeImageLayer, + 
Digest: diffIDs[i], + } + layers[i].Blob = manifest.Layers[i] + } + return layers, nil +} diff --git a/vendor/github.com/moby/buildkit/exporter/containerimage/exptypes/types.go b/vendor/github.com/moby/buildkit/exporter/containerimage/exptypes/types.go new file mode 100644 index 00000000..02e34eb6 --- /dev/null +++ b/vendor/github.com/moby/buildkit/exporter/containerimage/exptypes/types.go @@ -0,0 +1,16 @@ +package exptypes + +import specs "github.com/opencontainers/image-spec/specs-go/v1" + +const ExporterImageConfigKey = "containerimage.config" +const ExporterInlineCache = "containerimage.inlinecache" +const ExporterPlatformsKey = "refs.platforms" + +type Platforms struct { + Platforms []Platform +} + +type Platform struct { + ID string + Platform specs.Platform +} diff --git a/vendor/github.com/moby/buildkit/exporter/containerimage/writer.go b/vendor/github.com/moby/buildkit/exporter/containerimage/writer.go new file mode 100644 index 00000000..28ad29f0 --- /dev/null +++ b/vendor/github.com/moby/buildkit/exporter/containerimage/writer.go @@ -0,0 +1,523 @@ +package containerimage + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "time" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/diff" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/platforms" + "github.com/moby/buildkit/cache" + "github.com/moby/buildkit/cache/blobs" + "github.com/moby/buildkit/exporter" + "github.com/moby/buildkit/exporter/containerimage/exptypes" + "github.com/moby/buildkit/snapshot" + "github.com/moby/buildkit/util/progress" + "github.com/moby/buildkit/util/system" + digest "github.com/opencontainers/go-digest" + specs "github.com/opencontainers/image-spec/specs-go" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/sync/errgroup" +) + +const ( + emptyGZLayer = digest.Digest("sha256:4f4fb700ef54461cfa02571ae0db9a0dc1e0cdb5577484a6d75e68dc38e8acc1") +) + +type WriterOpt struct { + Snapshotter snapshot.Snapshotter + ContentStore content.Store + Applier diff.Applier + Differ diff.Comparer +} + +func NewImageWriter(opt WriterOpt) (*ImageWriter, error) { + return &ImageWriter{opt: opt}, nil +} + +type ImageWriter struct { + opt WriterOpt +} + +func (ic *ImageWriter) Commit(ctx context.Context, inp exporter.Source, oci bool, compression blobs.CompressionType) (*ocispec.Descriptor, error) { + platformsBytes, ok := inp.Metadata[exptypes.ExporterPlatformsKey] + + if len(inp.Refs) > 0 && !ok { + return nil, errors.Errorf("unable to export multiple refs, missing platforms mapping") + } + + if len(inp.Refs) == 0 { + layers, err := ic.exportLayers(ctx, compression, inp.Ref) + if err != nil { + return nil, err + } + return ic.commitDistributionManifest(ctx, inp.Ref, inp.Metadata[exptypes.ExporterImageConfigKey], layers[0], oci, inp.Metadata[exptypes.ExporterInlineCache]) + } + + var p exptypes.Platforms + if err := json.Unmarshal(platformsBytes, &p); err != nil { + return nil, errors.Wrapf(err, "failed to parse platforms passed to exporter") + } + + if len(p.Platforms) != len(inp.Refs) { + return nil, errors.Errorf("number of platforms does not match references %d %d", len(p.Platforms), len(inp.Refs)) + } + + refs := make([]cache.ImmutableRef, 0, len(inp.Refs)) + layersMap := make(map[string]int, len(inp.Refs)) + for id, r := range inp.Refs { + layersMap[id] = len(refs) + refs = append(refs, r) + } + + layers, err := ic.exportLayers(ctx, compression, refs...) 
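// --- editorial sketch, not part of the vendored buildkit sources ---
// exportLayers (defined further down in this file) fans out one goroutine
// per ref with errgroup and lets each goroutine write only its own slot of
// a pre-sized slice, so no mutex is needed and the first error cancels the
// group. The same pattern standalone, with the diff computation replaced
// by a stand-in:
package main

import (
	"context"
	"fmt"
	"strings"

	"golang.org/x/sync/errgroup"
)

func main() {
	refs := []string{"ref-a", "ref-b", "ref-c"}
	out := make([]string, len(refs)) // one slot per input, written by exactly one goroutine

	eg, _ := errgroup.WithContext(context.Background())
	for i, ref := range refs {
		i, ref := i, ref // capture loop variables (pre-Go 1.22 semantics)
		eg.Go(func() error {
			out[i] = strings.ToUpper(ref) // stand-in for blobs.GetDiffPairs
			return nil
		})
	}
	if err := eg.Wait(); err != nil { // returns the first non-nil error
		panic(err)
	}
	fmt.Println(out) // [REF-A REF-B REF-C]
}
// --- end sketch ---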
+ if err != nil { + return nil, err + } + + idx := struct { + // MediaType is reserved in the OCI spec but + // excluded from go types. + MediaType string `json:"mediaType,omitempty"` + + ocispec.Index + }{ + MediaType: ocispec.MediaTypeImageIndex, + Index: ocispec.Index{ + Versioned: specs.Versioned{ + SchemaVersion: 2, + }, + }, + } + + if !oci { + idx.MediaType = images.MediaTypeDockerSchema2ManifestList + } + + labels := map[string]string{} + + for i, p := range p.Platforms { + r, ok := inp.Refs[p.ID] + if !ok { + return nil, errors.Errorf("failed to find ref for ID %s", p.ID) + } + config := inp.Metadata[fmt.Sprintf("%s/%s", exptypes.ExporterImageConfigKey, p.ID)] + + desc, err := ic.commitDistributionManifest(ctx, r, config, layers[layersMap[p.ID]], oci, inp.Metadata[fmt.Sprintf("%s/%s", exptypes.ExporterInlineCache, p.ID)]) + if err != nil { + return nil, err + } + dp := p.Platform + desc.Platform = &dp + idx.Manifests = append(idx.Manifests, *desc) + + labels[fmt.Sprintf("containerd.io/gc.ref.content.%d", i)] = desc.Digest.String() + } + + idxBytes, err := json.MarshalIndent(idx, "", " ") + if err != nil { + return nil, errors.Wrap(err, "failed to marshal index") + } + + idxDigest := digest.FromBytes(idxBytes) + idxDesc := ocispec.Descriptor{ + Digest: idxDigest, + Size: int64(len(idxBytes)), + MediaType: idx.MediaType, + } + idxDone := oneOffProgress(ctx, "exporting manifest list "+idxDigest.String()) + + if err := content.WriteBlob(ctx, ic.opt.ContentStore, idxDigest.String(), bytes.NewReader(idxBytes), idxDesc, content.WithLabels(labels)); err != nil { + return nil, idxDone(errors.Wrapf(err, "error writing manifest list blob %s", idxDigest)) + } + idxDone(nil) + + return &idxDesc, nil +} + +func (ic *ImageWriter) exportLayers(ctx context.Context, compression blobs.CompressionType, refs ...cache.ImmutableRef) ([][]blobs.DiffPair, error) { + eg, ctx := errgroup.WithContext(ctx) + layersDone := oneOffProgress(ctx, "exporting layers") + + out := make([][]blobs.DiffPair, len(refs)) + + for i, ref := range refs { + func(i int, ref cache.ImmutableRef) { + eg.Go(func() error { + diffPairs, err := blobs.GetDiffPairs(ctx, ic.opt.ContentStore, ic.opt.Differ, ref, true, compression) + if err != nil { + return errors.Wrap(err, "failed calculating diff pairs for exported snapshot") + } + out[i] = diffPairs + return nil + }) + }(i, ref) + } + + if err := layersDone(eg.Wait()); err != nil { + return nil, err + } + + return out, nil +} + +func (ic *ImageWriter) commitDistributionManifest(ctx context.Context, ref cache.ImmutableRef, config []byte, layers []blobs.DiffPair, oci bool, cache []byte) (*ocispec.Descriptor, error) { + if len(config) == 0 { + var err error + config, err = emptyImageConfig() + if err != nil { + return nil, err + } + } + + history, err := parseHistoryFromConfig(config) + if err != nil { + return nil, err + } + + diffPairs, history := normalizeLayersAndHistory(layers, history, ref) + + config, err = patchImageConfig(config, diffPairs, history, cache) + if err != nil { + return nil, err + } + + var ( + configDigest = digest.FromBytes(config) + manifestType = ocispec.MediaTypeImageManifest + configType = ocispec.MediaTypeImageConfig + ) + + // Use docker media types for older Docker versions and registries + if !oci { + manifestType = images.MediaTypeDockerSchema2Manifest + configType = images.MediaTypeDockerSchema2Config + } + + mfst := struct { + // MediaType is reserved in the OCI spec but + // excluded from go types. 
+ MediaType string `json:"mediaType,omitempty"` + + ocispec.Manifest + }{ + MediaType: manifestType, + Manifest: ocispec.Manifest{ + Versioned: specs.Versioned{ + SchemaVersion: 2, + }, + Config: ocispec.Descriptor{ + Digest: configDigest, + Size: int64(len(config)), + MediaType: configType, + }, + }, + } + + labels := map[string]string{ + "containerd.io/gc.ref.content.0": configDigest.String(), + } + + layerMediaTypes := blobs.GetMediaTypeForLayers(diffPairs, ref) + cs := ic.opt.ContentStore + for i, dp := range diffPairs { + info, err := cs.Info(ctx, dp.Blobsum) + if err != nil { + return nil, errors.Wrapf(err, "could not find blob %s from contentstore", dp.Blobsum) + } + + var layerType string + if len(layerMediaTypes) > i { + layerType = layerMediaTypes[i] + } + + // NOTE: The media type might be missing for some migrated ones + // from before lease based storage. If so, we should detect + // the media type from blob data. + // + // Discussion: https://github.com/moby/buildkit/pull/1277#discussion_r352795429 + if layerType == "" { + layerType, err = blobs.DetectLayerMediaType(ctx, cs, dp.Blobsum, oci) + if err != nil { + return nil, err + } + } + + mfst.Layers = append(mfst.Layers, ocispec.Descriptor{ + Digest: dp.Blobsum, + Size: info.Size, + MediaType: layerType, + }) + labels[fmt.Sprintf("containerd.io/gc.ref.content.%d", i+1)] = dp.Blobsum.String() + } + + mfstJSON, err := json.MarshalIndent(mfst, "", " ") + if err != nil { + return nil, errors.Wrap(err, "failed to marshal manifest") + } + + mfstDigest := digest.FromBytes(mfstJSON) + mfstDesc := ocispec.Descriptor{ + Digest: mfstDigest, + Size: int64(len(mfstJSON)), + } + mfstDone := oneOffProgress(ctx, "exporting manifest "+mfstDigest.String()) + + if err := content.WriteBlob(ctx, ic.opt.ContentStore, mfstDigest.String(), bytes.NewReader(mfstJSON), mfstDesc, content.WithLabels((labels))); err != nil { + return nil, mfstDone(errors.Wrapf(err, "error writing manifest blob %s", mfstDigest)) + } + mfstDone(nil) + + configDesc := ocispec.Descriptor{ + Digest: configDigest, + Size: int64(len(config)), + MediaType: configType, + } + configDone := oneOffProgress(ctx, "exporting config "+configDigest.String()) + + if err := content.WriteBlob(ctx, ic.opt.ContentStore, configDigest.String(), bytes.NewReader(config), configDesc); err != nil { + return nil, configDone(errors.Wrap(err, "error writing config blob")) + } + configDone(nil) + + return &ocispec.Descriptor{ + Digest: mfstDigest, + Size: int64(len(mfstJSON)), + MediaType: manifestType, + }, nil +} + +func (ic *ImageWriter) ContentStore() content.Store { + return ic.opt.ContentStore +} + +func (ic *ImageWriter) Snapshotter() snapshot.Snapshotter { + return ic.opt.Snapshotter +} + +func (ic *ImageWriter) Applier() diff.Applier { + return ic.opt.Applier +} + +func emptyImageConfig() ([]byte, error) { + pl := platforms.Normalize(platforms.DefaultSpec()) + + type image struct { + ocispec.Image + + // Variant defines platform variant. To be added to OCI. 
+ Variant string `json:"variant,omitempty"` + } + + img := image{ + Image: ocispec.Image{ + Architecture: pl.Architecture, + OS: pl.OS, + }, + Variant: pl.Variant, + } + img.RootFS.Type = "layers" + img.Config.WorkingDir = "/" + img.Config.Env = []string{"PATH=" + system.DefaultPathEnv} + dt, err := json.Marshal(img) + return dt, errors.Wrap(err, "failed to create empty image config") +} + +func parseHistoryFromConfig(dt []byte) ([]ocispec.History, error) { + var config struct { + History []ocispec.History + } + if err := json.Unmarshal(dt, &config); err != nil { + return nil, errors.Wrap(err, "failed to unmarshal history from config") + } + return config.History, nil +} + +func patchImageConfig(dt []byte, dps []blobs.DiffPair, history []ocispec.History, cache []byte) ([]byte, error) { + m := map[string]json.RawMessage{} + if err := json.Unmarshal(dt, &m); err != nil { + return nil, errors.Wrap(err, "failed to parse image config for patch") + } + + var rootFS ocispec.RootFS + rootFS.Type = "layers" + for _, dp := range dps { + rootFS.DiffIDs = append(rootFS.DiffIDs, dp.DiffID) + } + dt, err := json.Marshal(rootFS) + if err != nil { + return nil, errors.Wrap(err, "failed to marshal rootfs") + } + m["rootfs"] = dt + + dt, err = json.Marshal(history) + if err != nil { + return nil, errors.Wrap(err, "failed to marshal history") + } + m["history"] = dt + + if _, ok := m["created"]; !ok { + var tm *time.Time + for _, h := range history { + if h.Created != nil { + tm = h.Created + } + } + dt, err = json.Marshal(&tm) + if err != nil { + return nil, errors.Wrap(err, "failed to marshal creation time") + } + m["created"] = dt + } + + if cache != nil { + dt, err := json.Marshal(cache) + if err != nil { + return nil, err + } + m["moby.buildkit.cache.v0"] = dt + } + + dt, err = json.Marshal(m) + return dt, errors.Wrap(err, "failed to marshal config after patch") +} + +func normalizeLayersAndHistory(diffs []blobs.DiffPair, history []ocispec.History, ref cache.ImmutableRef) ([]blobs.DiffPair, []ocispec.History) { + + refMeta := getRefMetadata(ref, len(diffs)) + + var historyLayers int + for _, h := range history { + if !h.EmptyLayer { + historyLayers += 1 + } + } + + if historyLayers > len(diffs) { + // this case shouldn't happen but if it does force set history layers empty + // from the bottom + logrus.Warn("invalid image config with unaccounted layers") + historyCopy := make([]ocispec.History, 0, len(history)) + var l int + for _, h := range history { + if l >= len(diffs) { + h.EmptyLayer = true + } + if !h.EmptyLayer { + l++ + } + historyCopy = append(historyCopy, h) + } + history = historyCopy + } + + if len(diffs) > historyLayers { + // some history items are missing. add them based on the ref metadata + for _, md := range refMeta[historyLayers:] { + history = append(history, ocispec.History{ + Created: &md.createdAt, + CreatedBy: md.description, + Comment: "buildkit.exporter.image.v0", + }) + } + } + + var layerIndex int + for i, h := range history { + if !h.EmptyLayer { + if h.Created == nil { + h.Created = &refMeta[layerIndex].createdAt + } + if diffs[layerIndex].Blobsum == emptyGZLayer { + h.EmptyLayer = true + diffs = append(diffs[:layerIndex], diffs[layerIndex+1:]...) + } else { + layerIndex++ + } + } + history[i] = h + } + + // Find the first new layer time. Otherwise, the history item for a first + // metadata command would be the creation time of a base image layer. + // If there is no such then the last layer with timestamp. 
+ var created *time.Time + var noCreatedTime bool + for _, h := range history { + if h.Created != nil { + created = h.Created + if noCreatedTime { + break + } + } else { + noCreatedTime = true + } + } + + // Fill in created times for all history items to be either the first new + // layer time or the previous layer. + noCreatedTime = false + for i, h := range history { + if h.Created != nil { + if noCreatedTime { + created = h.Created + } + } else { + noCreatedTime = true + h.Created = created + } + history[i] = h + } + + return diffs, history +} + +type refMetadata struct { + description string + createdAt time.Time +} + +func getRefMetadata(ref cache.ImmutableRef, limit int) []refMetadata { + if limit <= 0 { + return nil + } + meta := refMetadata{ + description: "created by buildkit", // shouldn't be shown but don't fail build + createdAt: time.Now(), + } + if ref == nil { + return append(getRefMetadata(nil, limit-1), meta) + } + if descr := cache.GetDescription(ref.Metadata()); descr != "" { + meta.description = descr + } + meta.createdAt = cache.GetCreatedAt(ref.Metadata()) + p := ref.Parent() + if p != nil { + defer p.Release(context.TODO()) + } + return append(getRefMetadata(p, limit-1), meta) +} + +func oneOffProgress(ctx context.Context, id string) func(err error) error { + pw, _, _ := progress.FromContext(ctx) + now := time.Now() + st := progress.Status{ + Started: &now, + } + pw.Write(id, st) + return func(err error) error { + // TODO: set error on status + now := time.Now() + st.Completed = &now + pw.Write(id, st) + pw.Close() + return err + } +} diff --git a/vendor/github.com/moby/buildkit/exporter/exporter.go b/vendor/github.com/moby/buildkit/exporter/exporter.go new file mode 100644 index 00000000..cd6d1930 --- /dev/null +++ b/vendor/github.com/moby/buildkit/exporter/exporter.go @@ -0,0 +1,22 @@ +package exporter + +import ( + "context" + + "github.com/moby/buildkit/cache" +) + +type Exporter interface { + Resolve(context.Context, map[string]string) (ExporterInstance, error) +} + +type ExporterInstance interface { + Name() string + Export(context.Context, Source) (map[string]string, error) +} + +type Source struct { + Ref cache.ImmutableRef + Refs map[string]cache.ImmutableRef + Metadata map[string][]byte +} diff --git a/vendor/github.com/moby/buildkit/exporter/local/export.go b/vendor/github.com/moby/buildkit/exporter/local/export.go new file mode 100644 index 00000000..5a3f4bd2 --- /dev/null +++ b/vendor/github.com/moby/buildkit/exporter/local/export.go @@ -0,0 +1,173 @@ +package local + +import ( + "context" + "io/ioutil" + "os" + "strings" + "time" + + "github.com/docker/docker/pkg/idtools" + "github.com/moby/buildkit/cache" + "github.com/moby/buildkit/exporter" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/session/filesync" + "github.com/moby/buildkit/snapshot" + "github.com/moby/buildkit/util/progress" + "github.com/pkg/errors" + "github.com/tonistiigi/fsutil" + fstypes "github.com/tonistiigi/fsutil/types" + "golang.org/x/sync/errgroup" + "golang.org/x/time/rate" +) + +type Opt struct { + SessionManager *session.Manager +} + +type localExporter struct { + opt Opt + // session manager +} + +func New(opt Opt) (exporter.Exporter, error) { + le := &localExporter{opt: opt} + return le, nil +} + +func (e *localExporter) Resolve(ctx context.Context, opt map[string]string) (exporter.ExporterInstance, error) { + id := session.FromContext(ctx) + if id == "" { + return nil, errors.New("could not access local files without session") + } + + timeoutCtx, cancel := 
context.WithTimeout(ctx, 5*time.Second) + defer cancel() + + caller, err := e.opt.SessionManager.Get(timeoutCtx, id) + if err != nil { + return nil, err + } + + li := &localExporterInstance{localExporter: e, caller: caller} + return li, nil +} + +type localExporterInstance struct { + *localExporter + caller session.Caller +} + +func (e *localExporterInstance) Name() string { + return "exporting to client" +} + +func (e *localExporterInstance) Export(ctx context.Context, inp exporter.Source) (map[string]string, error) { + isMap := len(inp.Refs) > 0 + + export := func(ctx context.Context, k string, ref cache.ImmutableRef) func() error { + return func() error { + var src string + var err error + var idmap *idtools.IdentityMapping + if ref == nil { + src, err = ioutil.TempDir("", "buildkit") + if err != nil { + return err + } + defer os.RemoveAll(src) + } else { + mount, err := ref.Mount(ctx, true) + if err != nil { + return err + } + + lm := snapshot.LocalMounter(mount) + + src, err = lm.Mount() + if err != nil { + return err + } + + idmap = mount.IdentityMapping() + + defer lm.Unmount() + } + + walkOpt := &fsutil.WalkOpt{} + + if idmap != nil { + walkOpt.Map = func(p string, st *fstypes.Stat) bool { + uid, gid, err := idmap.ToContainer(idtools.Identity{ + UID: int(st.Uid), + GID: int(st.Gid), + }) + if err != nil { + return false + } + st.Uid = uint32(uid) + st.Gid = uint32(gid) + return true + } + } + + fs := fsutil.NewFS(src, walkOpt) + lbl := "copying files" + if isMap { + lbl += " " + k + fs, err = fsutil.SubDirFS([]fsutil.Dir{{FS: fs, Stat: fstypes.Stat{ + Mode: uint32(os.ModeDir | 0755), + Path: strings.Replace(k, "/", "_", -1), + }}}) + if err != nil { + return err + } + } + + progress := newProgressHandler(ctx, lbl) + if err := filesync.CopyToCaller(ctx, fs, e.caller, progress); err != nil { + return err + } + return nil + } + } + + eg, ctx := errgroup.WithContext(ctx) + + if isMap { + for k, ref := range inp.Refs { + eg.Go(export(ctx, k, ref)) + } + } else { + eg.Go(export(ctx, "", inp.Ref)) + } + + if err := eg.Wait(); err != nil { + return nil, err + } + return nil, nil +} + +func newProgressHandler(ctx context.Context, id string) func(int, bool) { + limiter := rate.NewLimiter(rate.Every(100*time.Millisecond), 1) + pw, _, _ := progress.FromContext(ctx) + now := time.Now() + st := progress.Status{ + Started: &now, + Action: "transferring", + } + pw.Write(id, st) + return func(s int, last bool) { + if last || limiter.Allow() { + st.Current = s + if last { + now := time.Now() + st.Completed = &now + } + pw.Write(id, st) + if last { + pw.Close() + } + } + } +} diff --git a/vendor/github.com/moby/buildkit/exporter/oci/export.go b/vendor/github.com/moby/buildkit/exporter/oci/export.go new file mode 100644 index 00000000..8cac6eaf --- /dev/null +++ b/vendor/github.com/moby/buildkit/exporter/oci/export.go @@ -0,0 +1,228 @@ +package oci + +import ( + "context" + "strconv" + "strings" + "time" + + archiveexporter "github.com/containerd/containerd/images/archive" + "github.com/containerd/containerd/leases" + "github.com/docker/distribution/reference" + "github.com/moby/buildkit/cache/blobs" + "github.com/moby/buildkit/exporter" + "github.com/moby/buildkit/exporter/containerimage" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/session/filesync" + "github.com/moby/buildkit/util/leaseutil" + "github.com/moby/buildkit/util/progress" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "google.golang.org/grpc/codes" + 
"google.golang.org/grpc/status" +) + +type ExporterVariant string + +const ( + keyImageName = "name" + keyLayerCompression = "compression" + VariantOCI = "oci" + VariantDocker = "docker" + ociTypes = "oci-mediatypes" +) + +type Opt struct { + SessionManager *session.Manager + ImageWriter *containerimage.ImageWriter + Variant ExporterVariant + LeaseManager leases.Manager +} + +type imageExporter struct { + opt Opt +} + +func New(opt Opt) (exporter.Exporter, error) { + im := &imageExporter{opt: opt} + return im, nil +} + +func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exporter.ExporterInstance, error) { + id := session.FromContext(ctx) + if id == "" { + return nil, errors.New("could not access local files without session") + } + + timeoutCtx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + + caller, err := e.opt.SessionManager.Get(timeoutCtx, id) + if err != nil { + return nil, err + } + + var ot *bool + i := &imageExporterInstance{ + imageExporter: e, + caller: caller, + layerCompression: blobs.DefaultCompression, + } + for k, v := range opt { + switch k { + case keyImageName: + i.name = v + case keyLayerCompression: + switch v { + case "gzip": + i.layerCompression = blobs.Gzip + case "uncompressed": + i.layerCompression = blobs.Uncompressed + default: + return nil, errors.Errorf("unsupported layer compression type: %v", v) + } + case ociTypes: + ot = new(bool) + if v == "" { + *ot = true + continue + } + b, err := strconv.ParseBool(v) + if err != nil { + return nil, errors.Wrapf(err, "non-bool value specified for %s", k) + } + *ot = b + default: + if i.meta == nil { + i.meta = make(map[string][]byte) + } + i.meta[k] = []byte(v) + } + } + if ot == nil { + i.ociTypes = e.opt.Variant == VariantOCI + } else { + i.ociTypes = *ot + } + return i, nil +} + +type imageExporterInstance struct { + *imageExporter + meta map[string][]byte + caller session.Caller + name string + ociTypes bool + layerCompression blobs.CompressionType +} + +func (e *imageExporterInstance) Name() string { + return "exporting to oci image format" +} + +func (e *imageExporterInstance) Export(ctx context.Context, src exporter.Source) (map[string]string, error) { + if e.opt.Variant == VariantDocker && len(src.Refs) > 0 { + return nil, errors.Errorf("docker exporter does not currently support exporting manifest lists") + } + + if src.Metadata == nil { + src.Metadata = make(map[string][]byte) + } + for k, v := range e.meta { + src.Metadata[k] = v + } + + ctx, done, err := leaseutil.WithLease(ctx, e.opt.LeaseManager, leaseutil.MakeTemporary) + if err != nil { + return nil, err + } + defer done(context.TODO()) + + desc, err := e.opt.ImageWriter.Commit(ctx, src, e.ociTypes, e.layerCompression) + if err != nil { + return nil, err + } + defer func() { + e.opt.ImageWriter.ContentStore().Delete(context.TODO(), desc.Digest) + }() + if desc.Annotations == nil { + desc.Annotations = map[string]string{} + } + desc.Annotations[ocispec.AnnotationCreated] = time.Now().UTC().Format(time.RFC3339) + + resp := make(map[string]string) + resp["containerimage.digest"] = desc.Digest.String() + + if n, ok := src.Metadata["image.name"]; e.name == "*" && ok { + e.name = string(n) + } + + names, err := normalizedNames(e.name) + if err != nil { + return nil, err + } + + if len(names) != 0 { + resp["image.name"] = strings.Join(names, ",") + } + + expOpts := []archiveexporter.ExportOpt{archiveexporter.WithManifest(*desc, names...)} + switch e.opt.Variant { + case VariantOCI: + expOpts = append(expOpts, 
archiveexporter.WithAllPlatforms(), archiveexporter.WithSkipDockerManifest()) + case VariantDocker: + default: + return nil, errors.Errorf("invalid variant %q", e.opt.Variant) + } + + w, err := filesync.CopyFileWriter(ctx, resp, e.caller) + if err != nil { + return nil, err + } + report := oneOffProgress(ctx, "sending tarball") + if err := archiveexporter.Export(ctx, e.opt.ImageWriter.ContentStore(), w, expOpts...); err != nil { + w.Close() + if st, ok := status.FromError(errors.Cause(err)); ok && st.Code() == codes.AlreadyExists { + return resp, report(nil) + } + return nil, report(err) + } + err = w.Close() + if st, ok := status.FromError(errors.Cause(err)); ok && st.Code() == codes.AlreadyExists { + return resp, report(nil) + } + return resp, report(err) +} + +func oneOffProgress(ctx context.Context, id string) func(err error) error { + pw, _, _ := progress.FromContext(ctx) + now := time.Now() + st := progress.Status{ + Started: &now, + } + pw.Write(id, st) + return func(err error) error { + // TODO: set error on status + now := time.Now() + st.Completed = &now + pw.Write(id, st) + pw.Close() + return err + } +} + +func normalizedNames(name string) ([]string, error) { + if name == "" { + return nil, nil + } + names := strings.Split(name, ",") + var tagNames = make([]string, len(names)) + for i, name := range names { + parsed, err := reference.ParseNormalizedNamed(name) + if err != nil { + return nil, errors.Wrapf(err, "failed to parse %s", name) + } + tagNames[i] = reference.TagNameOnly(parsed).String() + } + return tagNames, nil +} diff --git a/vendor/github.com/moby/buildkit/exporter/tar/export.go b/vendor/github.com/moby/buildkit/exporter/tar/export.go new file mode 100644 index 00000000..365dc576 --- /dev/null +++ b/vendor/github.com/moby/buildkit/exporter/tar/export.go @@ -0,0 +1,177 @@ +package local + +import ( + "context" + "io/ioutil" + "os" + "strings" + "time" + + "github.com/docker/docker/pkg/idtools" + "github.com/moby/buildkit/cache" + "github.com/moby/buildkit/exporter" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/session/filesync" + "github.com/moby/buildkit/snapshot" + "github.com/moby/buildkit/util/progress" + "github.com/pkg/errors" + "github.com/tonistiigi/fsutil" + fstypes "github.com/tonistiigi/fsutil/types" +) + +type Opt struct { + SessionManager *session.Manager +} + +type localExporter struct { + opt Opt + // session manager +} + +func New(opt Opt) (exporter.Exporter, error) { + le := &localExporter{opt: opt} + return le, nil +} + +func (e *localExporter) Resolve(ctx context.Context, opt map[string]string) (exporter.ExporterInstance, error) { + id := session.FromContext(ctx) + if id == "" { + return nil, errors.New("could not access local files without session") + } + + timeoutCtx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + + caller, err := e.opt.SessionManager.Get(timeoutCtx, id) + if err != nil { + return nil, err + } + + li := &localExporterInstance{localExporter: e, caller: caller} + return li, nil +} + +type localExporterInstance struct { + *localExporter + caller session.Caller +} + +func (e *localExporterInstance) Name() string { + return "exporting to client" +} + +func (e *localExporterInstance) Export(ctx context.Context, inp exporter.Source) (map[string]string, error) { + var defers []func() + + defer func() { + for i := len(defers) - 1; i >= 0; i-- { + defers[i]() + } + }() + + getDir := func(ctx context.Context, k string, ref cache.ImmutableRef) (*fsutil.Dir, error) { + var src string + var err error + 
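// --- editorial sketch, not part of the vendored buildkit sources ---
// The walkOpt.Map callback installed just below rewrites host-side file
// ownership back into the container's user namespace before files are
// streamed to the client, and skips files whose owner cannot be mapped.
// A simplified standalone analogue using a single uid offset instead of
// docker's idtools ranges (toContainer here is illustrative only):
package main

import "fmt"

// toContainer reverses a "host = container + offset" mapping and reports
// whether the host id falls inside the mapped range at all.
func toContainer(hostID, offset, size uint32) (uint32, bool) {
	if hostID < offset || hostID >= offset+size {
		return 0, false
	}
	return hostID - offset, true
}

func main() {
	if uid, ok := toContainer(100000, 100000, 65536); ok {
		fmt.Println("container uid:", uid) // container uid: 0
	}
	if _, ok := toContainer(5, 100000, 65536); !ok {
		fmt.Println("unmapped host uid; such a file would be skipped")
	}
}
// --- end sketch ---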
var idmap *idtools.IdentityMapping + if ref == nil { + src, err = ioutil.TempDir("", "buildkit") + if err != nil { + return nil, err + } + defers = append(defers, func() { os.RemoveAll(src) }) + } else { + mount, err := ref.Mount(ctx, true) + if err != nil { + return nil, err + } + + lm := snapshot.LocalMounter(mount) + + src, err = lm.Mount() + if err != nil { + return nil, err + } + + idmap = mount.IdentityMapping() + + defers = append(defers, func() { lm.Unmount() }) + } + + walkOpt := &fsutil.WalkOpt{} + + if idmap != nil { + walkOpt.Map = func(p string, st *fstypes.Stat) bool { + uid, gid, err := idmap.ToContainer(idtools.Identity{ + UID: int(st.Uid), + GID: int(st.Gid), + }) + if err != nil { + return false + } + st.Uid = uint32(uid) + st.Gid = uint32(gid) + return true + } + } + + return &fsutil.Dir{ + FS: fsutil.NewFS(src, walkOpt), + Stat: fstypes.Stat{ + Mode: uint32(os.ModeDir | 0755), + Path: strings.Replace(k, "/", "_", -1), + }, + }, nil + } + + var fs fsutil.FS + + if len(inp.Refs) > 0 { + dirs := make([]fsutil.Dir, 0, len(inp.Refs)) + for k, ref := range inp.Refs { + d, err := getDir(ctx, k, ref) + if err != nil { + return nil, err + } + dirs = append(dirs, *d) + } + var err error + fs, err = fsutil.SubDirFS(dirs) + if err != nil { + return nil, err + } + } else { + d, err := getDir(ctx, "", inp.Ref) + if err != nil { + return nil, err + } + fs = d.FS + } + + w, err := filesync.CopyFileWriter(ctx, nil, e.caller) + if err != nil { + return nil, err + } + report := oneOffProgress(ctx, "sending tarball") + if err := fsutil.WriteTar(ctx, fs, w); err != nil { + w.Close() + return nil, report(err) + } + return nil, report(w.Close()) +} + +func oneOffProgress(ctx context.Context, id string) func(err error) error { + pw, _, _ := progress.FromContext(ctx) + now := time.Now() + st := progress.Status{ + Started: &now, + } + pw.Write(id, st) + return func(err error) error { + // TODO: set error on status + now := time.Now() + st.Completed = &now + pw.Write(id, st) + pw.Close() + return err + } +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/builder/build.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/builder/build.go new file mode 100644 index 00000000..9bc29378 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/builder/build.go @@ -0,0 +1,641 @@ +package builder + +import ( + "archive/tar" + "bytes" + "context" + "encoding/csv" + "encoding/json" + "fmt" + "net" + "path" + "regexp" + "strconv" + "strings" + + "github.com/containerd/containerd/platforms" + "github.com/docker/docker/builder/dockerignore" + controlapi "github.com/moby/buildkit/api/services/control" + "github.com/moby/buildkit/client/llb" + "github.com/moby/buildkit/exporter/containerimage/exptypes" + "github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb" + "github.com/moby/buildkit/frontend/gateway/client" + gwpb "github.com/moby/buildkit/frontend/gateway/pb" + "github.com/moby/buildkit/solver/pb" + "github.com/moby/buildkit/util/apicaps" + specs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "golang.org/x/sync/errgroup" +) + +const ( + DefaultLocalNameContext = "context" + DefaultLocalNameDockerfile = "dockerfile" + keyTarget = "target" + keyFilename = "filename" + keyCacheFrom = "cache-from" // for registry only. 
deprecated in favor of keyCacheImports + keyCacheImports = "cache-imports" // JSON representation of []CacheOptionsEntry + keyCacheNS = "build-arg:BUILDKIT_CACHE_MOUNT_NS" + defaultDockerfileName = "Dockerfile" + dockerignoreFilename = ".dockerignore" + buildArgPrefix = "build-arg:" + labelPrefix = "label:" + keyNoCache = "no-cache" + keyTargetPlatform = "platform" + keyMultiPlatform = "multi-platform" + keyImageResolveMode = "image-resolve-mode" + keyGlobalAddHosts = "add-hosts" + keyForceNetwork = "force-network-mode" + keyOverrideCopyImage = "override-copy-image" // remove after CopyOp implemented + keyNameContext = "contextkey" + keyNameDockerfile = "dockerfilekey" + keyContextSubDir = "contextsubdir" + keyContextKeepGitDir = "build-arg:BUILDKIT_CONTEXT_KEEP_GIT_DIR" +) + +var httpPrefix = regexp.MustCompile(`^https?://`) +var gitUrlPathWithFragmentSuffix = regexp.MustCompile(`\.git(?:#.+)?$`) + +func Build(ctx context.Context, c client.Client) (*client.Result, error) { + opts := c.BuildOpts().Opts + caps := c.BuildOpts().LLBCaps + gwcaps := c.BuildOpts().Caps + + marshalOpts := []llb.ConstraintsOpt{llb.WithCaps(caps)} + + localNameContext := DefaultLocalNameContext + if v, ok := opts[keyNameContext]; ok { + localNameContext = v + } + + forceLocalDockerfile := false + localNameDockerfile := DefaultLocalNameDockerfile + if v, ok := opts[keyNameDockerfile]; ok { + forceLocalDockerfile = true + localNameDockerfile = v + } + + defaultBuildPlatform := platforms.DefaultSpec() + if workers := c.BuildOpts().Workers; len(workers) > 0 && len(workers[0].Platforms) > 0 { + defaultBuildPlatform = workers[0].Platforms[0] + } + + buildPlatforms := []specs.Platform{defaultBuildPlatform} + targetPlatforms := []*specs.Platform{nil} + if v := opts[keyTargetPlatform]; v != "" { + var err error + targetPlatforms, err = parsePlatforms(v) + if err != nil { + return nil, err + } + } + + resolveMode, err := parseResolveMode(opts[keyImageResolveMode]) + if err != nil { + return nil, err + } + + extraHosts, err := parseExtraHosts(opts[keyGlobalAddHosts]) + if err != nil { + return nil, errors.Wrap(err, "failed to parse additional hosts") + } + + defaultNetMode, err := parseNetMode(opts[keyForceNetwork]) + if err != nil { + return nil, err + } + + filename := opts[keyFilename] + if filename == "" { + filename = defaultDockerfileName + } + + var ignoreCache []string + if v, ok := opts[keyNoCache]; ok { + if v == "" { + ignoreCache = []string{} // means all stages + } else { + ignoreCache = strings.Split(v, ",") + } + } + + name := "load build definition from " + filename + + src := llb.Local(localNameDockerfile, + llb.FollowPaths([]string{filename, filename + ".dockerignore"}), + llb.SessionID(c.BuildOpts().SessionID), + llb.SharedKeyHint(localNameDockerfile), + dockerfile2llb.WithInternalName(name), + ) + + fileop := useFileOp(opts, &caps) + + var buildContext *llb.State + isNotLocalContext := false + if st, ok := detectGitContext(opts[localNameContext], opts[keyContextKeepGitDir]); ok { + if !forceLocalDockerfile { + src = *st + } + buildContext = st + } else if httpPrefix.MatchString(opts[localNameContext]) { + httpContext := llb.HTTP(opts[localNameContext], llb.Filename("context"), dockerfile2llb.WithInternalName("load remote build context")) + def, err := httpContext.Marshal(marshalOpts...) 
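// --- editorial sketch, not part of the vendored buildkit sources ---
// The remote-context branch below reads only the first 1024 bytes of the
// downloaded file (the Range in the ReadRequest) and sniffs magic bytes to
// decide between "unpack an archive" and "use the file as a Dockerfile".
// isArchive, defined near the end of this file, checks bzip2/gzip/xz
// signatures and finally tries a tar header. The signature check alone:
package main

import (
	"bytes"
	"fmt"
)

var magics = [][]byte{
	{0x42, 0x5A, 0x68},                   // bzip2 "BZh"
	{0x1F, 0x8B, 0x08},                   // gzip
	{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, // xz
}

func looksCompressed(header []byte) bool {
	for _, m := range magics {
		if len(header) >= len(m) && bytes.Equal(m, header[:len(m)]) {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(looksCompressed([]byte{0x1F, 0x8B, 0x08, 0x00})) // true (gzip)
	fmt.Println(looksCompressed([]byte("FROM alpine\n")))        // false
}
// --- end sketch ---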
+ if err != nil { + return nil, errors.Wrapf(err, "failed to marshal httpcontext") + } + res, err := c.Solve(ctx, client.SolveRequest{ + Definition: def.ToPB(), + }) + if err != nil { + return nil, errors.Wrapf(err, "failed to resolve httpcontext") + } + + ref, err := res.SingleRef() + if err != nil { + return nil, err + } + + dt, err := ref.ReadFile(ctx, client.ReadRequest{ + Filename: "context", + Range: &client.FileRange{ + Length: 1024, + }, + }) + if err != nil { + return nil, errors.Errorf("failed to read downloaded context") + } + if isArchive(dt) { + if fileop { + bc := llb.Scratch().File(llb.Copy(httpContext, "/context", "/", &llb.CopyInfo{ + AttemptUnpack: true, + })) + if !forceLocalDockerfile { + src = bc + } + buildContext = &bc + } else { + copyImage := opts[keyOverrideCopyImage] + if copyImage == "" { + copyImage = dockerfile2llb.DefaultCopyImage + } + unpack := llb.Image(copyImage, dockerfile2llb.WithInternalName("helper image for file operations")). + Run(llb.Shlex("copy --unpack /src/context /out/"), llb.ReadonlyRootFS(), dockerfile2llb.WithInternalName("extracting build context")) + unpack.AddMount("/src", httpContext, llb.Readonly) + bc := unpack.AddMount("/out", llb.Scratch()) + if !forceLocalDockerfile { + src = bc + } + buildContext = &bc + } + } else { + filename = "context" + if !forceLocalDockerfile { + src = httpContext + } + buildContext = &httpContext + isNotLocalContext = true + } + } else if (&gwcaps).Supports(gwpb.CapFrontendInputs) == nil { + inputs, err := c.Inputs(ctx) + if err != nil { + return nil, errors.Wrapf(err, "failed to get frontend inputs") + } + + if !forceLocalDockerfile { + inputDockerfile, ok := inputs[DefaultLocalNameDockerfile] + if ok { + src = inputDockerfile + } + } + + inputCtx, ok := inputs[DefaultLocalNameContext] + if ok { + buildContext = &inputCtx + isNotLocalContext = true + } + } + + if buildContext != nil { + if sub, ok := opts[keyContextSubDir]; ok { + buildContext = scopeToSubDir(buildContext, fileop, sub) + } + } + + def, err := src.Marshal(marshalOpts...) + if err != nil { + return nil, errors.Wrapf(err, "failed to marshal local source") + } + + eg, ctx2 := errgroup.WithContext(ctx) + var dtDockerfile []byte + var dtDockerignore []byte + var dtDockerignoreDefault []byte + eg.Go(func() error { + res, err := c.Solve(ctx2, client.SolveRequest{ + Definition: def.ToPB(), + }) + if err != nil { + return errors.Wrapf(err, "failed to resolve dockerfile") + } + + ref, err := res.SingleRef() + if err != nil { + return err + } + + dtDockerfile, err = ref.ReadFile(ctx2, client.ReadRequest{ + Filename: filename, + }) + if err != nil { + return errors.Wrapf(err, "failed to read dockerfile") + } + + dt, err := ref.ReadFile(ctx2, client.ReadRequest{ + Filename: filename + ".dockerignore", + }) + if err == nil { + dtDockerignore = dt + } + return nil + }) + var excludes []string + if !isNotLocalContext { + eg.Go(func() error { + dockerignoreState := buildContext + if dockerignoreState == nil { + st := llb.Local(localNameContext, + llb.SessionID(c.BuildOpts().SessionID), + llb.FollowPaths([]string{dockerignoreFilename}), + llb.SharedKeyHint(localNameContext+"-"+dockerignoreFilename), + dockerfile2llb.WithInternalName("load "+dockerignoreFilename), + ) + dockerignoreState = &st + } + def, err := dockerignoreState.Marshal(marshalOpts...) 
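// --- editorial sketch, not part of the vendored buildkit sources ---
// Precedence note for the two goroutines around this point: a
// "<Dockerfile>.dockerignore" read next to the Dockerfile wins, and the
// context's plain ".dockerignore" (fetched here) is only a fallback — see
// the dtDockerignore/dtDockerignoreDefault merge after eg.Wait(). Parsing
// itself goes through dockerignore.ReadAll, which strips comments and
// blank lines:
package main

import (
	"bytes"
	"fmt"

	"github.com/docker/docker/builder/dockerignore"
)

func main() {
	dt := []byte("# build artefacts\nnode_modules\n*.log\n\n")
	excludes, err := dockerignore.ReadAll(bytes.NewBuffer(dt))
	if err != nil {
		panic(err)
	}
	fmt.Println(excludes) // [node_modules *.log]
}
// --- end sketch ---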
+ if err != nil { + return err + } + res, err := c.Solve(ctx2, client.SolveRequest{ + Definition: def.ToPB(), + }) + if err != nil { + return err + } + ref, err := res.SingleRef() + if err != nil { + return err + } + dtDockerignoreDefault, err = ref.ReadFile(ctx2, client.ReadRequest{ + Filename: dockerignoreFilename, + }) + if err != nil { + return nil + } + return nil + }) + } + + if err := eg.Wait(); err != nil { + return nil, err + } + + if dtDockerignore == nil { + dtDockerignore = dtDockerignoreDefault + } + if dtDockerignore != nil { + excludes, err = dockerignore.ReadAll(bytes.NewBuffer(dtDockerignore)) + if err != nil { + return nil, errors.Wrap(err, "failed to parse dockerignore") + } + } + + if _, ok := opts["cmdline"]; !ok { + ref, cmdline, ok := dockerfile2llb.DetectSyntax(bytes.NewBuffer(dtDockerfile)) + if ok { + return forwardGateway(ctx, c, ref, cmdline) + } + } + + exportMap := len(targetPlatforms) > 1 + + if v := opts[keyMultiPlatform]; v != "" { + b, err := strconv.ParseBool(v) + if err != nil { + return nil, errors.Errorf("invalid boolean value %s", v) + } + if !b && exportMap { + return nil, errors.Errorf("returning multiple target plaforms is not allowed") + } + exportMap = b + } + + expPlatforms := &exptypes.Platforms{ + Platforms: make([]exptypes.Platform, len(targetPlatforms)), + } + res := client.NewResult() + + eg, ctx = errgroup.WithContext(ctx) + + for i, tp := range targetPlatforms { + func(i int, tp *specs.Platform) { + eg.Go(func() error { + st, img, err := dockerfile2llb.Dockerfile2LLB(ctx, dtDockerfile, dockerfile2llb.ConvertOpt{ + Target: opts[keyTarget], + MetaResolver: c, + BuildArgs: filter(opts, buildArgPrefix), + Labels: filter(opts, labelPrefix), + CacheIDNamespace: opts[keyCacheNS], + SessionID: c.BuildOpts().SessionID, + BuildContext: buildContext, + Excludes: excludes, + IgnoreCache: ignoreCache, + TargetPlatform: tp, + BuildPlatforms: buildPlatforms, + ImageResolveMode: resolveMode, + PrefixPlatform: exportMap, + ExtraHosts: extraHosts, + ForceNetMode: defaultNetMode, + OverrideCopyImage: opts[keyOverrideCopyImage], + LLBCaps: &caps, + }) + + if err != nil { + return errors.Wrapf(err, "failed to create LLB definition") + } + + def, err := st.Marshal() + if err != nil { + return errors.Wrapf(err, "failed to marshal LLB definition") + } + + config, err := json.Marshal(img) + if err != nil { + return errors.Wrapf(err, "failed to marshal image config") + } + + var cacheImports []client.CacheOptionsEntry + // new API + if cacheImportsStr := opts[keyCacheImports]; cacheImportsStr != "" { + var cacheImportsUM []controlapi.CacheOptionsEntry + if err := json.Unmarshal([]byte(cacheImportsStr), &cacheImportsUM); err != nil { + return errors.Wrapf(err, "failed to unmarshal %s (%q)", keyCacheImports, cacheImportsStr) + } + for _, um := range cacheImportsUM { + cacheImports = append(cacheImports, client.CacheOptionsEntry{Type: um.Type, Attrs: um.Attrs}) + } + } + // old API + if cacheFromStr := opts[keyCacheFrom]; cacheFromStr != "" { + cacheFrom := strings.Split(cacheFromStr, ",") + for _, s := range cacheFrom { + im := client.CacheOptionsEntry{ + Type: "registry", + Attrs: map[string]string{ + "ref": s, + }, + } + // FIXME(AkihiroSuda): skip append if already exists + cacheImports = append(cacheImports, im) + } + } + + r, err := c.Solve(ctx, client.SolveRequest{ + Definition: def.ToPB(), + CacheImports: cacheImports, + }) + if err != nil { + return err + } + + ref, err := r.SingleRef() + if err != nil { + return err + } + + if !exportMap { + 
res.AddMeta(exptypes.ExporterImageConfigKey, config) + res.SetRef(ref) + } else { + p := platforms.DefaultSpec() + if tp != nil { + p = *tp + } + + k := platforms.Format(p) + res.AddMeta(fmt.Sprintf("%s/%s", exptypes.ExporterImageConfigKey, k), config) + res.AddRef(k, ref) + expPlatforms.Platforms[i] = exptypes.Platform{ + ID: k, + Platform: p, + } + } + return nil + }) + }(i, tp) + } + + if err := eg.Wait(); err != nil { + return nil, err + } + + if exportMap { + dt, err := json.Marshal(expPlatforms) + if err != nil { + return nil, err + } + res.AddMeta(exptypes.ExporterPlatformsKey, dt) + } + + return res, nil +} + +func forwardGateway(ctx context.Context, c client.Client, ref string, cmdline string) (*client.Result, error) { + opts := c.BuildOpts().Opts + if opts == nil { + opts = map[string]string{} + } + opts["cmdline"] = cmdline + opts["source"] = ref + + gwcaps := c.BuildOpts().Caps + var frontendInputs map[string]*pb.Definition + if (&gwcaps).Supports(gwpb.CapFrontendInputs) == nil { + inputs, err := c.Inputs(ctx) + if err != nil { + return nil, errors.Wrapf(err, "failed to get frontend inputs") + } + + frontendInputs = make(map[string]*pb.Definition) + for name, state := range inputs { + def, err := state.Marshal() + if err != nil { + return nil, err + } + frontendInputs[name] = def.ToPB() + } + } + + return c.Solve(ctx, client.SolveRequest{ + Frontend: "gateway.v0", + FrontendOpt: opts, + FrontendInputs: frontendInputs, + }) +} + +func filter(opt map[string]string, key string) map[string]string { + m := map[string]string{} + for k, v := range opt { + if strings.HasPrefix(k, key) { + m[strings.TrimPrefix(k, key)] = v + } + } + return m +} + +func detectGitContext(ref, gitContext string) (*llb.State, bool) { + found := false + if httpPrefix.MatchString(ref) && gitUrlPathWithFragmentSuffix.MatchString(ref) { + found = true + } + + keepGit := false + if gitContext != "" { + if v, err := strconv.ParseBool(gitContext); err == nil { + keepGit = v + } + } + + for _, prefix := range []string{"git://", "github.com/", "git@"} { + if strings.HasPrefix(ref, prefix) { + found = true + break + } + } + if !found { + return nil, false + } + + parts := strings.SplitN(ref, "#", 2) + branch := "" + if len(parts) > 1 { + branch = parts[1] + } + gitOpts := []llb.GitOption{dockerfile2llb.WithInternalName("load git source " + ref)} + if keepGit { + gitOpts = append(gitOpts, llb.KeepGitDir()) + } + + st := llb.Git(parts[0], branch, gitOpts...) 
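// --- editorial sketch, not part of the vendored buildkit sources ---
// detectGitContext treats a ref as a git source when it starts with
// "git://", "github.com/" or "git@", or when it is an http(s) URL whose
// path ends in ".git" (optionally followed by a "#branch" fragment). The
// predicate in isolation, with regexps mirroring the ones declared at the
// top of this file:
package main

import (
	"fmt"
	"regexp"
	"strings"
)

var (
	httpRe = regexp.MustCompile(`^https?://`)
	gitSfx = regexp.MustCompile(`\.git(?:#.+)?$`)
)

func isGitRef(ref string) bool {
	if httpRe.MatchString(ref) && gitSfx.MatchString(ref) {
		return true
	}
	for _, p := range []string{"git://", "github.com/", "git@"} {
		if strings.HasPrefix(ref, p) {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(isGitRef("https://example.com/repo.git#main")) // true
	fmt.Println(isGitRef("https://example.com/ctx.tar.gz"))    // false
	fmt.Println(isGitRef("git@github.com:moby/buildkit.git"))  // true
}
// --- end sketch ---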
+ return &st, true +} + +func isArchive(header []byte) bool { + for _, m := range [][]byte{ + {0x42, 0x5A, 0x68}, // bzip2 + {0x1F, 0x8B, 0x08}, // gzip + {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, // xz + } { + if len(header) < len(m) { + continue + } + if bytes.Equal(m, header[:len(m)]) { + return true + } + } + + r := tar.NewReader(bytes.NewBuffer(header)) + _, err := r.Next() + return err == nil +} + +func parsePlatforms(v string) ([]*specs.Platform, error) { + var pp []*specs.Platform + for _, v := range strings.Split(v, ",") { + p, err := platforms.Parse(v) + if err != nil { + return nil, errors.Wrapf(err, "failed to parse target platform %s", v) + } + p = platforms.Normalize(p) + pp = append(pp, &p) + } + return pp, nil +} + +func parseResolveMode(v string) (llb.ResolveMode, error) { + switch v { + case pb.AttrImageResolveModeDefault, "": + return llb.ResolveModeDefault, nil + case pb.AttrImageResolveModeForcePull: + return llb.ResolveModeForcePull, nil + case pb.AttrImageResolveModePreferLocal: + return llb.ResolveModePreferLocal, nil + default: + return 0, errors.Errorf("invalid image-resolve-mode: %s", v) + } +} + +func parseExtraHosts(v string) ([]llb.HostIP, error) { + if v == "" { + return nil, nil + } + out := make([]llb.HostIP, 0) + csvReader := csv.NewReader(strings.NewReader(v)) + fields, err := csvReader.Read() + if err != nil { + return nil, err + } + for _, field := range fields { + parts := strings.SplitN(field, "=", 2) + if len(parts) != 2 { + return nil, errors.Errorf("invalid key-value pair %s", field) + } + key := strings.ToLower(parts[0]) + val := strings.ToLower(parts[1]) + ip := net.ParseIP(val) + if ip == nil { + return nil, errors.Errorf("failed to parse IP %s", val) + } + out = append(out, llb.HostIP{Host: key, IP: ip}) + } + return out, nil +} + +func parseNetMode(v string) (pb.NetMode, error) { + if v == "" { + return llb.NetModeSandbox, nil + } + switch v { + case "none": + return llb.NetModeNone, nil + case "host": + return llb.NetModeHost, nil + case "sandbox": + return llb.NetModeSandbox, nil + default: + return 0, errors.Errorf("invalid netmode %s", v) + } +} + +func useFileOp(args map[string]string, caps *apicaps.CapSet) bool { + enabled := true + if v, ok := args["build-arg:BUILDKIT_DISABLE_FILEOP"]; ok { + if b, err := strconv.ParseBool(v); err == nil { + enabled = !b + } + } + return enabled && caps != nil && caps.Supports(pb.CapFileBase) == nil +} + +func scopeToSubDir(c *llb.State, fileop bool, dir string) *llb.State { + if fileop { + bc := llb.Scratch().File(llb.Copy(*c, dir, "/", &llb.CopyInfo{ + CopyDirContentsOnly: true, + })) + return &bc + } + unpack := llb.Image(dockerfile2llb.DefaultCopyImage, dockerfile2llb.WithInternalName("helper image for file operations")). + Run(llb.Shlexf("copy %s/. /out/", path.Join("/src", dir)), llb.ReadonlyRootFS(), dockerfile2llb.WithInternalName("filtering build context")) + unpack.AddMount("/src", *c, llb.Readonly) + bc := unpack.AddMount("/out", llb.Scratch()) + return &bc +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/command/command.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/command/command.go new file mode 100644 index 00000000..f23c6874 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/command/command.go @@ -0,0 +1,46 @@ +// Package command contains the set of Dockerfile commands. 
+package command + +// Define constants for the command strings +const ( + Add = "add" + Arg = "arg" + Cmd = "cmd" + Copy = "copy" + Entrypoint = "entrypoint" + Env = "env" + Expose = "expose" + From = "from" + Healthcheck = "healthcheck" + Label = "label" + Maintainer = "maintainer" + Onbuild = "onbuild" + Run = "run" + Shell = "shell" + StopSignal = "stopsignal" + User = "user" + Volume = "volume" + Workdir = "workdir" +) + +// Commands is list of all Dockerfile commands +var Commands = map[string]struct{}{ + Add: {}, + Arg: {}, + Cmd: {}, + Copy: {}, + Entrypoint: {}, + Env: {}, + Expose: {}, + From: {}, + Healthcheck: {}, + Label: {}, + Maintainer: {}, + Onbuild: {}, + Run: {}, + Shell: {}, + StopSignal: {}, + User: {}, + Volume: {}, + Workdir: {}, +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert.go new file mode 100644 index 00000000..b911938a --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert.go @@ -0,0 +1,1323 @@ +package dockerfile2llb + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "math" + "net/url" + "path" + "path/filepath" + "sort" + "strconv" + "strings" + + "github.com/containerd/containerd/platforms" + "github.com/docker/distribution/reference" + "github.com/docker/docker/pkg/signal" + "github.com/docker/go-connections/nat" + "github.com/moby/buildkit/client/llb" + "github.com/moby/buildkit/client/llb/imagemetaresolver" + "github.com/moby/buildkit/frontend/dockerfile/instructions" + "github.com/moby/buildkit/frontend/dockerfile/parser" + "github.com/moby/buildkit/frontend/dockerfile/shell" + "github.com/moby/buildkit/solver/pb" + "github.com/moby/buildkit/util/apicaps" + "github.com/moby/buildkit/util/system" + specs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "golang.org/x/sync/errgroup" +) + +const ( + emptyImageName = "scratch" + defaultContextLocalName = "context" + historyComment = "buildkit.dockerfile.v0" + + DefaultCopyImage = "docker/dockerfile-copy:v0.1.9@sha256:e8f159d3f00786604b93c675ee2783f8dc194bb565e61ca5788f6a6e9d304061" +) + +type ConvertOpt struct { + Target string + MetaResolver llb.ImageMetaResolver + BuildArgs map[string]string + Labels map[string]string + SessionID string + BuildContext *llb.State + Excludes []string + // IgnoreCache contains names of the stages that should not use build cache. + // Empty slice means ignore cache for all stages. Nil doesn't disable cache. 
+ IgnoreCache []string + // CacheIDNamespace scopes the IDs for different cache mounts + CacheIDNamespace string + ImageResolveMode llb.ResolveMode + TargetPlatform *specs.Platform + BuildPlatforms []specs.Platform + PrefixPlatform bool + ExtraHosts []llb.HostIP + ForceNetMode pb.NetMode + OverrideCopyImage string + LLBCaps *apicaps.CapSet + ContextLocalName string +} + +func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, *Image, error) { + if len(dt) == 0 { + return nil, nil, errors.Errorf("the Dockerfile cannot be empty") + } + + if opt.ContextLocalName == "" { + opt.ContextLocalName = defaultContextLocalName + } + + platformOpt := buildPlatformOpt(&opt) + + optMetaArgs := getPlatformArgs(platformOpt) + for i, arg := range optMetaArgs { + optMetaArgs[i] = setKVValue(arg, opt.BuildArgs) + } + + dockerfile, err := parser.Parse(bytes.NewReader(dt)) + if err != nil { + return nil, nil, err + } + + proxyEnv := proxyEnvFromBuildArgs(opt.BuildArgs) + + stages, metaArgs, err := instructions.Parse(dockerfile.AST) + if err != nil { + return nil, nil, err + } + + shlex := shell.NewLex(dockerfile.EscapeToken) + + for _, metaArg := range metaArgs { + if metaArg.Value != nil { + *metaArg.Value, _ = shlex.ProcessWordWithMap(*metaArg.Value, metaArgsToMap(optMetaArgs)) + } + optMetaArgs = append(optMetaArgs, setKVValue(metaArg.KeyValuePairOptional, opt.BuildArgs)) + } + + metaResolver := opt.MetaResolver + if metaResolver == nil { + metaResolver = imagemetaresolver.Default() + } + + allDispatchStates := newDispatchStates() + + // set base state for every image + for i, st := range stages { + name, err := shlex.ProcessWordWithMap(st.BaseName, metaArgsToMap(optMetaArgs)) + if err != nil { + return nil, nil, err + } + if name == "" { + return nil, nil, errors.Errorf("base name (%s) should not be blank", st.BaseName) + } + st.BaseName = name + + ds := &dispatchState{ + stage: st, + deps: make(map[*dispatchState]struct{}), + ctxPaths: make(map[string]struct{}), + stageName: st.Name, + prefixPlatform: opt.PrefixPlatform, + } + + if st.Name == "" { + ds.stageName = fmt.Sprintf("stage-%d", i) + } + + if v := st.Platform; v != "" { + v, err := shlex.ProcessWordWithMap(v, metaArgsToMap(optMetaArgs)) + if err != nil { + return nil, nil, errors.Wrapf(err, "failed to process arguments for platform %s", v) + } + + p, err := platforms.Parse(v) + if err != nil { + return nil, nil, errors.Wrapf(err, "failed to parse platform %s", v) + } + ds.platform = &p + } + allDispatchStates.addState(ds) + + total := 0 + if ds.stage.BaseName != emptyImageName && ds.base == nil { + total = 1 + } + for _, cmd := range ds.stage.Commands { + switch cmd.(type) { + case *instructions.AddCommand, *instructions.CopyCommand, *instructions.RunCommand: + total++ + case *instructions.WorkdirCommand: + if useFileOp(opt.BuildArgs, opt.LLBCaps) { + total++ + } + } + } + ds.cmdTotal = total + + if opt.IgnoreCache != nil { + if len(opt.IgnoreCache) == 0 { + ds.ignoreCache = true + } else if st.Name != "" { + for _, n := range opt.IgnoreCache { + if strings.EqualFold(n, st.Name) { + ds.ignoreCache = true + } + } + } + } + } + + var target *dispatchState + if opt.Target == "" { + target = allDispatchStates.lastTarget() + } else { + var ok bool + target, ok = allDispatchStates.findStateByName(opt.Target) + if !ok { + return nil, nil, errors.Errorf("target stage %s could not be found", opt.Target) + } + } + + // fill dependencies to stages so unreachable ones can avoid loading image configs + for _, d := range 
allDispatchStates.states { + d.commands = make([]command, len(d.stage.Commands)) + for i, cmd := range d.stage.Commands { + newCmd, err := toCommand(cmd, allDispatchStates) + if err != nil { + return nil, nil, err + } + d.commands[i] = newCmd + for _, src := range newCmd.sources { + if src != nil { + d.deps[src] = struct{}{} + if src.unregistered { + allDispatchStates.addState(src) + } + } + } + } + } + + if has, state := hasCircularDependency(allDispatchStates.states); has { + return nil, nil, fmt.Errorf("circular dependency detected on stage: %s", state.stageName) + } + + if len(allDispatchStates.states) == 1 { + allDispatchStates.states[0].stageName = "" + } + + eg, ctx := errgroup.WithContext(ctx) + for i, d := range allDispatchStates.states { + reachable := isReachable(target, d) + // resolve image config for every stage + if d.base == nil { + if d.stage.BaseName == emptyImageName { + d.state = llb.Scratch() + d.image = emptyImage(platformOpt.targetPlatform) + continue + } + func(i int, d *dispatchState) { + eg.Go(func() error { + ref, err := reference.ParseNormalizedNamed(d.stage.BaseName) + if err != nil { + return errors.Wrapf(err, "failed to parse stage name %q", d.stage.BaseName) + } + platform := d.platform + if platform == nil { + platform = &platformOpt.targetPlatform + } + d.stage.BaseName = reference.TagNameOnly(ref).String() + var isScratch bool + if metaResolver != nil && reachable && !d.unregistered { + prefix := "[" + if opt.PrefixPlatform && platform != nil { + prefix += platforms.Format(*platform) + " " + } + prefix += "internal]" + dgst, dt, err := metaResolver.ResolveImageConfig(ctx, d.stage.BaseName, llb.ResolveImageConfigOpt{ + Platform: platform, + ResolveMode: opt.ImageResolveMode.String(), + LogName: fmt.Sprintf("%s load metadata for %s", prefix, d.stage.BaseName), + }) + if err == nil { // handle the error while builder is actually running + var img Image + if err := json.Unmarshal(dt, &img); err != nil { + return err + } + img.Created = nil + // if there is no explicit target platform, try to match based on image config + if d.platform == nil && platformOpt.implicitTarget { + p := autoDetectPlatform(img, *platform, platformOpt.buildPlatforms) + platform = &p + } + d.image = img + if dgst != "" { + ref, err = reference.WithDigest(ref, dgst) + if err != nil { + return err + } + } + d.stage.BaseName = ref.String() + if len(img.RootFS.DiffIDs) == 0 { + isScratch = true + // schema1 images can't return diffIDs so double check :( + for _, h := range img.History { + if !h.EmptyLayer { + isScratch = false + break + } + } + } + } + } + if isScratch { + d.state = llb.Scratch() + } else { + d.state = llb.Image(d.stage.BaseName, dfCmd(d.stage.SourceCode), llb.Platform(*platform), opt.ImageResolveMode, llb.WithCustomName(prefixCommand(d, "FROM "+d.stage.BaseName, opt.PrefixPlatform, platform))) + } + d.platform = platform + return nil + }) + }(i, d) + } + } + + if err := eg.Wait(); err != nil { + return nil, nil, err + } + + buildContext := &mutableOutput{} + ctxPaths := map[string]struct{}{} + + for _, d := range allDispatchStates.states { + if !isReachable(target, d) { + continue + } + if d.base != nil { + d.state = d.base.state + d.platform = d.base.platform + d.image = clone(d.base.image) + } + + // make sure that PATH is always set + if _, ok := shell.BuildEnvs(d.image.Config.Env)["PATH"]; !ok { + d.image.Config.Env = append(d.image.Config.Env, "PATH="+system.DefaultPathEnv) + } + + // initialize base metadata from image conf + for _, env := range d.image.Config.Env { 
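+			// Seed the LLB state with the base image's environment so that
+			// later ${VAR} expansion and RUN commands see it.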
+ k, v := parseKeyValue(env) + d.state = d.state.AddEnv(k, v) + } + if d.image.Config.WorkingDir != "" { + if err = dispatchWorkdir(d, &instructions.WorkdirCommand{Path: d.image.Config.WorkingDir}, false, nil); err != nil { + return nil, nil, err + } + } + if d.image.Config.User != "" { + if err = dispatchUser(d, &instructions.UserCommand{User: d.image.Config.User}, false); err != nil { + return nil, nil, err + } + } + d.state = d.state.Network(opt.ForceNetMode) + + opt := dispatchOpt{ + allDispatchStates: allDispatchStates, + metaArgs: optMetaArgs, + buildArgValues: opt.BuildArgs, + shlex: shlex, + sessionID: opt.SessionID, + buildContext: llb.NewState(buildContext), + proxyEnv: proxyEnv, + cacheIDNamespace: opt.CacheIDNamespace, + buildPlatforms: platformOpt.buildPlatforms, + targetPlatform: platformOpt.targetPlatform, + extraHosts: opt.ExtraHosts, + copyImage: opt.OverrideCopyImage, + llbCaps: opt.LLBCaps, + } + if opt.copyImage == "" { + opt.copyImage = DefaultCopyImage + } + + if err = dispatchOnBuildTriggers(d, d.image.Config.OnBuild, opt); err != nil { + return nil, nil, err + } + d.image.Config.OnBuild = nil + + for _, cmd := range d.commands { + if err := dispatch(d, cmd, opt); err != nil { + return nil, nil, err + } + } + + for p := range d.ctxPaths { + ctxPaths[p] = struct{}{} + } + } + + if len(opt.Labels) != 0 && target.image.Config.Labels == nil { + target.image.Config.Labels = make(map[string]string, len(opt.Labels)) + } + for k, v := range opt.Labels { + target.image.Config.Labels[k] = v + } + + opts := []llb.LocalOption{ + llb.SessionID(opt.SessionID), + llb.ExcludePatterns(opt.Excludes), + llb.SharedKeyHint(opt.ContextLocalName), + WithInternalName("load build context"), + } + if includePatterns := normalizeContextPaths(ctxPaths); includePatterns != nil { + opts = append(opts, llb.FollowPaths(includePatterns)) + } + + bc := llb.Local(opt.ContextLocalName, opts...) + if opt.BuildContext != nil { + bc = *opt.BuildContext + } + buildContext.Output = bc.Output() + + defaults := []llb.ConstraintsOpt{ + llb.Platform(platformOpt.targetPlatform), + } + if opt.LLBCaps != nil { + defaults = append(defaults, llb.WithCaps(*opt.LLBCaps)) + } + st := target.state.SetMarshalDefaults(defaults...) 
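+	// These marshal defaults attach to every LLB vertex derived from the
+	// final state: the target platform, plus the frontend capability set
+	// when the caller supplied one.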
+ + if !platformOpt.implicitTarget { + target.image.OS = platformOpt.targetPlatform.OS + target.image.Architecture = platformOpt.targetPlatform.Architecture + target.image.Variant = platformOpt.targetPlatform.Variant + } + + return &st, &target.image, nil +} + +func metaArgsToMap(metaArgs []instructions.KeyValuePairOptional) map[string]string { + m := map[string]string{} + + for _, arg := range metaArgs { + m[arg.Key] = arg.ValueString() + } + + return m +} + +func toCommand(ic instructions.Command, allDispatchStates *dispatchStates) (command, error) { + cmd := command{Command: ic} + if c, ok := ic.(*instructions.CopyCommand); ok { + if c.From != "" { + var stn *dispatchState + index, err := strconv.Atoi(c.From) + if err != nil { + stn, ok = allDispatchStates.findStateByName(c.From) + if !ok { + stn = &dispatchState{ + stage: instructions.Stage{BaseName: c.From}, + deps: make(map[*dispatchState]struct{}), + unregistered: true, + } + } + } else { + stn, err = allDispatchStates.findStateByIndex(index) + if err != nil { + return command{}, err + } + } + cmd.sources = []*dispatchState{stn} + } + } + + if ok := detectRunMount(&cmd, allDispatchStates); ok { + return cmd, nil + } + + return cmd, nil +} + +type dispatchOpt struct { + allDispatchStates *dispatchStates + metaArgs []instructions.KeyValuePairOptional + buildArgValues map[string]string + shlex *shell.Lex + sessionID string + buildContext llb.State + proxyEnv *llb.ProxyEnv + cacheIDNamespace string + targetPlatform specs.Platform + buildPlatforms []specs.Platform + extraHosts []llb.HostIP + copyImage string + llbCaps *apicaps.CapSet +} + +func dispatch(d *dispatchState, cmd command, opt dispatchOpt) error { + if ex, ok := cmd.Command.(instructions.SupportsSingleWordExpansion); ok { + err := ex.Expand(func(word string) (string, error) { + return opt.shlex.ProcessWord(word, d.state.Env()) + }) + if err != nil { + return err + } + } + + var err error + switch c := cmd.Command.(type) { + case *instructions.MaintainerCommand: + err = dispatchMaintainer(d, c) + case *instructions.EnvCommand: + err = dispatchEnv(d, c) + case *instructions.RunCommand: + err = dispatchRun(d, c, opt.proxyEnv, cmd.sources, opt) + case *instructions.WorkdirCommand: + err = dispatchWorkdir(d, c, true, &opt) + case *instructions.AddCommand: + err = dispatchCopy(d, c.SourcesAndDest, opt.buildContext, true, c, c.Chown, opt) + if err == nil { + for _, src := range c.Sources() { + if !strings.HasPrefix(src, "http://") && !strings.HasPrefix(src, "https://") { + d.ctxPaths[path.Join("/", filepath.ToSlash(src))] = struct{}{} + } + } + } + case *instructions.LabelCommand: + err = dispatchLabel(d, c) + case *instructions.OnbuildCommand: + err = dispatchOnbuild(d, c) + case *instructions.CmdCommand: + err = dispatchCmd(d, c) + case *instructions.EntrypointCommand: + err = dispatchEntrypoint(d, c) + case *instructions.HealthCheckCommand: + err = dispatchHealthcheck(d, c) + case *instructions.ExposeCommand: + err = dispatchExpose(d, c, opt.shlex) + case *instructions.UserCommand: + err = dispatchUser(d, c, true) + case *instructions.VolumeCommand: + err = dispatchVolume(d, c) + case *instructions.StopSignalCommand: + err = dispatchStopSignal(d, c) + case *instructions.ShellCommand: + err = dispatchShell(d, c) + case *instructions.ArgCommand: + err = dispatchArg(d, c, opt.metaArgs, opt.buildArgValues) + case *instructions.CopyCommand: + l := opt.buildContext + if len(cmd.sources) != 0 { + l = cmd.sources[0].state + } + err = dispatchCopy(d, c.SourcesAndDest, l, false, c, c.Chown, 
opt) + if err == nil && len(cmd.sources) == 0 { + for _, src := range c.Sources() { + d.ctxPaths[path.Join("/", filepath.ToSlash(src))] = struct{}{} + } + } + default: + } + return err +} + +type dispatchState struct { + state llb.State + image Image + platform *specs.Platform + stage instructions.Stage + base *dispatchState + deps map[*dispatchState]struct{} + buildArgs []instructions.KeyValuePairOptional + commands []command + ctxPaths map[string]struct{} + ignoreCache bool + cmdSet bool + unregistered bool + stageName string + cmdIndex int + cmdTotal int + prefixPlatform bool +} + +type dispatchStates struct { + states []*dispatchState + statesByName map[string]*dispatchState +} + +func newDispatchStates() *dispatchStates { + return &dispatchStates{statesByName: map[string]*dispatchState{}} +} + +func (dss *dispatchStates) addState(ds *dispatchState) { + dss.states = append(dss.states, ds) + + if d, ok := dss.statesByName[ds.stage.BaseName]; ok { + ds.base = d + } + if ds.stage.Name != "" { + dss.statesByName[strings.ToLower(ds.stage.Name)] = ds + } +} + +func (dss *dispatchStates) findStateByName(name string) (*dispatchState, bool) { + ds, ok := dss.statesByName[strings.ToLower(name)] + return ds, ok +} + +func (dss *dispatchStates) findStateByIndex(index int) (*dispatchState, error) { + if index < 0 || index >= len(dss.states) { + return nil, errors.Errorf("invalid stage index %d", index) + } + + return dss.states[index], nil +} + +func (dss *dispatchStates) lastTarget() *dispatchState { + return dss.states[len(dss.states)-1] +} + +type command struct { + instructions.Command + sources []*dispatchState +} + +func dispatchOnBuildTriggers(d *dispatchState, triggers []string, opt dispatchOpt) error { + for _, trigger := range triggers { + ast, err := parser.Parse(strings.NewReader(trigger)) + if err != nil { + return err + } + if len(ast.AST.Children) != 1 { + return errors.New("onbuild trigger should be a single expression") + } + ic, err := instructions.ParseCommand(ast.AST.Children[0]) + if err != nil { + return err + } + cmd, err := toCommand(ic, opt.allDispatchStates) + if err != nil { + return err + } + if err := dispatch(d, cmd, opt); err != nil { + return err + } + } + return nil +} + +func dispatchEnv(d *dispatchState, c *instructions.EnvCommand) error { + commitMessage := bytes.NewBufferString("ENV") + for _, e := range c.Env { + commitMessage.WriteString(" " + e.String()) + d.state = d.state.AddEnv(e.Key, e.Value) + d.image.Config.Env = addEnv(d.image.Config.Env, e.Key, e.Value) + } + return commitToHistory(&d.image, commitMessage.String(), false, nil) +} + +func dispatchRun(d *dispatchState, c *instructions.RunCommand, proxy *llb.ProxyEnv, sources []*dispatchState, dopt dispatchOpt) error { + var args []string = c.CmdLine + if c.PrependShell { + args = withShell(d.image, args) + } + env := d.state.Env() + opt := []llb.RunOption{llb.Args(args), dfCmd(c)} + if d.ignoreCache { + opt = append(opt, llb.IgnoreCache) + } + if proxy != nil { + opt = append(opt, llb.WithProxy(*proxy)) + } + + runMounts, err := dispatchRunMounts(d, c, sources, dopt) + if err != nil { + return err + } + opt = append(opt, runMounts...) 
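+	// The security and network dispatchers below are gated by the
+	// dfrunsecurity/dfrunnetwork build tags; the default no-op variants
+	// return a nil RunOption.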
+ + securityOpt, err := dispatchRunSecurity(c) + if err != nil { + return err + } + if securityOpt != nil { + opt = append(opt, securityOpt) + } + + networkOpt, err := dispatchRunNetwork(c) + if err != nil { + return err + } + if networkOpt != nil { + opt = append(opt, networkOpt) + } + + shlex := *dopt.shlex + shlex.RawQuotes = true + shlex.SkipUnsetEnv = true + + opt = append(opt, llb.WithCustomName(prefixCommand(d, uppercaseCmd(processCmdEnv(&shlex, c.String(), env)), d.prefixPlatform, d.state.GetPlatform()))) + for _, h := range dopt.extraHosts { + opt = append(opt, llb.AddExtraHost(h.Host, h.IP)) + } + d.state = d.state.Run(opt...).Root() + return commitToHistory(&d.image, "RUN "+runCommandString(args, d.buildArgs, shell.BuildEnvs(env)), true, &d.state) +} + +func dispatchWorkdir(d *dispatchState, c *instructions.WorkdirCommand, commit bool, opt *dispatchOpt) error { + d.state = d.state.Dir(c.Path) + wd := c.Path + if !path.IsAbs(c.Path) { + wd = path.Join("/", d.image.Config.WorkingDir, wd) + } + d.image.Config.WorkingDir = wd + if commit { + withLayer := false + if wd != "/" && opt != nil && useFileOp(opt.buildArgValues, opt.llbCaps) { + mkdirOpt := []llb.MkdirOption{llb.WithParents(true)} + if user := d.image.Config.User; user != "" { + mkdirOpt = append(mkdirOpt, llb.WithUser(user)) + } + platform := opt.targetPlatform + if d.platform != nil { + platform = *d.platform + } + d.state = d.state.File(llb.Mkdir(wd, 0755, mkdirOpt...), llb.WithCustomName(prefixCommand(d, uppercaseCmd(processCmdEnv(opt.shlex, c.String(), d.state.Env())), d.prefixPlatform, &platform))) + withLayer = true + } + return commitToHistory(&d.image, "WORKDIR "+wd, withLayer, nil) + } + return nil +} + +func dispatchCopyFileOp(d *dispatchState, c instructions.SourcesAndDest, sourceState llb.State, isAddCommand bool, cmdToPrint fmt.Stringer, chown string, opt dispatchOpt) error { + dest := path.Join("/", pathRelativeToWorkingDir(d.state, c.Dest())) + if c.Dest() == "." || c.Dest() == "" || c.Dest()[len(c.Dest())-1] == filepath.Separator { + dest += string(filepath.Separator) + } + + var copyOpt []llb.CopyOption + + if chown != "" { + copyOpt = append(copyOpt, llb.WithUser(chown)) + } + + commitMessage := bytes.NewBufferString("") + if isAddCommand { + commitMessage.WriteString("ADD") + } else { + commitMessage.WriteString("COPY") + } + + var a *llb.FileAction + + for _, src := range c.Sources() { + commitMessage.WriteString(" " + src) + if strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://") { + if !isAddCommand { + return errors.New("source can't be a URL for COPY") + } + + // Resources from remote URLs are not decompressed. + // https://docs.docker.com/engine/reference/builder/#add + // + // Note: mixing up remote archives and local archives in a single ADD instruction + // would result in undefined behavior: https://github.com/moby/buildkit/pull/387#discussion_r189494717 + u, err := url.Parse(src) + f := "__unnamed__" + if err == nil { + if base := path.Base(u.Path); base != "." && base != "/" { + f = base + } + } + + st := llb.HTTP(src, llb.Filename(f), dfCmd(c)) + + opts := append([]llb.CopyOption{&llb.CopyInfo{ + CreateDestPath: true, + }}, copyOpt...) + + if a == nil { + a = llb.Copy(st, f, dest, opts...) + } else { + a = a.Copy(st, f, dest, opts...) + } + } else { + opts := append([]llb.CopyOption{&llb.CopyInfo{ + FollowSymlinks: true, + CopyDirContentsOnly: true, + AttemptUnpack: isAddCommand, + CreateDestPath: true, + AllowWildcard: true, + AllowEmptyWildcard: true, + }}, copyOpt...) 
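+			// Each source is chained onto a single FileAction below, so the
+			// whole COPY/ADD instruction commits as one layer.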
+ + if a == nil { + a = llb.Copy(sourceState, filepath.Join("/", src), dest, opts...) + } else { + a = a.Copy(sourceState, filepath.Join("/", src), dest, opts...) + } + } + } + + commitMessage.WriteString(" " + c.Dest()) + + platform := opt.targetPlatform + if d.platform != nil { + platform = *d.platform + } + + fileOpt := []llb.ConstraintsOpt{llb.WithCustomName(prefixCommand(d, uppercaseCmd(processCmdEnv(opt.shlex, cmdToPrint.String(), d.state.Env())), d.prefixPlatform, &platform))} + if d.ignoreCache { + fileOpt = append(fileOpt, llb.IgnoreCache) + } + + d.state = d.state.File(a, fileOpt...) + return commitToHistory(&d.image, commitMessage.String(), true, &d.state) +} + +func dispatchCopy(d *dispatchState, c instructions.SourcesAndDest, sourceState llb.State, isAddCommand bool, cmdToPrint fmt.Stringer, chown string, opt dispatchOpt) error { + if useFileOp(opt.buildArgValues, opt.llbCaps) { + return dispatchCopyFileOp(d, c, sourceState, isAddCommand, cmdToPrint, chown, opt) + } + + img := llb.Image(opt.copyImage, llb.MarkImageInternal, llb.Platform(opt.buildPlatforms[0]), WithInternalName("helper image for file operations")) + + dest := path.Join(".", pathRelativeToWorkingDir(d.state, c.Dest())) + if c.Dest() == "." || c.Dest() == "" || c.Dest()[len(c.Dest())-1] == filepath.Separator { + dest += string(filepath.Separator) + } + args := []string{"copy"} + unpack := isAddCommand + + mounts := make([]llb.RunOption, 0, len(c.Sources())) + if chown != "" { + args = append(args, fmt.Sprintf("--chown=%s", chown)) + _, _, err := parseUser(chown) + if err != nil { + mounts = append(mounts, llb.AddMount("/etc/passwd", d.state, llb.SourcePath("/etc/passwd"), llb.Readonly)) + mounts = append(mounts, llb.AddMount("/etc/group", d.state, llb.SourcePath("/etc/group"), llb.Readonly)) + } + } + + commitMessage := bytes.NewBufferString("") + if isAddCommand { + commitMessage.WriteString("ADD") + } else { + commitMessage.WriteString("COPY") + } + + for i, src := range c.Sources() { + commitMessage.WriteString(" " + src) + if strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://") { + if !isAddCommand { + return errors.New("source can't be a URL for COPY") + } + + // Resources from remote URLs are not decompressed. + // https://docs.docker.com/engine/reference/builder/#add + // + // Note: mixing up remote archives and local archives in a single ADD instruction + // would result in undefined behavior: https://github.com/moby/buildkit/pull/387#discussion_r189494717 + unpack = false + u, err := url.Parse(src) + f := "__unnamed__" + if err == nil { + if base := path.Base(u.Path); base != "." && base != "/" { + f = base + } + } + target := path.Join(fmt.Sprintf("/src-%d", i), f) + args = append(args, target) + mounts = append(mounts, llb.AddMount(path.Dir(target), llb.HTTP(src, llb.Filename(f), dfCmd(c)), llb.Readonly)) + } else { + d, f := splitWildcards(src) + targetCmd := fmt.Sprintf("/src-%d", i) + targetMount := targetCmd + if f == "" { + f = path.Base(src) + targetMount = path.Join(targetMount, f) + } + targetCmd = path.Join(targetCmd, f) + args = append(args, targetCmd) + mounts = append(mounts, llb.AddMount(targetMount, sourceState, llb.SourcePath(d), llb.Readonly)) + } + } + + commitMessage.WriteString(" " + c.Dest()) + + args = append(args, dest) + if unpack { + args = append(args[:1], append([]string{"--unpack"}, args[1:]...)...) 
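+		// The append splice above inserts "--unpack" directly after the
+		// "copy" subcommand (args[0]), ahead of the source and destination
+		// arguments.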
+ } + + platform := opt.targetPlatform + if d.platform != nil { + platform = *d.platform + } + + runOpt := []llb.RunOption{llb.Args(args), llb.Dir("/dest"), llb.ReadonlyRootFS(), dfCmd(cmdToPrint), llb.WithCustomName(prefixCommand(d, uppercaseCmd(processCmdEnv(opt.shlex, cmdToPrint.String(), d.state.Env())), d.prefixPlatform, &platform))} + if d.ignoreCache { + runOpt = append(runOpt, llb.IgnoreCache) + } + + if opt.llbCaps != nil { + if err := opt.llbCaps.Supports(pb.CapExecMetaNetwork); err == nil { + runOpt = append(runOpt, llb.Network(llb.NetModeNone)) + } + } + + run := img.Run(append(runOpt, mounts...)...) + d.state = run.AddMount("/dest", d.state).Platform(platform) + + return commitToHistory(&d.image, commitMessage.String(), true, &d.state) +} + +func dispatchMaintainer(d *dispatchState, c *instructions.MaintainerCommand) error { + d.image.Author = c.Maintainer + return commitToHistory(&d.image, fmt.Sprintf("MAINTAINER %v", c.Maintainer), false, nil) +} + +func dispatchLabel(d *dispatchState, c *instructions.LabelCommand) error { + commitMessage := bytes.NewBufferString("LABEL") + if d.image.Config.Labels == nil { + d.image.Config.Labels = make(map[string]string, len(c.Labels)) + } + for _, v := range c.Labels { + d.image.Config.Labels[v.Key] = v.Value + commitMessage.WriteString(" " + v.String()) + } + return commitToHistory(&d.image, commitMessage.String(), false, nil) +} + +func dispatchOnbuild(d *dispatchState, c *instructions.OnbuildCommand) error { + d.image.Config.OnBuild = append(d.image.Config.OnBuild, c.Expression) + return nil +} + +func dispatchCmd(d *dispatchState, c *instructions.CmdCommand) error { + var args []string = c.CmdLine + if c.PrependShell { + args = withShell(d.image, args) + } + d.image.Config.Cmd = args + d.image.Config.ArgsEscaped = true + d.cmdSet = true + return commitToHistory(&d.image, fmt.Sprintf("CMD %q", args), false, nil) +} + +func dispatchEntrypoint(d *dispatchState, c *instructions.EntrypointCommand) error { + var args []string = c.CmdLine + if c.PrependShell { + args = withShell(d.image, args) + } + d.image.Config.Entrypoint = args + if !d.cmdSet { + d.image.Config.Cmd = nil + } + return commitToHistory(&d.image, fmt.Sprintf("ENTRYPOINT %q", args), false, nil) +} + +func dispatchHealthcheck(d *dispatchState, c *instructions.HealthCheckCommand) error { + d.image.Config.Healthcheck = &HealthConfig{ + Test: c.Health.Test, + Interval: c.Health.Interval, + Timeout: c.Health.Timeout, + StartPeriod: c.Health.StartPeriod, + Retries: c.Health.Retries, + } + return commitToHistory(&d.image, fmt.Sprintf("HEALTHCHECK %q", d.image.Config.Healthcheck), false, nil) +} + +func dispatchExpose(d *dispatchState, c *instructions.ExposeCommand, shlex *shell.Lex) error { + ports := []string{} + for _, p := range c.Ports { + ps, err := shlex.ProcessWords(p, d.state.Env()) + if err != nil { + return err + } + ports = append(ports, ps...) 
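+		// Each port spec is word-expanded against the stage environment
+		// above, so forms like EXPOSE ${PORT}/tcp resolve before
+		// nat.ParsePortSpecs validates them below.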
+ } + c.Ports = ports + + ps, _, err := nat.ParsePortSpecs(c.Ports) + if err != nil { + return err + } + + if d.image.Config.ExposedPorts == nil { + d.image.Config.ExposedPorts = make(map[string]struct{}) + } + for p := range ps { + d.image.Config.ExposedPorts[string(p)] = struct{}{} + } + + return commitToHistory(&d.image, fmt.Sprintf("EXPOSE %v", ps), false, nil) +} + +func dispatchUser(d *dispatchState, c *instructions.UserCommand, commit bool) error { + d.state = d.state.User(c.User) + d.image.Config.User = c.User + if commit { + return commitToHistory(&d.image, fmt.Sprintf("USER %v", c.User), false, nil) + } + return nil +} + +func dispatchVolume(d *dispatchState, c *instructions.VolumeCommand) error { + if d.image.Config.Volumes == nil { + d.image.Config.Volumes = map[string]struct{}{} + } + for _, v := range c.Volumes { + if v == "" { + return errors.New("VOLUME specified can not be an empty string") + } + d.image.Config.Volumes[v] = struct{}{} + } + return commitToHistory(&d.image, fmt.Sprintf("VOLUME %v", c.Volumes), false, nil) +} + +func dispatchStopSignal(d *dispatchState, c *instructions.StopSignalCommand) error { + if _, err := signal.ParseSignal(c.Signal); err != nil { + return err + } + d.image.Config.StopSignal = c.Signal + return commitToHistory(&d.image, fmt.Sprintf("STOPSIGNAL %v", c.Signal), false, nil) +} + +func dispatchShell(d *dispatchState, c *instructions.ShellCommand) error { + d.image.Config.Shell = c.Shell + return commitToHistory(&d.image, fmt.Sprintf("SHELL %v", c.Shell), false, nil) +} + +func dispatchArg(d *dispatchState, c *instructions.ArgCommand, metaArgs []instructions.KeyValuePairOptional, buildArgValues map[string]string) error { + commitStr := "ARG " + c.Key + buildArg := setKVValue(c.KeyValuePairOptional, buildArgValues) + + if c.Value != nil { + commitStr += "=" + *c.Value + } + if buildArg.Value == nil { + for _, ma := range metaArgs { + if ma.Key == buildArg.Key { + buildArg.Value = ma.Value + } + } + } + + if buildArg.Value != nil { + d.state = d.state.AddEnv(buildArg.Key, *buildArg.Value) + } + + d.buildArgs = append(d.buildArgs, buildArg) + return commitToHistory(&d.image, commitStr, false, nil) +} + +func pathRelativeToWorkingDir(s llb.State, p string) string { + if path.IsAbs(p) { + return p + } + return path.Join(s.GetDir(), p) +} + +func splitWildcards(name string) (string, string) { + i := 0 + for ; i < len(name); i++ { + ch := name[i] + if ch == '\\' { + i++ + } else if ch == '*' || ch == '?' 
|| ch == '[' { + break + } + } + if i == len(name) { + return name, "" + } + + base := path.Base(name[:i]) + if name[:i] == "" || strings.HasSuffix(name[:i], string(filepath.Separator)) { + base = "" + } + return path.Dir(name[:i]), base + name[i:] +} + +func addEnv(env []string, k, v string) []string { + gotOne := false + for i, envVar := range env { + key, _ := parseKeyValue(envVar) + if shell.EqualEnvKeys(key, k) { + env[i] = k + "=" + v + gotOne = true + break + } + } + if !gotOne { + env = append(env, k+"="+v) + } + return env +} + +func parseKeyValue(env string) (string, string) { + parts := strings.SplitN(env, "=", 2) + v := "" + if len(parts) > 1 { + v = parts[1] + } + + return parts[0], v +} + +func setKVValue(kvpo instructions.KeyValuePairOptional, values map[string]string) instructions.KeyValuePairOptional { + if v, ok := values[kvpo.Key]; ok { + kvpo.Value = &v + } + return kvpo +} + +func dfCmd(cmd interface{}) llb.ConstraintsOpt { + // TODO: add fmt.Stringer to instructions.Command to remove interface{} + var cmdStr string + if cmd, ok := cmd.(fmt.Stringer); ok { + cmdStr = cmd.String() + } + if cmd, ok := cmd.(string); ok { + cmdStr = cmd + } + return llb.WithDescription(map[string]string{ + "com.docker.dockerfile.v1.command": cmdStr, + }) +} + +func runCommandString(args []string, buildArgs []instructions.KeyValuePairOptional, envMap map[string]string) string { + var tmpBuildEnv []string + for _, arg := range buildArgs { + v, ok := envMap[arg.Key] + if !ok { + v = arg.ValueString() + } + tmpBuildEnv = append(tmpBuildEnv, arg.Key+"="+v) + } + if len(tmpBuildEnv) > 0 { + tmpBuildEnv = append([]string{fmt.Sprintf("|%d", len(tmpBuildEnv))}, tmpBuildEnv...) + } + + return strings.Join(append(tmpBuildEnv, args...), " ") +} + +func commitToHistory(img *Image, msg string, withLayer bool, st *llb.State) error { + if st != nil { + msg += " # buildkit" + } + + img.History = append(img.History, specs.History{ + CreatedBy: msg, + Comment: historyComment, + EmptyLayer: !withLayer, + }) + return nil +} + +func isReachable(from, to *dispatchState) (ret bool) { + if from == nil { + return false + } + if from == to || isReachable(from.base, to) { + return true + } + for d := range from.deps { + if isReachable(d, to) { + return true + } + } + return false +} + +func hasCircularDependency(states []*dispatchState) (bool, *dispatchState) { + var visit func(state *dispatchState) bool + if states == nil { + return false, nil + } + visited := make(map[*dispatchState]struct{}) + path := make(map[*dispatchState]struct{}) + + visit = func(state *dispatchState) bool { + _, ok := visited[state] + if ok { + return false + } + visited[state] = struct{}{} + path[state] = struct{}{} + for dep := range state.deps { + _, ok = path[dep] + if ok { + return true + } + if visit(dep) { + return true + } + } + delete(path, state) + return false + } + for _, state := range states { + if visit(state) { + return true, state + } + } + return false, nil +} + +func parseUser(str string) (uid uint32, gid uint32, err error) { + if str == "" { + return 0, 0, nil + } + parts := strings.SplitN(str, ":", 2) + for i, v := range parts { + switch i { + case 0: + uid, err = parseUID(v) + if err != nil { + return 0, 0, err + } + if len(parts) == 1 { + gid = uid + } + case 1: + gid, err = parseUID(v) + if err != nil { + return 0, 0, err + } + } + } + return +} + +func parseUID(str string) (uint32, error) { + if str == "root" { + return 0, nil + } + uid, err := strconv.ParseUint(str, 10, 32) + if err != nil { + return 0, err + } + 
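+	// ParseUint was bounded to 32 bits above, so the conversion below
+	// cannot truncate.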
return uint32(uid), nil +} + +func normalizeContextPaths(paths map[string]struct{}) []string { + pathSlice := make([]string, 0, len(paths)) + for p := range paths { + if p == "/" { + return nil + } + pathSlice = append(pathSlice, path.Join(".", p)) + } + + sort.Slice(pathSlice, func(i, j int) bool { + return pathSlice[i] < pathSlice[j] + }) + return pathSlice +} + +func proxyEnvFromBuildArgs(args map[string]string) *llb.ProxyEnv { + pe := &llb.ProxyEnv{} + isNil := true + for k, v := range args { + if strings.EqualFold(k, "http_proxy") { + pe.HttpProxy = v + isNil = false + } + if strings.EqualFold(k, "https_proxy") { + pe.HttpsProxy = v + isNil = false + } + if strings.EqualFold(k, "ftp_proxy") { + pe.FtpProxy = v + isNil = false + } + if strings.EqualFold(k, "no_proxy") { + pe.NoProxy = v + isNil = false + } + } + if isNil { + return nil + } + return pe +} + +type mutableOutput struct { + llb.Output +} + +func withShell(img Image, args []string) []string { + var shell []string + if len(img.Config.Shell) > 0 { + shell = append([]string{}, img.Config.Shell...) + } else { + shell = defaultShell() + } + return append(shell, strings.Join(args, " ")) +} + +func autoDetectPlatform(img Image, target specs.Platform, supported []specs.Platform) specs.Platform { + os := img.OS + arch := img.Architecture + if target.OS == os && target.Architecture == arch { + return target + } + for _, p := range supported { + if p.OS == os && p.Architecture == arch { + return p + } + } + return target +} + +func WithInternalName(name string) llb.ConstraintsOpt { + return llb.WithCustomName("[internal] " + name) +} + +func uppercaseCmd(str string) string { + p := strings.SplitN(str, " ", 2) + p[0] = strings.ToUpper(p[0]) + return strings.Join(p, " ") +} + +func processCmdEnv(shlex *shell.Lex, cmd string, env []string) string { + w, err := shlex.ProcessWord(cmd, env) + if err != nil { + return cmd + } + return w +} + +func prefixCommand(ds *dispatchState, str string, prefixPlatform bool, platform *specs.Platform) string { + if ds.cmdTotal == 0 { + return str + } + out := "[" + if prefixPlatform && platform != nil { + out += platforms.Format(*platform) + " " + } + if ds.stageName != "" { + out += ds.stageName + " " + } + ds.cmdIndex++ + out += fmt.Sprintf("%*d/%d] ", int(1+math.Log10(float64(ds.cmdTotal))), ds.cmdIndex, ds.cmdTotal) + return out + str +} + +func useFileOp(args map[string]string, caps *apicaps.CapSet) bool { + enabled := true + if v, ok := args["BUILDKIT_DISABLE_FILEOP"]; ok { + if b, err := strconv.ParseBool(v); err == nil { + enabled = !b + } + } + return enabled && caps != nil && caps.Supports(pb.CapFileBase) == nil +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_norunmount.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_norunmount.go new file mode 100644 index 00000000..5f0cd086 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_norunmount.go @@ -0,0 +1,16 @@ +// +build !dfrunmount + +package dockerfile2llb + +import ( + "github.com/moby/buildkit/client/llb" + "github.com/moby/buildkit/frontend/dockerfile/instructions" +) + +func detectRunMount(cmd *command, allDispatchStates *dispatchStates) bool { + return false +} + +func dispatchRunMounts(d *dispatchState, c *instructions.RunCommand, sources []*dispatchState, opt dispatchOpt) ([]llb.RunOption, error) { + return nil, nil +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_norunnetwork.go 
b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_norunnetwork.go new file mode 100644 index 00000000..300b9d85 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_norunnetwork.go @@ -0,0 +1,12 @@ +// +build !dfrunnetwork + +package dockerfile2llb + +import ( + "github.com/moby/buildkit/client/llb" + "github.com/moby/buildkit/frontend/dockerfile/instructions" +) + +func dispatchRunNetwork(c *instructions.RunCommand) (llb.RunOption, error) { + return nil, nil +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_norunsecurity.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_norunsecurity.go new file mode 100644 index 00000000..10184b42 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_norunsecurity.go @@ -0,0 +1,12 @@ +// +build !dfrunsecurity + +package dockerfile2llb + +import ( + "github.com/moby/buildkit/client/llb" + "github.com/moby/buildkit/frontend/dockerfile/instructions" +) + +func dispatchRunSecurity(c *instructions.RunCommand) (llb.RunOption, error) { + return nil, nil +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_nosecrets.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_nosecrets.go new file mode 100644 index 00000000..d7547021 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_nosecrets.go @@ -0,0 +1,13 @@ +// +build dfrunmount,!dfsecrets + +package dockerfile2llb + +import ( + "github.com/moby/buildkit/client/llb" + "github.com/moby/buildkit/frontend/dockerfile/instructions" + "github.com/pkg/errors" +) + +func dispatchSecret(m *instructions.Mount) (llb.RunOption, error) { + return nil, errors.Errorf("secret mounts not allowed") +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_nossh.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_nossh.go new file mode 100644 index 00000000..8b8afdc3 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_nossh.go @@ -0,0 +1,13 @@ +// +build dfrunmount,!dfssh + +package dockerfile2llb + +import ( + "github.com/moby/buildkit/client/llb" + "github.com/moby/buildkit/frontend/dockerfile/instructions" + "github.com/pkg/errors" +) + +func dispatchSSH(m *instructions.Mount) (llb.RunOption, error) { + return nil, errors.Errorf("ssh mounts not allowed") +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_runmount.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_runmount.go new file mode 100644 index 00000000..33595abb --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_runmount.go @@ -0,0 +1,156 @@ +// +build dfrunmount + +package dockerfile2llb + +import ( + "fmt" + "os" + "path" + "path/filepath" + "strconv" + "strings" + + "github.com/moby/buildkit/client/llb" + "github.com/moby/buildkit/frontend/dockerfile/instructions" + "github.com/moby/buildkit/solver/pb" + "github.com/pkg/errors" +) + +func detectRunMount(cmd *command, allDispatchStates *dispatchStates) bool { + if c, ok := cmd.Command.(*instructions.RunCommand); ok { + mounts := instructions.GetMounts(c) + sources := make([]*dispatchState, len(mounts)) + for i, mount := range mounts { + if mount.From == "" && mount.Type == instructions.MountTypeCache { + mount.From = 
emptyImageName
+			}
+			from := mount.From
+			if from == "" || mount.Type == instructions.MountTypeTmpfs {
+				continue
+			}
+			stn, ok := allDispatchStates.findStateByName(from)
+			if !ok {
+				stn = &dispatchState{
+					stage:        instructions.Stage{BaseName: from},
+					deps:         make(map[*dispatchState]struct{}),
+					unregistered: true,
+				}
+			}
+			sources[i] = stn
+		}
+		cmd.sources = sources
+		return true
+	}
+
+	return false
+}
+
+func setCacheUIDGIDFileOp(m *instructions.Mount, st llb.State) llb.State {
+	uid := 0
+	gid := 0
+	mode := os.FileMode(0755)
+	if m.UID != nil {
+		uid = int(*m.UID)
+	}
+	if m.GID != nil {
+		gid = int(*m.GID)
+	}
+	if m.Mode != nil {
+		mode = os.FileMode(*m.Mode)
+	}
+	return st.File(llb.Mkdir("/cache", mode, llb.WithUIDGID(uid, gid)), llb.WithCustomName("[internal] setting cache mount permissions"))
+}
+
+func setCacheUIDGID(m *instructions.Mount, st llb.State, fileop bool) llb.State {
+	if fileop {
+		return setCacheUIDGIDFileOp(m, st)
+	}
+
+	var b strings.Builder
+	if m.UID != nil {
+		b.WriteString(fmt.Sprintf("chown %d /mnt/cache;", *m.UID))
+	}
+	if m.GID != nil {
+		b.WriteString(fmt.Sprintf("chown :%d /mnt/cache;", *m.GID))
+	}
+	if m.Mode != nil {
+		b.WriteString(fmt.Sprintf("chmod %s /mnt/cache;", strconv.FormatUint(*m.Mode, 8)))
+	}
+	return llb.Image("busybox").Run(llb.Shlex(fmt.Sprintf("sh -c 'mkdir -p /mnt/cache;%s'", b.String())), llb.WithCustomName("[internal] setting cache mount permissions")).AddMount("/mnt", st)
+}
+
+func dispatchRunMounts(d *dispatchState, c *instructions.RunCommand, sources []*dispatchState, opt dispatchOpt) ([]llb.RunOption, error) {
+	var out []llb.RunOption
+	mounts := instructions.GetMounts(c)
+
+	for i, mount := range mounts {
+		if mount.From == "" && mount.Type == instructions.MountTypeCache {
+			mount.From = emptyImageName
+		}
+		st := opt.buildContext
+		if mount.From != "" {
+			st = sources[i].state
+		}
+		var mountOpts []llb.MountOption
+		if mount.Type == instructions.MountTypeTmpfs {
+			st = llb.Scratch()
+			mountOpts = append(mountOpts, llb.Tmpfs())
+		}
+		if mount.Type == instructions.MountTypeSecret {
+			secret, err := dispatchSecret(mount)
+			if err != nil {
+				return nil, err
+			}
+			out = append(out, secret)
+			continue
+		}
+		if mount.Type == instructions.MountTypeSSH {
+			ssh, err := dispatchSSH(mount)
+			if err != nil {
+				return nil, err
+			}
+			out = append(out, ssh)
+			continue
+		}
+		if mount.ReadOnly {
+			mountOpts = append(mountOpts, llb.Readonly)
+		} else if mount.Type == instructions.MountTypeBind && opt.llbCaps.Supports(pb.CapExecMountBindReadWriteNoOuput) == nil {
+			mountOpts = append(mountOpts, llb.ForceNoOutput)
+		}
+		if mount.Type == instructions.MountTypeCache {
+			sharing := llb.CacheMountShared
+			if mount.CacheSharing == instructions.MountSharingPrivate {
+				sharing = llb.CacheMountPrivate
+			}
+			if mount.CacheSharing == instructions.MountSharingLocked {
+				sharing = llb.CacheMountLocked
+			}
+			if mount.CacheID == "" {
+				mount.CacheID = path.Clean(mount.Target)
+			}
+			mountOpts = append(mountOpts, llb.AsPersistentCacheDir(opt.cacheIDNamespace+"/"+mount.CacheID, sharing))
+		}
+		target := mount.Target
+		if !filepath.IsAbs(filepath.Clean(mount.Target)) {
+			target = filepath.Join("/", d.state.GetDir(), mount.Target)
+		}
+		if target == "/" {
+			return nil, errors.Errorf("invalid mount target %q", target)
+		}
+		if src := path.Join("/", mount.Source); src != "/" {
+			mountOpts = append(mountOpts, llb.SourcePath(src))
+		} else {
+			if mount.UID != nil || mount.GID != nil || mount.Mode != nil {
+				st = setCacheUIDGID(mount, st,
useFileOp(opt.buildArgValues, opt.llbCaps)) + mountOpts = append(mountOpts, llb.SourcePath("/cache")) + } + } + + out = append(out, llb.AddMount(target, st, mountOpts...)) + + if mount.From == "" { + d.ctxPaths[path.Join("/", filepath.ToSlash(mount.Source))] = struct{}{} + } + } + return out, nil +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_runnetwork.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_runnetwork.go new file mode 100644 index 00000000..01313e23 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_runnetwork.go @@ -0,0 +1,26 @@ +// +build dfrunnetwork + +package dockerfile2llb + +import ( + "github.com/pkg/errors" + + "github.com/moby/buildkit/client/llb" + "github.com/moby/buildkit/frontend/dockerfile/instructions" + "github.com/moby/buildkit/solver/pb" +) + +func dispatchRunNetwork(c *instructions.RunCommand) (llb.RunOption, error) { + network := instructions.GetNetwork(c) + + switch network { + case instructions.NetworkDefault: + return nil, nil + case instructions.NetworkNone: + return llb.Network(pb.NetMode_NONE), nil + case instructions.NetworkHost: + return llb.Network(pb.NetMode_HOST), nil + default: + return nil, errors.Errorf("unsupported network mode %q", network) + } +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_runsecurity.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_runsecurity.go new file mode 100644 index 00000000..764424a2 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_runsecurity.go @@ -0,0 +1,24 @@ +// +build dfrunsecurity + +package dockerfile2llb + +import ( + "github.com/pkg/errors" + + "github.com/moby/buildkit/client/llb" + "github.com/moby/buildkit/frontend/dockerfile/instructions" + "github.com/moby/buildkit/solver/pb" +) + +func dispatchRunSecurity(c *instructions.RunCommand) (llb.RunOption, error) { + security := instructions.GetSecurity(c) + + switch security { + case instructions.SecurityInsecure: + return llb.Security(pb.SecurityMode_INSECURE), nil + case instructions.SecuritySandbox: + return llb.Security(pb.SecurityMode_SANDBOX), nil + default: + return nil, errors.Errorf("unsupported security mode %q", security) + } +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_secrets.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_secrets.go new file mode 100644 index 00000000..59c055a0 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_secrets.go @@ -0,0 +1,54 @@ +// +build dfsecrets + +package dockerfile2llb + +import ( + "path" + + "github.com/moby/buildkit/client/llb" + "github.com/moby/buildkit/frontend/dockerfile/instructions" + "github.com/pkg/errors" +) + +func dispatchSecret(m *instructions.Mount) (llb.RunOption, error) { + id := m.CacheID + if m.Source != "" { + id = m.Source + } + + if id == "" { + if m.Target == "" { + return nil, errors.Errorf("one of source, target required") + } + id = path.Base(m.Target) + } + + target := m.Target + if target == "" { + target = "/run/secrets/" + path.Base(id) + } + + opts := []llb.SecretOption{llb.SecretID(id)} + + if !m.Required { + opts = append(opts, llb.SecretOptional) + } + + if m.UID != nil || m.GID != nil || m.Mode != nil { + var uid, gid, mode int + if m.UID != nil { + uid = int(*m.UID) + } + if m.GID != nil { + gid = int(*m.GID) + } + if 
m.Mode != nil { + mode = int(*m.Mode) + } else { + mode = 0400 + } + opts = append(opts, llb.SecretFileOpt(uid, gid, mode)) + } + + return llb.AddSecret(target, opts...), nil +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_ssh.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_ssh.go new file mode 100644 index 00000000..a29b5350 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_ssh.go @@ -0,0 +1,42 @@ +// +build dfssh + +package dockerfile2llb + +import ( + "github.com/moby/buildkit/client/llb" + "github.com/moby/buildkit/frontend/dockerfile/instructions" + "github.com/pkg/errors" +) + +func dispatchSSH(m *instructions.Mount) (llb.RunOption, error) { + if m.Source != "" { + return nil, errors.Errorf("ssh does not support source") + } + opts := []llb.SSHOption{llb.SSHID(m.CacheID)} + + if m.Target != "" { + opts = append(opts, llb.SSHSocketTarget(m.Target)) + } + + if !m.Required { + opts = append(opts, llb.SSHOptional) + } + + if m.UID != nil || m.GID != nil || m.Mode != nil { + var uid, gid, mode int + if m.UID != nil { + uid = int(*m.UID) + } + if m.GID != nil { + gid = int(*m.GID) + } + if m.Mode != nil { + mode = int(*m.Mode) + } else { + mode = 0600 + } + opts = append(opts, llb.SSHSocketOpt(m.Target, uid, gid, mode)) + } + + return llb.AddSSHSocket(opts...), nil +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/defaultshell_unix.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/defaultshell_unix.go new file mode 100644 index 00000000..b5d541d1 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/defaultshell_unix.go @@ -0,0 +1,7 @@ +// +build !windows + +package dockerfile2llb + +func defaultShell() []string { + return []string{"/bin/sh", "-c"} +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/defaultshell_windows.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/defaultshell_windows.go new file mode 100644 index 00000000..7693e050 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/defaultshell_windows.go @@ -0,0 +1,7 @@ +// +build windows + +package dockerfile2llb + +func defaultShell() []string { + return []string{"cmd", "/S", "/C"} +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/directives.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/directives.go new file mode 100644 index 00000000..cf06b5ad --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/directives.go @@ -0,0 +1,38 @@ +package dockerfile2llb + +import ( + "bufio" + "io" + "regexp" + "strings" +) + +const keySyntax = "syntax" + +var reDirective = regexp.MustCompile(`^#\s*([a-zA-Z][a-zA-Z0-9]*)\s*=\s*(.+?)\s*$`) + +func DetectSyntax(r io.Reader) (string, string, bool) { + directives := ParseDirectives(r) + if len(directives) == 0 { + return "", "", false + } + v, ok := directives[keySyntax] + if !ok { + return "", "", false + } + p := strings.SplitN(v, " ", 2) + return p[0], v, true +} + +func ParseDirectives(r io.Reader) map[string]string { + m := map[string]string{} + s := bufio.NewScanner(r) + for s.Scan() { + match := reDirective.FindStringSubmatch(s.Text()) + if len(match) == 0 { + return m + } + m[strings.ToLower(match[1])] = match[2] + } + return m +} diff --git 
a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/image.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/image.go new file mode 100644 index 00000000..55e9add2 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/image.go @@ -0,0 +1,79 @@ +package dockerfile2llb + +import ( + "time" + + "github.com/docker/docker/api/types/strslice" + "github.com/moby/buildkit/util/system" + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +// HealthConfig holds configuration settings for the HEALTHCHECK feature. +type HealthConfig struct { + // Test is the test to perform to check that the container is healthy. + // An empty slice means to inherit the default. + // The options are: + // {} : inherit healthcheck + // {"NONE"} : disable healthcheck + // {"CMD", args...} : exec arguments directly + // {"CMD-SHELL", command} : run command with system's default shell + Test []string `json:",omitempty"` + + // Zero means to inherit. Durations are expressed as integer nanoseconds. + Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. + Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. + StartPeriod time.Duration `json:",omitempty"` // The start period for the container to initialize before the retries starts to count down. + + // Retries is the number of consecutive failures needed to consider a container as unhealthy. + // Zero means inherit. + Retries int `json:",omitempty"` +} + +// ImageConfig is a docker compatible config for an image +type ImageConfig struct { + specs.ImageConfig + + Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy + ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific) + + // NetworkDisabled bool `json:",omitempty"` // Is network disabled + // MacAddress string `json:",omitempty"` // Mac Address of the container + OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile + StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container + Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT +} + +// Image is the JSON structure which describes some basic information about the image. +// This provides the `application/vnd.oci.image.config.v1+json` mediatype when marshalled to JSON. +type Image struct { + specs.Image + + // Config defines the execution parameters which should be used as a base when running a container using the image. + Config ImageConfig `json:"config,omitempty"` + + // Variant defines platform variant. To be added to OCI. + Variant string `json:"variant,omitempty"` +} + +func clone(src Image) Image { + img := src + img.Config = src.Config + img.Config.Env = append([]string{}, src.Config.Env...) + img.Config.Cmd = append([]string{}, src.Config.Cmd...) + img.Config.Entrypoint = append([]string{}, src.Config.Entrypoint...) 
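+	// Note: only the Env/Cmd/Entrypoint slices above are duplicated;
+	// map-valued config fields (e.g. Labels, ExposedPorts, Volumes) still
+	// alias the source image.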
+ return img +} + +func emptyImage(platform specs.Platform) Image { + img := Image{ + Image: specs.Image{ + Architecture: platform.Architecture, + OS: platform.OS, + }, + Variant: platform.Variant, + } + img.RootFS.Type = "layers" + img.Config.WorkingDir = "/" + img.Config.Env = []string{"PATH=" + system.DefaultPathEnv} + return img +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/platform.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/platform.go new file mode 100644 index 00000000..e1ef78f8 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/platform.go @@ -0,0 +1,58 @@ +package dockerfile2llb + +import ( + "github.com/containerd/containerd/platforms" + "github.com/moby/buildkit/frontend/dockerfile/instructions" + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +type platformOpt struct { + targetPlatform specs.Platform + buildPlatforms []specs.Platform + implicitTarget bool +} + +func buildPlatformOpt(opt *ConvertOpt) *platformOpt { + buildPlatforms := opt.BuildPlatforms + targetPlatform := opt.TargetPlatform + implicitTargetPlatform := false + + if opt.TargetPlatform != nil && opt.BuildPlatforms == nil { + buildPlatforms = []specs.Platform{*opt.TargetPlatform} + } + if len(buildPlatforms) == 0 { + buildPlatforms = []specs.Platform{platforms.DefaultSpec()} + } + + if opt.TargetPlatform == nil { + implicitTargetPlatform = true + targetPlatform = &buildPlatforms[0] + } + + return &platformOpt{ + targetPlatform: *targetPlatform, + buildPlatforms: buildPlatforms, + implicitTarget: implicitTargetPlatform, + } +} + +func getPlatformArgs(po *platformOpt) []instructions.KeyValuePairOptional { + bp := po.buildPlatforms[0] + tp := po.targetPlatform + m := map[string]string{ + "BUILDPLATFORM": platforms.Format(bp), + "BUILDOS": bp.OS, + "BUILDARCH": bp.Architecture, + "BUILDVARIANT": bp.Variant, + "TARGETPLATFORM": platforms.Format(tp), + "TARGETOS": tp.OS, + "TARGETARCH": tp.Architecture, + "TARGETVARIANT": tp.Variant, + } + opts := make([]instructions.KeyValuePairOptional, 0, len(m)) + for k, v := range m { + s := v + opts = append(opts, instructions.KeyValuePairOptional{Key: k, Value: &s}) + } + return opts +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/bflag.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/bflag.go new file mode 100644 index 00000000..d8bf7473 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/bflag.go @@ -0,0 +1,200 @@ +package instructions + +import ( + "fmt" + "strings" +) + +// FlagType is the type of the build flag +type FlagType int + +const ( + boolType FlagType = iota + stringType + stringsType +) + +// BFlags contains all flags information for the builder +type BFlags struct { + Args []string // actual flags/args from cmd line + flags map[string]*Flag + used map[string]*Flag + Err error +} + +// Flag contains all information for a flag +type Flag struct { + bf *BFlags + name string + flagType FlagType + Value string + StringValues []string +} + +// NewBFlags returns the new BFlags struct +func NewBFlags() *BFlags { + return &BFlags{ + flags: make(map[string]*Flag), + used: make(map[string]*Flag), + } +} + +// NewBFlagsWithArgs returns the new BFlags struct with Args set to args +func NewBFlagsWithArgs(args []string) *BFlags { + flags := NewBFlags() + flags.Args = args + return flags +} + +// AddBool adds a bool flag to BFlags +// Note, any error will be generated when Parse() 
is called (see Parse). +func (bf *BFlags) AddBool(name string, def bool) *Flag { + flag := bf.addFlag(name, boolType) + if flag == nil { + return nil + } + if def { + flag.Value = "true" + } else { + flag.Value = "false" + } + return flag +} + +// AddString adds a string flag to BFlags +// Note, any error will be generated when Parse() is called (see Parse). +func (bf *BFlags) AddString(name string, def string) *Flag { + flag := bf.addFlag(name, stringType) + if flag == nil { + return nil + } + flag.Value = def + return flag +} + +// AddStrings adds a string flag to BFlags that can match multiple values +func (bf *BFlags) AddStrings(name string) *Flag { + flag := bf.addFlag(name, stringsType) + if flag == nil { + return nil + } + return flag +} + +// addFlag is a generic func used by the other AddXXX() func +// to add a new flag to the BFlags struct. +// Note, any error will be generated when Parse() is called (see Parse). +func (bf *BFlags) addFlag(name string, flagType FlagType) *Flag { + if _, ok := bf.flags[name]; ok { + bf.Err = fmt.Errorf("Duplicate flag defined: %s", name) + return nil + } + + newFlag := &Flag{ + bf: bf, + name: name, + flagType: flagType, + } + bf.flags[name] = newFlag + + return newFlag +} + +// IsUsed checks if the flag is used +func (fl *Flag) IsUsed() bool { + if _, ok := fl.bf.used[fl.name]; ok { + return true + } + return false +} + +// IsTrue checks if a bool flag is true +func (fl *Flag) IsTrue() bool { + if fl.flagType != boolType { + // Should never get here + panic(fmt.Errorf("Trying to use IsTrue on a non-boolean: %s", fl.name)) + } + return fl.Value == "true" +} + +// Parse parses and checks if the BFlags is valid. +// Any error noticed during the AddXXX() funcs will be generated/returned +// here. We do this because an error during AddXXX() is more like a +// compile time error so it doesn't matter too much when we stop our +// processing as long as we do stop it, so this allows the code +// around AddXXX() to be just: +// defFlag := AddString("description", "") +// w/o needing to add an if-statement around each one. 
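+//
+// A minimal usage sketch (the flag name here is illustrative, not part of
+// this API):
+//
+//	bf := NewBFlagsWithArgs([]string{"--chown=1000:1000"})
+//	chownFlag := bf.AddString("chown", "")
+//	if err := bf.Parse(); err != nil {
+//		return err
+//	}
+//	// chownFlag.Value == "1000:1000", chownFlag.IsUsed() == true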
+func (bf *BFlags) Parse() error { + // If there was an error while defining the possible flags + // go ahead and bubble it back up here since we didn't do it + // earlier in the processing + if bf.Err != nil { + return fmt.Errorf("Error setting up flags: %s", bf.Err) + } + + for _, arg := range bf.Args { + if !strings.HasPrefix(arg, "--") { + return fmt.Errorf("Arg should start with -- : %s", arg) + } + + if arg == "--" { + return nil + } + + arg = arg[2:] + value := "" + + index := strings.Index(arg, "=") + if index >= 0 { + value = arg[index+1:] + arg = arg[:index] + } + + flag, ok := bf.flags[arg] + if !ok { + return fmt.Errorf("Unknown flag: %s", arg) + } + + if _, ok = bf.used[arg]; ok && flag.flagType != stringsType { + return fmt.Errorf("Duplicate flag specified: %s", arg) + } + + bf.used[arg] = flag + + switch flag.flagType { + case boolType: + // value == "" is only ok if no "=" was specified + if index >= 0 && value == "" { + return fmt.Errorf("Missing a value on flag: %s", arg) + } + + lower := strings.ToLower(value) + if lower == "" { + flag.Value = "true" + } else if lower == "true" || lower == "false" { + flag.Value = lower + } else { + return fmt.Errorf("Expecting boolean value for flag %s, not: %s", arg, value) + } + + case stringType: + if index < 0 { + return fmt.Errorf("Missing a value on flag: %s", arg) + } + flag.Value = value + + case stringsType: + if index < 0 { + return fmt.Errorf("Missing a value on flag: %s", arg) + } + flag.StringValues = append(flag.StringValues, value) + + default: + panic("No idea what kind of flag we have! Should never get here!") + } + + } + + return nil +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands.go new file mode 100644 index 00000000..ed96d7e0 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands.go @@ -0,0 +1,451 @@ +package instructions + +import ( + "errors" + "strings" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/strslice" +) + +// KeyValuePair represent an arbitrary named value (useful in slice instead of map[string] string to preserve ordering) +type KeyValuePair struct { + Key string + Value string +} + +func (kvp *KeyValuePair) String() string { + return kvp.Key + "=" + kvp.Value +} + +// KeyValuePairOptional is the same as KeyValuePair but Value is optional +type KeyValuePairOptional struct { + Key string + Value *string +} + +func (kvpo *KeyValuePairOptional) ValueString() string { + v := "" + if kvpo.Value != nil { + v = *kvpo.Value + } + return v +} + +// Command is implemented by every command present in a dockerfile +type Command interface { + Name() string +} + +// KeyValuePairs is a slice of KeyValuePair +type KeyValuePairs []KeyValuePair + +// withNameAndCode is the base of every command in a Dockerfile (String() returns its source code) +type withNameAndCode struct { + code string + name string +} + +func (c *withNameAndCode) String() string { + return c.code +} + +// Name of the command +func (c *withNameAndCode) Name() string { + return c.name +} + +func newWithNameAndCode(req parseRequest) withNameAndCode { + return withNameAndCode{code: strings.TrimSpace(req.original), name: req.command} +} + +// SingleWordExpander is a provider for variable expansion where 1 word => 1 output +type SingleWordExpander func(word string) (string, error) + +// SupportsSingleWordExpansion interface marks a command as supporting 
variable expansion +type SupportsSingleWordExpansion interface { + Expand(expander SingleWordExpander) error +} + +// PlatformSpecific adds platform checks to a command +type PlatformSpecific interface { + CheckPlatform(platform string) error +} + +func expandKvp(kvp KeyValuePair, expander SingleWordExpander) (KeyValuePair, error) { + key, err := expander(kvp.Key) + if err != nil { + return KeyValuePair{}, err + } + value, err := expander(kvp.Value) + if err != nil { + return KeyValuePair{}, err + } + return KeyValuePair{Key: key, Value: value}, nil +} +func expandKvpsInPlace(kvps KeyValuePairs, expander SingleWordExpander) error { + for i, kvp := range kvps { + newKvp, err := expandKvp(kvp, expander) + if err != nil { + return err + } + kvps[i] = newKvp + } + return nil +} + +func expandSliceInPlace(values []string, expander SingleWordExpander) error { + for i, v := range values { + newValue, err := expander(v) + if err != nil { + return err + } + values[i] = newValue + } + return nil +} + +// EnvCommand : ENV key1 value1 [keyN valueN...] +type EnvCommand struct { + withNameAndCode + Env KeyValuePairs // kvp slice instead of map to preserve ordering +} + +// Expand variables +func (c *EnvCommand) Expand(expander SingleWordExpander) error { + return expandKvpsInPlace(c.Env, expander) +} + +// MaintainerCommand : MAINTAINER maintainer_name +type MaintainerCommand struct { + withNameAndCode + Maintainer string +} + +// NewLabelCommand creates a new 'LABEL' command +func NewLabelCommand(k string, v string, NoExp bool) *LabelCommand { + kvp := KeyValuePair{Key: k, Value: v} + c := "LABEL " + c += kvp.String() + nc := withNameAndCode{code: c, name: "label"} + cmd := &LabelCommand{ + withNameAndCode: nc, + Labels: KeyValuePairs{ + kvp, + }, + noExpand: NoExp, + } + return cmd +} + +// LabelCommand : LABEL some json data describing the image +// +// Sets the Label variable foo to bar, +// +type LabelCommand struct { + withNameAndCode + Labels KeyValuePairs // kvp slice instead of map to preserve ordering + noExpand bool +} + +// Expand variables +func (c *LabelCommand) Expand(expander SingleWordExpander) error { + if c.noExpand { + return nil + } + return expandKvpsInPlace(c.Labels, expander) +} + +// SourcesAndDest represent a list of source files and a destination +type SourcesAndDest []string + +// Sources list the source paths +func (s SourcesAndDest) Sources() []string { + res := make([]string, len(s)-1) + copy(res, s[:len(s)-1]) + return res +} + +// Dest path of the operation +func (s SourcesAndDest) Dest() string { + return s[len(s)-1] +} + +// AddCommand : ADD foo /path +// +// Add the file 'foo' to '/path'. Tarball and Remote URL (http, https) handling +// exist here. If you do not wish to have this automatic handling, use COPY. +// +type AddCommand struct { + withNameAndCode + SourcesAndDest + Chown string +} + +// Expand variables +func (c *AddCommand) Expand(expander SingleWordExpander) error { + return expandSliceInPlace(c.SourcesAndDest, expander) +} + +// CopyCommand : COPY foo /path +// +// Same as 'ADD' but without the tar and remote url handling. 
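+//
+// Illustrative sketch (not part of the upstream source): for the line
+// `COPY --from=builder a.txt b.txt /dst/`, parseCopy produces a CopyCommand
+// with From == "builder" and SourcesAndDest == {"a.txt", "b.txt", "/dst/"},
+// so Sources() == {"a.txt", "b.txt"} and Dest() == "/dst/".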
+// +type CopyCommand struct { + withNameAndCode + SourcesAndDest + From string + Chown string +} + +// Expand variables +func (c *CopyCommand) Expand(expander SingleWordExpander) error { + expandedChown, err := expander(c.Chown) + if err != nil { + return err + } + c.Chown = expandedChown + return expandSliceInPlace(c.SourcesAndDest, expander) +} + +// OnbuildCommand : ONBUILD +type OnbuildCommand struct { + withNameAndCode + Expression string +} + +// WorkdirCommand : WORKDIR /tmp +// +// Set the working directory for future RUN/CMD/etc statements. +// +type WorkdirCommand struct { + withNameAndCode + Path string +} + +// Expand variables +func (c *WorkdirCommand) Expand(expander SingleWordExpander) error { + p, err := expander(c.Path) + if err != nil { + return err + } + c.Path = p + return nil +} + +// ShellDependantCmdLine represents a cmdline optionally prepended with the shell +type ShellDependantCmdLine struct { + CmdLine strslice.StrSlice + PrependShell bool +} + +// RunCommand : RUN some command yo +// +// run a command and commit the image. Args are automatically prepended with +// the current SHELL which defaults to 'sh -c' under linux or 'cmd /S /C' under +// Windows, in the event there is only one argument The difference in processing: +// +// RUN echo hi # sh -c echo hi (Linux) +// RUN echo hi # cmd /S /C echo hi (Windows) +// RUN [ "echo", "hi" ] # echo hi +// +type RunCommand struct { + withNameAndCode + withExternalData + ShellDependantCmdLine +} + +// CmdCommand : CMD foo +// +// Set the default command to run in the container (which may be empty). +// Argument handling is the same as RUN. +// +type CmdCommand struct { + withNameAndCode + ShellDependantCmdLine +} + +// HealthCheckCommand : HEALTHCHECK foo +// +// Set the default healthcheck command to run in the container (which may be empty). +// Argument handling is the same as RUN. +// +type HealthCheckCommand struct { + withNameAndCode + Health *container.HealthConfig +} + +// EntrypointCommand : ENTRYPOINT /usr/sbin/nginx +// +// Set the entrypoint to /usr/sbin/nginx. Will accept the CMD as the arguments +// to /usr/sbin/nginx. Uses the default shell if not in JSON format. +// +// Handles command processing similar to CMD and RUN, only req.runConfig.Entrypoint +// is initialized at newBuilder time instead of through argument parsing. +// +type EntrypointCommand struct { + withNameAndCode + ShellDependantCmdLine +} + +// ExposeCommand : EXPOSE 6667/tcp 7000/tcp +// +// Expose ports for links and port mappings. This all ends up in +// req.runConfig.ExposedPorts for runconfig. +// +type ExposeCommand struct { + withNameAndCode + Ports []string +} + +// UserCommand : USER foo +// +// Set the user to 'foo' for future commands and when running the +// ENTRYPOINT/CMD at container run time. +// +type UserCommand struct { + withNameAndCode + User string +} + +// Expand variables +func (c *UserCommand) Expand(expander SingleWordExpander) error { + p, err := expander(c.User) + if err != nil { + return err + } + c.User = p + return nil +} + +// VolumeCommand : VOLUME /foo +// +// Expose the volume /foo for use. Will also accept the JSON array form. +// +type VolumeCommand struct { + withNameAndCode + Volumes []string +} + +// Expand variables +func (c *VolumeCommand) Expand(expander SingleWordExpander) error { + return expandSliceInPlace(c.Volumes, expander) +} + +// StopSignalCommand : STOPSIGNAL signal +// +// Set the signal that will be used to kill the container. 
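+//
+// For example (illustrative), `STOPSIGNAL SIGTERM` parses to a
+// StopSignalCommand with Signal == "SIGTERM".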
+type StopSignalCommand struct { + withNameAndCode + Signal string +} + +// Expand variables +func (c *StopSignalCommand) Expand(expander SingleWordExpander) error { + p, err := expander(c.Signal) + if err != nil { + return err + } + c.Signal = p + return nil +} + +// CheckPlatform checks that the command is supported in the target platform +func (c *StopSignalCommand) CheckPlatform(platform string) error { + if platform == "windows" { + return errors.New("The daemon on this platform does not support the command stopsignal") + } + return nil +} + +// ArgCommand : ARG name[=value] +// +// Adds the variable foo to the trusted list of variables that can be passed +// to builder using the --build-arg flag for expansion/substitution or passing to 'run'. +// Dockerfile author may optionally set a default value of this variable. +type ArgCommand struct { + withNameAndCode + KeyValuePairOptional +} + +// Expand variables +func (c *ArgCommand) Expand(expander SingleWordExpander) error { + p, err := expander(c.Key) + if err != nil { + return err + } + c.Key = p + if c.Value != nil { + p, err = expander(*c.Value) + if err != nil { + return err + } + c.Value = &p + } + return nil +} + +// ShellCommand : SHELL powershell -command +// +// Set the non-default shell to use. +type ShellCommand struct { + withNameAndCode + Shell strslice.StrSlice +} + +// Stage represents a single stage in a multi-stage build +type Stage struct { + Name string + Commands []Command + BaseName string + SourceCode string + Platform string +} + +// AddCommand to the stage +func (s *Stage) AddCommand(cmd Command) { + // todo: validate cmd type + s.Commands = append(s.Commands, cmd) +} + +// IsCurrentStage check if the stage name is the current stage +func IsCurrentStage(s []Stage, name string) bool { + if len(s) == 0 { + return false + } + return s[len(s)-1].Name == name +} + +// CurrentStage return the last stage in a slice +func CurrentStage(s []Stage) (*Stage, error) { + if len(s) == 0 { + return nil, errors.New("No build stage in current context") + } + return &s[len(s)-1], nil +} + +// HasStage looks for the presence of a given stage name +func HasStage(s []Stage, name string) (int, bool) { + for i, stage := range s { + // Stage name is case-insensitive by design + if strings.EqualFold(stage.Name, name) { + return i, true + } + } + return -1, false +} + +type withExternalData struct { + m map[interface{}]interface{} +} + +func (c *withExternalData) getExternalValue(k interface{}) interface{} { + return c.m[k] +} + +func (c *withExternalData) setExternalValue(k, v interface{}) { + if c.m == nil { + c.m = map[interface{}]interface{}{} + } + c.m[k] = v +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_nosecrets.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_nosecrets.go new file mode 100644 index 00000000..58780648 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_nosecrets.go @@ -0,0 +1,7 @@ +// +build !dfsecrets + +package instructions + +func isSecretMountsSupported() bool { + return false +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_nossh.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_nossh.go new file mode 100644 index 00000000..a131a273 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_nossh.go @@ -0,0 +1,7 @@ +// +build !dfssh + +package instructions + +func isSSHMountsSupported() bool { 
+ return false +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runmount.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runmount.go new file mode 100644 index 00000000..442877d8 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runmount.go @@ -0,0 +1,263 @@ +// +build dfrunmount + +package instructions + +import ( + "encoding/csv" + "strconv" + "strings" + + "github.com/pkg/errors" +) + +const MountTypeBind = "bind" +const MountTypeCache = "cache" +const MountTypeTmpfs = "tmpfs" +const MountTypeSecret = "secret" +const MountTypeSSH = "ssh" + +var allowedMountTypes = map[string]struct{}{ + MountTypeBind: {}, + MountTypeCache: {}, + MountTypeTmpfs: {}, + MountTypeSecret: {}, + MountTypeSSH: {}, +} + +const MountSharingShared = "shared" +const MountSharingPrivate = "private" +const MountSharingLocked = "locked" + +var allowedSharingTypes = map[string]struct{}{ + MountSharingShared: {}, + MountSharingPrivate: {}, + MountSharingLocked: {}, +} + +type mountsKeyT string + +var mountsKey = mountsKeyT("dockerfile/run/mounts") + +func init() { + parseRunPreHooks = append(parseRunPreHooks, runMountPreHook) + parseRunPostHooks = append(parseRunPostHooks, runMountPostHook) +} + +func isValidMountType(s string) bool { + if s == "secret" { + if !isSecretMountsSupported() { + return false + } + } + if s == "ssh" { + if !isSSHMountsSupported() { + return false + } + } + _, ok := allowedMountTypes[s] + return ok +} + +func runMountPreHook(cmd *RunCommand, req parseRequest) error { + st := &mountState{} + st.flag = req.flags.AddStrings("mount") + cmd.setExternalValue(mountsKey, st) + return nil +} + +func runMountPostHook(cmd *RunCommand, req parseRequest) error { + st := getMountState(cmd) + if st == nil { + return errors.Errorf("no mount state") + } + var mounts []*Mount + for _, str := range st.flag.StringValues { + m, err := parseMount(str) + if err != nil { + return err + } + mounts = append(mounts, m) + } + st.mounts = mounts + return nil +} + +func getMountState(cmd *RunCommand) *mountState { + v := cmd.getExternalValue(mountsKey) + if v == nil { + return nil + } + return v.(*mountState) +} + +func GetMounts(cmd *RunCommand) []*Mount { + return getMountState(cmd).mounts +} + +type mountState struct { + flag *Flag + mounts []*Mount +} + +type Mount struct { + Type string + From string + Source string + Target string + ReadOnly bool + CacheID string + CacheSharing string + Required bool + Mode *uint64 + UID *uint64 + GID *uint64 +} + +func parseMount(value string) (*Mount, error) { + csvReader := csv.NewReader(strings.NewReader(value)) + fields, err := csvReader.Read() + if err != nil { + return nil, errors.Wrap(err, "failed to parse csv mounts") + } + + m := &Mount{Type: MountTypeBind} + + roAuto := true + + for _, field := range fields { + parts := strings.SplitN(field, "=", 2) + key := strings.ToLower(parts[0]) + + if len(parts) == 1 { + switch key { + case "readonly", "ro": + m.ReadOnly = true + roAuto = false + continue + case "readwrite", "rw": + m.ReadOnly = false + roAuto = false + continue + case "required": + if m.Type == "secret" || m.Type == "ssh" { + m.Required = true + continue + } else { + return nil, errors.Errorf("unexpected key '%s' for mount type '%s'", key, m.Type) + } + } + } + + if len(parts) != 2 { + return nil, errors.Errorf("invalid field '%s' must be a key=value pair", field) + } + + value := parts[1] + switch key { + case "type": + if 
!isValidMountType(strings.ToLower(value)) { + return nil, errors.Errorf("unsupported mount type %q", value) + } + m.Type = strings.ToLower(value) + case "from": + m.From = value + case "source", "src": + m.Source = value + case "target", "dst", "destination": + m.Target = value + case "readonly", "ro": + m.ReadOnly, err = strconv.ParseBool(value) + if err != nil { + return nil, errors.Errorf("invalid value for %s: %s", key, value) + } + roAuto = false + case "readwrite", "rw": + rw, err := strconv.ParseBool(value) + if err != nil { + return nil, errors.Errorf("invalid value for %s: %s", key, value) + } + m.ReadOnly = !rw + roAuto = false + case "required": + if m.Type == "secret" || m.Type == "ssh" { + v, err := strconv.ParseBool(value) + if err != nil { + return nil, errors.Errorf("invalid value for %s: %s", key, value) + } + m.Required = v + } else { + return nil, errors.Errorf("unexpected key '%s' for mount type '%s'", key, m.Type) + } + case "id": + m.CacheID = value + case "sharing": + if _, ok := allowedSharingTypes[strings.ToLower(value)]; !ok { + return nil, errors.Errorf("unsupported sharing value %q", value) + } + m.CacheSharing = strings.ToLower(value) + case "mode": + mode, err := strconv.ParseUint(value, 8, 32) + if err != nil { + return nil, errors.Errorf("invalid value %s for mode", value) + } + m.Mode = &mode + case "uid": + uid, err := strconv.ParseUint(value, 10, 32) + if err != nil { + return nil, errors.Errorf("invalid value %s for uid", value) + } + m.UID = &uid + case "gid": + gid, err := strconv.ParseUint(value, 10, 32) + if err != nil { + return nil, errors.Errorf("invalid value %s for gid", value) + } + m.GID = &gid + default: + return nil, errors.Errorf("unexpected key '%s' in '%s'", key, field) + } + } + + fileInfoAllowed := m.Type == MountTypeSecret || m.Type == MountTypeSSH || m.Type == MountTypeCache + + if m.Mode != nil && !fileInfoAllowed { + return nil, errors.Errorf("mode not allowed for %q type mounts", m.Type) + } + + if m.UID != nil && !fileInfoAllowed { + return nil, errors.Errorf("uid not allowed for %q type mounts", m.Type) + } + + if m.GID != nil && !fileInfoAllowed { + return nil, errors.Errorf("gid not allowed for %q type mounts", m.Type) + } + + if roAuto { + if m.Type == MountTypeCache || m.Type == MountTypeTmpfs { + m.ReadOnly = false + } else { + m.ReadOnly = true + } + } + + if m.CacheSharing != "" && m.Type != MountTypeCache { + return nil, errors.Errorf("invalid cache sharing set for %v mount", m.Type) + } + + if m.Type == MountTypeSecret { + if m.From != "" { + return nil, errors.Errorf("secret mount should not have a from") + } + if m.CacheSharing != "" { + return nil, errors.Errorf("secret mount should not define sharing") + } + if m.Source == "" && m.Target == "" && m.CacheID == "" { + return nil, errors.Errorf("invalid secret mount. 
one of source, target required") + } + if m.Source != "" && m.CacheID != "" { + return nil, errors.Errorf("both source and id can't be set") + } + } + + return m, nil +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runnetwork.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runnetwork.go new file mode 100644 index 00000000..adef3fd7 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runnetwork.go @@ -0,0 +1,63 @@ +// +build dfrunnetwork + +package instructions + +import ( + "github.com/pkg/errors" +) + +const ( + NetworkDefault = "default" + NetworkNone = "none" + NetworkHost = "host" +) + +var allowedNetwork = map[string]struct{}{ + NetworkDefault: {}, + NetworkNone: {}, + NetworkHost: {}, +} + +func isValidNetwork(value string) bool { + _, ok := allowedNetwork[value] + return ok +} + +var networkKey = "dockerfile/run/network" + +func init() { + parseRunPreHooks = append(parseRunPreHooks, runNetworkPreHook) + parseRunPostHooks = append(parseRunPostHooks, runNetworkPostHook) +} + +func runNetworkPreHook(cmd *RunCommand, req parseRequest) error { + st := &networkState{} + st.flag = req.flags.AddString("network", NetworkDefault) + cmd.setExternalValue(networkKey, st) + return nil +} + +func runNetworkPostHook(cmd *RunCommand, req parseRequest) error { + st := cmd.getExternalValue(networkKey).(*networkState) + if st == nil { + return errors.Errorf("no network state") + } + + value := st.flag.Value + if !isValidNetwork(value) { + return errors.Errorf("invalid network mode %q", value) + } + + st.networkMode = value + + return nil +} + +func GetNetwork(cmd *RunCommand) string { + return cmd.getExternalValue(networkKey).(*networkState).networkMode +} + +type networkState struct { + flag *Flag + networkMode string +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runsecurity.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runsecurity.go new file mode 100644 index 00000000..0c0be806 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runsecurity.go @@ -0,0 +1,61 @@ +// +build dfrunsecurity + +package instructions + +import ( + "github.com/pkg/errors" +) + +const ( + SecurityInsecure = "insecure" + SecuritySandbox = "sandbox" +) + +var allowedSecurity = map[string]struct{}{ + SecurityInsecure: {}, + SecuritySandbox: {}, +} + +func isValidSecurity(value string) bool { + _, ok := allowedSecurity[value] + return ok +} + +var securityKey = "dockerfile/run/security" + +func init() { + parseRunPreHooks = append(parseRunPreHooks, runSecurityPreHook) + parseRunPostHooks = append(parseRunPostHooks, runSecurityPostHook) +} + +func runSecurityPreHook(cmd *RunCommand, req parseRequest) error { + st := &securityState{} + st.flag = req.flags.AddString("security", SecuritySandbox) + cmd.setExternalValue(securityKey, st) + return nil +} + +func runSecurityPostHook(cmd *RunCommand, req parseRequest) error { + st := cmd.getExternalValue(securityKey).(*securityState) + if st == nil { + return errors.Errorf("no security state") + } + + value := st.flag.Value + if !isValidSecurity(value) { + return errors.Errorf("security %q is not valid", value) + } + + st.security = value + + return nil +} + +func GetSecurity(cmd *RunCommand) string { + return cmd.getExternalValue(securityKey).(*securityState).security +} + +type securityState struct { + flag *Flag + security string +} diff --git 
a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_secrets.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_secrets.go new file mode 100644 index 00000000..6cce1191 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_secrets.go @@ -0,0 +1,7 @@ +// +build dfsecrets + +package instructions + +func isSecretMountsSupported() bool { + return true +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_ssh.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_ssh.go new file mode 100644 index 00000000..0b94a564 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_ssh.go @@ -0,0 +1,7 @@ +// +build dfssh + +package instructions + +func isSSHMountsSupported() bool { + return true +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/errors_unix.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/errors_unix.go new file mode 100644 index 00000000..0b03b34c --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/errors_unix.go @@ -0,0 +1,9 @@ +// +build !windows + +package instructions + +import "fmt" + +func errNotJSON(command, _ string) error { + return fmt.Errorf("%s requires the arguments to be in JSON form", command) +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/errors_windows.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/errors_windows.go new file mode 100644 index 00000000..a4843c5b --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/errors_windows.go @@ -0,0 +1,27 @@ +package instructions + +import ( + "fmt" + "path/filepath" + "regexp" + "strings" +) + +func errNotJSON(command, original string) error { + // For Windows users, give a hint if it looks like it might contain + // a path which hasn't been escaped such as ["c:\windows\system32\prog.exe", "-param"], + // as JSON must be escaped. Unfortunate... + // + // Specifically looking for quote-driveletter-colon-backslash, there's no + // double backslash and a [] pair. No, this is not perfect, but it doesn't + // have to be. It's simply a hint to make life a little easier. + extra := "" + original = filepath.FromSlash(strings.ToLower(strings.Replace(strings.ToLower(original), strings.ToLower(command)+" ", "", -1))) + if len(regexp.MustCompile(`"[a-z]:\\.*`).FindStringSubmatch(original)) > 0 && + !strings.Contains(original, `\\`) && + strings.Contains(original, "[") && + strings.Contains(original, "]") { + extra = fmt.Sprintf(`. It looks like '%s' includes a file path without an escaped back-slash. 
JSON requires back-slashes to be escaped such as ["c:\\path\\to\\file.exe", "/parameter"]`, original) + } + return fmt.Errorf("%s requires the arguments to be in JSON form%s", command, extra) +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/parse.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/parse.go new file mode 100644 index 00000000..5dee06f1 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/parse.go @@ -0,0 +1,650 @@ +package instructions + +import ( + "fmt" + "regexp" + "sort" + "strconv" + "strings" + "time" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/strslice" + "github.com/moby/buildkit/frontend/dockerfile/command" + "github.com/moby/buildkit/frontend/dockerfile/parser" + "github.com/pkg/errors" +) + +type parseRequest struct { + command string + args []string + attributes map[string]bool + flags *BFlags + original string +} + +var parseRunPreHooks []func(*RunCommand, parseRequest) error +var parseRunPostHooks []func(*RunCommand, parseRequest) error + +func nodeArgs(node *parser.Node) []string { + result := []string{} + for ; node.Next != nil; node = node.Next { + arg := node.Next + if len(arg.Children) == 0 { + result = append(result, arg.Value) + } else if len(arg.Children) == 1 { + //sub command + result = append(result, arg.Children[0].Value) + result = append(result, nodeArgs(arg.Children[0])...) + } + } + return result +} + +func newParseRequestFromNode(node *parser.Node) parseRequest { + return parseRequest{ + command: node.Value, + args: nodeArgs(node), + attributes: node.Attributes, + original: node.Original, + flags: NewBFlagsWithArgs(node.Flags), + } +} + +// ParseInstruction converts an AST to a typed instruction (either a command or a build stage beginning when encountering a `FROM` statement) +func ParseInstruction(node *parser.Node) (interface{}, error) { + req := newParseRequestFromNode(node) + switch node.Value { + case command.Env: + return parseEnv(req) + case command.Maintainer: + return parseMaintainer(req) + case command.Label: + return parseLabel(req) + case command.Add: + return parseAdd(req) + case command.Copy: + return parseCopy(req) + case command.From: + return parseFrom(req) + case command.Onbuild: + return parseOnBuild(req) + case command.Workdir: + return parseWorkdir(req) + case command.Run: + return parseRun(req) + case command.Cmd: + return parseCmd(req) + case command.Healthcheck: + return parseHealthcheck(req) + case command.Entrypoint: + return parseEntrypoint(req) + case command.Expose: + return parseExpose(req) + case command.User: + return parseUser(req) + case command.Volume: + return parseVolume(req) + case command.StopSignal: + return parseStopSignal(req) + case command.Arg: + return parseArg(req) + case command.Shell: + return parseShell(req) + } + + return nil, &UnknownInstruction{Instruction: node.Value, Line: node.StartLine} +} + +// ParseCommand converts an AST to a typed Command +func ParseCommand(node *parser.Node) (Command, error) { + s, err := ParseInstruction(node) + if err != nil { + return nil, err + } + if c, ok := s.(Command); ok { + return c, nil + } + return nil, errors.Errorf("%T is not a command type", s) +} + +// UnknownInstruction represents an error occurring when a command is unresolvable +type UnknownInstruction struct { + Line int + Instruction string +} + +func (e *UnknownInstruction) Error() string { + return fmt.Sprintf("unknown instruction: %s", strings.ToUpper(e.Instruction)) 
+} + +// IsUnknownInstruction checks if the error is an UnknownInstruction or a parseError containing an UnknownInstruction +func IsUnknownInstruction(err error) bool { + _, ok := err.(*UnknownInstruction) + if !ok { + var pe *parseError + if pe, ok = err.(*parseError); ok { + _, ok = pe.inner.(*UnknownInstruction) + } + } + return ok +} + +type parseError struct { + inner error + node *parser.Node +} + +func (e *parseError) Error() string { + return fmt.Sprintf("Dockerfile parse error line %d: %v", e.node.StartLine, e.inner.Error()) +} + +// Parse a Dockerfile into a collection of buildable stages. +// metaArgs is a collection of ARG instructions that occur before the first FROM. +func Parse(ast *parser.Node) (stages []Stage, metaArgs []ArgCommand, err error) { + for _, n := range ast.Children { + cmd, err := ParseInstruction(n) + if err != nil { + return nil, nil, &parseError{inner: err, node: n} + } + if len(stages) == 0 { + // meta arg case + if a, isArg := cmd.(*ArgCommand); isArg { + metaArgs = append(metaArgs, *a) + continue + } + } + switch c := cmd.(type) { + case *Stage: + stages = append(stages, *c) + case Command: + stage, err := CurrentStage(stages) + if err != nil { + return nil, nil, err + } + stage.AddCommand(c) + default: + return nil, nil, errors.Errorf("%T is not a command type", cmd) + } + + } + return stages, metaArgs, nil +} + +func parseKvps(args []string, cmdName string) (KeyValuePairs, error) { + if len(args) == 0 { + return nil, errAtLeastOneArgument(cmdName) + } + if len(args)%2 != 0 { + // should never get here, but just in case + return nil, errTooManyArguments(cmdName) + } + var res KeyValuePairs + for j := 0; j < len(args); j += 2 { + if len(args[j]) == 0 { + return nil, errBlankCommandNames(cmdName) + } + name := args[j] + value := args[j+1] + res = append(res, KeyValuePair{Key: name, Value: value}) + } + return res, nil +} + +func parseEnv(req parseRequest) (*EnvCommand, error) { + + if err := req.flags.Parse(); err != nil { + return nil, err + } + envs, err := parseKvps(req.args, "ENV") + if err != nil { + return nil, err + } + return &EnvCommand{ + Env: envs, + withNameAndCode: newWithNameAndCode(req), + }, nil +} + +func parseMaintainer(req parseRequest) (*MaintainerCommand, error) { + if len(req.args) != 1 { + return nil, errExactlyOneArgument("MAINTAINER") + } + + if err := req.flags.Parse(); err != nil { + return nil, err + } + return &MaintainerCommand{ + Maintainer: req.args[0], + withNameAndCode: newWithNameAndCode(req), + }, nil +} + +func parseLabel(req parseRequest) (*LabelCommand, error) { + + if err := req.flags.Parse(); err != nil { + return nil, err + } + + labels, err := parseKvps(req.args, "LABEL") + if err != nil { + return nil, err + } + + return &LabelCommand{ + Labels: labels, + withNameAndCode: newWithNameAndCode(req), + }, nil +} + +func parseAdd(req parseRequest) (*AddCommand, error) { + if len(req.args) < 2 { + return nil, errNoDestinationArgument("ADD") + } + flChown := req.flags.AddString("chown", "") + if err := req.flags.Parse(); err != nil { + return nil, err + } + return &AddCommand{ + SourcesAndDest: SourcesAndDest(req.args), + withNameAndCode: newWithNameAndCode(req), + Chown: flChown.Value, + }, nil +} + +func parseCopy(req parseRequest) (*CopyCommand, error) { + if len(req.args) < 2 { + return nil, errNoDestinationArgument("COPY") + } + flChown := req.flags.AddString("chown", "") + flFrom := req.flags.AddString("from", "") + if err := req.flags.Parse(); err != nil { + return nil, err + } + return &CopyCommand{ + 
SourcesAndDest: SourcesAndDest(req.args), + From: flFrom.Value, + withNameAndCode: newWithNameAndCode(req), + Chown: flChown.Value, + }, nil +} + +func parseFrom(req parseRequest) (*Stage, error) { + stageName, err := parseBuildStageName(req.args) + if err != nil { + return nil, err + } + + flPlatform := req.flags.AddString("platform", "") + if err := req.flags.Parse(); err != nil { + return nil, err + } + + code := strings.TrimSpace(req.original) + return &Stage{ + BaseName: req.args[0], + Name: stageName, + SourceCode: code, + Commands: []Command{}, + Platform: flPlatform.Value, + }, nil + +} + +func parseBuildStageName(args []string) (string, error) { + stageName := "" + switch { + case len(args) == 3 && strings.EqualFold(args[1], "as"): + stageName = strings.ToLower(args[2]) + if ok, _ := regexp.MatchString("^[a-z][a-z0-9-_\\.]*$", stageName); !ok { + return "", errors.Errorf("invalid name for build stage: %q, name can't start with a number or contain symbols", args[2]) + } + case len(args) != 1: + return "", errors.New("FROM requires either one or three arguments") + } + + return stageName, nil +} + +func parseOnBuild(req parseRequest) (*OnbuildCommand, error) { + if len(req.args) == 0 { + return nil, errAtLeastOneArgument("ONBUILD") + } + if err := req.flags.Parse(); err != nil { + return nil, err + } + + triggerInstruction := strings.ToUpper(strings.TrimSpace(req.args[0])) + switch strings.ToUpper(triggerInstruction) { + case "ONBUILD": + return nil, errors.New("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed") + case "MAINTAINER", "FROM": + return nil, fmt.Errorf("%s isn't allowed as an ONBUILD trigger", triggerInstruction) + } + + original := regexp.MustCompile(`(?i)^\s*ONBUILD\s*`).ReplaceAllString(req.original, "") + return &OnbuildCommand{ + Expression: original, + withNameAndCode: newWithNameAndCode(req), + }, nil + +} + +func parseWorkdir(req parseRequest) (*WorkdirCommand, error) { + if len(req.args) != 1 { + return nil, errExactlyOneArgument("WORKDIR") + } + + err := req.flags.Parse() + if err != nil { + return nil, err + } + return &WorkdirCommand{ + Path: req.args[0], + withNameAndCode: newWithNameAndCode(req), + }, nil + +} + +func parseShellDependentCommand(req parseRequest, emptyAsNil bool) ShellDependantCmdLine { + args := handleJSONArgs(req.args, req.attributes) + cmd := strslice.StrSlice(args) + if emptyAsNil && len(cmd) == 0 { + cmd = nil + } + return ShellDependantCmdLine{ + CmdLine: cmd, + PrependShell: !req.attributes["json"], + } +} + +func parseRun(req parseRequest) (*RunCommand, error) { + cmd := &RunCommand{} + + for _, fn := range parseRunPreHooks { + if err := fn(cmd, req); err != nil { + return nil, err + } + } + + if err := req.flags.Parse(); err != nil { + return nil, err + } + + cmd.ShellDependantCmdLine = parseShellDependentCommand(req, false) + cmd.withNameAndCode = newWithNameAndCode(req) + + for _, fn := range parseRunPostHooks { + if err := fn(cmd, req); err != nil { + return nil, err + } + } + + return cmd, nil +} + +func parseCmd(req parseRequest) (*CmdCommand, error) { + if err := req.flags.Parse(); err != nil { + return nil, err + } + return &CmdCommand{ + ShellDependantCmdLine: parseShellDependentCommand(req, false), + withNameAndCode: newWithNameAndCode(req), + }, nil + +} + +func parseEntrypoint(req parseRequest) (*EntrypointCommand, error) { + if err := req.flags.Parse(); err != nil { + return nil, err + } + + cmd := &EntrypointCommand{ + ShellDependantCmdLine: parseShellDependentCommand(req, true), + withNameAndCode: 
newWithNameAndCode(req), + } + + return cmd, nil +} + +// parseOptInterval(flag) is the duration of flag.Value, or 0 if +// empty. An error is reported if the value is given and less than minimum duration. +func parseOptInterval(f *Flag) (time.Duration, error) { + s := f.Value + if s == "" { + return 0, nil + } + d, err := time.ParseDuration(s) + if err != nil { + return 0, err + } + if d < container.MinimumDuration { + return 0, fmt.Errorf("Interval %#v cannot be less than %s", f.name, container.MinimumDuration) + } + return d, nil +} +func parseHealthcheck(req parseRequest) (*HealthCheckCommand, error) { + if len(req.args) == 0 { + return nil, errAtLeastOneArgument("HEALTHCHECK") + } + cmd := &HealthCheckCommand{ + withNameAndCode: newWithNameAndCode(req), + } + + typ := strings.ToUpper(req.args[0]) + args := req.args[1:] + if typ == "NONE" { + if len(args) != 0 { + return nil, errors.New("HEALTHCHECK NONE takes no arguments") + } + test := strslice.StrSlice{typ} + cmd.Health = &container.HealthConfig{ + Test: test, + } + } else { + + healthcheck := container.HealthConfig{} + + flInterval := req.flags.AddString("interval", "") + flTimeout := req.flags.AddString("timeout", "") + flStartPeriod := req.flags.AddString("start-period", "") + flRetries := req.flags.AddString("retries", "") + + if err := req.flags.Parse(); err != nil { + return nil, err + } + + switch typ { + case "CMD": + cmdSlice := handleJSONArgs(args, req.attributes) + if len(cmdSlice) == 0 { + return nil, errors.New("Missing command after HEALTHCHECK CMD") + } + + if !req.attributes["json"] { + typ = "CMD-SHELL" + } + + healthcheck.Test = strslice.StrSlice(append([]string{typ}, cmdSlice...)) + default: + return nil, fmt.Errorf("Unknown type %#v in HEALTHCHECK (try CMD)", typ) + } + + interval, err := parseOptInterval(flInterval) + if err != nil { + return nil, err + } + healthcheck.Interval = interval + + timeout, err := parseOptInterval(flTimeout) + if err != nil { + return nil, err + } + healthcheck.Timeout = timeout + + startPeriod, err := parseOptInterval(flStartPeriod) + if err != nil { + return nil, err + } + healthcheck.StartPeriod = startPeriod + + if flRetries.Value != "" { + retries, err := strconv.ParseInt(flRetries.Value, 10, 32) + if err != nil { + return nil, err + } + if retries < 1 { + return nil, fmt.Errorf("--retries must be at least 1 (not %d)", retries) + } + healthcheck.Retries = int(retries) + } else { + healthcheck.Retries = 0 + } + + cmd.Health = &healthcheck + } + return cmd, nil +} + +func parseExpose(req parseRequest) (*ExposeCommand, error) { + portsTab := req.args + + if len(req.args) == 0 { + return nil, errAtLeastOneArgument("EXPOSE") + } + + if err := req.flags.Parse(); err != nil { + return nil, err + } + + sort.Strings(portsTab) + return &ExposeCommand{ + Ports: portsTab, + withNameAndCode: newWithNameAndCode(req), + }, nil +} + +func parseUser(req parseRequest) (*UserCommand, error) { + if len(req.args) != 1 { + return nil, errExactlyOneArgument("USER") + } + + if err := req.flags.Parse(); err != nil { + return nil, err + } + return &UserCommand{ + User: req.args[0], + withNameAndCode: newWithNameAndCode(req), + }, nil +} + +func parseVolume(req parseRequest) (*VolumeCommand, error) { + if len(req.args) == 0 { + return nil, errAtLeastOneArgument("VOLUME") + } + + if err := req.flags.Parse(); err != nil { + return nil, err + } + + cmd := &VolumeCommand{ + withNameAndCode: newWithNameAndCode(req), + } + + for _, v := range req.args { + v = strings.TrimSpace(v) + if v == "" { + return nil, 
errors.New("VOLUME specified can not be an empty string") + } + cmd.Volumes = append(cmd.Volumes, v) + } + return cmd, nil + +} + +func parseStopSignal(req parseRequest) (*StopSignalCommand, error) { + if len(req.args) != 1 { + return nil, errExactlyOneArgument("STOPSIGNAL") + } + sig := req.args[0] + + cmd := &StopSignalCommand{ + Signal: sig, + withNameAndCode: newWithNameAndCode(req), + } + return cmd, nil + +} + +func parseArg(req parseRequest) (*ArgCommand, error) { + if len(req.args) != 1 { + return nil, errExactlyOneArgument("ARG") + } + + kvpo := KeyValuePairOptional{} + + arg := req.args[0] + // 'arg' can just be a name or name-value pair. Note that this is different + // from 'env' that handles the split of name and value at the parser level. + // The reason for doing it differently for 'arg' is that we support just + // defining an arg and not assign it a value (while 'env' always expects a + // name-value pair). If possible, it will be good to harmonize the two. + if strings.Contains(arg, "=") { + parts := strings.SplitN(arg, "=", 2) + if len(parts[0]) == 0 { + return nil, errBlankCommandNames("ARG") + } + + kvpo.Key = parts[0] + kvpo.Value = &parts[1] + } else { + kvpo.Key = arg + } + + return &ArgCommand{ + KeyValuePairOptional: kvpo, + withNameAndCode: newWithNameAndCode(req), + }, nil +} + +func parseShell(req parseRequest) (*ShellCommand, error) { + if err := req.flags.Parse(); err != nil { + return nil, err + } + shellSlice := handleJSONArgs(req.args, req.attributes) + switch { + case len(shellSlice) == 0: + // SHELL [] + return nil, errAtLeastOneArgument("SHELL") + case req.attributes["json"]: + // SHELL ["powershell", "-command"] + + return &ShellCommand{ + Shell: strslice.StrSlice(shellSlice), + withNameAndCode: newWithNameAndCode(req), + }, nil + default: + // SHELL powershell -command - not JSON + return nil, errNotJSON("SHELL", req.original) + } +} + +func errAtLeastOneArgument(command string) error { + return errors.Errorf("%s requires at least one argument", command) +} + +func errExactlyOneArgument(command string) error { + return errors.Errorf("%s requires exactly one argument", command) +} + +func errNoDestinationArgument(command string) error { + return errors.Errorf("%s requires at least two arguments, but only one was provided. 
Destination could not be determined.", command) +} + +func errBlankCommandNames(command string) error { + return errors.Errorf("%s names can not be blank", command) +} + +func errTooManyArguments(command string) error { + return errors.Errorf("Bad input to %s, too many arguments", command) +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/support.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/support.go new file mode 100644 index 00000000..beefe775 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/support.go @@ -0,0 +1,19 @@ +package instructions + +import "strings" + +// handleJSONArgs parses command passed to CMD, ENTRYPOINT, RUN and SHELL instruction in Dockerfile +// for exec form it returns untouched args slice +// for shell form it returns concatenated args as the first element of a slice +func handleJSONArgs(args []string, attributes map[string]bool) []string { + if len(args) == 0 { + return []string{} + } + + if attributes != nil && attributes["json"] { + return args + } + + // literal string command, not an exec array + return []string{strings.Join(args, " ")} +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/line_parsers.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/line_parsers.go new file mode 100644 index 00000000..15f00ce7 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/line_parsers.go @@ -0,0 +1,368 @@ +package parser + +// line parsers are dispatch calls that parse a single unit of text into a +// Node object which contains the whole statement. Dockerfiles have varied +// (but not usually unique, see ONBUILD for a unique example) parsing rules +// per-command, and these unify the processing in a way that makes it +// manageable. + +import ( + "encoding/json" + "errors" + "fmt" + "strings" + "unicode" + "unicode/utf8" +) + +var ( + errDockerfileNotStringArray = errors.New("when using JSON array syntax, arrays must be comprised of strings only") +) + +const ( + commandLabel = "LABEL" +) + +// ignore the current argument. This will still leave a command parsed, but +// will not incorporate the arguments into the ast. +func parseIgnore(rest string, d *Directive) (*Node, map[string]bool, error) { + return &Node{}, nil, nil +} + +// used for onbuild. Could potentially be used for anything that represents a +// statement with sub-statements. +// +// ONBUILD RUN foo bar -> (onbuild (run foo bar)) +// +func parseSubCommand(rest string, d *Directive) (*Node, map[string]bool, error) { + if rest == "" { + return nil, nil, nil + } + + child, err := newNodeFromLine(rest, d) + if err != nil { + return nil, nil, err + } + + return &Node{Children: []*Node{child}}, nil, nil +} + +// helper to parse words (i.e space delimited or quoted strings) in a statement. +// The quotes are preserved as part of this function and they are stripped later +// as part of processWords(). 
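+//
+// Illustrative sketch of the expected output (not part of the upstream
+// source), assuming the default escape token:
+//
+//	parseWords(`k1="a b" k2=c`, d) // => []string{`k1="a b"`, `k2=c`}
+//
+// Note that the quotes are still attached here; processWords() removes
+// them in a later pass.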
+func parseWords(rest string, d *Directive) []string { + const ( + inSpaces = iota // looking for start of a word + inWord + inQuote + ) + + words := []string{} + phase := inSpaces + word := "" + quote := '\000' + blankOK := false + var ch rune + var chWidth int + + for pos := 0; pos <= len(rest); pos += chWidth { + if pos != len(rest) { + ch, chWidth = utf8.DecodeRuneInString(rest[pos:]) + } + + if phase == inSpaces { // Looking for start of word + if pos == len(rest) { // end of input + break + } + if unicode.IsSpace(ch) { // skip spaces + continue + } + phase = inWord // found it, fall through + } + if (phase == inWord || phase == inQuote) && (pos == len(rest)) { + if blankOK || len(word) > 0 { + words = append(words, word) + } + break + } + if phase == inWord { + if unicode.IsSpace(ch) { + phase = inSpaces + if blankOK || len(word) > 0 { + words = append(words, word) + } + word = "" + blankOK = false + continue + } + if ch == '\'' || ch == '"' { + quote = ch + blankOK = true + phase = inQuote + } + if ch == d.escapeToken { + if pos+chWidth == len(rest) { + continue // just skip an escape token at end of line + } + // If we're not quoted and we see an escape token, then always just + // add the escape token plus the char to the word, even if the char + // is a quote. + word += string(ch) + pos += chWidth + ch, chWidth = utf8.DecodeRuneInString(rest[pos:]) + } + word += string(ch) + continue + } + if phase == inQuote { + if ch == quote { + phase = inWord + } + // The escape token is special except for ' quotes - can't escape anything for ' + if ch == d.escapeToken && quote != '\'' { + if pos+chWidth == len(rest) { + phase = inWord + continue // just skip the escape token at end + } + pos += chWidth + word += string(ch) + ch, chWidth = utf8.DecodeRuneInString(rest[pos:]) + } + word += string(ch) + } + } + + return words +} + +// parse environment like statements. Note that this does *not* handle +// variable interpolation, which will be handled in the evaluator. +func parseNameVal(rest string, key string, d *Directive) (*Node, error) { + // This is kind of tricky because we need to support the old + // variant: KEY name value + // as well as the new one: KEY name=value ... + // The trigger to know which one is being used will be whether we hit + // a space or = first. space ==> old, "=" ==> new + + words := parseWords(rest, d) + if len(words) == 0 { + return nil, nil + } + + // Old format (KEY name value) + if !strings.Contains(words[0], "=") { + parts := tokenWhitespace.Split(rest, 2) + if len(parts) < 2 { + return nil, fmt.Errorf(key + " must have two arguments") + } + return newKeyValueNode(parts[0], parts[1]), nil + } + + var rootNode *Node + var prevNode *Node + for _, word := range words { + if !strings.Contains(word, "=") { + return nil, fmt.Errorf("Syntax error - can't find = in %q. 
Must be of the form: name=value", word) + } + + parts := strings.SplitN(word, "=", 2) + node := newKeyValueNode(parts[0], parts[1]) + rootNode, prevNode = appendKeyValueNode(node, rootNode, prevNode) + } + + return rootNode, nil +} + +func newKeyValueNode(key, value string) *Node { + return &Node{ + Value: key, + Next: &Node{Value: value}, + } +} + +func appendKeyValueNode(node, rootNode, prevNode *Node) (*Node, *Node) { + if rootNode == nil { + rootNode = node + } + if prevNode != nil { + prevNode.Next = node + } + + prevNode = node.Next + return rootNode, prevNode +} + +func parseEnv(rest string, d *Directive) (*Node, map[string]bool, error) { + node, err := parseNameVal(rest, "ENV", d) + return node, nil, err +} + +func parseLabel(rest string, d *Directive) (*Node, map[string]bool, error) { + node, err := parseNameVal(rest, commandLabel, d) + return node, nil, err +} + +// parses a statement containing one or more keyword definition(s) and/or +// value assignments, like `name1 name2= name3="" name4=value`. +// Note that this is a stricter format than the old format of assignment, +// allowed by parseNameVal(), in a way that this only allows assignment of the +// form `keyword=[]` like `name2=`, `name3=""`, and `name4=value` above. +// In addition, a keyword definition alone is of the form `keyword` like `name1` +// above. And the assignments `name2=` and `name3=""` are equivalent and +// assign an empty value to the respective keywords. +func parseNameOrNameVal(rest string, d *Directive) (*Node, map[string]bool, error) { + words := parseWords(rest, d) + if len(words) == 0 { + return nil, nil, nil + } + + var ( + rootnode *Node + prevNode *Node + ) + for i, word := range words { + node := &Node{} + node.Value = word + if i == 0 { + rootnode = node + } else { + prevNode.Next = node + } + prevNode = node + } + + return rootnode, nil, nil +} + +// parses a whitespace-delimited set of arguments. The result is effectively a +// linked list of string arguments. +func parseStringsWhitespaceDelimited(rest string, d *Directive) (*Node, map[string]bool, error) { + if rest == "" { + return nil, nil, nil + } + + node := &Node{} + rootnode := node + prevnode := node + for _, str := range tokenWhitespace.Split(rest, -1) { // use regexp + prevnode = node + node.Value = str + node.Next = &Node{} + node = node.Next + } + + // XXX to get around regexp.Split *always* providing an empty string at the + // end due to how our loop is constructed, nil out the last node in the + // chain. + prevnode.Next = nil + + return rootnode, nil, nil +} + +// parseString just wraps the string in quotes and returns a working node. +func parseString(rest string, d *Directive) (*Node, map[string]bool, error) { + if rest == "" { + return nil, nil, nil + } + n := &Node{} + n.Value = rest + return n, nil, nil +} + +// parseJSON converts JSON arrays to an AST. 
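+//
+// For example (illustrative), rest = `["echo", "hi"]` yields the node
+// chain "echo" -> "hi" together with the attribute map {"json": true}.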
+func parseJSON(rest string, d *Directive) (*Node, map[string]bool, error) { + rest = strings.TrimLeftFunc(rest, unicode.IsSpace) + if !strings.HasPrefix(rest, "[") { + return nil, nil, fmt.Errorf(`Error parsing "%s" as a JSON array`, rest) + } + + var myJSON []interface{} + if err := json.NewDecoder(strings.NewReader(rest)).Decode(&myJSON); err != nil { + return nil, nil, err + } + + var top, prev *Node + for _, str := range myJSON { + s, ok := str.(string) + if !ok { + return nil, nil, errDockerfileNotStringArray + } + + node := &Node{Value: s} + if prev == nil { + top = node + } else { + prev.Next = node + } + prev = node + } + + return top, map[string]bool{"json": true}, nil +} + +// parseMaybeJSON determines if the argument appears to be a JSON array. If +// so, passes to parseJSON; if not, quotes the result and returns a single +// node. +func parseMaybeJSON(rest string, d *Directive) (*Node, map[string]bool, error) { + if rest == "" { + return nil, nil, nil + } + + node, attrs, err := parseJSON(rest, d) + + if err == nil { + return node, attrs, nil + } + if err == errDockerfileNotStringArray { + return nil, nil, err + } + + node = &Node{} + node.Value = rest + return node, nil, nil +} + +// parseMaybeJSONToList determines if the argument appears to be a JSON array. If +// so, passes to parseJSON; if not, attempts to parse it as a whitespace +// delimited string. +func parseMaybeJSONToList(rest string, d *Directive) (*Node, map[string]bool, error) { + node, attrs, err := parseJSON(rest, d) + + if err == nil { + return node, attrs, nil + } + if err == errDockerfileNotStringArray { + return nil, nil, err + } + + return parseStringsWhitespaceDelimited(rest, d) +} + +// The HEALTHCHECK command is like parseMaybeJSON, but has an extra type argument. +func parseHealthConfig(rest string, d *Directive) (*Node, map[string]bool, error) { + // Find end of first argument + var sep int + for ; sep < len(rest); sep++ { + if unicode.IsSpace(rune(rest[sep])) { + break + } + } + next := sep + for ; next < len(rest); next++ { + if !unicode.IsSpace(rune(rest[next])) { + break + } + } + + if sep == 0 { + return nil, nil, nil + } + + typ := rest[:sep] + cmd, attrs, err := parseMaybeJSON(rest[next:], d) + if err != nil { + return nil, nil, err + } + + return &Node{Value: typ, Next: cmd}, attrs, err +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/parser.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/parser.go new file mode 100644 index 00000000..e9268abb --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/parser.go @@ -0,0 +1,332 @@ +// Package parser implements a parser and parse tree dumper for Dockerfiles. +package parser + +import ( + "bufio" + "bytes" + "fmt" + "io" + "regexp" + "strconv" + "strings" + "unicode" + + "github.com/moby/buildkit/frontend/dockerfile/command" + "github.com/pkg/errors" +) + +// Node is a structure used to represent a parse tree. +// +// In the node there are three fields, Value, Next, and Children. Value is the +// current token's string value. Next is always the next non-child token, and +// children contains all the children. Here's an example: +// +// (value next (child child-next child-next-next) next-next) +// +// This data structure is frankly pretty lousy for handling complex languages, +// but lucky for us the Dockerfile isn't very complicated. This structure +// works a little more effectively than a "proper" parse tree for our needs. 
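+//
+// As an illustrative sketch (not part of the upstream source), the line
+// `ENV a=b c=d` parses to a Node with Value "env" whose Next chain is
+// "a" -> "b" -> "c" -> "d".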
+// +type Node struct { + Value string // actual content + Next *Node // the next item in the current sexp + Children []*Node // the children of this sexp + Attributes map[string]bool // special attributes for this node + Original string // original line used before parsing + Flags []string // only top Node should have this set + StartLine int // the line in the original dockerfile where the node begins + EndLine int // the line in the original dockerfile where the node ends +} + +// Dump dumps the AST defined by `node` as a list of sexps. +// Returns a string suitable for printing. +func (node *Node) Dump() string { + str := "" + str += node.Value + + if len(node.Flags) > 0 { + str += fmt.Sprintf(" %q", node.Flags) + } + + for _, n := range node.Children { + str += "(" + n.Dump() + ")\n" + } + + for n := node.Next; n != nil; n = n.Next { + if len(n.Children) > 0 { + str += " " + n.Dump() + } else { + str += " " + strconv.Quote(n.Value) + } + } + + return strings.TrimSpace(str) +} + +func (node *Node) lines(start, end int) { + node.StartLine = start + node.EndLine = end +} + +// AddChild adds a new child node, and updates line information +func (node *Node) AddChild(child *Node, startLine, endLine int) { + child.lines(startLine, endLine) + if node.StartLine < 0 { + node.StartLine = startLine + } + node.EndLine = endLine + node.Children = append(node.Children, child) +} + +var ( + dispatch map[string]func(string, *Directive) (*Node, map[string]bool, error) + tokenWhitespace = regexp.MustCompile(`[\t\v\f\r ]+`) + tokenEscapeCommand = regexp.MustCompile(`^#[ \t]*escape[ \t]*=[ \t]*(?P.).*$`) + tokenComment = regexp.MustCompile(`^#.*$`) +) + +// DefaultEscapeToken is the default escape token +const DefaultEscapeToken = '\\' + +// Directive is the structure used during a build run to hold the state of +// parsing directives. +type Directive struct { + escapeToken rune // Current escape token + lineContinuationRegex *regexp.Regexp // Current line continuation regex + processingComplete bool // Whether we are done looking for directives + escapeSeen bool // Whether the escape directive has been seen +} + +// setEscapeToken sets the default token for escaping characters in a Dockerfile. +func (d *Directive) setEscapeToken(s string) error { + if s != "`" && s != "\\" { + return fmt.Errorf("invalid ESCAPE '%s'. Must be ` or \\", s) + } + d.escapeToken = rune(s[0]) + d.lineContinuationRegex = regexp.MustCompile(`\` + s + `[ \t]*$`) + return nil +} + +// possibleParserDirective looks for parser directives, eg '# escapeToken='. +// Parser directives must precede any builder instruction or other comments, +// and cannot be repeated. +func (d *Directive) possibleParserDirective(line string) error { + if d.processingComplete { + return nil + } + + tecMatch := tokenEscapeCommand.FindStringSubmatch(strings.ToLower(line)) + if len(tecMatch) != 0 { + for i, n := range tokenEscapeCommand.SubexpNames() { + if n == "escapechar" { + if d.escapeSeen { + return errors.New("only one escape parser directive can be used") + } + d.escapeSeen = true + return d.setEscapeToken(tecMatch[i]) + } + } + } + + d.processingComplete = true + return nil +} + +// NewDefaultDirective returns a new Directive with the default escapeToken token +func NewDefaultDirective() *Directive { + directive := Directive{} + directive.setEscapeToken(string(DefaultEscapeToken)) + return &directive +} + +func init() { + // Dispatch Table. see line_parsers.go for the parse functions. + // The command is parsed and mapped to the line parser. 
The line parser + // receives the arguments but not the command, and returns an AST after + // reformulating the arguments according to the rules in the parser + // functions. Errors are propagated up by Parse() and the resulting AST can + // be incorporated directly into the existing AST as a next. + dispatch = map[string]func(string, *Directive) (*Node, map[string]bool, error){ + command.Add: parseMaybeJSONToList, + command.Arg: parseNameOrNameVal, + command.Cmd: parseMaybeJSON, + command.Copy: parseMaybeJSONToList, + command.Entrypoint: parseMaybeJSON, + command.Env: parseEnv, + command.Expose: parseStringsWhitespaceDelimited, + command.From: parseStringsWhitespaceDelimited, + command.Healthcheck: parseHealthConfig, + command.Label: parseLabel, + command.Maintainer: parseString, + command.Onbuild: parseSubCommand, + command.Run: parseMaybeJSON, + command.Shell: parseMaybeJSON, + command.StopSignal: parseString, + command.User: parseString, + command.Volume: parseMaybeJSONToList, + command.Workdir: parseString, + } +} + +// newNodeFromLine splits the line into parts, and dispatches to a function +// based on the command and command arguments. A Node is created from the +// result of the dispatch. +func newNodeFromLine(line string, directive *Directive) (*Node, error) { + cmd, flags, args, err := splitCommand(line) + if err != nil { + return nil, err + } + + fn := dispatch[cmd] + // Ignore invalid Dockerfile instructions + if fn == nil { + fn = parseIgnore + } + next, attrs, err := fn(args, directive) + if err != nil { + return nil, err + } + + return &Node{ + Value: cmd, + Original: line, + Flags: flags, + Next: next, + Attributes: attrs, + }, nil +} + +// Result is the result of parsing a Dockerfile +type Result struct { + AST *Node + EscapeToken rune + Warnings []string +} + +// PrintWarnings to the writer +func (r *Result) PrintWarnings(out io.Writer) { + if len(r.Warnings) == 0 { + return + } + fmt.Fprintf(out, strings.Join(r.Warnings, "\n")+"\n") +} + +// Parse reads lines from a Reader, parses the lines into an AST and returns +// the AST and escape token +func Parse(rwc io.Reader) (*Result, error) { + d := NewDefaultDirective() + currentLine := 0 + root := &Node{StartLine: -1} + scanner := bufio.NewScanner(rwc) + warnings := []string{} + + var err error + for scanner.Scan() { + bytesRead := scanner.Bytes() + if currentLine == 0 { + // First line, strip the byte-order-marker if present + bytesRead = bytes.TrimPrefix(bytesRead, utf8bom) + } + bytesRead, err = processLine(d, bytesRead, true) + if err != nil { + return nil, err + } + currentLine++ + + startLine := currentLine + line, isEndOfLine := trimContinuationCharacter(string(bytesRead), d) + if isEndOfLine && line == "" { + continue + } + + var hasEmptyContinuationLine bool + for !isEndOfLine && scanner.Scan() { + bytesRead, err := processLine(d, scanner.Bytes(), false) + if err != nil { + return nil, err + } + currentLine++ + + if isComment(scanner.Bytes()) { + // original line was a comment (processLine strips comments) + continue + } + if isEmptyContinuationLine(bytesRead) { + hasEmptyContinuationLine = true + continue + } + + continuationLine := string(bytesRead) + continuationLine, isEndOfLine = trimContinuationCharacter(continuationLine, d) + line += continuationLine + } + + if hasEmptyContinuationLine { + warnings = append(warnings, "[WARNING]: Empty continuation line found in:\n "+line) + } + + child, err := newNodeFromLine(line, d) + if err != nil { + return nil, err + } + root.AddChild(child, startLine, currentLine) + 
} + + if len(warnings) > 0 { + warnings = append(warnings, "[WARNING]: Empty continuation lines will become errors in a future release.") + } + + if root.StartLine < 0 { + return nil, errors.New("file with no instructions.") + } + + return &Result{ + AST: root, + Warnings: warnings, + EscapeToken: d.escapeToken, + }, handleScannerError(scanner.Err()) +} + +func trimComments(src []byte) []byte { + return tokenComment.ReplaceAll(src, []byte{}) +} + +func trimWhitespace(src []byte) []byte { + return bytes.TrimLeftFunc(src, unicode.IsSpace) +} + +func isComment(line []byte) bool { + return tokenComment.Match(trimWhitespace(line)) +} + +func isEmptyContinuationLine(line []byte) bool { + return len(trimWhitespace(line)) == 0 +} + +var utf8bom = []byte{0xEF, 0xBB, 0xBF} + +func trimContinuationCharacter(line string, d *Directive) (string, bool) { + if d.lineContinuationRegex.MatchString(line) { + line = d.lineContinuationRegex.ReplaceAllString(line, "") + return line, false + } + return line, true +} + +// TODO: remove stripLeftWhitespace after deprecation period. It seems silly +// to preserve whitespace on continuation lines. Why is that done? +func processLine(d *Directive, token []byte, stripLeftWhitespace bool) ([]byte, error) { + if stripLeftWhitespace { + token = trimWhitespace(token) + } + return trimComments(token), d.possibleParserDirective(string(token)) +} + +func handleScannerError(err error) error { + switch err { + case bufio.ErrTooLong: + return errors.Errorf("dockerfile line greater than max allowed size of %d", bufio.MaxScanTokenSize-1) + default: + return err + } +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/split_command.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/split_command.go new file mode 100644 index 00000000..171f454f --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/split_command.go @@ -0,0 +1,118 @@ +package parser + +import ( + "strings" + "unicode" +) + +// splitCommand takes a single line of text and parses out the cmd and args, +// which are used for dispatching to more exact parsing functions. 
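+// The command name is lowercased and any leading "--" builder flags are
+// split off before the remaining arguments are returned as a single string.
+// An illustrative sketch of the resulting values (not part of the vendored
+// code or its tests):
+//
+//	cmd, flags, args, _ := splitCommand("COPY --from=builder /src /dst")
+//	// cmd == "copy", flags == []string{"--from=builder"}, args == "/src /dst"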
+func splitCommand(line string) (string, []string, string, error) { + var args string + var flags []string + + // Make sure we get the same results irrespective of leading/trailing spaces + cmdline := tokenWhitespace.Split(strings.TrimSpace(line), 2) + cmd := strings.ToLower(cmdline[0]) + + if len(cmdline) == 2 { + var err error + args, flags, err = extractBuilderFlags(cmdline[1]) + if err != nil { + return "", nil, "", err + } + } + + return cmd, flags, strings.TrimSpace(args), nil +} + +func extractBuilderFlags(line string) (string, []string, error) { + // Parses the BuilderFlags and returns the remaining part of the line + + const ( + inSpaces = iota // looking for start of a word + inWord + inQuote + ) + + words := []string{} + phase := inSpaces + word := "" + quote := '\000' + blankOK := false + var ch rune + + for pos := 0; pos <= len(line); pos++ { + if pos != len(line) { + ch = rune(line[pos]) + } + + if phase == inSpaces { // Looking for start of word + if pos == len(line) { // end of input + break + } + if unicode.IsSpace(ch) { // skip spaces + continue + } + + // Only keep going if the next word starts with -- + if ch != '-' || pos+1 == len(line) || rune(line[pos+1]) != '-' { + return line[pos:], words, nil + } + + phase = inWord // found something with "--", fall through + } + if (phase == inWord || phase == inQuote) && (pos == len(line)) { + if word != "--" && (blankOK || len(word) > 0) { + words = append(words, word) + } + break + } + if phase == inWord { + if unicode.IsSpace(ch) { + phase = inSpaces + if word == "--" { + return line[pos:], words, nil + } + if blankOK || len(word) > 0 { + words = append(words, word) + } + word = "" + blankOK = false + continue + } + if ch == '\'' || ch == '"' { + quote = ch + blankOK = true + phase = inQuote + continue + } + if ch == '\\' { + if pos+1 == len(line) { + continue // just skip \ at end + } + pos++ + ch = rune(line[pos]) + } + word += string(ch) + continue + } + if phase == inQuote { + if ch == quote { + phase = inWord + continue + } + if ch == '\\' { + if pos+1 == len(line) { + phase = inWord + continue // just skip \ at end + } + pos++ + ch = rune(line[pos]) + } + word += string(ch) + } + } + + return "", words, nil +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/envVarTest b/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/envVarTest new file mode 100644 index 00000000..38534b0c --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/envVarTest @@ -0,0 +1,238 @@ +A|hello | hello +A|he'll'o | hello +A|he'llo | error +A|he\'llo | he'llo +A|he\\'llo | error +A|abc\tdef | abctdef +A|"abc\tdef" | abc\tdef +A|"abc\\tdef" | abc\tdef +A|'abc\tdef' | abc\tdef +A|hello\ | hello +A|hello\\ | hello\ +A|"hello | error +A|"hello\" | error +A|"hel'lo" | hel'lo +A|'hello | error +A|'hello\' | hello\ +A|'hello\there' | hello\there +A|'hello\\there' | hello\\there +A|"''" | '' +A|$. | $. +A|he$1x | hex +A|he$.x | he$.x +# Next one is different on Windows as $pwd==$PWD +U|he$pwd. | he. +W|he$pwd. | he/home. 
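+# (These fixtures assume the test harness environment provides PWD=/home,
+# SHELL=bash and KOREAN=한국어; the leading marker selects the platform:
+# A=all, U=Unix-only, W=Windows-only.)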
+A|he$PWD | he/home +A|he\$PWD | he$PWD +A|he\\$PWD | he\/home +A|"he\$PWD" | he$PWD +A|"he\\$PWD" | he\/home +A|\${} | ${} +A|\${}aaa | ${}aaa +A|he\${} | he${} +A|he\${}xx | he${}xx +A|${} | error +A|${}aaa | error +A|he${} | error +A|he${}xx | error +A|he${hi} | he +A|he${hi}xx | hexx +A|he${PWD} | he/home +A|he${.} | error +A|he${XXX:-000}xx | he000xx +A|he${PWD:-000}xx | he/homexx +A|he${XXX:-$PWD}xx | he/homexx +A|he${XXX:-${PWD:-yyy}}xx | he/homexx +A|he${XXX:-${YYY:-yyy}}xx | heyyyxx +A|he${XXX:YYY} | error +A|he${XXX?} | error +A|he${XXX:?} | error +A|he${PWD?} | he/home +A|he${PWD:?} | he/home +A|he${NULL?} | he +A|he${NULL:?} | error +A|he${XXX:+${PWD}}xx | hexx +A|he${PWD:+${XXX}}xx | hexx +A|he${PWD:+${SHELL}}xx | hebashxx +A|he${XXX:+000}xx | hexx +A|he${PWD:+000}xx | he000xx +A|'he${XX}' | he${XX} +A|"he${PWD}" | he/home +A|"he'$PWD'" | he'/home' +A|"$PWD" | /home +A|'$PWD' | $PWD +A|'\$PWD' | \$PWD +A|'"hello"' | "hello" +A|he\$PWD | he$PWD +A|"he\$PWD" | he$PWD +A|'he\$PWD' | he\$PWD +A|he${PWD | error +A|he${PWD:=000}xx | error +A|he${PWD:+${PWD}:}xx | he/home:xx +A|he${XXX:-\$PWD:}xx | he$PWD:xx +A|he${XXX:-\${PWD}z}xx | he${PWDz}xx +A|안녕하세요 | 안녕하세요 +A|안'녕'하세요 | 안녕하세요 +A|안'녕하세요 | error +A|안녕\'하세요 | 안녕'하세요 +A|안\\'녕하세요 | error +A|안녕\t하세요 | 안녕t하세요 +A|"안녕\t하세요" | 안녕\t하세요 +A|'안녕\t하세요 | error +A|안녕하세요\ | 안녕하세요 +A|안녕하세요\\ | 안녕하세요\ +A|"안녕하세요 | error +A|"안녕하세요\" | error +A|"안녕'하세요" | 안녕'하세요 +A|'안녕하세요 | error +A|'안녕하세요\' | 안녕하세요\ +A|안녕$1x | 안녕x +A|안녕$.x | 안녕$.x +# Next one is different on Windows as $pwd==$PWD +U|안녕$pwd. | 안녕. +W|안녕$pwd. | 안녕/home. +A|안녕$PWD | 안녕/home +A|안녕\$PWD | 안녕$PWD +A|안녕\\$PWD | 안녕\/home +A|안녕\${} | 안녕${} +A|안녕\${}xx | 안녕${}xx +A|안녕${} | error +A|안녕${}xx | error +A|안녕${hi} | 안녕 +A|안녕${hi}xx | 안녕xx +A|안녕${PWD} | 안녕/home +A|안녕${.} | error +A|안녕${XXX:-000}xx | 안녕000xx +A|안녕${PWD:-000}xx | 안녕/homexx +A|안녕${XXX:-$PWD}xx | 안녕/homexx +A|안녕${XXX:-${PWD:-yyy}}xx | 안녕/homexx +A|안녕${XXX:-${YYY:-yyy}}xx | 안녕yyyxx +A|안녕${XXX:YYY} | error +A|안녕${XXX:+${PWD}}xx | 안녕xx +A|안녕${PWD:+${XXX}}xx | 안녕xx +A|안녕${PWD:+${SHELL}}xx | 안녕bashxx +A|안녕${XXX:+000}xx | 안녕xx +A|안녕${PWD:+000}xx | 안녕000xx +A|'안녕${XX}' | 안녕${XX} +A|"안녕${PWD}" | 안녕/home +A|"안녕'$PWD'" | 안녕'/home' +A|'"안녕"' | "안녕" +A|안녕\$PWD | 안녕$PWD +A|"안녕\$PWD" | 안녕$PWD +A|'안녕\$PWD' | 안녕\$PWD +A|안녕${PWD | error +A|안녕${PWD:=000}xx | error +A|안녕${PWD:+${PWD}:}xx | 안녕/home:xx +A|안녕${XXX:-\$PWD:}xx | 안녕$PWD:xx +A|안녕${XXX:-\${PWD}z}xx | 안녕${PWDz}xx +A|$KOREAN | 한국어 +A|안녕$KOREAN | 안녕한국어 +A|${{aaa} | error +A|${aaa}} | } +A|${aaa | error +A|${{aaa:-bbb} | error +A|${aaa:-bbb}} | bbb} +A|${aaa:-bbb | error +A|${aaa:-bbb} | bbb +A|${aaa:-${bbb:-ccc}} | ccc +A|${aaa:-bbb ${foo} | error +A|${aaa:-bbb {foo} | bbb {foo +A|${:} | error +A|${:-bbb} | error +A|${:+bbb} | error + +# Positional parameters won't be set: +# http://pubs.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html#tag_02_05_01 +A|$1 | +A|${1} | +A|${1:+bbb} | +A|${1:-bbb} | bbb +A|$2 | +A|${2} | +A|${2:+bbb} | +A|${2:-bbb} | bbb +A|$3 | +A|${3} | +A|${3:+bbb} | +A|${3:-bbb} | bbb +A|$4 | +A|${4} | +A|${4:+bbb} | +A|${4:-bbb} | bbb +A|$5 | +A|${5} | +A|${5:+bbb} | +A|${5:-bbb} | bbb +A|$6 | +A|${6} | +A|${6:+bbb} | +A|${6:-bbb} | bbb +A|$7 | +A|${7} | +A|${7:+bbb} | +A|${7:-bbb} | bbb +A|$8 | +A|${8} | +A|${8:+bbb} | +A|${8:-bbb} | bbb +A|$9 | +A|${9} | +A|${9:+bbb} | +A|${9:-bbb} | bbb +A|$999 | +A|${999} | +A|${999:+bbb} | +A|${999:-bbb} | bbb +A|$999aaa | aaa +A|${999}aaa | aaa +A|${999:+bbb}aaa | aaa +A|${999:-bbb}aaa | bbbaaa +A|$001 | +A|${001} | +A|${001:+bbb} 
|
+A|${001:-bbb} | bbb
+A|$001aaa | aaa
+A|${001}aaa | aaa
+A|${001:+bbb}aaa | aaa
+A|${001:-bbb}aaa | bbbaaa
+
+# Special parameters won't be set in the Dockerfile:
+# http://pubs.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html#tag_02_05_02
+A|$@ |
+A|${@} |
+A|${@:+bbb} |
+A|${@:-bbb} | bbb
+A|$@@@ | @@
+A|$@aaa | aaa
+A|${@}aaa | aaa
+A|${@:+bbb}aaa | aaa
+A|${@:-bbb}aaa | bbbaaa
+A|$* |
+A|${*} |
+A|${*:+bbb} |
+A|${*:-bbb} | bbb
+A|$# |
+A|${#} |
+A|${#:+bbb} |
+A|${#:-bbb} | bbb
+A|$? |
+A|${?} |
+A|${?:+bbb} |
+A|${?:-bbb} | bbb
+A|$- |
+A|${-} |
+A|${-:+bbb} |
+A|${-:-bbb} | bbb
+A|$$ |
+A|${$} |
+A|${$:+bbb} |
+A|${$:-bbb} | bbb
+A|$! |
+A|${!} |
+A|${!:+bbb} |
+A|${!:-bbb} | bbb
+A|$0 |
+A|${0} |
+A|${0:+bbb} |
+A|${0:-bbb} | bbb
diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/equal_env_unix.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/equal_env_unix.go
new file mode 100644
index 00000000..36903ec5
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/equal_env_unix.go
@@ -0,0 +1,10 @@
+// +build !windows
+
+package shell
+
+// EqualEnvKeys compares two strings and returns true if they are equal.
+// On Unix this comparison is case sensitive.
+// On Windows this comparison is case insensitive.
+func EqualEnvKeys(from, to string) bool {
+	return from == to
+}
diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/equal_env_windows.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/equal_env_windows.go
new file mode 100644
index 00000000..010569bb
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/equal_env_windows.go
@@ -0,0 +1,10 @@
+package shell
+
+import "strings"
+
+// EqualEnvKeys compares two strings and returns true if they are equal.
+// On Unix this comparison is case sensitive.
+// On Windows this comparison is case insensitive.
+func EqualEnvKeys(from, to string) bool {
+	return strings.ToUpper(from) == strings.ToUpper(to)
+}
diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/lex.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/lex.go
new file mode 100644
index 00000000..d65913ff
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/lex.go
@@ -0,0 +1,466 @@
+package shell
+
+import (
+	"bytes"
+	"fmt"
+	"strings"
+	"text/scanner"
+	"unicode"
+
+	"github.com/pkg/errors"
+)
+
+// Lex performs shell word splitting and variable expansion.
+//
+// Lex takes a string and an array of env variables and processes all
+// quotes (" and ') as well as $xxx and ${xxx} env variable tokens.
+// It tries to mimic the word processing done by the bash shell.
+// It doesn't support all flavors of ${xx:...} formats, but new ones can
+// be added by adding code to the "special ${} format processing" section.
+type Lex struct {
+	escapeToken  rune
+	RawQuotes    bool
+	SkipUnsetEnv bool
+}
+
+// NewLex creates a new Lex which uses escapeToken to escape quotes.
+func NewLex(escapeToken rune) *Lex {
+	return &Lex{escapeToken: escapeToken}
+}
+
+// ProcessWord will use the 'env' list of environment variables,
+// and replace any env var references in 'word'.
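+// An illustrative sketch of the expansion (assuming the default '\' escape
+// token; not part of the vendored code):
+//
+//	lex := NewLex('\\')
+//	out, _ := lex.ProcessWord("$HOME/dir", []string{"HOME=/root"})
+//	// out == "/root/dir"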
+func (s *Lex) ProcessWord(word string, env []string) (string, error) {
+	word, _, err := s.process(word, BuildEnvs(env))
+	return word, err
+}
+
+// ProcessWords will use the 'env' list of environment variables,
+// and replace any env var references in 'word'. It also returns a slice
+// of strings which represents 'word' split up based on spaces, taking
+// quotes into account. Note that this splitting is done **after** the env
+// var substitutions are done.
+// Note, each one is trimmed to remove leading and trailing spaces (unless
+// they are quoted), but ProcessWord retains spaces between words.
+func (s *Lex) ProcessWords(word string, env []string) ([]string, error) {
+	_, words, err := s.process(word, BuildEnvs(env))
+	return words, err
+}
+
+// ProcessWordWithMap will use the 'env' list of environment variables,
+// and replace any env var references in 'word'.
+func (s *Lex) ProcessWordWithMap(word string, env map[string]string) (string, error) {
+	word, _, err := s.process(word, env)
+	return word, err
+}
+
+// ProcessWordsWithMap is the same as ProcessWords, but takes the env vars
+// as a map instead of a slice of "key=value" strings.
+func (s *Lex) ProcessWordsWithMap(word string, env map[string]string) ([]string, error) {
+	_, words, err := s.process(word, env)
+	return words, err
+}
+
+func (s *Lex) process(word string, env map[string]string) (string, []string, error) {
+	sw := &shellWord{
+		envs:         env,
+		escapeToken:  s.escapeToken,
+		skipUnsetEnv: s.SkipUnsetEnv,
+		rawQuotes:    s.RawQuotes,
+	}
+	sw.scanner.Init(strings.NewReader(word))
+	return sw.process(word)
+}
+
+type shellWord struct {
+	scanner      scanner.Scanner
+	envs         map[string]string
+	escapeToken  rune
+	rawQuotes    bool
+	skipUnsetEnv bool
+}
+
+func (sw *shellWord) process(source string) (string, []string, error) {
+	word, words, err := sw.processStopOn(scanner.EOF)
+	if err != nil {
+		err = errors.Wrapf(err, "failed to process %q", source)
+	}
+	return word, words, err
+}
+
+type wordsStruct struct {
+	word   string
+	words  []string
+	inWord bool
+}
+
+func (w *wordsStruct) addChar(ch rune) {
+	if unicode.IsSpace(ch) && w.inWord {
+		if len(w.word) != 0 {
+			w.words = append(w.words, w.word)
+			w.word = ""
+			w.inWord = false
+		}
+	} else if !unicode.IsSpace(ch) {
+		w.addRawChar(ch)
+	}
+}
+
+func (w *wordsStruct) addRawChar(ch rune) {
+	w.word += string(ch)
+	w.inWord = true
+}
+
+func (w *wordsStruct) addString(str string) {
+	for _, ch := range str {
+		w.addChar(ch)
+	}
+}
+
+func (w *wordsStruct) addRawString(str string) {
+	w.word += str
+	w.inWord = true
+}
+
+func (w *wordsStruct) getWords() []string {
+	if len(w.word) > 0 {
+		w.words = append(w.words, w.word)
+
+		// Just in case we're called again by mistake
+		w.word = ""
+		w.inWord = false
+	}
+	return w.words
+}
+
+// processStopOn processes the word and stops when it reaches the end of the
+// word or the 'stopChar' character
+func (sw *shellWord) processStopOn(stopChar rune) (string, []string, error) {
+	var result bytes.Buffer
+	var words wordsStruct
+
+	var charFuncMapping = map[rune]func() (string, error){
+		'\'': sw.processSingleQuote,
+		'"':  sw.processDoubleQuote,
+		'$':  sw.processDollar,
+	}
+
+	for sw.scanner.Peek() != scanner.EOF {
+		ch := sw.scanner.Peek()
+
+		if stopChar != scanner.EOF && ch == stopChar {
+			sw.scanner.Next()
+			return result.String(), words.getWords(), nil
+		}
+		if fn, ok := charFuncMapping[ch]; ok {
+			// Call special processing func for certain chars
+			tmp, err := fn()
+			if err != nil {
+				return "", []string{}, err
+			}
+			result.WriteString(tmp)
+
+			if ch == rune('$') {
+				words.addString(tmp)
+			} else {
+				words.addRawString(tmp)
+			}
+		} else {
+			//
Not special, just add it to the result + ch = sw.scanner.Next() + + if ch == sw.escapeToken { + // '\' (default escape token, but ` allowed) escapes, except end of line + ch = sw.scanner.Next() + + if ch == scanner.EOF { + break + } + + words.addRawChar(ch) + } else { + words.addChar(ch) + } + + result.WriteRune(ch) + } + } + if stopChar != scanner.EOF { + return "", []string{}, errors.Errorf("unexpected end of statement while looking for matching %s", string(stopChar)) + } + return result.String(), words.getWords(), nil +} + +func (sw *shellWord) processSingleQuote() (string, error) { + // All chars between single quotes are taken as-is + // Note, you can't escape ' + // + // From the "sh" man page: + // Single Quotes + // Enclosing characters in single quotes preserves the literal meaning of + // all the characters (except single quotes, making it impossible to put + // single-quotes in a single-quoted string). + + var result bytes.Buffer + + ch := sw.scanner.Next() + if sw.rawQuotes { + result.WriteRune(ch) + } + + for { + ch = sw.scanner.Next() + switch ch { + case scanner.EOF: + return "", errors.New("unexpected end of statement while looking for matching single-quote") + case '\'': + if sw.rawQuotes { + result.WriteRune(ch) + } + return result.String(), nil + } + result.WriteRune(ch) + } +} + +func (sw *shellWord) processDoubleQuote() (string, error) { + // All chars up to the next " are taken as-is, even ', except any $ chars + // But you can escape " with a \ (or ` if escape token set accordingly) + // + // From the "sh" man page: + // Double Quotes + // Enclosing characters within double quotes preserves the literal meaning + // of all characters except dollarsign ($), backquote (`), and backslash + // (\). The backslash inside double quotes is historically weird, and + // serves to quote only the following characters: + // $ ` " \ . + // Otherwise it remains literal. + + var result bytes.Buffer + + ch := sw.scanner.Next() + if sw.rawQuotes { + result.WriteRune(ch) + } + + for { + switch sw.scanner.Peek() { + case scanner.EOF: + return "", errors.New("unexpected end of statement while looking for matching double-quote") + case '"': + ch := sw.scanner.Next() + if sw.rawQuotes { + result.WriteRune(ch) + } + return result.String(), nil + case '$': + value, err := sw.processDollar() + if err != nil { + return "", err + } + result.WriteString(value) + default: + ch := sw.scanner.Next() + if ch == sw.escapeToken { + switch sw.scanner.Peek() { + case scanner.EOF: + // Ignore \ at end of word + continue + case '"', '$', sw.escapeToken: + // These chars can be escaped, all other \'s are left as-is + // Note: for now don't do anything special with ` chars. + // Not sure what to do with them anyway since we're not going + // to execute the text in there (not now anyway). + ch = sw.scanner.Next() + } + } + result.WriteRune(ch) + } + } +} + +func (sw *shellWord) processDollar() (string, error) { + sw.scanner.Next() + + // $xxx case + if sw.scanner.Peek() != '{' { + name := sw.processName() + if name == "" { + return "$", nil + } + value, found := sw.getEnv(name) + if !found && sw.skipUnsetEnv { + return "$" + name, nil + } + return value, nil + } + + sw.scanner.Next() + switch sw.scanner.Peek() { + case scanner.EOF: + return "", errors.New("syntax error: missing '}'") + case '{', '}', ':': + // Invalid ${{xx}, ${:xx}, ${:}. 
${} case
+		return "", errors.New("syntax error: bad substitution")
+	}
+	name := sw.processName()
+	ch := sw.scanner.Next()
+	switch ch {
+	case '}':
+		// Normal ${xx} case
+		value, found := sw.getEnv(name)
+		if !found && sw.skipUnsetEnv {
+			return fmt.Sprintf("${%s}", name), nil
+		}
+		return value, nil
+	case '?':
+		word, _, err := sw.processStopOn('}')
+		if err != nil {
+			if sw.scanner.Peek() == scanner.EOF {
+				return "", errors.New("syntax error: missing '}'")
+			}
+			return "", err
+		}
+		newValue, found := sw.getEnv(name)
+		if !found {
+			if sw.skipUnsetEnv {
+				return fmt.Sprintf("${%s?%s}", name, word), nil
+			}
+			message := "is not allowed to be unset"
+			if word != "" {
+				message = word
+			}
+			return "", errors.Errorf("%s: %s", name, message)
+		}
+		return newValue, nil
+	case ':':
+		// Special ${xx:...} format processing
+		// Yes it allows for recursive $'s in the ... spot
+		modifier := sw.scanner.Next()
+
+		word, _, err := sw.processStopOn('}')
+		if err != nil {
+			if sw.scanner.Peek() == scanner.EOF {
+				return "", errors.New("syntax error: missing '}'")
+			}
+			return "", err
+		}
+
+		// Grab the current value of the variable in question so we
+		// can use it to determine what to do based on the modifier
+		newValue, found := sw.getEnv(name)
+
+		switch modifier {
+		case '+':
+			if newValue != "" {
+				newValue = word
+			}
+			if !found && sw.skipUnsetEnv {
+				return fmt.Sprintf("${%s:%s%s}", name, string(modifier), word), nil
+			}
+			return newValue, nil
+
+		case '-':
+			if newValue == "" {
+				newValue = word
+			}
+			if !found && sw.skipUnsetEnv {
+				return fmt.Sprintf("${%s:%s%s}", name, string(modifier), word), nil
+			}
+
+			return newValue, nil
+
+		case '?':
+			if !found {
+				if sw.skipUnsetEnv {
+					return fmt.Sprintf("${%s:%s%s}", name, string(modifier), word), nil
+				}
+				message := "is not allowed to be unset"
+				if word != "" {
+					message = word
+				}
+				return "", errors.Errorf("%s: %s", name, message)
+			}
+			if newValue == "" {
+				message := "is not allowed to be empty"
+				if word != "" {
+					message = word
+				}
+				return "", errors.Errorf("%s: %s", name, message)
+			}
+			return newValue, nil
+
+		default:
+			return "", errors.Errorf("unsupported modifier (%c) in substitution", modifier)
+		}
+	}
+	return "", errors.Errorf("missing ':' in substitution")
+}
+
+func (sw *shellWord) processName() string {
+	// Read in a name (alphanumeric or _)
+	// If it starts with a digit, the whole run of digits is consumed and
+	// returned instead (a positional parameter such as $1)
+	var name bytes.Buffer
+
+	for sw.scanner.Peek() != scanner.EOF {
+		ch := sw.scanner.Peek()
+		if name.Len() == 0 && unicode.IsDigit(ch) {
+			for sw.scanner.Peek() != scanner.EOF && unicode.IsDigit(sw.scanner.Peek()) {
+				// Keep reading until the first non-digit character, or EOF
+				ch = sw.scanner.Next()
+				name.WriteRune(ch)
+			}
+			return name.String()
+		}
+		if name.Len() == 0 && isSpecialParam(ch) {
+			ch = sw.scanner.Next()
+			return string(ch)
+		}
+		if !unicode.IsLetter(ch) && !unicode.IsDigit(ch) && ch != '_' {
+			break
+		}
+		ch = sw.scanner.Next()
+		name.WriteRune(ch)
+	}
+
+	return name.String()
+}
+
+// isSpecialParam checks if the provided character is a special parameter,
+// as defined in http://pubs.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html#tag_02_05_02
+func isSpecialParam(char rune) bool {
+	switch char {
+	case '@', '*', '#', '?', '-', '$', '!', '0':
+		// Special parameters
+		// http://pubs.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html#tag_02_05_02
+		return true
+	}
+	return false
+}
+
+func (sw *shellWord) getEnv(name string) (string, bool) {
+	for key, value := range sw.envs {
+		if
EqualEnvKeys(name, key) { + return value, true + } + } + return "", false +} + +func BuildEnvs(env []string) map[string]string { + envs := map[string]string{} + + for _, e := range env { + i := strings.Index(e, "=") + + if i < 0 { + envs[e] = "" + } else { + k := e[:i] + v := e[i+1:] + + // overwrite value if key already exists + envs[k] = v + } + } + + return envs +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/wordsTest b/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/wordsTest new file mode 100644 index 00000000..1fd9f194 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/wordsTest @@ -0,0 +1,30 @@ +hello | hello +hello${hi}bye | hellobye +ENV hi=hi +hello${hi}bye | hellohibye +ENV space=abc def +hello${space}bye | helloabc,defbye +hello"${space}"bye | helloabc defbye +hello "${space}"bye | hello,abc defbye +ENV leading= ab c +hello${leading}def | hello,ab,cdef +hello"${leading}" def | hello ab c,def +hello"${leading}" | hello ab c +hello${leading} | hello,ab,c +# next line MUST have 3 trailing spaces, don't erase them! +ENV trailing=ab c +hello${trailing} | helloab,c +hello${trailing}d | helloab,c,d +hello"${trailing}"d | helloab c d +# next line MUST have 3 trailing spaces, don't erase them! +hel"lo${trailing}" | helloab c +hello" there " | hello there +hello there | hello,there +hello\ there | hello there +hello" there | error +hello\" there | hello",there +hello"\\there" | hello\there +hello"\there" | hello\there +hello'\\there' | hello\\there +hello'\there' | hello\there +hello'$there' | hello$there diff --git a/vendor/github.com/moby/buildkit/frontend/frontend.go b/vendor/github.com/moby/buildkit/frontend/frontend.go new file mode 100644 index 00000000..8ee07dab --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/frontend.go @@ -0,0 +1,32 @@ +package frontend + +import ( + "context" + "io" + + "github.com/moby/buildkit/cache" + "github.com/moby/buildkit/client" + "github.com/moby/buildkit/client/llb" + "github.com/moby/buildkit/executor" + gw "github.com/moby/buildkit/frontend/gateway/client" + "github.com/moby/buildkit/solver/pb" + digest "github.com/opencontainers/go-digest" +) + +type Frontend interface { + Solve(ctx context.Context, llb FrontendLLBBridge, opt map[string]string, inputs map[string]*pb.Definition) (*Result, error) +} + +type FrontendLLBBridge interface { + Solve(ctx context.Context, req SolveRequest) (*Result, error) + ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt) (digest.Digest, []byte, error) + Exec(ctx context.Context, meta executor.Meta, rootfs cache.ImmutableRef, stdin io.ReadCloser, stdout, stderr io.WriteCloser) error +} + +type SolveRequest = gw.SolveRequest + +type CacheOptionsEntry = gw.CacheOptionsEntry + +type WorkerInfos interface { + WorkerInfos() []client.WorkerInfo +} diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/client/client.go b/vendor/github.com/moby/buildkit/frontend/gateway/client/client.go new file mode 100644 index 00000000..bc7f96b5 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/gateway/client/client.go @@ -0,0 +1,74 @@ +package client + +import ( + "context" + + "github.com/moby/buildkit/client/llb" + "github.com/moby/buildkit/solver/pb" + "github.com/moby/buildkit/util/apicaps" + digest "github.com/opencontainers/go-digest" + specs "github.com/opencontainers/image-spec/specs-go/v1" + fstypes "github.com/tonistiigi/fsutil/types" +) + +type Client interface { + Solve(ctx context.Context, req 
SolveRequest) (*Result, error) + ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt) (digest.Digest, []byte, error) + BuildOpts() BuildOpts + Inputs(ctx context.Context) (map[string]llb.State, error) +} + +type Reference interface { + ToState() (llb.State, error) + ReadFile(ctx context.Context, req ReadRequest) ([]byte, error) + StatFile(ctx context.Context, req StatRequest) (*fstypes.Stat, error) + ReadDir(ctx context.Context, req ReadDirRequest) ([]*fstypes.Stat, error) +} + +type ReadRequest struct { + Filename string + Range *FileRange +} + +type FileRange struct { + Offset int + Length int +} + +type ReadDirRequest struct { + Path string + IncludePattern string +} + +type StatRequest struct { + Path string +} + +// SolveRequest is same as frontend.SolveRequest but avoiding dependency +type SolveRequest struct { + Definition *pb.Definition + Frontend string + FrontendOpt map[string]string + FrontendInputs map[string]*pb.Definition + CacheImports []CacheOptionsEntry +} + +type CacheOptionsEntry struct { + Type string + Attrs map[string]string +} + +type WorkerInfo struct { + ID string + Labels map[string]string + Platforms []specs.Platform +} + +type BuildOpts struct { + Opts map[string]string + SessionID string + Workers []WorkerInfo + Product string + LLBCaps apicaps.CapSet + Caps apicaps.CapSet +} diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/client/result.go b/vendor/github.com/moby/buildkit/frontend/gateway/client/result.go new file mode 100644 index 00000000..bd542284 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/gateway/client/result.go @@ -0,0 +1,54 @@ +package client + +import ( + "context" + "sync" + + "github.com/pkg/errors" +) + +type BuildFunc func(context.Context, Client) (*Result, error) + +type Result struct { + mu sync.Mutex + Ref Reference + Refs map[string]Reference + Metadata map[string][]byte +} + +func NewResult() *Result { + return &Result{} +} + +func (r *Result) AddMeta(k string, v []byte) { + r.mu.Lock() + if r.Metadata == nil { + r.Metadata = map[string][]byte{} + } + r.Metadata[k] = v + r.mu.Unlock() +} + +func (r *Result) AddRef(k string, ref Reference) { + r.mu.Lock() + if r.Refs == nil { + r.Refs = map[string]Reference{} + } + r.Refs[k] = ref + r.mu.Unlock() +} + +func (r *Result) SetRef(ref Reference) { + r.Ref = ref +} + +func (r *Result) SingleRef() (Reference, error) { + r.mu.Lock() + defer r.mu.Unlock() + + if r.Refs != nil && r.Ref == nil { + return nil, errors.Errorf("invalid map result") + } + + return r.Ref, nil +} diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/forwarder/forward.go b/vendor/github.com/moby/buildkit/frontend/gateway/forwarder/forward.go new file mode 100644 index 00000000..2fea6480 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/gateway/forwarder/forward.go @@ -0,0 +1,217 @@ +package forwarder + +import ( + "context" + "sync" + + "github.com/moby/buildkit/cache" + cacheutil "github.com/moby/buildkit/cache/util" + clienttypes "github.com/moby/buildkit/client" + "github.com/moby/buildkit/client/llb" + "github.com/moby/buildkit/frontend" + "github.com/moby/buildkit/frontend/gateway/client" + gwpb "github.com/moby/buildkit/frontend/gateway/pb" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/solver" + opspb "github.com/moby/buildkit/solver/pb" + "github.com/moby/buildkit/util/apicaps" + "github.com/moby/buildkit/worker" + "github.com/pkg/errors" + fstypes "github.com/tonistiigi/fsutil/types" +) + +func llbBridgeToGatewayClient(ctx 
context.Context, llbBridge frontend.FrontendLLBBridge, opts map[string]string, inputs map[string]*opspb.Definition, workerInfos []clienttypes.WorkerInfo) (*bridgeClient, error) { + return &bridgeClient{ + opts: opts, + inputs: inputs, + FrontendLLBBridge: llbBridge, + sid: session.FromContext(ctx), + workerInfos: workerInfos, + final: map[*ref]struct{}{}, + }, nil +} + +type bridgeClient struct { + frontend.FrontendLLBBridge + mu sync.Mutex + opts map[string]string + inputs map[string]*opspb.Definition + final map[*ref]struct{} + sid string + exporterAttr map[string][]byte + refs []*ref + workerInfos []clienttypes.WorkerInfo +} + +func (c *bridgeClient) Solve(ctx context.Context, req client.SolveRequest) (*client.Result, error) { + res, err := c.FrontendLLBBridge.Solve(ctx, frontend.SolveRequest{ + Definition: req.Definition, + Frontend: req.Frontend, + FrontendOpt: req.FrontendOpt, + FrontendInputs: req.FrontendInputs, + CacheImports: req.CacheImports, + }) + if err != nil { + return nil, err + } + + cRes := &client.Result{} + c.mu.Lock() + for k, r := range res.Refs { + rr, err := newRef(r) + if err != nil { + return nil, err + } + c.refs = append(c.refs, rr) + cRes.AddRef(k, rr) + } + if r := res.Ref; r != nil { + rr, err := newRef(r) + if err != nil { + return nil, err + } + c.refs = append(c.refs, rr) + cRes.SetRef(rr) + } + c.mu.Unlock() + cRes.Metadata = res.Metadata + + return cRes, nil +} +func (c *bridgeClient) BuildOpts() client.BuildOpts { + workers := make([]client.WorkerInfo, 0, len(c.workerInfos)) + for _, w := range c.workerInfos { + workers = append(workers, client.WorkerInfo{ + ID: w.ID, + Labels: w.Labels, + Platforms: w.Platforms, + }) + } + + return client.BuildOpts{ + Opts: c.opts, + SessionID: c.sid, + Workers: workers, + Product: apicaps.ExportedProduct, + Caps: gwpb.Caps.CapSet(gwpb.Caps.All()), + LLBCaps: opspb.Caps.CapSet(opspb.Caps.All()), + } +} + +func (c *bridgeClient) Inputs(ctx context.Context) (map[string]llb.State, error) { + inputs := make(map[string]llb.State) + for key, def := range c.inputs { + defop, err := llb.NewDefinitionOp(def) + if err != nil { + return nil, err + } + inputs[key] = llb.NewState(defop) + } + return inputs, nil +} + +func (c *bridgeClient) toFrontendResult(r *client.Result) (*frontend.Result, error) { + if r == nil { + return nil, nil + } + + res := &frontend.Result{} + + if r.Refs != nil { + res.Refs = make(map[string]solver.ResultProxy, len(r.Refs)) + for k, r := range r.Refs { + rr, ok := r.(*ref) + if !ok { + return nil, errors.Errorf("invalid reference type for forward %T", r) + } + c.final[rr] = struct{}{} + res.Refs[k] = rr.ResultProxy + } + } + if r := r.Ref; r != nil { + rr, ok := r.(*ref) + if !ok { + return nil, errors.Errorf("invalid reference type for forward %T", r) + } + c.final[rr] = struct{}{} + res.Ref = rr.ResultProxy + } + res.Metadata = r.Metadata + + return res, nil +} + +func (c *bridgeClient) discard(err error) { + for _, r := range c.refs { + if r != nil { + if _, ok := c.final[r]; !ok || err != nil { + r.Release(context.TODO()) + } + } + } +} + +type ref struct { + solver.ResultProxy +} + +func newRef(r solver.ResultProxy) (*ref, error) { + return &ref{ResultProxy: r}, nil +} + +func (r *ref) ToState() (st llb.State, err error) { + defop, err := llb.NewDefinitionOp(r.Definition()) + if err != nil { + return st, err + } + return llb.NewState(defop), nil +} + +func (r *ref) ReadFile(ctx context.Context, req client.ReadRequest) ([]byte, error) { + ref, err := r.getImmutableRef(ctx) + if err != nil { + return 
nil, err + } + newReq := cacheutil.ReadRequest{ + Filename: req.Filename, + } + if r := req.Range; r != nil { + newReq.Range = &cacheutil.FileRange{ + Offset: r.Offset, + Length: r.Length, + } + } + return cacheutil.ReadFile(ctx, ref, newReq) +} + +func (r *ref) ReadDir(ctx context.Context, req client.ReadDirRequest) ([]*fstypes.Stat, error) { + ref, err := r.getImmutableRef(ctx) + if err != nil { + return nil, err + } + newReq := cacheutil.ReadDirRequest{ + Path: req.Path, + IncludePattern: req.IncludePattern, + } + return cacheutil.ReadDir(ctx, ref, newReq) +} + +func (r *ref) StatFile(ctx context.Context, req client.StatRequest) (*fstypes.Stat, error) { + ref, err := r.getImmutableRef(ctx) + if err != nil { + return nil, err + } + return cacheutil.StatFile(ctx, ref, req.Path) +} + +func (r *ref) getImmutableRef(ctx context.Context) (cache.ImmutableRef, error) { + rr, err := r.ResultProxy.Result(ctx) + if err != nil { + return nil, err + } + ref, ok := rr.Sys().(*worker.WorkerRef) + if !ok { + return nil, errors.Errorf("invalid ref: %T", rr.Sys()) + } + return ref.ImmutableRef, nil +} diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/forwarder/frontend.go b/vendor/github.com/moby/buildkit/frontend/gateway/forwarder/frontend.go new file mode 100644 index 00000000..48c946d4 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/gateway/forwarder/frontend.go @@ -0,0 +1,39 @@ +package forwarder + +import ( + "context" + + "github.com/moby/buildkit/frontend" + "github.com/moby/buildkit/frontend/gateway/client" + "github.com/moby/buildkit/solver/pb" +) + +func NewGatewayForwarder(w frontend.WorkerInfos, f client.BuildFunc) frontend.Frontend { + return &GatewayForwarder{ + workers: w, + f: f, + } +} + +type GatewayForwarder struct { + workers frontend.WorkerInfos + f client.BuildFunc +} + +func (gf *GatewayForwarder) Solve(ctx context.Context, llbBridge frontend.FrontendLLBBridge, opts map[string]string, inputs map[string]*pb.Definition) (retRes *frontend.Result, retErr error) { + c, err := llbBridgeToGatewayClient(ctx, llbBridge, opts, inputs, gf.workers.WorkerInfos()) + if err != nil { + return nil, err + } + + defer func() { + c.discard(retErr) + }() + + res, err := gf.f(ctx, c) + if err != nil { + return nil, err + } + + return c.toFrontendResult(res) +} diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/gateway.go b/vendor/github.com/moby/buildkit/frontend/gateway/gateway.go new file mode 100644 index 00000000..dd2a993d --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/gateway/gateway.go @@ -0,0 +1,756 @@ +package gateway + +import ( + "context" + "encoding/json" + "fmt" + "io" + "net" + "os" + "strconv" + "strings" + "sync" + "time" + + "github.com/docker/distribution/reference" + apitypes "github.com/moby/buildkit/api/types" + "github.com/moby/buildkit/cache" + cacheutil "github.com/moby/buildkit/cache/util" + "github.com/moby/buildkit/client" + "github.com/moby/buildkit/client/llb" + "github.com/moby/buildkit/executor" + "github.com/moby/buildkit/exporter/containerimage/exptypes" + "github.com/moby/buildkit/frontend" + pb "github.com/moby/buildkit/frontend/gateway/pb" + "github.com/moby/buildkit/identity" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/solver" + opspb "github.com/moby/buildkit/solver/pb" + "github.com/moby/buildkit/util/apicaps" + "github.com/moby/buildkit/util/tracing" + "github.com/moby/buildkit/worker" + specs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + 
"github.com/sirupsen/logrus" + "golang.org/x/net/http2" + spb "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/health" + "google.golang.org/grpc/health/grpc_health_v1" + "google.golang.org/grpc/status" +) + +const ( + keySource = "source" + keyDevel = "gateway-devel" +) + +func NewGatewayFrontend(w frontend.WorkerInfos) frontend.Frontend { + return &gatewayFrontend{ + workers: w, + } +} + +type gatewayFrontend struct { + workers frontend.WorkerInfos +} + +func filterPrefix(opts map[string]string, pfx string) map[string]string { + m := map[string]string{} + for k, v := range opts { + if strings.HasPrefix(k, pfx) { + m[strings.TrimPrefix(k, pfx)] = v + } + } + return m +} + +func (gf *gatewayFrontend) Solve(ctx context.Context, llbBridge frontend.FrontendLLBBridge, opts map[string]string, inputs map[string]*opspb.Definition) (*frontend.Result, error) { + source, ok := opts[keySource] + if !ok { + return nil, errors.Errorf("no source specified for gateway") + } + + sid := session.FromContext(ctx) + + _, isDevel := opts[keyDevel] + var img specs.Image + var rootFS cache.ImmutableRef + var readonly bool // TODO: try to switch to read-only by default. + + if isDevel { + devRes, err := llbBridge.Solve(session.NewContext(ctx, "gateway:"+sid), + frontend.SolveRequest{ + Frontend: source, + FrontendOpt: filterPrefix(opts, "gateway-"), + FrontendInputs: inputs, + }) + if err != nil { + return nil, err + } + defer func() { + devRes.EachRef(func(ref solver.ResultProxy) error { + return ref.Release(context.TODO()) + }) + }() + if devRes.Ref == nil { + return nil, errors.Errorf("development gateway didn't return default result") + } + res, err := devRes.Ref.Result(ctx) + if err != nil { + return nil, err + } + workerRef, ok := res.Sys().(*worker.WorkerRef) + if !ok { + return nil, errors.Errorf("invalid ref: %T", res.Sys()) + } + rootFS = workerRef.ImmutableRef + config, ok := devRes.Metadata[exptypes.ExporterImageConfigKey] + if ok { + if err := json.Unmarshal(config, &img); err != nil { + return nil, err + } + } + } else { + sourceRef, err := reference.ParseNormalizedNamed(source) + if err != nil { + return nil, err + } + + dgst, config, err := llbBridge.ResolveImageConfig(ctx, reference.TagNameOnly(sourceRef).String(), llb.ResolveImageConfigOpt{}) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(config, &img); err != nil { + return nil, err + } + + if dgst != "" { + sourceRef, err = reference.WithDigest(sourceRef, dgst) + if err != nil { + return nil, err + } + } + + src := llb.Image(sourceRef.String(), &markTypeFrontend{}) + + def, err := src.Marshal() + if err != nil { + return nil, err + } + + res, err := llbBridge.Solve(ctx, frontend.SolveRequest{ + Definition: def.ToPB(), + }) + if err != nil { + return nil, err + } + defer func() { + res.EachRef(func(ref solver.ResultProxy) error { + return ref.Release(context.TODO()) + }) + }() + if res.Ref == nil { + return nil, errors.Errorf("gateway source didn't return default result") + + } + r, err := res.Ref.Result(ctx) + if err != nil { + return nil, err + } + workerRef, ok := r.Sys().(*worker.WorkerRef) + if !ok { + return nil, errors.Errorf("invalid ref: %T", r.Sys()) + } + rootFS = workerRef.ImmutableRef + } + + lbf, ctx, err := newLLBBridgeForwarder(ctx, llbBridge, gf.workers, inputs) + defer lbf.conn.Close() + if err != nil { + return nil, err + } + + args := []string{"/run"} + env := []string{} + cwd := "/" + if img.Config.Env != nil { + env = img.Config.Env + } + if 
img.Config.Entrypoint != nil { + args = img.Config.Entrypoint + } + if img.Config.WorkingDir != "" { + cwd = img.Config.WorkingDir + } + i := 0 + for k, v := range opts { + env = append(env, fmt.Sprintf("BUILDKIT_FRONTEND_OPT_%d", i)+"="+k+"="+v) + i++ + } + + env = append(env, "BUILDKIT_SESSION_ID="+sid) + + dt, err := json.Marshal(gf.workers.WorkerInfos()) + if err != nil { + return nil, errors.Wrap(err, "failed to marshal workers array") + } + env = append(env, "BUILDKIT_WORKERS="+string(dt)) + + defer lbf.Discard() + + env = append(env, "BUILDKIT_EXPORTEDPRODUCT="+apicaps.ExportedProduct) + + meta := executor.Meta{ + Env: env, + Args: args, + Cwd: cwd, + ReadonlyRootFS: readonly, + } + + if v, ok := img.Config.Labels["moby.buildkit.frontend.network.none"]; ok { + if ok, _ := strconv.ParseBool(v); ok { + meta.NetMode = opspb.NetMode_NONE + } + } + + err = llbBridge.Exec(ctx, meta, rootFS, lbf.Stdin, lbf.Stdout, os.Stderr) + + if err != nil { + if errors.Cause(err) == context.Canceled && lbf.isErrServerClosed { + err = errors.Errorf("frontend grpc server closed unexpectedly") + } + // An existing error (set via Return rpc) takes + // precedence over this error, which in turn takes + // precedence over a success reported via Return. + lbf.mu.Lock() + if lbf.err == nil { + lbf.result = nil + lbf.err = err + } + lbf.mu.Unlock() + } + + return lbf.Result() +} + +func (lbf *llbBridgeForwarder) Discard() { + lbf.mu.Lock() + defer lbf.mu.Unlock() + for id, r := range lbf.refs { + if lbf.err == nil && lbf.result != nil { + keep := false + lbf.result.EachRef(func(r2 solver.ResultProxy) error { + if r == r2 { + keep = true + } + return nil + }) + if keep { + continue + } + } + r.Release(context.TODO()) + delete(lbf.refs, id) + } +} + +func (lbf *llbBridgeForwarder) Done() <-chan struct{} { + return lbf.doneCh +} + +func (lbf *llbBridgeForwarder) setResult(r *frontend.Result, err error) (*pb.ReturnResponse, error) { + lbf.mu.Lock() + defer lbf.mu.Unlock() + + if (r == nil) == (err == nil) { + return nil, errors.New("gateway return must be either result or err") + } + + if lbf.result != nil || lbf.err != nil { + return nil, errors.New("gateway result is already set") + } + + lbf.result = r + lbf.err = err + close(lbf.doneCh) + return &pb.ReturnResponse{}, nil +} + +func (lbf *llbBridgeForwarder) Result() (*frontend.Result, error) { + lbf.mu.Lock() + defer lbf.mu.Unlock() + + if lbf.result == nil && lbf.err == nil { + return nil, errors.New("no result for incomplete build") + } + + if lbf.err != nil { + return nil, lbf.err + } + + return lbf.result, nil +} + +func NewBridgeForwarder(ctx context.Context, llbBridge frontend.FrontendLLBBridge, workers frontend.WorkerInfos, inputs map[string]*opspb.Definition) *llbBridgeForwarder { + lbf := &llbBridgeForwarder{ + callCtx: ctx, + llbBridge: llbBridge, + refs: map[string]solver.ResultProxy{}, + doneCh: make(chan struct{}), + pipe: newPipe(), + workers: workers, + inputs: inputs, + } + return lbf +} + +func newLLBBridgeForwarder(ctx context.Context, llbBridge frontend.FrontendLLBBridge, workers frontend.WorkerInfos, inputs map[string]*opspb.Definition) (*llbBridgeForwarder, context.Context, error) { + ctx, cancel := context.WithCancel(ctx) + lbf := NewBridgeForwarder(ctx, llbBridge, workers, inputs) + server := grpc.NewServer() + grpc_health_v1.RegisterHealthServer(server, health.NewServer()) + pb.RegisterLLBBridgeServer(server, lbf) + + go func() { + serve(ctx, server, lbf.conn) + select { + case <-ctx.Done(): + default: + lbf.isErrServerClosed = true + } 
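+		// serve has returned (either the context was cancelled or the gRPC
+		// server shut down on its own); cancel the forwarder context so any
+		// callers still blocked on it are released.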
+ cancel() + }() + + return lbf, ctx, nil +} + +type pipe struct { + Stdin io.ReadCloser + Stdout io.WriteCloser + conn net.Conn +} + +func newPipe() *pipe { + pr1, pw1, _ := os.Pipe() + pr2, pw2, _ := os.Pipe() + return &pipe{ + Stdin: pr1, + Stdout: pw2, + conn: &conn{ + Reader: pr2, + Writer: pw1, + Closer: pw2, + }, + } +} + +type conn struct { + io.Reader + io.Writer + io.Closer +} + +func (s *conn) LocalAddr() net.Addr { + return dummyAddr{} +} +func (s *conn) RemoteAddr() net.Addr { + return dummyAddr{} +} +func (s *conn) SetDeadline(t time.Time) error { + return nil +} +func (s *conn) SetReadDeadline(t time.Time) error { + return nil +} +func (s *conn) SetWriteDeadline(t time.Time) error { + return nil +} + +type dummyAddr struct { +} + +func (d dummyAddr) Network() string { + return "pipe" +} + +func (d dummyAddr) String() string { + return "localhost" +} + +type LLBBridgeForwarder interface { + pb.LLBBridgeServer + Done() <-chan struct{} + Result() (*frontend.Result, error) +} + +type llbBridgeForwarder struct { + mu sync.Mutex + callCtx context.Context + llbBridge frontend.FrontendLLBBridge + refs map[string]solver.ResultProxy + // lastRef solver.CachedResult + // lastRefs map[string]solver.CachedResult + // err error + doneCh chan struct{} // closed when result or err become valid through a call to a Return + result *frontend.Result + err error + exporterAttr map[string][]byte + workers frontend.WorkerInfos + inputs map[string]*opspb.Definition + isErrServerClosed bool + *pipe +} + +func (lbf *llbBridgeForwarder) ResolveImageConfig(ctx context.Context, req *pb.ResolveImageConfigRequest) (*pb.ResolveImageConfigResponse, error) { + ctx = tracing.ContextWithSpanFromContext(ctx, lbf.callCtx) + var platform *specs.Platform + if p := req.Platform; p != nil { + platform = &specs.Platform{ + OS: p.OS, + Architecture: p.Architecture, + Variant: p.Variant, + OSVersion: p.OSVersion, + OSFeatures: p.OSFeatures, + } + } + dgst, dt, err := lbf.llbBridge.ResolveImageConfig(ctx, req.Ref, llb.ResolveImageConfigOpt{ + Platform: platform, + ResolveMode: req.ResolveMode, + LogName: req.LogName, + }) + if err != nil { + return nil, err + } + return &pb.ResolveImageConfigResponse{ + Digest: dgst, + Config: dt, + }, nil +} + +func translateLegacySolveRequest(req *pb.SolveRequest) error { + // translates ImportCacheRefs to new CacheImports (v0.4.0) + for _, legacyImportRef := range req.ImportCacheRefsDeprecated { + im := &pb.CacheOptionsEntry{ + Type: "registry", + Attrs: map[string]string{"ref": legacyImportRef}, + } + // FIXME(AkihiroSuda): skip append if already exists + req.CacheImports = append(req.CacheImports, im) + } + req.ImportCacheRefsDeprecated = nil + return nil +} + +func (lbf *llbBridgeForwarder) Solve(ctx context.Context, req *pb.SolveRequest) (*pb.SolveResponse, error) { + if err := translateLegacySolveRequest(req); err != nil { + return nil, err + } + var cacheImports []frontend.CacheOptionsEntry + for _, e := range req.CacheImports { + cacheImports = append(cacheImports, frontend.CacheOptionsEntry{ + Type: e.Type, + Attrs: e.Attrs, + }) + } + + ctx = tracing.ContextWithSpanFromContext(ctx, lbf.callCtx) + res, err := lbf.llbBridge.Solve(ctx, frontend.SolveRequest{ + Definition: req.Definition, + Frontend: req.Frontend, + FrontendOpt: req.FrontendOpt, + FrontendInputs: req.FrontendInputs, + CacheImports: cacheImports, + }) + if err != nil { + return nil, err + } + + if len(res.Refs) > 0 && !req.AllowResultReturn { + // this should never happen because old client shouldn't make a map 
request + return nil, errors.Errorf("solve did not return default result") + } + + pbRes := &pb.Result{ + Metadata: res.Metadata, + } + var defaultID string + + lbf.mu.Lock() + if res.Refs != nil { + ids := make(map[string]string, len(res.Refs)) + defs := make(map[string]*opspb.Definition, len(res.Refs)) + for k, ref := range res.Refs { + id := identity.NewID() + if ref == nil { + id = "" + } else { + lbf.refs[id] = ref + } + ids[k] = id + defs[k] = ref.Definition() + } + + if req.AllowResultArrayRef { + refMap := make(map[string]*pb.Ref, len(res.Refs)) + for k, id := range ids { + refMap[k] = &pb.Ref{Id: id, Def: defs[k]} + } + pbRes.Result = &pb.Result_Refs{Refs: &pb.RefMap{Refs: refMap}} + } else { + pbRes.Result = &pb.Result_RefsDeprecated{RefsDeprecated: &pb.RefMapDeprecated{Refs: ids}} + } + } else { + ref := res.Ref + id := identity.NewID() + + var def *opspb.Definition + if ref == nil { + id = "" + } else { + def = ref.Definition() + lbf.refs[id] = ref + } + defaultID = id + + if req.AllowResultArrayRef { + pbRes.Result = &pb.Result_Ref{Ref: &pb.Ref{Id: id, Def: def}} + } else { + pbRes.Result = &pb.Result_RefDeprecated{RefDeprecated: id} + } + } + lbf.mu.Unlock() + + // compatibility mode for older clients + if req.Final { + exp := map[string][]byte{} + if err := json.Unmarshal(req.ExporterAttr, &exp); err != nil { + return nil, err + } + + for k, v := range res.Metadata { + exp[k] = v + } + + lbf.mu.Lock() + lbf.result = &frontend.Result{ + Ref: lbf.refs[defaultID], + Metadata: exp, + } + lbf.mu.Unlock() + } + + resp := &pb.SolveResponse{ + Result: pbRes, + } + + if !req.AllowResultReturn { + resp.Ref = defaultID + } + + return resp, nil +} +func (lbf *llbBridgeForwarder) ReadFile(ctx context.Context, req *pb.ReadFileRequest) (*pb.ReadFileResponse, error) { + ctx = tracing.ContextWithSpanFromContext(ctx, lbf.callCtx) + lbf.mu.Lock() + ref, ok := lbf.refs[req.Ref] + lbf.mu.Unlock() + if !ok { + return nil, errors.Errorf("no such ref: %v", req.Ref) + } + if ref == nil { + return nil, errors.Wrapf(os.ErrNotExist, "%s not found", req.FilePath) + } + r, err := ref.Result(ctx) + if err != nil { + return nil, err + } + workerRef, ok := r.Sys().(*worker.WorkerRef) + if !ok { + return nil, errors.Errorf("invalid ref: %T", r.Sys()) + } + + newReq := cacheutil.ReadRequest{ + Filename: req.FilePath, + } + if r := req.Range; r != nil { + newReq.Range = &cacheutil.FileRange{ + Offset: int(r.Offset), + Length: int(r.Length), + } + } + + dt, err := cacheutil.ReadFile(ctx, workerRef.ImmutableRef, newReq) + if err != nil { + return nil, err + } + + return &pb.ReadFileResponse{Data: dt}, nil +} + +func (lbf *llbBridgeForwarder) ReadDir(ctx context.Context, req *pb.ReadDirRequest) (*pb.ReadDirResponse, error) { + ctx = tracing.ContextWithSpanFromContext(ctx, lbf.callCtx) + lbf.mu.Lock() + ref, ok := lbf.refs[req.Ref] + lbf.mu.Unlock() + if !ok { + return nil, errors.Errorf("no such ref: %v", req.Ref) + } + if ref == nil { + return nil, errors.Wrapf(os.ErrNotExist, "%s not found", req.DirPath) + } + r, err := ref.Result(ctx) + if err != nil { + return nil, err + } + workerRef, ok := r.Sys().(*worker.WorkerRef) + if !ok { + return nil, errors.Errorf("invalid ref: %T", r.Sys()) + } + + newReq := cacheutil.ReadDirRequest{ + Path: req.DirPath, + IncludePattern: req.IncludePattern, + } + entries, err := cacheutil.ReadDir(ctx, workerRef.ImmutableRef, newReq) + if err != nil { + return nil, err + } + + return &pb.ReadDirResponse{Entries: entries}, nil +} + +func (lbf *llbBridgeForwarder) StatFile(ctx 
context.Context, req *pb.StatFileRequest) (*pb.StatFileResponse, error) { + ctx = tracing.ContextWithSpanFromContext(ctx, lbf.callCtx) + lbf.mu.Lock() + ref, ok := lbf.refs[req.Ref] + lbf.mu.Unlock() + if !ok { + return nil, errors.Errorf("no such ref: %v", req.Ref) + } + if ref == nil { + return nil, errors.Wrapf(os.ErrNotExist, "%s not found", req.Path) + } + r, err := ref.Result(ctx) + if err != nil { + return nil, err + } + workerRef, ok := r.Sys().(*worker.WorkerRef) + if !ok { + return nil, errors.Errorf("invalid ref: %T", r.Sys()) + } + + st, err := cacheutil.StatFile(ctx, workerRef.ImmutableRef, req.Path) + if err != nil { + return nil, err + } + + return &pb.StatFileResponse{Stat: st}, nil +} + +func (lbf *llbBridgeForwarder) Ping(context.Context, *pb.PingRequest) (*pb.PongResponse, error) { + + workers := lbf.workers.WorkerInfos() + pbWorkers := make([]*apitypes.WorkerRecord, 0, len(workers)) + for _, w := range workers { + pbWorkers = append(pbWorkers, &apitypes.WorkerRecord{ + ID: w.ID, + Labels: w.Labels, + Platforms: opspb.PlatformsFromSpec(w.Platforms), + }) + } + + return &pb.PongResponse{ + FrontendAPICaps: pb.Caps.All(), + Workers: pbWorkers, + LLBCaps: opspb.Caps.All(), + }, nil +} + +func (lbf *llbBridgeForwarder) Return(ctx context.Context, in *pb.ReturnRequest) (*pb.ReturnResponse, error) { + if in.Error != nil { + return lbf.setResult(nil, status.ErrorProto(&spb.Status{ + Code: in.Error.Code, + Message: in.Error.Message, + // Details: in.Error.Details, + })) + } else { + r := &frontend.Result{ + Metadata: in.Result.Metadata, + } + + switch res := in.Result.Result.(type) { + case *pb.Result_RefDeprecated: + ref, err := lbf.convertRef(res.RefDeprecated) + if err != nil { + return nil, err + } + r.Ref = ref + case *pb.Result_RefsDeprecated: + m := map[string]solver.ResultProxy{} + for k, id := range res.RefsDeprecated.Refs { + ref, err := lbf.convertRef(id) + if err != nil { + return nil, err + } + m[k] = ref + } + r.Refs = m + case *pb.Result_Ref: + ref, err := lbf.convertRef(res.Ref.Id) + if err != nil { + return nil, err + } + r.Ref = ref + case *pb.Result_Refs: + m := map[string]solver.ResultProxy{} + for k, ref := range res.Refs.Refs { + ref, err := lbf.convertRef(ref.Id) + if err != nil { + return nil, err + } + m[k] = ref + } + r.Refs = m + } + return lbf.setResult(r, nil) + } +} + +func (lbf *llbBridgeForwarder) Inputs(ctx context.Context, in *pb.InputsRequest) (*pb.InputsResponse, error) { + return &pb.InputsResponse{ + Definitions: lbf.inputs, + }, nil +} + +func (lbf *llbBridgeForwarder) convertRef(id string) (solver.ResultProxy, error) { + if id == "" { + return nil, nil + } + + lbf.mu.Lock() + defer lbf.mu.Unlock() + + r, ok := lbf.refs[id] + if !ok { + return nil, errors.Errorf("return reference %s not found", id) + } + + return r, nil +} + +func serve(ctx context.Context, grpcServer *grpc.Server, conn net.Conn) { + go func() { + <-ctx.Done() + conn.Close() + }() + logrus.Debugf("serving grpc connection") + (&http2.Server{}).ServeConn(conn, &http2.ServeConnOpts{Handler: grpcServer}) +} + +type markTypeFrontend struct{} + +func (*markTypeFrontend) SetImageOption(ii *llb.ImageInfo) { + ii.RecordType = string(client.UsageRecordTypeFrontend) +} diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/grpcclient/client.go b/vendor/github.com/moby/buildkit/frontend/gateway/grpcclient/client.go new file mode 100644 index 00000000..427758e6 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/gateway/grpcclient/client.go @@ -0,0 +1,591 @@ +package 
grpcclient + +import ( + "context" + "encoding/json" + "io" + "net" + "os" + "strings" + "time" + + "github.com/gogo/googleapis/google/rpc" + "github.com/moby/buildkit/client/llb" + "github.com/moby/buildkit/frontend/gateway/client" + pb "github.com/moby/buildkit/frontend/gateway/pb" + opspb "github.com/moby/buildkit/solver/pb" + "github.com/moby/buildkit/util/apicaps" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + fstypes "github.com/tonistiigi/fsutil/types" + "google.golang.org/grpc" + "google.golang.org/grpc/status" +) + +const frontendPrefix = "BUILDKIT_FRONTEND_OPT_" + +type GrpcClient interface { + Run(context.Context, client.BuildFunc) error +} + +func New(ctx context.Context, opts map[string]string, session, product string, c pb.LLBBridgeClient, w []client.WorkerInfo) (GrpcClient, error) { + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + resp, err := c.Ping(ctx, &pb.PingRequest{}) + if err != nil { + return nil, err + } + + if resp.FrontendAPICaps == nil { + resp.FrontendAPICaps = defaultCaps() + } + + if resp.LLBCaps == nil { + resp.LLBCaps = defaultLLBCaps() + } + + return &grpcClient{ + client: c, + opts: opts, + sessionID: session, + workers: w, + product: product, + caps: pb.Caps.CapSet(resp.FrontendAPICaps), + llbCaps: opspb.Caps.CapSet(resp.LLBCaps), + requests: map[string]*pb.SolveRequest{}, + }, nil +} + +func current() (GrpcClient, error) { + if ep := product(); ep != "" { + apicaps.ExportedProduct = ep + } + + ctx, conn, err := grpcClientConn(context.Background()) + if err != nil { + return nil, err + } + + return New(ctx, opts(), sessionID(), product(), pb.NewLLBBridgeClient(conn), workers()) +} + +func convertRef(ref client.Reference) (*pb.Ref, error) { + if ref == nil { + return &pb.Ref{}, nil + } + r, ok := ref.(*reference) + if !ok { + return nil, errors.Errorf("invalid return reference type %T", ref) + } + return &pb.Ref{Id: r.id, Def: r.def}, nil +} + +func RunFromEnvironment(ctx context.Context, f client.BuildFunc) error { + client, err := current() + if err != nil { + return errors.Wrapf(err, "failed to initialize client from environment") + } + return client.Run(ctx, f) +} + +func (c *grpcClient) Run(ctx context.Context, f client.BuildFunc) (retError error) { + export := c.caps.Supports(pb.CapReturnResult) == nil + + var ( + res *client.Result + err error + ) + if export { + defer func() { + req := &pb.ReturnRequest{} + if retError == nil { + if res == nil { + res = &client.Result{} + } + pbRes := &pb.Result{ + Metadata: res.Metadata, + } + if res.Refs != nil { + if c.caps.Supports(pb.CapProtoRefArray) == nil { + m := map[string]*pb.Ref{} + for k, r := range res.Refs { + pbRef, err := convertRef(r) + if err != nil { + retError = err + continue + } + m[k] = pbRef + } + pbRes.Result = &pb.Result_Refs{Refs: &pb.RefMap{Refs: m}} + } else { + // Server doesn't support the new wire format for refs, so we construct + // a deprecated result ref map. + m := map[string]string{} + for k, r := range res.Refs { + pbRef, err := convertRef(r) + if err != nil { + retError = err + continue + } + m[k] = pbRef.Id + } + pbRes.Result = &pb.Result_RefsDeprecated{RefsDeprecated: &pb.RefMapDeprecated{Refs: m}} + } + } else { + pbRef, err := convertRef(res.Ref) + if err != nil { + retError = err + } else { + if c.caps.Supports(pb.CapProtoRefArray) == nil { + pbRes.Result = &pb.Result_Ref{Ref: pbRef} + } else { + // Server doesn't support the new wire format for refs, so we construct + // a deprecated result ref. 
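+					// Only the ref id is sent; the definition is dropped,
+					// mirroring the deprecated map case above.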
+ pbRes.Result = &pb.Result_RefDeprecated{RefDeprecated: pbRef.Id} + } + } + } + if retError == nil { + req.Result = pbRes + } + } + if retError != nil { + st, _ := status.FromError(errors.Cause(retError)) + stp := st.Proto() + req.Error = &rpc.Status{ + Code: stp.Code, + Message: stp.Message, + // Details: stp.Details, + } + } + if _, err := c.client.Return(ctx, req); err != nil && retError == nil { + retError = err + } + }() + } + + if res, err = f(ctx, c); err != nil { + return err + } + + if err := c.caps.Supports(pb.CapReturnMap); len(res.Refs) > 1 && err != nil { + return err + } + + if !export { + exportedAttrBytes, err := json.Marshal(res.Metadata) + if err != nil { + return errors.Wrapf(err, "failed to marshal return metadata") + } + + req, err := c.requestForRef(res.Ref) + if err != nil { + return errors.Wrapf(err, "failed to find return ref") + } + + req.Final = true + req.ExporterAttr = exportedAttrBytes + + if _, err := c.client.Solve(ctx, req); err != nil { + return errors.Wrapf(err, "failed to solve") + } + } + + return nil +} + +// defaultCaps returns the capabilities that were implemented when capabilities +// support was added. This list is frozen and should never be changed. +func defaultCaps() []apicaps.PBCap { + return []apicaps.PBCap{ + {ID: string(pb.CapSolveBase), Enabled: true}, + {ID: string(pb.CapSolveInlineReturn), Enabled: true}, + {ID: string(pb.CapResolveImage), Enabled: true}, + {ID: string(pb.CapReadFile), Enabled: true}, + } +} + +// defaultLLBCaps returns the LLB capabilities that were implemented when capabilities +// support was added. This list is frozen and should never be changed. +func defaultLLBCaps() []apicaps.PBCap { + return []apicaps.PBCap{ + {ID: string(opspb.CapSourceImage), Enabled: true}, + {ID: string(opspb.CapSourceLocal), Enabled: true}, + {ID: string(opspb.CapSourceLocalUnique), Enabled: true}, + {ID: string(opspb.CapSourceLocalSessionID), Enabled: true}, + {ID: string(opspb.CapSourceLocalIncludePatterns), Enabled: true}, + {ID: string(opspb.CapSourceLocalFollowPaths), Enabled: true}, + {ID: string(opspb.CapSourceLocalExcludePatterns), Enabled: true}, + {ID: string(opspb.CapSourceLocalSharedKeyHint), Enabled: true}, + {ID: string(opspb.CapSourceGit), Enabled: true}, + {ID: string(opspb.CapSourceGitKeepDir), Enabled: true}, + {ID: string(opspb.CapSourceGitFullURL), Enabled: true}, + {ID: string(opspb.CapSourceHTTP), Enabled: true}, + {ID: string(opspb.CapSourceHTTPChecksum), Enabled: true}, + {ID: string(opspb.CapSourceHTTPPerm), Enabled: true}, + {ID: string(opspb.CapSourceHTTPUIDGID), Enabled: true}, + {ID: string(opspb.CapBuildOpLLBFileName), Enabled: true}, + {ID: string(opspb.CapExecMetaBase), Enabled: true}, + {ID: string(opspb.CapExecMetaProxy), Enabled: true}, + {ID: string(opspb.CapExecMountBind), Enabled: true}, + {ID: string(opspb.CapExecMountCache), Enabled: true}, + {ID: string(opspb.CapExecMountCacheSharing), Enabled: true}, + {ID: string(opspb.CapExecMountSelector), Enabled: true}, + {ID: string(opspb.CapExecMountTmpfs), Enabled: true}, + {ID: string(opspb.CapExecMountSecret), Enabled: true}, + {ID: string(opspb.CapConstraints), Enabled: true}, + {ID: string(opspb.CapPlatform), Enabled: true}, + {ID: string(opspb.CapMetaIgnoreCache), Enabled: true}, + {ID: string(opspb.CapMetaDescription), Enabled: true}, + {ID: string(opspb.CapMetaExportCache), Enabled: true}, + } +} + +type grpcClient struct { + client pb.LLBBridgeClient + opts map[string]string + sessionID string + product string + workers []client.WorkerInfo + caps 
apicaps.CapSet + llbCaps apicaps.CapSet + requests map[string]*pb.SolveRequest +} + +func (c *grpcClient) requestForRef(ref client.Reference) (*pb.SolveRequest, error) { + emptyReq := &pb.SolveRequest{ + Definition: &opspb.Definition{}, + } + if ref == nil { + return emptyReq, nil + } + r, ok := ref.(*reference) + if !ok { + return nil, errors.Errorf("return reference has invalid type %T", ref) + } + if r.id == "" { + return emptyReq, nil + } + req, ok := c.requests[r.id] + if !ok { + return nil, errors.Errorf("did not find request for return reference %s", r.id) + } + return req, nil +} + +func (c *grpcClient) Solve(ctx context.Context, creq client.SolveRequest) (*client.Result, error) { + if creq.Definition != nil { + for _, md := range creq.Definition.Metadata { + for cap := range md.Caps { + if err := c.llbCaps.Supports(cap); err != nil { + return nil, err + } + } + } + } + var ( + // old API + legacyRegistryCacheImports []string + // new API (CapImportCaches) + cacheImports []*pb.CacheOptionsEntry + ) + supportCapImportCaches := c.caps.Supports(pb.CapImportCaches) == nil + for _, im := range creq.CacheImports { + if !supportCapImportCaches && im.Type == "registry" { + legacyRegistryCacheImports = append(legacyRegistryCacheImports, im.Attrs["ref"]) + } else { + cacheImports = append(cacheImports, &pb.CacheOptionsEntry{ + Type: im.Type, + Attrs: im.Attrs, + }) + } + } + + req := &pb.SolveRequest{ + Definition: creq.Definition, + Frontend: creq.Frontend, + FrontendOpt: creq.FrontendOpt, + FrontendInputs: creq.FrontendInputs, + AllowResultReturn: true, + AllowResultArrayRef: true, + // old API + ImportCacheRefsDeprecated: legacyRegistryCacheImports, + // new API + CacheImports: cacheImports, + } + + // backwards compatibility with inline return + if c.caps.Supports(pb.CapReturnResult) != nil { + req.ExporterAttr = []byte("{}") + } + + resp, err := c.client.Solve(ctx, req) + if err != nil { + return nil, err + } + + res := &client.Result{} + + if resp.Result == nil { + if id := resp.Ref; id != "" { + c.requests[id] = req + } + res.SetRef(&reference{id: resp.Ref, c: c}) + } else { + res.Metadata = resp.Result.Metadata + switch pbRes := resp.Result.Result.(type) { + case *pb.Result_RefDeprecated: + if id := pbRes.RefDeprecated; id != "" { + res.SetRef(&reference{id: id, c: c}) + } + case *pb.Result_RefsDeprecated: + for k, v := range pbRes.RefsDeprecated.Refs { + ref := &reference{id: v, c: c} + if v == "" { + ref = nil + } + res.AddRef(k, ref) + } + case *pb.Result_Ref: + if pbRes.Ref.Id != "" { + ref, err := newReference(c, pbRes.Ref) + if err != nil { + return nil, err + } + res.SetRef(ref) + } + case *pb.Result_Refs: + for k, v := range pbRes.Refs.Refs { + var ref *reference + if v.Id != "" { + ref, err = newReference(c, v) + if err != nil { + return nil, err + } + } + res.AddRef(k, ref) + } + } + } + + return res, nil +} + +func (c *grpcClient) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt) (digest.Digest, []byte, error) { + var p *opspb.Platform + if platform := opt.Platform; platform != nil { + p = &opspb.Platform{ + OS: platform.OS, + Architecture: platform.Architecture, + Variant: platform.Variant, + OSVersion: platform.OSVersion, + OSFeatures: platform.OSFeatures, + } + } + resp, err := c.client.ResolveImageConfig(ctx, &pb.ResolveImageConfigRequest{Ref: ref, Platform: p, ResolveMode: opt.ResolveMode, LogName: opt.LogName}) + if err != nil { + return "", nil, err + } + return resp.Digest, resp.Config, nil +} + +func (c *grpcClient) BuildOpts() 
client.BuildOpts { + return client.BuildOpts{ + Opts: c.opts, + SessionID: c.sessionID, + Workers: c.workers, + Product: c.product, + LLBCaps: c.llbCaps, + Caps: c.caps, + } +} + +func (c *grpcClient) Inputs(ctx context.Context) (map[string]llb.State, error) { + err := c.caps.Supports(pb.CapFrontendInputs) + if err != nil { + return nil, err + } + + resp, err := c.client.Inputs(ctx, &pb.InputsRequest{}) + if err != nil { + return nil, err + } + + inputs := make(map[string]llb.State) + for key, def := range resp.Definitions { + op, err := llb.NewDefinitionOp(def) + if err != nil { + return nil, err + } + inputs[key] = llb.NewState(op) + } + return inputs, nil + +} + +type reference struct { + c *grpcClient + id string + def *opspb.Definition + output llb.Output +} + +func newReference(c *grpcClient, ref *pb.Ref) (*reference, error) { + return &reference{c: c, id: ref.Id, def: ref.Def}, nil +} + +func (r *reference) ToState() (st llb.State, err error) { + err = r.c.caps.Supports(pb.CapReferenceOutput) + if err != nil { + return st, err + } + + if r.def == nil { + return st, errors.Errorf("gateway did not return reference with definition") + } + + defop, err := llb.NewDefinitionOp(r.def) + if err != nil { + return st, err + } + + return llb.NewState(defop), nil +} + +func (r *reference) ReadFile(ctx context.Context, req client.ReadRequest) ([]byte, error) { + rfr := &pb.ReadFileRequest{FilePath: req.Filename, Ref: r.id} + if r := req.Range; r != nil { + rfr.Range = &pb.FileRange{ + Offset: int64(r.Offset), + Length: int64(r.Length), + } + } + resp, err := r.c.client.ReadFile(ctx, rfr) + if err != nil { + return nil, err + } + return resp.Data, nil +} + +func (r *reference) ReadDir(ctx context.Context, req client.ReadDirRequest) ([]*fstypes.Stat, error) { + if err := r.c.caps.Supports(pb.CapReadDir); err != nil { + return nil, err + } + rdr := &pb.ReadDirRequest{ + DirPath: req.Path, + IncludePattern: req.IncludePattern, + Ref: r.id, + } + resp, err := r.c.client.ReadDir(ctx, rdr) + if err != nil { + return nil, err + } + return resp.Entries, nil +} + +func (r *reference) StatFile(ctx context.Context, req client.StatRequest) (*fstypes.Stat, error) { + if err := r.c.caps.Supports(pb.CapStatFile); err != nil { + return nil, err + } + rdr := &pb.StatFileRequest{ + Path: req.Path, + Ref: r.id, + } + resp, err := r.c.client.StatFile(ctx, rdr) + if err != nil { + return nil, err + } + return resp.Stat, nil +} + +func grpcClientConn(ctx context.Context) (context.Context, *grpc.ClientConn, error) { + dialOpt := grpc.WithDialer(func(addr string, d time.Duration) (net.Conn, error) { + return stdioConn(), nil + }) + + cc, err := grpc.DialContext(ctx, "", dialOpt, grpc.WithInsecure()) + if err != nil { + return nil, nil, errors.Wrap(err, "failed to create grpc client") + } + + ctx, cancel := context.WithCancel(ctx) + _ = cancel + // go monitorHealth(ctx, cc, cancel) + + return ctx, cc, nil +} + +func stdioConn() net.Conn { + return &conn{os.Stdin, os.Stdout, os.Stdout} +} + +type conn struct { + io.Reader + io.Writer + io.Closer +} + +func (s *conn) LocalAddr() net.Addr { + return dummyAddr{} +} +func (s *conn) RemoteAddr() net.Addr { + return dummyAddr{} +} +func (s *conn) SetDeadline(t time.Time) error { + return nil +} +func (s *conn) SetReadDeadline(t time.Time) error { + return nil +} +func (s *conn) SetWriteDeadline(t time.Time) error { + return nil +} + +type dummyAddr struct { +} + +func (d dummyAddr) Network() string { + return "pipe" +} + +func (d dummyAddr) String() string { + return 
"localhost" +} + +func opts() map[string]string { + opts := map[string]string{} + for _, env := range os.Environ() { + parts := strings.SplitN(env, "=", 2) + k := parts[0] + v := "" + if len(parts) == 2 { + v = parts[1] + } + if !strings.HasPrefix(k, frontendPrefix) { + continue + } + parts = strings.SplitN(v, "=", 2) + v = "" + if len(parts) == 2 { + v = parts[1] + } + opts[parts[0]] = v + } + return opts +} + +func sessionID() string { + return os.Getenv("BUILDKIT_SESSION_ID") +} + +func workers() []client.WorkerInfo { + var c []client.WorkerInfo + if err := json.Unmarshal([]byte(os.Getenv("BUILDKIT_WORKERS")), &c); err != nil { + return nil + } + return c +} + +func product() string { + return os.Getenv("BUILDKIT_EXPORTEDPRODUCT") +} diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/pb/caps.go b/vendor/github.com/moby/buildkit/frontend/gateway/pb/caps.go new file mode 100644 index 00000000..afb51cf2 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/gateway/pb/caps.go @@ -0,0 +1,139 @@ +package moby_buildkit_v1_frontend + +import "github.com/moby/buildkit/util/apicaps" + +var Caps apicaps.CapList + +// Every backwards or forwards non-compatible change needs to add a new capability row. +// By default new capabilities should be experimental. After merge a capability is +// considered immutable. After a capability is marked stable it should not be disabled. + +const ( + CapSolveBase apicaps.CapID = "solve.base" + CapSolveInlineReturn apicaps.CapID = "solve.inlinereturn" + CapResolveImage apicaps.CapID = "resolveimage" + CapResolveImageResolveMode apicaps.CapID = "resolveimage.resolvemode" + CapReadFile apicaps.CapID = "readfile" + CapReturnResult apicaps.CapID = "return" + CapReturnMap apicaps.CapID = "returnmap" + CapReadDir apicaps.CapID = "readdir" + CapStatFile apicaps.CapID = "statfile" + CapImportCaches apicaps.CapID = "importcaches" + + // CapProtoRefArray is a capability to return arrays of refs instead of single + // refs. This capability is only for the wire format change and shouldn't be + // used in frontends for feature detection. + CapProtoRefArray apicaps.CapID = "proto.refarray" + + // CapReferenceOutput is a capability to use a reference of a solved result as + // an llb.Output. + CapReferenceOutput apicaps.CapID = "reference.output" + + // CapFrontendInputs is a capability to request frontend inputs from the + // LLBBridge GRPC server. 
+ CapFrontendInputs apicaps.CapID = "frontend.inputs" + + // CapGatewaySolveMetadata can be used to check if solve calls from gateway reliably return metadata + CapGatewaySolveMetadata apicaps.CapID = "gateway.solve.metadata" +) + +func init() { + + Caps.Init(apicaps.Cap{ + ID: CapSolveBase, + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + + Caps.Init(apicaps.Cap{ + ID: CapSolveInlineReturn, + Name: "inline return from solve", + Enabled: true, + Deprecated: true, + Status: apicaps.CapStatusExperimental, + }) + + Caps.Init(apicaps.Cap{ + ID: CapResolveImage, + Name: "resolve remote image config", + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + + Caps.Init(apicaps.Cap{ + ID: CapResolveImageResolveMode, + Name: "resolve remote image config with custom resolvemode", + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + + Caps.Init(apicaps.Cap{ + ID: CapReadFile, + Name: "read static file", + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + + Caps.Init(apicaps.Cap{ + ID: CapReturnResult, + Name: "return solve result", + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + + Caps.Init(apicaps.Cap{ + ID: CapReturnMap, + Name: "return reference map", + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + + Caps.Init(apicaps.Cap{ + ID: CapReadDir, + Name: "read static directory", + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + + Caps.Init(apicaps.Cap{ + ID: CapStatFile, + Name: "stat a file", + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + + Caps.Init(apicaps.Cap{ + ID: CapImportCaches, + Name: "import caches", + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + + Caps.Init(apicaps.Cap{ + ID: CapProtoRefArray, + Name: "wire format ref arrays", + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + + Caps.Init(apicaps.Cap{ + ID: CapReferenceOutput, + Name: "reference output", + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + + Caps.Init(apicaps.Cap{ + ID: CapFrontendInputs, + Name: "frontend inputs", + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + + Caps.Init(apicaps.Cap{ + ID: CapGatewaySolveMetadata, + Name: "gateway metadata", + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) +} diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.pb.go b/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.pb.go new file mode 100644 index 00000000..a155367e --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.pb.go @@ -0,0 +1,7008 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: gateway.proto + +package moby_buildkit_v1_frontend + +import ( + context "context" + fmt "fmt" + rpc "github.com/gogo/googleapis/google/rpc" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + types1 "github.com/moby/buildkit/api/types" + pb "github.com/moby/buildkit/solver/pb" + pb1 "github.com/moby/buildkit/util/apicaps/pb" + github_com_opencontainers_go_digest "github.com/opencontainers/go-digest" + types "github.com/tonistiigi/fsutil/types" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type Result struct { + // Types that are valid to be assigned to Result: + // *Result_RefDeprecated + // *Result_RefsDeprecated + // *Result_Ref + // *Result_Refs + Result isResult_Result `protobuf_oneof:"result"` + Metadata map[string][]byte `protobuf:"bytes,10,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Result) Reset() { *m = Result{} } +func (m *Result) String() string { return proto.CompactTextString(m) } +func (*Result) ProtoMessage() {} +func (*Result) Descriptor() ([]byte, []int) { + return fileDescriptor_f1a937782ebbded5, []int{0} +} +func (m *Result) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Result) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Result.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Result) XXX_Merge(src proto.Message) { + xxx_messageInfo_Result.Merge(m, src) +} +func (m *Result) XXX_Size() int { + return m.Size() +} +func (m *Result) XXX_DiscardUnknown() { + xxx_messageInfo_Result.DiscardUnknown(m) +} + +var xxx_messageInfo_Result proto.InternalMessageInfo + +type isResult_Result interface { + isResult_Result() + MarshalTo([]byte) (int, error) + Size() int +} + +type Result_RefDeprecated struct { + RefDeprecated string `protobuf:"bytes,1,opt,name=refDeprecated,proto3,oneof" json:"refDeprecated,omitempty"` +} +type Result_RefsDeprecated struct { + RefsDeprecated *RefMapDeprecated `protobuf:"bytes,2,opt,name=refsDeprecated,proto3,oneof" json:"refsDeprecated,omitempty"` +} +type Result_Ref struct { + Ref *Ref `protobuf:"bytes,3,opt,name=ref,proto3,oneof" json:"ref,omitempty"` +} +type Result_Refs struct { + Refs *RefMap `protobuf:"bytes,4,opt,name=refs,proto3,oneof" json:"refs,omitempty"` +} + +func (*Result_RefDeprecated) isResult_Result() {} +func (*Result_RefsDeprecated) isResult_Result() {} +func (*Result_Ref) isResult_Result() {} +func (*Result_Refs) isResult_Result() {} + +func (m *Result) GetResult() isResult_Result { + if m != nil { + return m.Result + } + return nil +} + +func (m *Result) GetRefDeprecated() string { + if x, ok := m.GetResult().(*Result_RefDeprecated); ok { + return x.RefDeprecated + } + return "" +} + +func (m *Result) GetRefsDeprecated() *RefMapDeprecated { + if x, ok := m.GetResult().(*Result_RefsDeprecated); ok { + return x.RefsDeprecated + } + return nil +} + +func (m *Result) GetRef() *Ref { + if x, ok := m.GetResult().(*Result_Ref); ok { + return x.Ref + } + return nil +} + +func (m *Result) GetRefs() *RefMap { + if x, ok := m.GetResult().(*Result_Refs); ok { + return x.Refs + } + return nil +} + +func (m *Result) GetMetadata() map[string][]byte { + if m != nil { + return m.Metadata + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. 
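+// It enumerates the four wrapper types that may populate the Result oneof.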
+func (*Result) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Result_RefDeprecated)(nil), + (*Result_RefsDeprecated)(nil), + (*Result_Ref)(nil), + (*Result_Refs)(nil), + } +} + +type RefMapDeprecated struct { + Refs map[string]string `protobuf:"bytes,1,rep,name=refs,proto3" json:"refs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RefMapDeprecated) Reset() { *m = RefMapDeprecated{} } +func (m *RefMapDeprecated) String() string { return proto.CompactTextString(m) } +func (*RefMapDeprecated) ProtoMessage() {} +func (*RefMapDeprecated) Descriptor() ([]byte, []int) { + return fileDescriptor_f1a937782ebbded5, []int{1} +} +func (m *RefMapDeprecated) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RefMapDeprecated) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RefMapDeprecated.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RefMapDeprecated) XXX_Merge(src proto.Message) { + xxx_messageInfo_RefMapDeprecated.Merge(m, src) +} +func (m *RefMapDeprecated) XXX_Size() int { + return m.Size() +} +func (m *RefMapDeprecated) XXX_DiscardUnknown() { + xxx_messageInfo_RefMapDeprecated.DiscardUnknown(m) +} + +var xxx_messageInfo_RefMapDeprecated proto.InternalMessageInfo + +func (m *RefMapDeprecated) GetRefs() map[string]string { + if m != nil { + return m.Refs + } + return nil +} + +type Ref struct { + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Def *pb.Definition `protobuf:"bytes,2,opt,name=def,proto3" json:"def,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Ref) Reset() { *m = Ref{} } +func (m *Ref) String() string { return proto.CompactTextString(m) } +func (*Ref) ProtoMessage() {} +func (*Ref) Descriptor() ([]byte, []int) { + return fileDescriptor_f1a937782ebbded5, []int{2} +} +func (m *Ref) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Ref) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Ref.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Ref) XXX_Merge(src proto.Message) { + xxx_messageInfo_Ref.Merge(m, src) +} +func (m *Ref) XXX_Size() int { + return m.Size() +} +func (m *Ref) XXX_DiscardUnknown() { + xxx_messageInfo_Ref.DiscardUnknown(m) +} + +var xxx_messageInfo_Ref proto.InternalMessageInfo + +func (m *Ref) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *Ref) GetDef() *pb.Definition { + if m != nil { + return m.Def + } + return nil +} + +type RefMap struct { + Refs map[string]*Ref `protobuf:"bytes,1,rep,name=refs,proto3" json:"refs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RefMap) Reset() { *m = RefMap{} } +func (m *RefMap) String() string { return proto.CompactTextString(m) } +func (*RefMap) ProtoMessage() {} +func (*RefMap) Descriptor() ([]byte, []int) { + return 
fileDescriptor_f1a937782ebbded5, []int{3} +} +func (m *RefMap) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RefMap) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RefMap.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RefMap) XXX_Merge(src proto.Message) { + xxx_messageInfo_RefMap.Merge(m, src) +} +func (m *RefMap) XXX_Size() int { + return m.Size() +} +func (m *RefMap) XXX_DiscardUnknown() { + xxx_messageInfo_RefMap.DiscardUnknown(m) +} + +var xxx_messageInfo_RefMap proto.InternalMessageInfo + +func (m *RefMap) GetRefs() map[string]*Ref { + if m != nil { + return m.Refs + } + return nil +} + +type ReturnRequest struct { + Result *Result `protobuf:"bytes,1,opt,name=result,proto3" json:"result,omitempty"` + Error *rpc.Status `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReturnRequest) Reset() { *m = ReturnRequest{} } +func (m *ReturnRequest) String() string { return proto.CompactTextString(m) } +func (*ReturnRequest) ProtoMessage() {} +func (*ReturnRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f1a937782ebbded5, []int{4} +} +func (m *ReturnRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReturnRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ReturnRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ReturnRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReturnRequest.Merge(m, src) +} +func (m *ReturnRequest) XXX_Size() int { + return m.Size() +} +func (m *ReturnRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ReturnRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ReturnRequest proto.InternalMessageInfo + +func (m *ReturnRequest) GetResult() *Result { + if m != nil { + return m.Result + } + return nil +} + +func (m *ReturnRequest) GetError() *rpc.Status { + if m != nil { + return m.Error + } + return nil +} + +type ReturnResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReturnResponse) Reset() { *m = ReturnResponse{} } +func (m *ReturnResponse) String() string { return proto.CompactTextString(m) } +func (*ReturnResponse) ProtoMessage() {} +func (*ReturnResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f1a937782ebbded5, []int{5} +} +func (m *ReturnResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReturnResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ReturnResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ReturnResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReturnResponse.Merge(m, src) +} +func (m *ReturnResponse) XXX_Size() int { + return m.Size() +} +func (m *ReturnResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ReturnResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ReturnResponse proto.InternalMessageInfo + +type InputsRequest struct { + 
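// InputsRequest has no settable fields; it only asks the bridge to return + // the frontend's named input definitions. +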
XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InputsRequest) Reset() { *m = InputsRequest{} } +func (m *InputsRequest) String() string { return proto.CompactTextString(m) } +func (*InputsRequest) ProtoMessage() {} +func (*InputsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f1a937782ebbded5, []int{6} +} +func (m *InputsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *InputsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_InputsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *InputsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_InputsRequest.Merge(m, src) +} +func (m *InputsRequest) XXX_Size() int { + return m.Size() +} +func (m *InputsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_InputsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_InputsRequest proto.InternalMessageInfo + +type InputsResponse struct { + Definitions map[string]*pb.Definition `protobuf:"bytes,1,rep,name=Definitions,proto3" json:"Definitions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InputsResponse) Reset() { *m = InputsResponse{} } +func (m *InputsResponse) String() string { return proto.CompactTextString(m) } +func (*InputsResponse) ProtoMessage() {} +func (*InputsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f1a937782ebbded5, []int{7} +} +func (m *InputsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *InputsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_InputsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *InputsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_InputsResponse.Merge(m, src) +} +func (m *InputsResponse) XXX_Size() int { + return m.Size() +} +func (m *InputsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_InputsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_InputsResponse proto.InternalMessageInfo + +func (m *InputsResponse) GetDefinitions() map[string]*pb.Definition { + if m != nil { + return m.Definitions + } + return nil +} + +type ResolveImageConfigRequest struct { + Ref string `protobuf:"bytes,1,opt,name=Ref,proto3" json:"Ref,omitempty"` + Platform *pb.Platform `protobuf:"bytes,2,opt,name=Platform,proto3" json:"Platform,omitempty"` + ResolveMode string `protobuf:"bytes,3,opt,name=ResolveMode,proto3" json:"ResolveMode,omitempty"` + LogName string `protobuf:"bytes,4,opt,name=LogName,proto3" json:"LogName,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResolveImageConfigRequest) Reset() { *m = ResolveImageConfigRequest{} } +func (m *ResolveImageConfigRequest) String() string { return proto.CompactTextString(m) } +func (*ResolveImageConfigRequest) ProtoMessage() {} +func (*ResolveImageConfigRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f1a937782ebbded5, []int{8} +} +func (m *ResolveImageConfigRequest) XXX_Unmarshal(b 
[]byte) error { + return m.Unmarshal(b) +} +func (m *ResolveImageConfigRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResolveImageConfigRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResolveImageConfigRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResolveImageConfigRequest.Merge(m, src) +} +func (m *ResolveImageConfigRequest) XXX_Size() int { + return m.Size() +} +func (m *ResolveImageConfigRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ResolveImageConfigRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ResolveImageConfigRequest proto.InternalMessageInfo + +func (m *ResolveImageConfigRequest) GetRef() string { + if m != nil { + return m.Ref + } + return "" +} + +func (m *ResolveImageConfigRequest) GetPlatform() *pb.Platform { + if m != nil { + return m.Platform + } + return nil +} + +func (m *ResolveImageConfigRequest) GetResolveMode() string { + if m != nil { + return m.ResolveMode + } + return "" +} + +func (m *ResolveImageConfigRequest) GetLogName() string { + if m != nil { + return m.LogName + } + return "" +} + +type ResolveImageConfigResponse struct { + Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,1,opt,name=Digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"Digest"` + Config []byte `protobuf:"bytes,2,opt,name=Config,proto3" json:"Config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResolveImageConfigResponse) Reset() { *m = ResolveImageConfigResponse{} } +func (m *ResolveImageConfigResponse) String() string { return proto.CompactTextString(m) } +func (*ResolveImageConfigResponse) ProtoMessage() {} +func (*ResolveImageConfigResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f1a937782ebbded5, []int{9} +} +func (m *ResolveImageConfigResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResolveImageConfigResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResolveImageConfigResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResolveImageConfigResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResolveImageConfigResponse.Merge(m, src) +} +func (m *ResolveImageConfigResponse) XXX_Size() int { + return m.Size() +} +func (m *ResolveImageConfigResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ResolveImageConfigResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ResolveImageConfigResponse proto.InternalMessageInfo + +func (m *ResolveImageConfigResponse) GetConfig() []byte { + if m != nil { + return m.Config + } + return nil +} + +type SolveRequest struct { + Definition *pb.Definition `protobuf:"bytes,1,opt,name=Definition,proto3" json:"Definition,omitempty"` + Frontend string `protobuf:"bytes,2,opt,name=Frontend,proto3" json:"Frontend,omitempty"` + FrontendOpt map[string]string `protobuf:"bytes,3,rep,name=FrontendOpt,proto3" json:"FrontendOpt,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // ImportCacheRefsDeprecated is deprecated in favor of the new Imports since BuildKit v0.4.0.
+ // When ImportCacheRefsDeprecated is set, the solver appends + // {.Type = "registry", .Attrs = {"ref": importCacheRef}} + // for each of the ImportCacheRefs entries to CacheImports for compatibility. (planned to be removed) + ImportCacheRefsDeprecated []string `protobuf:"bytes,4,rep,name=ImportCacheRefsDeprecated,proto3" json:"ImportCacheRefsDeprecated,omitempty"` + AllowResultReturn bool `protobuf:"varint,5,opt,name=allowResultReturn,proto3" json:"allowResultReturn,omitempty"` + AllowResultArrayRef bool `protobuf:"varint,6,opt,name=allowResultArrayRef,proto3" json:"allowResultArrayRef,omitempty"` + // apicaps.CapSolveInlineReturn deprecated + Final bool `protobuf:"varint,10,opt,name=Final,proto3" json:"Final,omitempty"` + ExporterAttr []byte `protobuf:"bytes,11,opt,name=ExporterAttr,proto3" json:"ExporterAttr,omitempty"` + // CacheImports was added in BuildKit v0.4.0. + // apicaps:CapImportCaches + CacheImports []*CacheOptionsEntry `protobuf:"bytes,12,rep,name=CacheImports,proto3" json:"CacheImports,omitempty"` + // apicaps:CapFrontendInputs + FrontendInputs map[string]*pb.Definition `protobuf:"bytes,13,rep,name=FrontendInputs,proto3" json:"FrontendInputs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SolveRequest) Reset() { *m = SolveRequest{} } +func (m *SolveRequest) String() string { return proto.CompactTextString(m) } +func (*SolveRequest) ProtoMessage() {} +func (*SolveRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f1a937782ebbded5, []int{10} +} +func (m *SolveRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SolveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SolveRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SolveRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SolveRequest.Merge(m, src) +} +func (m *SolveRequest) XXX_Size() int { + return m.Size() +} +func (m *SolveRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SolveRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SolveRequest proto.InternalMessageInfo + +func (m *SolveRequest) GetDefinition() *pb.Definition { + if m != nil { + return m.Definition + } + return nil +} + +func (m *SolveRequest) GetFrontend() string { + if m != nil { + return m.Frontend + } + return "" +} + +func (m *SolveRequest) GetFrontendOpt() map[string]string { + if m != nil { + return m.FrontendOpt + } + return nil +} + +func (m *SolveRequest) GetImportCacheRefsDeprecated() []string { + if m != nil { + return m.ImportCacheRefsDeprecated + } + return nil +} + +func (m *SolveRequest) GetAllowResultReturn() bool { + if m != nil { + return m.AllowResultReturn + } + return false +} + +func (m *SolveRequest) GetAllowResultArrayRef() bool { + if m != nil { + return m.AllowResultArrayRef + } + return false +} + +func (m *SolveRequest) GetFinal() bool { + if m != nil { + return m.Final + } + return false +} + +func (m *SolveRequest) GetExporterAttr() []byte { + if m != nil { + return m.ExporterAttr + } + return nil +} + +func (m *SolveRequest) GetCacheImports() []*CacheOptionsEntry { + if m != nil { + return m.CacheImports + } + return nil +} + +func (m *SolveRequest) GetFrontendInputs() map[string]*pb.Definition { + if m != nil { +
return m.FrontendInputs + } + return nil +} + +// CacheOptionsEntry corresponds to the control.CacheOptionsEntry +type CacheOptionsEntry struct { + Type string `protobuf:"bytes,1,opt,name=Type,proto3" json:"Type,omitempty"` + Attrs map[string]string `protobuf:"bytes,2,rep,name=Attrs,proto3" json:"Attrs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CacheOptionsEntry) Reset() { *m = CacheOptionsEntry{} } +func (m *CacheOptionsEntry) String() string { return proto.CompactTextString(m) } +func (*CacheOptionsEntry) ProtoMessage() {} +func (*CacheOptionsEntry) Descriptor() ([]byte, []int) { + return fileDescriptor_f1a937782ebbded5, []int{11} +} +func (m *CacheOptionsEntry) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CacheOptionsEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CacheOptionsEntry.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CacheOptionsEntry) XXX_Merge(src proto.Message) { + xxx_messageInfo_CacheOptionsEntry.Merge(m, src) +} +func (m *CacheOptionsEntry) XXX_Size() int { + return m.Size() +} +func (m *CacheOptionsEntry) XXX_DiscardUnknown() { + xxx_messageInfo_CacheOptionsEntry.DiscardUnknown(m) +} + +var xxx_messageInfo_CacheOptionsEntry proto.InternalMessageInfo + +func (m *CacheOptionsEntry) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *CacheOptionsEntry) GetAttrs() map[string]string { + if m != nil { + return m.Attrs + } + return nil +} + +type SolveResponse struct { + // deprecated + Ref string `protobuf:"bytes,1,opt,name=ref,proto3" json:"ref,omitempty"` + // these fields are returned when allowMapReturn was set + Result *Result `protobuf:"bytes,3,opt,name=result,proto3" json:"result,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SolveResponse) Reset() { *m = SolveResponse{} } +func (m *SolveResponse) String() string { return proto.CompactTextString(m) } +func (*SolveResponse) ProtoMessage() {} +func (*SolveResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f1a937782ebbded5, []int{12} +} +func (m *SolveResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SolveResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SolveResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SolveResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SolveResponse.Merge(m, src) +} +func (m *SolveResponse) XXX_Size() int { + return m.Size() +} +func (m *SolveResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SolveResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SolveResponse proto.InternalMessageInfo + +func (m *SolveResponse) GetRef() string { + if m != nil { + return m.Ref + } + return "" +} + +func (m *SolveResponse) GetResult() *Result { + if m != nil { + return m.Result + } + return nil +} + +type ReadFileRequest struct { + Ref string `protobuf:"bytes,1,opt,name=Ref,proto3" json:"Ref,omitempty"` + FilePath string 
`protobuf:"bytes,2,opt,name=FilePath,proto3" json:"FilePath,omitempty"` + Range *FileRange `protobuf:"bytes,3,opt,name=Range,proto3" json:"Range,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReadFileRequest) Reset() { *m = ReadFileRequest{} } +func (m *ReadFileRequest) String() string { return proto.CompactTextString(m) } +func (*ReadFileRequest) ProtoMessage() {} +func (*ReadFileRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f1a937782ebbded5, []int{13} +} +func (m *ReadFileRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReadFileRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ReadFileRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ReadFileRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReadFileRequest.Merge(m, src) +} +func (m *ReadFileRequest) XXX_Size() int { + return m.Size() +} +func (m *ReadFileRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ReadFileRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ReadFileRequest proto.InternalMessageInfo + +func (m *ReadFileRequest) GetRef() string { + if m != nil { + return m.Ref + } + return "" +} + +func (m *ReadFileRequest) GetFilePath() string { + if m != nil { + return m.FilePath + } + return "" +} + +func (m *ReadFileRequest) GetRange() *FileRange { + if m != nil { + return m.Range + } + return nil +} + +type FileRange struct { + Offset int64 `protobuf:"varint,1,opt,name=Offset,proto3" json:"Offset,omitempty"` + Length int64 `protobuf:"varint,2,opt,name=Length,proto3" json:"Length,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileRange) Reset() { *m = FileRange{} } +func (m *FileRange) String() string { return proto.CompactTextString(m) } +func (*FileRange) ProtoMessage() {} +func (*FileRange) Descriptor() ([]byte, []int) { + return fileDescriptor_f1a937782ebbded5, []int{14} +} +func (m *FileRange) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FileRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_FileRange.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *FileRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileRange.Merge(m, src) +} +func (m *FileRange) XXX_Size() int { + return m.Size() +} +func (m *FileRange) XXX_DiscardUnknown() { + xxx_messageInfo_FileRange.DiscardUnknown(m) +} + +var xxx_messageInfo_FileRange proto.InternalMessageInfo + +func (m *FileRange) GetOffset() int64 { + if m != nil { + return m.Offset + } + return 0 +} + +func (m *FileRange) GetLength() int64 { + if m != nil { + return m.Length + } + return 0 +} + +type ReadFileResponse struct { + Data []byte `protobuf:"bytes,1,opt,name=Data,proto3" json:"Data,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReadFileResponse) Reset() { *m = ReadFileResponse{} } +func (m *ReadFileResponse) String() string { return proto.CompactTextString(m) } +func (*ReadFileResponse) ProtoMessage() {} +func (*ReadFileResponse) Descriptor() ([]byte, []int) 
{ + return fileDescriptor_f1a937782ebbded5, []int{15} +} +func (m *ReadFileResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReadFileResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ReadFileResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ReadFileResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReadFileResponse.Merge(m, src) +} +func (m *ReadFileResponse) XXX_Size() int { + return m.Size() +} +func (m *ReadFileResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ReadFileResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ReadFileResponse proto.InternalMessageInfo + +func (m *ReadFileResponse) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +type ReadDirRequest struct { + Ref string `protobuf:"bytes,1,opt,name=Ref,proto3" json:"Ref,omitempty"` + DirPath string `protobuf:"bytes,2,opt,name=DirPath,proto3" json:"DirPath,omitempty"` + IncludePattern string `protobuf:"bytes,3,opt,name=IncludePattern,proto3" json:"IncludePattern,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReadDirRequest) Reset() { *m = ReadDirRequest{} } +func (m *ReadDirRequest) String() string { return proto.CompactTextString(m) } +func (*ReadDirRequest) ProtoMessage() {} +func (*ReadDirRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f1a937782ebbded5, []int{16} +} +func (m *ReadDirRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReadDirRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ReadDirRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ReadDirRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReadDirRequest.Merge(m, src) +} +func (m *ReadDirRequest) XXX_Size() int { + return m.Size() +} +func (m *ReadDirRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ReadDirRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ReadDirRequest proto.InternalMessageInfo + +func (m *ReadDirRequest) GetRef() string { + if m != nil { + return m.Ref + } + return "" +} + +func (m *ReadDirRequest) GetDirPath() string { + if m != nil { + return m.DirPath + } + return "" +} + +func (m *ReadDirRequest) GetIncludePattern() string { + if m != nil { + return m.IncludePattern + } + return "" +} + +type ReadDirResponse struct { + Entries []*types.Stat `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReadDirResponse) Reset() { *m = ReadDirResponse{} } +func (m *ReadDirResponse) String() string { return proto.CompactTextString(m) } +func (*ReadDirResponse) ProtoMessage() {} +func (*ReadDirResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f1a937782ebbded5, []int{17} +} +func (m *ReadDirResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReadDirResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ReadDirResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != 
nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ReadDirResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReadDirResponse.Merge(m, src) +} +func (m *ReadDirResponse) XXX_Size() int { + return m.Size() +} +func (m *ReadDirResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ReadDirResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ReadDirResponse proto.InternalMessageInfo + +func (m *ReadDirResponse) GetEntries() []*types.Stat { + if m != nil { + return m.Entries + } + return nil +} + +type StatFileRequest struct { + Ref string `protobuf:"bytes,1,opt,name=Ref,proto3" json:"Ref,omitempty"` + Path string `protobuf:"bytes,2,opt,name=Path,proto3" json:"Path,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StatFileRequest) Reset() { *m = StatFileRequest{} } +func (m *StatFileRequest) String() string { return proto.CompactTextString(m) } +func (*StatFileRequest) ProtoMessage() {} +func (*StatFileRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f1a937782ebbded5, []int{18} +} +func (m *StatFileRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatFileRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StatFileRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *StatFileRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatFileRequest.Merge(m, src) +} +func (m *StatFileRequest) XXX_Size() int { + return m.Size() +} +func (m *StatFileRequest) XXX_DiscardUnknown() { + xxx_messageInfo_StatFileRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_StatFileRequest proto.InternalMessageInfo + +func (m *StatFileRequest) GetRef() string { + if m != nil { + return m.Ref + } + return "" +} + +func (m *StatFileRequest) GetPath() string { + if m != nil { + return m.Path + } + return "" +} + +type StatFileResponse struct { + Stat *types.Stat `protobuf:"bytes,1,opt,name=stat,proto3" json:"stat,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StatFileResponse) Reset() { *m = StatFileResponse{} } +func (m *StatFileResponse) String() string { return proto.CompactTextString(m) } +func (*StatFileResponse) ProtoMessage() {} +func (*StatFileResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f1a937782ebbded5, []int{19} +} +func (m *StatFileResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatFileResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StatFileResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *StatFileResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatFileResponse.Merge(m, src) +} +func (m *StatFileResponse) XXX_Size() int { + return m.Size() +} +func (m *StatFileResponse) XXX_DiscardUnknown() { + xxx_messageInfo_StatFileResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_StatFileResponse proto.InternalMessageInfo + +func (m *StatFileResponse) GetStat() *types.Stat { + if m != nil { + return m.Stat + } + return nil +} + +type PingRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + 
XXX_sizecache int32 `json:"-"` +} + +func (m *PingRequest) Reset() { *m = PingRequest{} } +func (m *PingRequest) String() string { return proto.CompactTextString(m) } +func (*PingRequest) ProtoMessage() {} +func (*PingRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f1a937782ebbded5, []int{20} +} +func (m *PingRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PingRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PingRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PingRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_PingRequest.Merge(m, src) +} +func (m *PingRequest) XXX_Size() int { + return m.Size() +} +func (m *PingRequest) XXX_DiscardUnknown() { + xxx_messageInfo_PingRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_PingRequest proto.InternalMessageInfo + +type PongResponse struct { + FrontendAPICaps []pb1.APICap `protobuf:"bytes,1,rep,name=FrontendAPICaps,proto3" json:"FrontendAPICaps"` + LLBCaps []pb1.APICap `protobuf:"bytes,2,rep,name=LLBCaps,proto3" json:"LLBCaps"` + Workers []*types1.WorkerRecord `protobuf:"bytes,3,rep,name=Workers,proto3" json:"Workers,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PongResponse) Reset() { *m = PongResponse{} } +func (m *PongResponse) String() string { return proto.CompactTextString(m) } +func (*PongResponse) ProtoMessage() {} +func (*PongResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f1a937782ebbded5, []int{21} +} +func (m *PongResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PongResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PongResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PongResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_PongResponse.Merge(m, src) +} +func (m *PongResponse) XXX_Size() int { + return m.Size() +} +func (m *PongResponse) XXX_DiscardUnknown() { + xxx_messageInfo_PongResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_PongResponse proto.InternalMessageInfo + +func (m *PongResponse) GetFrontendAPICaps() []pb1.APICap { + if m != nil { + return m.FrontendAPICaps + } + return nil +} + +func (m *PongResponse) GetLLBCaps() []pb1.APICap { + if m != nil { + return m.LLBCaps + } + return nil +} + +func (m *PongResponse) GetWorkers() []*types1.WorkerRecord { + if m != nil { + return m.Workers + } + return nil +} + +func init() { + proto.RegisterType((*Result)(nil), "moby.buildkit.v1.frontend.Result") + proto.RegisterMapType((map[string][]byte)(nil), "moby.buildkit.v1.frontend.Result.MetadataEntry") + proto.RegisterType((*RefMapDeprecated)(nil), "moby.buildkit.v1.frontend.RefMapDeprecated") + proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.frontend.RefMapDeprecated.RefsEntry") + proto.RegisterType((*Ref)(nil), "moby.buildkit.v1.frontend.Ref") + proto.RegisterType((*RefMap)(nil), "moby.buildkit.v1.frontend.RefMap") + proto.RegisterMapType((map[string]*Ref)(nil), "moby.buildkit.v1.frontend.RefMap.RefsEntry") + proto.RegisterType((*ReturnRequest)(nil), "moby.buildkit.v1.frontend.ReturnRequest") + 
proto.RegisterType((*ReturnResponse)(nil), "moby.buildkit.v1.frontend.ReturnResponse") + proto.RegisterType((*InputsRequest)(nil), "moby.buildkit.v1.frontend.InputsRequest") + proto.RegisterType((*InputsResponse)(nil), "moby.buildkit.v1.frontend.InputsResponse") + proto.RegisterMapType((map[string]*pb.Definition)(nil), "moby.buildkit.v1.frontend.InputsResponse.DefinitionsEntry") + proto.RegisterType((*ResolveImageConfigRequest)(nil), "moby.buildkit.v1.frontend.ResolveImageConfigRequest") + proto.RegisterType((*ResolveImageConfigResponse)(nil), "moby.buildkit.v1.frontend.ResolveImageConfigResponse") + proto.RegisterType((*SolveRequest)(nil), "moby.buildkit.v1.frontend.SolveRequest") + proto.RegisterMapType((map[string]*pb.Definition)(nil), "moby.buildkit.v1.frontend.SolveRequest.FrontendInputsEntry") + proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.frontend.SolveRequest.FrontendOptEntry") + proto.RegisterType((*CacheOptionsEntry)(nil), "moby.buildkit.v1.frontend.CacheOptionsEntry") + proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.frontend.CacheOptionsEntry.AttrsEntry") + proto.RegisterType((*SolveResponse)(nil), "moby.buildkit.v1.frontend.SolveResponse") + proto.RegisterType((*ReadFileRequest)(nil), "moby.buildkit.v1.frontend.ReadFileRequest") + proto.RegisterType((*FileRange)(nil), "moby.buildkit.v1.frontend.FileRange") + proto.RegisterType((*ReadFileResponse)(nil), "moby.buildkit.v1.frontend.ReadFileResponse") + proto.RegisterType((*ReadDirRequest)(nil), "moby.buildkit.v1.frontend.ReadDirRequest") + proto.RegisterType((*ReadDirResponse)(nil), "moby.buildkit.v1.frontend.ReadDirResponse") + proto.RegisterType((*StatFileRequest)(nil), "moby.buildkit.v1.frontend.StatFileRequest") + proto.RegisterType((*StatFileResponse)(nil), "moby.buildkit.v1.frontend.StatFileResponse") + proto.RegisterType((*PingRequest)(nil), "moby.buildkit.v1.frontend.PingRequest") + proto.RegisterType((*PongResponse)(nil), "moby.buildkit.v1.frontend.PongResponse") +} + +func init() { proto.RegisterFile("gateway.proto", fileDescriptor_f1a937782ebbded5) } + +var fileDescriptor_f1a937782ebbded5 = []byte{ + // 1436 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x57, 0xcd, 0x6e, 0xdb, 0xc6, + 0x16, 0x36, 0xad, 0x1f, 0x5b, 0x47, 0x92, 0xad, 0x4c, 0x2e, 0x2e, 0x14, 0x2e, 0x1c, 0x5d, 0x22, + 0xf0, 0x55, 0x12, 0x87, 0x6a, 0x9d, 0x04, 0x4e, 0x9d, 0x22, 0x69, 0x14, 0x27, 0xb0, 0x5b, 0x3b, + 0x51, 0x27, 0x2d, 0x02, 0x04, 0x29, 0x50, 0x4a, 0x1c, 0x32, 0x44, 0x24, 0x0e, 0x3b, 0x1c, 0x25, + 0x15, 0xba, 0x69, 0x77, 0xdd, 0x17, 0xe8, 0x03, 0xf4, 0x01, 0x8a, 0x3e, 0x41, 0xd7, 0x59, 0x76, + 0x59, 0x74, 0x11, 0x14, 0x7e, 0x92, 0x62, 0x7e, 0x68, 0x51, 0xb2, 0x4c, 0x59, 0xe8, 0x4a, 0x33, + 0x87, 0xe7, 0x3b, 0x73, 0xce, 0x37, 0xe7, 0x67, 0x04, 0x55, 0xdf, 0xe1, 0xe4, 0xad, 0x33, 0xb2, + 0x23, 0x46, 0x39, 0x45, 0x97, 0x06, 0xb4, 0x3b, 0xb2, 0xbb, 0xc3, 0xa0, 0xef, 0xbe, 0x0e, 0xb8, + 0xfd, 0xe6, 0x43, 0xdb, 0x63, 0x34, 0xe4, 0x24, 0x74, 0xcd, 0x1b, 0x7e, 0xc0, 0x5f, 0x0d, 0xbb, + 0x76, 0x8f, 0x0e, 0x5a, 0x3e, 0xf5, 0x69, 0x4b, 0x22, 0xba, 0x43, 0x4f, 0xee, 0xe4, 0x46, 0xae, + 0x94, 0x25, 0x73, 0x7b, 0x5a, 0xdd, 0xa7, 0xd4, 0xef, 0x13, 0x27, 0x0a, 0x62, 0xbd, 0x6c, 0xb1, + 0xa8, 0xd7, 0x8a, 0xb9, 0xc3, 0x87, 0xb1, 0xc6, 0x6c, 0xa5, 0x30, 0xc2, 0x91, 0x56, 0xe2, 0x48, + 0x2b, 0xa6, 0xfd, 0x37, 0x84, 0xb5, 0xa2, 0x6e, 0x8b, 0x46, 0x89, 0x76, 0xeb, 0x4c, 0x6d, 0x27, + 0x0a, 0x5a, 0x7c, 0x14, 0x91, 0xb8, 0xf5, 0x96, 0xb2, 0xd7, 0x84, 0x69, 0xc0, 0xcd, 
0x33, 0x01, + 0x43, 0x1e, 0xf4, 0x05, 0xaa, 0xe7, 0x44, 0xb1, 0x38, 0x44, 0xfc, 0x6a, 0x50, 0x3a, 0x6c, 0x4e, + 0xc3, 0x20, 0xe6, 0x41, 0xe0, 0x07, 0x2d, 0x2f, 0x96, 0x18, 0x75, 0x8a, 0x08, 0x42, 0xa9, 0x5b, + 0x3f, 0xe6, 0xa0, 0x88, 0x49, 0x3c, 0xec, 0x73, 0xb4, 0x09, 0x55, 0x46, 0xbc, 0x3d, 0x12, 0x31, + 0xd2, 0x73, 0x38, 0x71, 0xeb, 0x46, 0xc3, 0x68, 0x96, 0xf6, 0x97, 0xf0, 0xa4, 0x18, 0x7d, 0x09, + 0x6b, 0x8c, 0x78, 0x71, 0x4a, 0x71, 0xb9, 0x61, 0x34, 0xcb, 0xdb, 0xd7, 0xed, 0x33, 0x2f, 0xc3, + 0xc6, 0xc4, 0x3b, 0x72, 0xa2, 0x31, 0x64, 0x7f, 0x09, 0x4f, 0x19, 0x41, 0xdb, 0x90, 0x63, 0xc4, + 0xab, 0xe7, 0xa4, 0xad, 0x8d, 0x6c, 0x5b, 0xfb, 0x4b, 0x58, 0x28, 0xa3, 0x1d, 0xc8, 0x0b, 0x2b, + 0xf5, 0xbc, 0x04, 0xfd, 0x6f, 0xae, 0x03, 0xfb, 0x4b, 0x58, 0x02, 0xd0, 0x67, 0xb0, 0x3a, 0x20, + 0xdc, 0x71, 0x1d, 0xee, 0xd4, 0xa1, 0x91, 0x6b, 0x96, 0xb7, 0x5b, 0x99, 0x60, 0x41, 0x90, 0x7d, + 0xa4, 0x11, 0x8f, 0x42, 0xce, 0x46, 0xf8, 0xc4, 0x80, 0x79, 0x17, 0xaa, 0x13, 0x9f, 0x50, 0x0d, + 0x72, 0xaf, 0xc9, 0x48, 0xf1, 0x87, 0xc5, 0x12, 0xfd, 0x07, 0x0a, 0x6f, 0x9c, 0xfe, 0x90, 0x48, + 0xaa, 0x2a, 0x58, 0x6d, 0x76, 0x97, 0xef, 0x18, 0xed, 0x55, 0x28, 0x32, 0x69, 0xde, 0xfa, 0xd9, + 0x80, 0xda, 0x34, 0x4f, 0xe8, 0x40, 0x47, 0x68, 0x48, 0x27, 0x6f, 0x2f, 0x40, 0xb1, 0x10, 0xc4, + 0xca, 0x55, 0x69, 0xc2, 0xdc, 0x81, 0xd2, 0x89, 0x68, 0x9e, 0x8b, 0xa5, 0x94, 0x8b, 0xd6, 0x0e, + 0xe4, 0x30, 0xf1, 0xd0, 0x1a, 0x2c, 0x07, 0x3a, 0x29, 0xf0, 0x72, 0xe0, 0xa2, 0x06, 0xe4, 0x5c, + 0xe2, 0xe9, 0xcb, 0x5f, 0xb3, 0xa3, 0xae, 0xbd, 0x47, 0xbc, 0x20, 0x0c, 0x78, 0x40, 0x43, 0x2c, + 0x3e, 0x59, 0xbf, 0x18, 0x22, 0xb9, 0x84, 0x5b, 0xe8, 0xfe, 0x44, 0x1c, 0xf3, 0x53, 0xe5, 0x94, + 0xf7, 0xcf, 0xb3, 0xbd, 0xbf, 0x95, 0xf6, 0x7e, 0x6e, 0xfe, 0xa4, 0xa3, 0xe3, 0x50, 0xc5, 0x84, + 0x0f, 0x59, 0x88, 0xc9, 0x37, 0x43, 0x12, 0x73, 0xf4, 0x51, 0x72, 0x23, 0xd2, 0xfe, 0xbc, 0xb4, + 0x12, 0x8a, 0x58, 0x03, 0x50, 0x13, 0x0a, 0x84, 0x31, 0xca, 0xb4, 0x17, 0xc8, 0x56, 0x9d, 0xc3, + 0x66, 0x51, 0xcf, 0x7e, 0x26, 0x3b, 0x07, 0x56, 0x0a, 0x56, 0x0d, 0xd6, 0x92, 0x53, 0xe3, 0x88, + 0x86, 0x31, 0xb1, 0xd6, 0xa1, 0x7a, 0x10, 0x46, 0x43, 0x1e, 0x6b, 0x3f, 0xac, 0xdf, 0x0d, 0x58, + 0x4b, 0x24, 0x4a, 0x07, 0xbd, 0x84, 0xf2, 0x98, 0xe3, 0x84, 0xcc, 0xdd, 0x0c, 0xff, 0x26, 0xf1, + 0xa9, 0x0b, 0xd2, 0xdc, 0xa6, 0xcd, 0x99, 0x4f, 0xa0, 0x36, 0xad, 0x30, 0x83, 0xe9, 0x2b, 0x93, + 0x4c, 0x4f, 0x5f, 0x7c, 0x8a, 0xd9, 0x9f, 0x0c, 0xb8, 0x84, 0x89, 0x6c, 0x85, 0x07, 0x03, 0xc7, + 0x27, 0x0f, 0x69, 0xe8, 0x05, 0x7e, 0x42, 0x73, 0x4d, 0x66, 0x55, 0x62, 0x59, 0x24, 0x58, 0x13, + 0x56, 0x3b, 0x7d, 0x87, 0x7b, 0x94, 0x0d, 0xb4, 0xf1, 0x8a, 0x30, 0x9e, 0xc8, 0xf0, 0xc9, 0x57, + 0xd4, 0x80, 0xb2, 0x36, 0x7c, 0x44, 0x5d, 0x22, 0x7b, 0x46, 0x09, 0xa7, 0x45, 0xa8, 0x0e, 0x2b, + 0x87, 0xd4, 0x7f, 0xe2, 0x0c, 0x88, 0x6c, 0x0e, 0x25, 0x9c, 0x6c, 0xad, 0xef, 0x0d, 0x30, 0x67, + 0x79, 0xa5, 0x29, 0xfe, 0x14, 0x8a, 0x7b, 0x81, 0x4f, 0x62, 0x75, 0xfb, 0xa5, 0xf6, 0xf6, 0xbb, + 0xf7, 0x97, 0x97, 0xfe, 0x7a, 0x7f, 0xf9, 0x5a, 0xaa, 0xaf, 0xd2, 0x88, 0x84, 0x3d, 0x1a, 0x72, + 0x27, 0x08, 0x09, 0x13, 0xe3, 0xe1, 0x86, 0x2b, 0x21, 0xb6, 0x42, 0x62, 0x6d, 0x01, 0xfd, 0x17, + 0x8a, 0xca, 0xba, 0x2e, 0x7b, 0xbd, 0xb3, 0xfe, 0x2c, 0x40, 0xe5, 0x99, 0x70, 0x20, 0xe1, 0xc2, + 0x06, 0x18, 0x53, 0xa8, 0xd3, 0x6e, 0x9a, 0xd8, 0x94, 0x06, 0x32, 0x61, 0xf5, 0xb1, 0xbe, 0x62, + 0x5d, 0xae, 0x27, 0x7b, 0xf4, 0x02, 0xca, 0xc9, 0xfa, 0x69, 0xc4, 0xeb, 0x39, 0x99, 0x23, 0x77, + 0x32, 0x72, 0x24, 0xed, 0x89, 0x9d, 0x82, 0xea, 0x0c, 0x49, 0x49, 0xd0, 0xc7, 0x70, 0xe9, 0x60, + 0x10, 0x51, 
0xc6, 0x1f, 0x3a, 0xbd, 0x57, 0x04, 0x4f, 0x4e, 0x81, 0x7c, 0x23, 0xd7, 0x2c, 0xe1, + 0xb3, 0x15, 0xd0, 0x16, 0x5c, 0x70, 0xfa, 0x7d, 0xfa, 0x56, 0x17, 0x8d, 0x4c, 0xff, 0x7a, 0xa1, + 0x61, 0x34, 0x57, 0xf1, 0xe9, 0x0f, 0xe8, 0x03, 0xb8, 0x98, 0x12, 0x3e, 0x60, 0xcc, 0x19, 0x89, + 0x7c, 0x29, 0x4a, 0xfd, 0x59, 0x9f, 0x44, 0x07, 0x7b, 0x1c, 0x84, 0x4e, 0xbf, 0x0e, 0x52, 0x47, + 0x6d, 0x90, 0x05, 0x95, 0x47, 0xdf, 0x0a, 0x97, 0x08, 0x7b, 0xc0, 0x39, 0xab, 0x97, 0xe5, 0x55, + 0x4c, 0xc8, 0x50, 0x07, 0x2a, 0xd2, 0x61, 0xe5, 0x7b, 0x5c, 0xaf, 0x48, 0xd2, 0xb6, 0x32, 0x48, + 0x93, 0xea, 0x4f, 0xa3, 0x54, 0x29, 0x4d, 0x58, 0x40, 0x3d, 0x58, 0x4b, 0x88, 0x53, 0x35, 0x58, + 0xaf, 0x4a, 0x9b, 0x77, 0x17, 0xbd, 0x08, 0x85, 0x56, 0x47, 0x4c, 0x99, 0x34, 0xef, 0x41, 0x6d, + 0xfa, 0xbe, 0x16, 0x69, 0xec, 0xe6, 0xe7, 0x70, 0x71, 0xc6, 0x31, 0xff, 0xaa, 0xe6, 0x7f, 0x33, + 0xe0, 0xc2, 0x29, 0x6e, 0x10, 0x82, 0xfc, 0x17, 0xa3, 0x88, 0x68, 0x93, 0x72, 0x8d, 0x8e, 0xa0, + 0x20, 0xb8, 0x8f, 0xeb, 0xcb, 0x92, 0x98, 0x9d, 0x45, 0xc8, 0xb6, 0x25, 0x52, 0x91, 0xa2, 0xac, + 0x98, 0x77, 0x00, 0xc6, 0xc2, 0x85, 0xc6, 0xdb, 0x4b, 0xa8, 0x6a, 0xe6, 0x75, 0x0b, 0xa8, 0xa9, + 0x97, 0x88, 0x06, 0x8b, 0x77, 0xc6, 0x78, 0x24, 0xe4, 0x16, 0x1c, 0x09, 0xd6, 0x77, 0xb0, 0x8e, + 0x89, 0xe3, 0x3e, 0x0e, 0xfa, 0xe4, 0xec, 0xce, 0x27, 0xea, 0x39, 0xe8, 0x93, 0x8e, 0xc3, 0x5f, + 0x9d, 0xd4, 0xb3, 0xde, 0xa3, 0x5d, 0x28, 0x60, 0x27, 0xf4, 0x89, 0x3e, 0xfa, 0x4a, 0xc6, 0xd1, + 0xf2, 0x10, 0xa1, 0x8b, 0x15, 0xc4, 0xba, 0x0b, 0xa5, 0x13, 0x99, 0xe8, 0x46, 0x4f, 0x3d, 0x2f, + 0x26, 0xaa, 0xb3, 0xe5, 0xb0, 0xde, 0x09, 0xf9, 0x21, 0x09, 0x7d, 0x7d, 0x74, 0x0e, 0xeb, 0x9d, + 0xb5, 0x29, 0x9e, 0x23, 0x89, 0xe7, 0x9a, 0x1a, 0x04, 0xf9, 0x3d, 0xf1, 0x66, 0x32, 0x64, 0x11, + 0xc9, 0xb5, 0xe5, 0x8a, 0x51, 0xe6, 0xb8, 0x7b, 0x01, 0x3b, 0x3b, 0xc0, 0x3a, 0xac, 0xec, 0x05, + 0x2c, 0x15, 0x5f, 0xb2, 0x45, 0x9b, 0x62, 0xc8, 0xf5, 0xfa, 0x43, 0x57, 0x44, 0xcb, 0x09, 0x0b, + 0x75, 0x37, 0x9f, 0x92, 0x5a, 0xf7, 0x15, 0x8f, 0xf2, 0x14, 0xed, 0xcc, 0x16, 0xac, 0x90, 0x90, + 0xb3, 0x80, 0x24, 0x93, 0x10, 0xd9, 0xea, 0x99, 0x6b, 0xcb, 0x67, 0xae, 0x9c, 0xb8, 0x38, 0x51, + 0xb1, 0x76, 0x60, 0x5d, 0x08, 0xb2, 0x2f, 0x02, 0x41, 0x3e, 0xe5, 0xa4, 0x5c, 0x5b, 0xbb, 0x50, + 0x1b, 0x03, 0xf5, 0xd1, 0x9b, 0x90, 0x17, 0x8f, 0x68, 0xdd, 0xaa, 0x67, 0x9d, 0x2b, 0xbf, 0x5b, + 0x55, 0x28, 0x77, 0x82, 0x30, 0x99, 0x79, 0xd6, 0xb1, 0x01, 0x95, 0x0e, 0x0d, 0xc7, 0xd3, 0xa6, + 0x03, 0xeb, 0x49, 0x05, 0x3e, 0xe8, 0x1c, 0x3c, 0x74, 0xa2, 0x24, 0x94, 0xc6, 0xe9, 0x6b, 0xd6, + 0xef, 0x7d, 0x5b, 0x29, 0xb6, 0xf3, 0x62, 0x30, 0xe1, 0x69, 0x38, 0xfa, 0x04, 0x56, 0x0e, 0x0f, + 0xdb, 0xd2, 0xd2, 0xf2, 0x42, 0x96, 0x12, 0x18, 0xba, 0x07, 0x2b, 0xcf, 0xe5, 0xdf, 0x90, 0x58, + 0x0f, 0x8f, 0x19, 0x29, 0xa7, 0x02, 0x55, 0x6a, 0x98, 0xf4, 0x28, 0x73, 0x71, 0x02, 0xda, 0xfe, + 0xb5, 0x08, 0xa5, 0xc3, 0xc3, 0x76, 0x9b, 0x05, 0xae, 0x4f, 0xd0, 0x0f, 0x06, 0xa0, 0xd3, 0xe3, + 0x16, 0xdd, 0xca, 0xae, 0xa0, 0xd9, 0x6f, 0x06, 0xf3, 0xf6, 0x82, 0x28, 0xcd, 0xf2, 0x0b, 0x28, + 0xc8, 0x0a, 0x47, 0xff, 0x3f, 0x67, 0xf7, 0x35, 0x9b, 0xf3, 0x15, 0xb5, 0xed, 0x1e, 0xac, 0x26, + 0x55, 0x82, 0xae, 0x65, 0xba, 0x37, 0xd1, 0x04, 0xcc, 0xeb, 0xe7, 0xd2, 0xd5, 0x87, 0x7c, 0x0d, + 0x2b, 0x3a, 0xf9, 0xd1, 0xd5, 0x39, 0xb8, 0x71, 0x19, 0x9a, 0xd7, 0xce, 0xa3, 0x3a, 0x0e, 0x23, + 0x49, 0xf2, 0xcc, 0x30, 0xa6, 0x4a, 0x28, 0x33, 0x8c, 0x53, 0x55, 0xf3, 0x1c, 0xf2, 0xa2, 0x1a, + 0xd0, 0x66, 0x06, 0x28, 0x55, 0x2e, 0x66, 0xd6, 0x75, 0x4d, 0x94, 0xd1, 0x57, 0xe2, 0x7f, 0x86, + 0x7c, 0x35, 0x34, 0x33, 0x63, 0x4e, 
0x3d, 0xf3, 0xcd, 0xab, 0xe7, 0xd0, 0x1c, 0x9b, 0x57, 0xf3, + 0x31, 0xd3, 0xfc, 0xc4, 0xeb, 0x3d, 0xd3, 0xfc, 0xe4, 0xab, 0xbc, 0x5d, 0x79, 0x77, 0xbc, 0x61, + 0xfc, 0x71, 0xbc, 0x61, 0xfc, 0x7d, 0xbc, 0x61, 0x74, 0x8b, 0xf2, 0x8f, 0xf9, 0xcd, 0x7f, 0x02, + 0x00, 0x00, 0xff, 0xff, 0xa9, 0x05, 0x0f, 0x9d, 0xea, 0x10, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// LLBBridgeClient is the client API for LLBBridge service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type LLBBridgeClient interface { + // apicaps:CapResolveImage + ResolveImageConfig(ctx context.Context, in *ResolveImageConfigRequest, opts ...grpc.CallOption) (*ResolveImageConfigResponse, error) + // apicaps:CapSolveBase + Solve(ctx context.Context, in *SolveRequest, opts ...grpc.CallOption) (*SolveResponse, error) + // apicaps:CapReadFile + ReadFile(ctx context.Context, in *ReadFileRequest, opts ...grpc.CallOption) (*ReadFileResponse, error) + // apicaps:CapReadDir + ReadDir(ctx context.Context, in *ReadDirRequest, opts ...grpc.CallOption) (*ReadDirResponse, error) + // apicaps:CapStatFile + StatFile(ctx context.Context, in *StatFileRequest, opts ...grpc.CallOption) (*StatFileResponse, error) + Ping(ctx context.Context, in *PingRequest, opts ...grpc.CallOption) (*PongResponse, error) + Return(ctx context.Context, in *ReturnRequest, opts ...grpc.CallOption) (*ReturnResponse, error) + // apicaps:CapFrontendInputs + Inputs(ctx context.Context, in *InputsRequest, opts ...grpc.CallOption) (*InputsResponse, error) +} + +type lLBBridgeClient struct { + cc *grpc.ClientConn +} + +func NewLLBBridgeClient(cc *grpc.ClientConn) LLBBridgeClient { + return &lLBBridgeClient{cc} +} + +func (c *lLBBridgeClient) ResolveImageConfig(ctx context.Context, in *ResolveImageConfigRequest, opts ...grpc.CallOption) (*ResolveImageConfigResponse, error) { + out := new(ResolveImageConfigResponse) + err := c.cc.Invoke(ctx, "/moby.buildkit.v1.frontend.LLBBridge/ResolveImageConfig", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *lLBBridgeClient) Solve(ctx context.Context, in *SolveRequest, opts ...grpc.CallOption) (*SolveResponse, error) { + out := new(SolveResponse) + err := c.cc.Invoke(ctx, "/moby.buildkit.v1.frontend.LLBBridge/Solve", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *lLBBridgeClient) ReadFile(ctx context.Context, in *ReadFileRequest, opts ...grpc.CallOption) (*ReadFileResponse, error) { + out := new(ReadFileResponse) + err := c.cc.Invoke(ctx, "/moby.buildkit.v1.frontend.LLBBridge/ReadFile", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *lLBBridgeClient) ReadDir(ctx context.Context, in *ReadDirRequest, opts ...grpc.CallOption) (*ReadDirResponse, error) { + out := new(ReadDirResponse) + err := c.cc.Invoke(ctx, "/moby.buildkit.v1.frontend.LLBBridge/ReadDir", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *lLBBridgeClient) StatFile(ctx context.Context, in *StatFileRequest, opts ...grpc.CallOption) (*StatFileResponse, error) { + out := new(StatFileResponse) + err := c.cc.Invoke(ctx, "/moby.buildkit.v1.frontend.LLBBridge/StatFile", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *lLBBridgeClient) Ping(ctx context.Context, in *PingRequest, opts ...grpc.CallOption) (*PongResponse, error) { + out := new(PongResponse) + err := c.cc.Invoke(ctx, "/moby.buildkit.v1.frontend.LLBBridge/Ping", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *lLBBridgeClient) Return(ctx context.Context, in *ReturnRequest, opts ...grpc.CallOption) (*ReturnResponse, error) { + out := new(ReturnResponse) + err := c.cc.Invoke(ctx, "/moby.buildkit.v1.frontend.LLBBridge/Return", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *lLBBridgeClient) Inputs(ctx context.Context, in *InputsRequest, opts ...grpc.CallOption) (*InputsResponse, error) { + out := new(InputsResponse) + err := c.cc.Invoke(ctx, "/moby.buildkit.v1.frontend.LLBBridge/Inputs", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// LLBBridgeServer is the server API for LLBBridge service. +type LLBBridgeServer interface { + // apicaps:CapResolveImage + ResolveImageConfig(context.Context, *ResolveImageConfigRequest) (*ResolveImageConfigResponse, error) + // apicaps:CapSolveBase + Solve(context.Context, *SolveRequest) (*SolveResponse, error) + // apicaps:CapReadFile + ReadFile(context.Context, *ReadFileRequest) (*ReadFileResponse, error) + // apicaps:CapReadDir + ReadDir(context.Context, *ReadDirRequest) (*ReadDirResponse, error) + // apicaps:CapStatFile + StatFile(context.Context, *StatFileRequest) (*StatFileResponse, error) + Ping(context.Context, *PingRequest) (*PongResponse, error) + Return(context.Context, *ReturnRequest) (*ReturnResponse, error) + // apicaps:CapFrontendInputs + Inputs(context.Context, *InputsRequest) (*InputsResponse, error) +} + +// UnimplementedLLBBridgeServer can be embedded to have forward compatible implementations. 
+type UnimplementedLLBBridgeServer struct { +} + +func (*UnimplementedLLBBridgeServer) ResolveImageConfig(ctx context.Context, req *ResolveImageConfigRequest) (*ResolveImageConfigResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ResolveImageConfig not implemented") +} +func (*UnimplementedLLBBridgeServer) Solve(ctx context.Context, req *SolveRequest) (*SolveResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Solve not implemented") +} +func (*UnimplementedLLBBridgeServer) ReadFile(ctx context.Context, req *ReadFileRequest) (*ReadFileResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ReadFile not implemented") +} +func (*UnimplementedLLBBridgeServer) ReadDir(ctx context.Context, req *ReadDirRequest) (*ReadDirResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ReadDir not implemented") +} +func (*UnimplementedLLBBridgeServer) StatFile(ctx context.Context, req *StatFileRequest) (*StatFileResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method StatFile not implemented") +} +func (*UnimplementedLLBBridgeServer) Ping(ctx context.Context, req *PingRequest) (*PongResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Ping not implemented") +} +func (*UnimplementedLLBBridgeServer) Return(ctx context.Context, req *ReturnRequest) (*ReturnResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Return not implemented") +} +func (*UnimplementedLLBBridgeServer) Inputs(ctx context.Context, req *InputsRequest) (*InputsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Inputs not implemented") +} + +func RegisterLLBBridgeServer(s *grpc.Server, srv LLBBridgeServer) { + s.RegisterService(&_LLBBridge_serviceDesc, srv) +} + +func _LLBBridge_ResolveImageConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ResolveImageConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LLBBridgeServer).ResolveImageConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/moby.buildkit.v1.frontend.LLBBridge/ResolveImageConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LLBBridgeServer).ResolveImageConfig(ctx, req.(*ResolveImageConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _LLBBridge_Solve_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SolveRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LLBBridgeServer).Solve(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/moby.buildkit.v1.frontend.LLBBridge/Solve", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LLBBridgeServer).Solve(ctx, req.(*SolveRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _LLBBridge_ReadFile_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ReadFileRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LLBBridgeServer).ReadFile(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/moby.buildkit.v1.frontend.LLBBridge/ReadFile", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LLBBridgeServer).ReadFile(ctx, req.(*ReadFileRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _LLBBridge_ReadDir_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ReadDirRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LLBBridgeServer).ReadDir(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/moby.buildkit.v1.frontend.LLBBridge/ReadDir", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LLBBridgeServer).ReadDir(ctx, req.(*ReadDirRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _LLBBridge_StatFile_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StatFileRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LLBBridgeServer).StatFile(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/moby.buildkit.v1.frontend.LLBBridge/StatFile", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LLBBridgeServer).StatFile(ctx, req.(*StatFileRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _LLBBridge_Ping_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PingRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LLBBridgeServer).Ping(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/moby.buildkit.v1.frontend.LLBBridge/Ping", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LLBBridgeServer).Ping(ctx, req.(*PingRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _LLBBridge_Return_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ReturnRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LLBBridgeServer).Return(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/moby.buildkit.v1.frontend.LLBBridge/Return", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LLBBridgeServer).Return(ctx, req.(*ReturnRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _LLBBridge_Inputs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(InputsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LLBBridgeServer).Inputs(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/moby.buildkit.v1.frontend.LLBBridge/Inputs", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LLBBridgeServer).Inputs(ctx, req.(*InputsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _LLBBridge_serviceDesc = grpc.ServiceDesc{ + ServiceName: "moby.buildkit.v1.frontend.LLBBridge", + HandlerType: 
(*LLBBridgeServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ResolveImageConfig", + Handler: _LLBBridge_ResolveImageConfig_Handler, + }, + { + MethodName: "Solve", + Handler: _LLBBridge_Solve_Handler, + }, + { + MethodName: "ReadFile", + Handler: _LLBBridge_ReadFile_Handler, + }, + { + MethodName: "ReadDir", + Handler: _LLBBridge_ReadDir_Handler, + }, + { + MethodName: "StatFile", + Handler: _LLBBridge_StatFile_Handler, + }, + { + MethodName: "Ping", + Handler: _LLBBridge_Ping_Handler, + }, + { + MethodName: "Return", + Handler: _LLBBridge_Return_Handler, + }, + { + MethodName: "Inputs", + Handler: _LLBBridge_Inputs_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "gateway.proto", +} + +func (m *Result) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Result) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Result) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Metadata) > 0 { + for k := range m.Metadata { + v := m.Metadata[k] + baseI := i + if len(v) > 0 { + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGateway(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + } + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintGateway(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintGateway(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x52 + } + } + if m.Result != nil { + { + size := m.Result.Size() + i -= size + if _, err := m.Result.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + return len(dAtA) - i, nil +} + +func (m *Result_RefDeprecated) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Result_RefDeprecated) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.RefDeprecated) + copy(dAtA[i:], m.RefDeprecated) + i = encodeVarintGateway(dAtA, i, uint64(len(m.RefDeprecated))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} +func (m *Result_RefsDeprecated) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Result_RefsDeprecated) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.RefsDeprecated != nil { + { + size, err := m.RefsDeprecated.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *Result_Ref) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Result_Ref) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Ref != nil { + { + size, err := m.Ref.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *Result_Refs) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Result_Refs) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Refs != nil { + { + size, err := m.Refs.MarshalToSizedBuffer(dAtA[:i]) + if err != 
nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *RefMapDeprecated) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RefMapDeprecated) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RefMapDeprecated) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Refs) > 0 { + for k := range m.Refs { + v := m.Refs[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGateway(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintGateway(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintGateway(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Ref) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Ref) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Ref) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Def != nil { + { + size, err := m.Def.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Id) > 0 { + i -= len(m.Id) + copy(dAtA[i:], m.Id) + i = encodeVarintGateway(dAtA, i, uint64(len(m.Id))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RefMap) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RefMap) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RefMap) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Refs) > 0 { + for k := range m.Refs { + v := m.Refs[k] + baseI := i + if v != nil { + { + size, err := v.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintGateway(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintGateway(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ReturnRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReturnRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReturnRequest) MarshalToSizedBuffer(dAtA []byte) 
(int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Error != nil { + { + size, err := m.Error.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Result != nil { + { + size, err := m.Result.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ReturnResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReturnResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReturnResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *InputsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *InputsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *InputsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *InputsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *InputsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *InputsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Definitions) > 0 { + for k := range m.Definitions { + v := m.Definitions[k] + baseI := i + if v != nil { + { + size, err := v.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintGateway(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintGateway(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ResolveImageConfigRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResolveImageConfigRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResolveImageConfigRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], 
m.XXX_unrecognized) + } + if len(m.LogName) > 0 { + i -= len(m.LogName) + copy(dAtA[i:], m.LogName) + i = encodeVarintGateway(dAtA, i, uint64(len(m.LogName))) + i-- + dAtA[i] = 0x22 + } + if len(m.ResolveMode) > 0 { + i -= len(m.ResolveMode) + copy(dAtA[i:], m.ResolveMode) + i = encodeVarintGateway(dAtA, i, uint64(len(m.ResolveMode))) + i-- + dAtA[i] = 0x1a + } + if m.Platform != nil { + { + size, err := m.Platform.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Ref) > 0 { + i -= len(m.Ref) + copy(dAtA[i:], m.Ref) + i = encodeVarintGateway(dAtA, i, uint64(len(m.Ref))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ResolveImageConfigResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResolveImageConfigResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResolveImageConfigResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Config) > 0 { + i -= len(m.Config) + copy(dAtA[i:], m.Config) + i = encodeVarintGateway(dAtA, i, uint64(len(m.Config))) + i-- + dAtA[i] = 0x12 + } + if len(m.Digest) > 0 { + i -= len(m.Digest) + copy(dAtA[i:], m.Digest) + i = encodeVarintGateway(dAtA, i, uint64(len(m.Digest))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SolveRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SolveRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SolveRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.FrontendInputs) > 0 { + for k := range m.FrontendInputs { + v := m.FrontendInputs[k] + baseI := i + if v != nil { + { + size, err := v.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintGateway(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintGateway(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x6a + } + } + if len(m.CacheImports) > 0 { + for iNdEx := len(m.CacheImports) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.CacheImports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + } + } + if len(m.ExporterAttr) > 0 { + i -= len(m.ExporterAttr) + copy(dAtA[i:], m.ExporterAttr) + i = encodeVarintGateway(dAtA, i, uint64(len(m.ExporterAttr))) + i-- + dAtA[i] = 0x5a + } + if m.Final { + i-- + if m.Final { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x50 + } + if m.AllowResultArrayRef { + i-- + if m.AllowResultArrayRef { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if m.AllowResultReturn { + i-- 
+ if m.AllowResultReturn { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if len(m.ImportCacheRefsDeprecated) > 0 { + for iNdEx := len(m.ImportCacheRefsDeprecated) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ImportCacheRefsDeprecated[iNdEx]) + copy(dAtA[i:], m.ImportCacheRefsDeprecated[iNdEx]) + i = encodeVarintGateway(dAtA, i, uint64(len(m.ImportCacheRefsDeprecated[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if len(m.FrontendOpt) > 0 { + for k := range m.FrontendOpt { + v := m.FrontendOpt[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGateway(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintGateway(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintGateway(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Frontend) > 0 { + i -= len(m.Frontend) + copy(dAtA[i:], m.Frontend) + i = encodeVarintGateway(dAtA, i, uint64(len(m.Frontend))) + i-- + dAtA[i] = 0x12 + } + if m.Definition != nil { + { + size, err := m.Definition.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CacheOptionsEntry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CacheOptionsEntry) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CacheOptionsEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Attrs) > 0 { + for k := range m.Attrs { + v := m.Attrs[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGateway(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintGateway(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintGateway(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Type) > 0 { + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGateway(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SolveResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SolveResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SolveResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Result != nil { + { + size, err := m.Result.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.Ref) > 0 { + i -= len(m.Ref) + copy(dAtA[i:], m.Ref) + i = encodeVarintGateway(dAtA, i, uint64(len(m.Ref))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ReadFileRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + 
return nil, err + } + return dAtA[:n], nil +} + +func (m *ReadFileRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReadFileRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Range != nil { + { + size, err := m.Range.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.FilePath) > 0 { + i -= len(m.FilePath) + copy(dAtA[i:], m.FilePath) + i = encodeVarintGateway(dAtA, i, uint64(len(m.FilePath))) + i-- + dAtA[i] = 0x12 + } + if len(m.Ref) > 0 { + i -= len(m.Ref) + copy(dAtA[i:], m.Ref) + i = encodeVarintGateway(dAtA, i, uint64(len(m.Ref))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *FileRange) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FileRange) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FileRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Length != 0 { + i = encodeVarintGateway(dAtA, i, uint64(m.Length)) + i-- + dAtA[i] = 0x10 + } + if m.Offset != 0 { + i = encodeVarintGateway(dAtA, i, uint64(m.Offset)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ReadFileResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReadFileResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReadFileResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Data) > 0 { + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintGateway(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ReadDirRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReadDirRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReadDirRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.IncludePattern) > 0 { + i -= len(m.IncludePattern) + copy(dAtA[i:], m.IncludePattern) + i = encodeVarintGateway(dAtA, i, uint64(len(m.IncludePattern))) + i-- + dAtA[i] = 0x1a + } + if len(m.DirPath) > 0 { + i -= len(m.DirPath) + copy(dAtA[i:], m.DirPath) + i = encodeVarintGateway(dAtA, i, uint64(len(m.DirPath))) + i-- + dAtA[i] = 0x12 + } + if len(m.Ref) > 0 { + i -= len(m.Ref) + copy(dAtA[i:], m.Ref) + i = 
encodeVarintGateway(dAtA, i, uint64(len(m.Ref))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ReadDirResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReadDirResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReadDirResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Entries) > 0 { + for iNdEx := len(m.Entries) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Entries[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *StatFileRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatFileRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatFileRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGateway(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0x12 + } + if len(m.Ref) > 0 { + i -= len(m.Ref) + copy(dAtA[i:], m.Ref) + i = encodeVarintGateway(dAtA, i, uint64(len(m.Ref))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *StatFileResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatFileResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatFileResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Stat != nil { + { + size, err := m.Stat.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *PingRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PingRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PingRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *PongResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, 
err + } + return dAtA[:n], nil +} + +func (m *PongResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PongResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Workers) > 0 { + for iNdEx := len(m.Workers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Workers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.LLBCaps) > 0 { + for iNdEx := len(m.LLBCaps) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.LLBCaps[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.FrontendAPICaps) > 0 { + for iNdEx := len(m.FrontendAPICaps) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.FrontendAPICaps[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintGateway(dAtA []byte, offset int, v uint64) int { + offset -= sovGateway(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Result) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Result != nil { + n += m.Result.Size() + } + if len(m.Metadata) > 0 { + for k, v := range m.Metadata { + _ = k + _ = v + l = 0 + if len(v) > 0 { + l = 1 + len(v) + sovGateway(uint64(len(v))) + } + mapEntrySize := 1 + len(k) + sovGateway(uint64(len(k))) + l + n += mapEntrySize + 1 + sovGateway(uint64(mapEntrySize)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Result_RefDeprecated) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.RefDeprecated) + n += 1 + l + sovGateway(uint64(l)) + return n +} +func (m *Result_RefsDeprecated) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RefsDeprecated != nil { + l = m.RefsDeprecated.Size() + n += 1 + l + sovGateway(uint64(l)) + } + return n +} +func (m *Result_Ref) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Ref != nil { + l = m.Ref.Size() + n += 1 + l + sovGateway(uint64(l)) + } + return n +} +func (m *Result_Refs) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Refs != nil { + l = m.Refs.Size() + n += 1 + l + sovGateway(uint64(l)) + } + return n +} +func (m *RefMapDeprecated) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Refs) > 0 { + for k, v := range m.Refs { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGateway(uint64(len(k))) + 1 + len(v) + sovGateway(uint64(len(v))) + n += mapEntrySize + 1 + sovGateway(uint64(mapEntrySize)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Ref) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Id) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + if m.Def != nil { + l = m.Def.Size() + n += 1 + l + sovGateway(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *RefMap) 
Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Refs) > 0 { + for k, v := range m.Refs { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovGateway(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovGateway(uint64(len(k))) + l + n += mapEntrySize + 1 + sovGateway(uint64(mapEntrySize)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ReturnRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Result != nil { + l = m.Result.Size() + n += 1 + l + sovGateway(uint64(l)) + } + if m.Error != nil { + l = m.Error.Size() + n += 1 + l + sovGateway(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ReturnResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *InputsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *InputsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Definitions) > 0 { + for k, v := range m.Definitions { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovGateway(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovGateway(uint64(len(k))) + l + n += mapEntrySize + 1 + sovGateway(uint64(mapEntrySize)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ResolveImageConfigRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Ref) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + if m.Platform != nil { + l = m.Platform.Size() + n += 1 + l + sovGateway(uint64(l)) + } + l = len(m.ResolveMode) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + l = len(m.LogName) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ResolveImageConfigResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Digest) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + l = len(m.Config) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *SolveRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Definition != nil { + l = m.Definition.Size() + n += 1 + l + sovGateway(uint64(l)) + } + l = len(m.Frontend) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + if len(m.FrontendOpt) > 0 { + for k, v := range m.FrontendOpt { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGateway(uint64(len(k))) + 1 + len(v) + sovGateway(uint64(len(v))) + n += mapEntrySize + 1 + sovGateway(uint64(mapEntrySize)) + } + } + if len(m.ImportCacheRefsDeprecated) > 0 { + for _, s := range m.ImportCacheRefsDeprecated { + l = len(s) + n += 1 + l + sovGateway(uint64(l)) + } + } + if m.AllowResultReturn { + n += 2 + } + if m.AllowResultArrayRef { + n += 2 + } + if m.Final { + n += 2 + } + l = len(m.ExporterAttr) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + if len(m.CacheImports) > 0 { + for _, e := range m.CacheImports { + l = e.Size() + n += 1 + l + sovGateway(uint64(l)) + } + } + if len(m.FrontendInputs) > 0 { + for k, v := range m.FrontendInputs { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + 
sovGateway(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovGateway(uint64(len(k))) + l + n += mapEntrySize + 1 + sovGateway(uint64(mapEntrySize)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *CacheOptionsEntry) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + if len(m.Attrs) > 0 { + for k, v := range m.Attrs { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGateway(uint64(len(k))) + 1 + len(v) + sovGateway(uint64(len(v))) + n += mapEntrySize + 1 + sovGateway(uint64(mapEntrySize)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *SolveResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Ref) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + if m.Result != nil { + l = m.Result.Size() + n += 1 + l + sovGateway(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ReadFileRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Ref) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + l = len(m.FilePath) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + if m.Range != nil { + l = m.Range.Size() + n += 1 + l + sovGateway(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *FileRange) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Offset != 0 { + n += 1 + sovGateway(uint64(m.Offset)) + } + if m.Length != 0 { + n += 1 + sovGateway(uint64(m.Length)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ReadFileResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Data) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ReadDirRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Ref) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + l = len(m.DirPath) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + l = len(m.IncludePattern) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ReadDirResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Entries) > 0 { + for _, e := range m.Entries { + l = e.Size() + n += 1 + l + sovGateway(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *StatFileRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Ref) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + l = len(m.Path) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *StatFileResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Stat != nil { + l = m.Stat.Size() + n += 1 + l + sovGateway(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *PingRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *PongResponse) Size() (n int) { + if m == nil { + 
return 0 + } + var l int + _ = l + if len(m.FrontendAPICaps) > 0 { + for _, e := range m.FrontendAPICaps { + l = e.Size() + n += 1 + l + sovGateway(uint64(l)) + } + } + if len(m.LLBCaps) > 0 { + for _, e := range m.LLBCaps { + l = e.Size() + n += 1 + l + sovGateway(uint64(l)) + } + } + if len(m.Workers) > 0 { + for _, e := range m.Workers { + l = e.Size() + n += 1 + l + sovGateway(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovGateway(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGateway(x uint64) (n int) { + return sovGateway(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Result) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Result: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Result: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RefDeprecated", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Result = &Result_RefDeprecated{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RefsDeprecated", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RefMapDeprecated{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Result = &Result_RefsDeprecated{v} + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Ref{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Result = &Result_Ref{v} + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field Refs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RefMap{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Result = &Result_Refs{v} + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Metadata == nil { + m.Metadata = make(map[string][]byte) + } + var mapkey string + mapvalue := []byte{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGateway + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGateway + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapbyteLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapbyteLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intMapbyteLen := int(mapbyteLen) + if intMapbyteLen < 0 { + return ErrInvalidLengthGateway + } + postbytesIndex := iNdEx + intMapbyteLen + if postbytesIndex < 0 { + return ErrInvalidLengthGateway + } + if postbytesIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = make([]byte, mapbyteLen) + copy(mapvalue, dAtA[iNdEx:postbytesIndex]) + iNdEx = postbytesIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Metadata[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGateway + } 
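+ // Unknown field numbers are not an error: skipGateway measures them,
+ // the bounds checks here reject truncated input, and the raw bytes
+ // are appended to XXX_unrecognized so they survive a re-marshal.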
+ if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RefMapDeprecated) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RefMapDeprecated: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RefMapDeprecated: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Refs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Refs == nil { + m.Refs = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGateway + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGateway + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGateway + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGateway + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > postIndex { + return 
io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Refs[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Ref) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Ref: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Ref: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Id = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Def", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Def == nil { + m.Def = &pb.Definition{} + } + if err := m.Def.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
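+ // Every tag, length, and integer in these decoders is a base-128
+ // varint: seven payload bits per byte, continuation bit set on all
+ // but the last byte. sovGateway's closed form,
+ // (math_bits.Len64(x|1)+6)/7, matches this hypothetical reference
+ // loop (a sketch for illustration, not part of the generated API):
+ //
+ //	func varintLen(x uint64) int {
+ //		n := 1
+ //		for x >= 0x80 { // continuation bit would be set
+ //			x >>= 7
+ //			n++
+ //		}
+ //		return n
+ //	}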
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RefMap) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RefMap: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RefMap: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Refs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Refs == nil { + m.Refs = make(map[string]*Ref) + } + var mapkey string + var mapvalue *Ref + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGateway + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGateway + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGateway + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGateway + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &Ref{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Refs[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGateway + } + if 
(iNdEx + skippy) < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReturnRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReturnRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReturnRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Result == nil { + m.Result = &Result{} + } + if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Error == nil { + m.Error = &rpc.Status{} + } + if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
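+ // A ReturnRequest delivers the frontend's final Result (field 1) or,
+ // on failure, an rpc.Status error (field 2); each target message is
+ // allocated only once its field actually appears on the wire.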
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReturnResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReturnResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReturnResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *InputsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: InputsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: InputsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
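+ // ReturnResponse and InputsRequest declare no fields of their own, so
+ // their decode loops can only validate framing and stash unknown data.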
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *InputsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: InputsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: InputsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Definitions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Definitions == nil { + m.Definitions = make(map[string]*pb.Definition) + } + var mapkey string + var mapvalue *pb.Definition + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGateway + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGateway + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGateway + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGateway + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &pb.Definition{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Definitions[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil 
{ + return err + } + if skippy < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResolveImageConfigRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResolveImageConfigRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResolveImageConfigRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ref = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Platform", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Platform == nil { + m.Platform = &pb.Platform{} + } + if err := m.Platform.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResolveMode", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResolveMode = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LogName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 
0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LogName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResolveImageConfigResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResolveImageConfigResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResolveImageConfigResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Config = append(m.Config[:0], dAtA[iNdEx:postIndex]...) + if m.Config == nil { + m.Config = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
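+ // Digest above is a plain wire string converted directly to the
+ // opencontainers go-digest string type (a gogoproto casttype); no
+ // digest validation happens at decode time.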
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SolveRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SolveRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SolveRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Definition", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Definition == nil { + m.Definition = &pb.Definition{} + } + if err := m.Definition.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Frontend", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Frontend = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FrontendOpt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.FrontendOpt == nil { + m.FrontendOpt = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := 
int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGateway + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGateway + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGateway + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGateway + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.FrontendOpt[mapkey] = mapvalue + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImportCacheRefsDeprecated", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ImportCacheRefsDeprecated = append(m.ImportCacheRefsDeprecated, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowResultReturn", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AllowResultReturn = bool(v != 0) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowResultArrayRef", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AllowResultArrayRef = bool(v != 0) + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Final", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Final = bool(v != 0) + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExporterAttr", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
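+ // ten 7-bit groups already exhaust a uint64, so a varint still
+ // running at shift 64 is malformed rather than merely large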
return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExporterAttr = append(m.ExporterAttr[:0], dAtA[iNdEx:postIndex]...) + if m.ExporterAttr == nil { + m.ExporterAttr = []byte{} + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CacheImports", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CacheImports = append(m.CacheImports, &CacheOptionsEntry{}) + if err := m.CacheImports[len(m.CacheImports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FrontendInputs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.FrontendInputs == nil { + m.FrontendInputs = make(map[string]*pb.Definition) + } + var mapkey string + var mapvalue *pb.Definition + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGateway + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGateway + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGateway + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGateway + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = 
&pb.Definition{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.FrontendInputs[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CacheOptionsEntry) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CacheOptionsEntry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CacheOptionsEntry: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attrs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Attrs == nil { + m.Attrs = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGateway + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGateway + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGateway + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGateway + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Attrs[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
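+ // The Attrs case above decodes each map item as a nested entry
+ // message whose field 1 is the key and field 2 the value; the inner
+ // loop accepts the two in either order and skips anything else.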
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SolveResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SolveResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SolveResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ref = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Result == nil { + m.Result = &Result{} + } + if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
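+ // SolveResponse jumps from field 1 to field 3: gaps in field numbers
+ // are legal on the wire and typically mark a removed field.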
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReadFileRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReadFileRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReadFileRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ref = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FilePath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FilePath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Range", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Range == nil { + m.Range = &FileRange{} + } + if err := m.Range.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
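+ // Message-typed fields such as Range are allocated on first sight and
+ // then decoded into in place, so a duplicate occurrence merges with
+ // the earlier value per standard proto semantics.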
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FileRange) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FileRange: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FileRange: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType) + } + m.Offset = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Offset |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Length", wireType) + } + m.Length = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Length |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReadFileResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReadFileResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReadFileResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
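+ // Appending into m.Data[:0] reuses any existing backing array; the
+ // nil check that follows turns a present-but-empty bytes field into
+ // []byte{} so it stays distinguishable from an absent one.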
+ if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReadDirRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReadDirRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReadDirRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ref = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DirPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DirPath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IncludePattern", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IncludePattern = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > l { + return 
io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReadDirResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReadDirResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReadDirResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Entries", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Entries = append(m.Entries, &types.Stat{}) + if err := m.Entries[len(m.Entries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatFileRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatFileRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatFileRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ref = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatFileResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatFileResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatFileResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stat", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Stat == nil { + m.Stat = &types.Stat{} + } + if err := m.Stat.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PingRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PingRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PingRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PongResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PongResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PongResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FrontendAPICaps", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FrontendAPICaps = append(m.FrontendAPICaps, pb1.APICap{}) + if err := m.FrontendAPICaps[len(m.FrontendAPICaps)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LLBCaps", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LLBCaps = append(m.LLBCaps, pb1.APICap{}) + if err := m.LLBCaps[len(m.LLBCaps)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Workers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Workers = append(m.Workers, &types1.WorkerRecord{}) + if err := m.Workers[len(m.Workers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
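+	// Unrecognized fields are retained above so that re-marshaling round-trips them.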
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGateway(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGateway + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGateway + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGateway + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGateway + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGateway + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGateway + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGateway = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGateway = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGateway = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.proto b/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.proto new file mode 100644 index 00000000..44131477 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.proto @@ -0,0 +1,164 @@ +syntax = "proto3"; + +package moby.buildkit.v1.frontend; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +import "github.com/gogo/googleapis/google/rpc/status.proto"; +import "github.com/moby/buildkit/solver/pb/ops.proto"; +import "github.com/moby/buildkit/api/types/worker.proto"; +import "github.com/moby/buildkit/util/apicaps/pb/caps.proto"; +import "github.com/tonistiigi/fsutil/types/stat.proto"; + +option (gogoproto.sizer_all) = true; +option (gogoproto.marshaler_all) = true; +option (gogoproto.unmarshaler_all) = true; + +service LLBBridge { + // apicaps:CapResolveImage + rpc ResolveImageConfig(ResolveImageConfigRequest) returns (ResolveImageConfigResponse); + // apicaps:CapSolveBase + rpc Solve(SolveRequest) returns (SolveResponse); + // apicaps:CapReadFile + rpc ReadFile(ReadFileRequest) returns (ReadFileResponse); + // apicaps:CapReadDir + rpc ReadDir(ReadDirRequest) returns (ReadDirResponse); + // apicaps:CapStatFile + rpc StatFile(StatFileRequest) returns (StatFileResponse); + rpc Ping(PingRequest) returns (PongResponse); + rpc Return(ReturnRequest) returns (ReturnResponse); + // apicaps:CapFrontendInputs + rpc Inputs(InputsRequest) returns (InputsResponse); +} + +message Result { + oneof result { + // Deprecated non-array refs. 
+		string refDeprecated = 1;
+		RefMapDeprecated refsDeprecated = 2;
+
+		Ref ref = 3;
+		RefMap refs = 4;
+	}
+	map<string, bytes> metadata = 10;
+}
+
+message RefMapDeprecated {
+	map<string, string> refs = 1;
+}
+
+message Ref {
+	string id = 1;
+	pb.Definition def = 2;
+}
+
+message RefMap {
+	map<string, Ref> refs = 1;
+}
+
+message ReturnRequest {
+	Result result = 1;
+	google.rpc.Status error = 2;
+}
+
+message ReturnResponse {
+}
+
+message InputsRequest {
+}
+
+message InputsResponse {
+	map<string, pb.Definition> Definitions = 1;
+}
+
+message ResolveImageConfigRequest {
+	string Ref = 1;
+	pb.Platform Platform = 2;
+	string ResolveMode = 3;
+	string LogName = 4;
+}
+
+message ResolveImageConfigResponse {
+	string Digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
+	bytes Config = 2;
+}
+
+message SolveRequest {
+	pb.Definition Definition = 1;
+	string Frontend = 2;
+	map<string, string> FrontendOpt = 3;
+	// ImportCacheRefsDeprecated is deprecated in favor of the new Imports since BuildKit v0.4.0.
+	// When ImportCacheRefsDeprecated is set, the solver appends
+	// {.Type = "registry", .Attrs = {"ref": importCacheRef}}
+	// for each of the ImportCacheRefs entry to CacheImports for compatibility. (planned to be removed)
+	repeated string ImportCacheRefsDeprecated = 4;
+	bool allowResultReturn = 5;
+	bool allowResultArrayRef = 6;
+
+	// apicaps.CapSolveInlineReturn deprecated
+	bool Final = 10;
+	bytes ExporterAttr = 11;
+	// CacheImports was added in BuildKit v0.4.0.
+	// apicaps:CapImportCaches
+	repeated CacheOptionsEntry CacheImports = 12;
+
+	// apicaps:CapFrontendInputs
+	map<string, pb.Definition> FrontendInputs = 13;
+}
+
+// CacheOptionsEntry corresponds to the control.CacheOptionsEntry
+message CacheOptionsEntry {
+	string Type = 1;
+	map<string, string> Attrs = 2;
+}
+
+message SolveResponse {
+	// deprecated
+	string ref = 1; // can be used by readfile request
+	// deprecated
+/*	bytes ExporterAttr = 2;*/
+
+	// these fields are returned when allowMapReturn was set
+	Result result = 3;
+}
+
+message ReadFileRequest {
+	string Ref = 1;
+	string FilePath = 2;
+	FileRange Range = 3;
+}
+
+message FileRange {
+	int64 Offset = 1;
+	int64 Length = 2;
+}
+
+message ReadFileResponse {
+	bytes Data = 1;
+}
+
+message ReadDirRequest {
+	string Ref = 1;
+	string DirPath = 2;
+	string IncludePattern = 3;
+}
+
+message ReadDirResponse {
+	repeated fsutil.types.Stat entries = 1;
+}
+
+message StatFileRequest {
+	string Ref = 1;
+	string Path = 2;
+}
+
+message StatFileResponse {
+	fsutil.types.Stat stat = 1;
+}
+
+message PingRequest{
+}
+message PongResponse{
+	repeated moby.buildkit.v1.apicaps.APICap FrontendAPICaps = 1 [(gogoproto.nullable) = false];
+	repeated moby.buildkit.v1.apicaps.APICap LLBCaps = 2 [(gogoproto.nullable) = false];
+	repeated moby.buildkit.v1.types.WorkerRecord Workers = 3;
+}
diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/pb/generate.go b/vendor/github.com/moby/buildkit/frontend/gateway/pb/generate.go
new file mode 100644
index 00000000..4ab07c6d
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/frontend/gateway/pb/generate.go
@@ -0,0 +1,3 @@
+package moby_buildkit_v1_frontend
+
+//go:generate protoc -I=. -I=../../../vendor/ -I=../../../../../../ --gogo_out=plugins=grpc:. gateway.proto
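The generated gateway.pb.go above hand-rolls the same protobuf wire-format loop in every Unmarshal function: read a base-128 varint key, split it into field number and wire type, then decode the payload. As a standalone illustration (not part of the vendored code; the helper names are invented for this sketch), the program below encodes a FileRange{Offset: 4096, Length: 512} as two varint fields and decodes it with the same key/shift logic:

package main

import "fmt"

// putUvarint appends x in protobuf's base-128 varint encoding: seven bits
// per byte, with the high bit set on every byte except the last.
func putUvarint(buf []byte, x uint64) []byte {
	for x >= 0x80 {
		buf = append(buf, byte(x)|0x80)
		x >>= 7
	}
	return append(buf, byte(x))
}

// uvarint decodes one varint, returning the value and the bytes consumed.
// The generated code additionally guards against over-long shifts
// (ErrIntOverflowGateway) and truncated input; this sketch assumes
// well-formed data.
func uvarint(buf []byte) (x uint64, n int) {
	for i, b := range buf {
		x |= uint64(b&0x7F) << (7 * uint(i))
		if b < 0x80 {
			return x, i + 1
		}
	}
	return 0, 0
}

func main() {
	// FileRange{Offset: 4096, Length: 512}: both fields are wire type 0
	// (varint), so each is encoded as key = fieldNum<<3|0, then the value.
	var msg []byte
	msg = putUvarint(msg, 1<<3|0) // key for Offset (field 1)
	msg = putUvarint(msg, 4096)
	msg = putUvarint(msg, 2<<3|0) // key for Length (field 2)
	msg = putUvarint(msg, 512)

	// Decode with the same loop shape as FileRange.Unmarshal above.
	for i := 0; i < len(msg); {
		key, n := uvarint(msg[i:])
		i += n
		val, n := uvarint(msg[i:])
		i += n
		fmt.Printf("field %d (wire type %d) = %d\n", key>>3, key&0x7, val)
	}
	// Output:
	// field 1 (wire type 0) = 4096
	// field 2 (wire type 0) = 512
}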
diff --git a/vendor/github.com/moby/buildkit/frontend/result.go b/vendor/github.com/moby/buildkit/frontend/result.go
new file mode 100644
index 00000000..5afc10c9
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/frontend/result.go
@@ -0,0 +1,25 @@
+package frontend
+
+import (
+	"github.com/moby/buildkit/solver"
+)
+
+type Result struct {
+	Ref      solver.ResultProxy
+	Refs     map[string]solver.ResultProxy
+	Metadata map[string][]byte
+}
+
+func (r *Result) EachRef(fn func(solver.ResultProxy) error) (err error) {
+	if r.Ref != nil {
+		err = fn(r.Ref)
+	}
+	for _, r := range r.Refs {
+		if r != nil {
+			if err1 := fn(r); err1 != nil && err == nil {
+				err = err1
+			}
+		}
+	}
+	return err
+}
diff --git a/vendor/github.com/moby/buildkit/identity/randomid.go b/vendor/github.com/moby/buildkit/identity/randomid.go
new file mode 100644
index 00000000..0eb13527
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/identity/randomid.go
@@ -0,0 +1,53 @@
+package identity
+
+import (
+	cryptorand "crypto/rand"
+	"fmt"
+	"io"
+	"math/big"
+)
+
+var (
+	// idReader is used for random id generation. This declaration allows us to
+	// replace it for testing.
+	idReader = cryptorand.Reader
+)
+
+// parameters for random identifier generation. We can tweak this when there is
+// time for further analysis.
+const (
+	randomIDEntropyBytes = 17
+	randomIDBase         = 36
+
+	// To ensure that all identifiers are fixed length, we make sure they
+	// get padded out or truncated to 25 characters.
+	//
+	// For academics, f5lxx1zz5pnorynqglhzmsp33 == 2^128 - 1. This value
+	// was calculated from floor(log(2^128-1, 36)) + 1.
+	//
+	// While 128 bits is the largest whole-byte size that fits into 25
+	// base-36 characters, we generate an extra byte of entropy to fill
+	// in the high bits, which would otherwise be 0. This gives us a more
+	// even distribution of the first character.
+	//
+	// See http://mathworld.wolfram.com/NumberLength.html for more information.
+	maxRandomIDLength = 25
+)
+
+// NewID generates a new identifier for use where random identifiers with low
+// collision probability are required.
+//
+// With the parameters in this package, the generated identifier will provide
+// ~129 bits of entropy encoded with base36. Leading padding is added if the
+// string is less than 25 bytes. We do not intend to maintain this interface, so
+// identifiers should be treated opaquely.
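+//
+// Concretely, with the parameters above: forcing the high bit of the first
+// of the 17 entropy bytes puts the value in [2^135, 2^136). Since
+// 36^26 < 2^135 and 36^27 > 2^136, the base-36 text is always exactly 27
+// digits, and slicing off the first (biased) digit leaves a fixed 25 characters.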
+func NewID() string { + var p [randomIDEntropyBytes]byte + + if _, err := io.ReadFull(idReader, p[:]); err != nil { + panic(fmt.Errorf("failed to read random bytes: %v", err)) + } + + p[0] |= 0x80 // set high bit to avoid the need for padding + return (&big.Int{}).SetBytes(p[:]).Text(randomIDBase)[1 : maxRandomIDLength+1] +} diff --git a/vendor/github.com/moby/buildkit/session/auth/auth.go b/vendor/github.com/moby/buildkit/session/auth/auth.go new file mode 100644 index 00000000..5717455f --- /dev/null +++ b/vendor/github.com/moby/buildkit/session/auth/auth.go @@ -0,0 +1,27 @@ +package auth + +import ( + "context" + + "github.com/moby/buildkit/session" + "github.com/pkg/errors" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func CredentialsFunc(ctx context.Context, c session.Caller) func(string) (string, string, error) { + return func(host string) (string, string, error) { + client := NewAuthClient(c.Conn()) + + resp, err := client.Credentials(ctx, &CredentialsRequest{ + Host: host, + }) + if err != nil { + if st, ok := status.FromError(errors.Cause(err)); ok && st.Code() == codes.Unimplemented { + return "", "", nil + } + return "", "", errors.WithStack(err) + } + return resp.Username, resp.Secret, nil + } +} diff --git a/vendor/github.com/moby/buildkit/session/auth/auth.pb.go b/vendor/github.com/moby/buildkit/session/auth/auth.pb.go new file mode 100644 index 00000000..04afb3d6 --- /dev/null +++ b/vendor/github.com/moby/buildkit/session/auth/auth.pb.go @@ -0,0 +1,740 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: auth.proto + +package auth + +import ( + context "context" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type CredentialsRequest struct { + Host string `protobuf:"bytes,1,opt,name=Host,proto3" json:"Host,omitempty"` +} + +func (m *CredentialsRequest) Reset() { *m = CredentialsRequest{} } +func (*CredentialsRequest) ProtoMessage() {} +func (*CredentialsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_8bbd6f3875b0e874, []int{0} +} +func (m *CredentialsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CredentialsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CredentialsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CredentialsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CredentialsRequest.Merge(m, src) +} +func (m *CredentialsRequest) XXX_Size() int { + return m.Size() +} +func (m *CredentialsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CredentialsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CredentialsRequest proto.InternalMessageInfo + +func (m *CredentialsRequest) GetHost() string { + if m != nil { + return m.Host + } + return "" +} + +type CredentialsResponse struct { + Username string `protobuf:"bytes,1,opt,name=Username,proto3" json:"Username,omitempty"` + Secret string `protobuf:"bytes,2,opt,name=Secret,proto3" json:"Secret,omitempty"` +} + +func (m *CredentialsResponse) Reset() { *m = CredentialsResponse{} } +func (*CredentialsResponse) ProtoMessage() {} +func (*CredentialsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_8bbd6f3875b0e874, []int{1} +} +func (m *CredentialsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CredentialsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CredentialsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CredentialsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_CredentialsResponse.Merge(m, src) +} +func (m *CredentialsResponse) XXX_Size() int { + return m.Size() +} +func (m *CredentialsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_CredentialsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_CredentialsResponse proto.InternalMessageInfo + +func (m *CredentialsResponse) GetUsername() string { + if m != nil { + return m.Username + } + return "" +} + +func (m *CredentialsResponse) GetSecret() string { + if m != nil { + return m.Secret + } + return "" +} + +func init() { + proto.RegisterType((*CredentialsRequest)(nil), "moby.filesync.v1.CredentialsRequest") + proto.RegisterType((*CredentialsResponse)(nil), "moby.filesync.v1.CredentialsResponse") +} + +func init() { proto.RegisterFile("auth.proto", fileDescriptor_8bbd6f3875b0e874) } + +var fileDescriptor_8bbd6f3875b0e874 = []byte{ + // 233 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x4a, 0x2c, 0x2d, 0xc9, + 0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0xc8, 0xcd, 0x4f, 0xaa, 0xd4, 0x4b, 0xcb, 0xcc, + 0x49, 0x2d, 0xae, 0xcc, 0x4b, 0xd6, 0x2b, 0x33, 0x54, 0xd2, 0xe0, 0x12, 0x72, 0x2e, 0x4a, 0x4d, + 0x49, 0xcd, 0x2b, 0xc9, 0x4c, 0xcc, 0x29, 0x0e, 0x4a, 0x2d, 0x2c, 0x4d, 0x2d, 0x2e, 0x11, 0x12, + 0xe2, 0x62, 0xf1, 0xc8, 0x2f, 0x2e, 0x91, 
0x60, 0x54, 0x60, 0xd4, 0xe0, 0x0c, 0x02, 0xb3, 0x95, + 0x3c, 0xb9, 0x84, 0x51, 0x54, 0x16, 0x17, 0xe4, 0xe7, 0x15, 0xa7, 0x0a, 0x49, 0x71, 0x71, 0x84, + 0x16, 0xa7, 0x16, 0xe5, 0x25, 0xe6, 0xa6, 0x42, 0x95, 0xc3, 0xf9, 0x42, 0x62, 0x5c, 0x6c, 0xc1, + 0xa9, 0xc9, 0x45, 0xa9, 0x25, 0x12, 0x4c, 0x60, 0x19, 0x28, 0xcf, 0x28, 0x89, 0x8b, 0xc5, 0xb1, + 0xb4, 0x24, 0x43, 0x28, 0x8a, 0x8b, 0x1b, 0xc9, 0x48, 0x21, 0x15, 0x3d, 0x74, 0xe7, 0xe9, 0x61, + 0xba, 0x4d, 0x4a, 0x95, 0x80, 0x2a, 0x88, 0xbb, 0x9c, 0xac, 0x2e, 0x3c, 0x94, 0x63, 0xb8, 0xf1, + 0x50, 0x8e, 0xe1, 0xc3, 0x43, 0x39, 0xc6, 0x86, 0x47, 0x72, 0x8c, 0x2b, 0x1e, 0xc9, 0x31, 0x9e, + 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x2f, 0x1e, 0xc9, 0x31, + 0x7c, 0x78, 0x24, 0xc7, 0x38, 0xe1, 0xb1, 0x1c, 0xc3, 0x85, 0xc7, 0x72, 0x0c, 0x37, 0x1e, 0xcb, + 0x31, 0x44, 0xb1, 0x80, 0x02, 0x2b, 0x89, 0x0d, 0x1c, 0x5a, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, + 0xff, 0x64, 0x61, 0x71, 0x59, 0x3b, 0x01, 0x00, 0x00, +} + +func (this *CredentialsRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*CredentialsRequest) + if !ok { + that2, ok := that.(CredentialsRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Host != that1.Host { + return false + } + return true +} +func (this *CredentialsResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*CredentialsResponse) + if !ok { + that2, ok := that.(CredentialsResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Username != that1.Username { + return false + } + if this.Secret != that1.Secret { + return false + } + return true +} +func (this *CredentialsRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&auth.CredentialsRequest{") + s = append(s, "Host: "+fmt.Sprintf("%#v", this.Host)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *CredentialsResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&auth.CredentialsResponse{") + s = append(s, "Username: "+fmt.Sprintf("%#v", this.Username)+",\n") + s = append(s, "Secret: "+fmt.Sprintf("%#v", this.Secret)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringAuth(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// AuthClient is the client API for Auth service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
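+//
+// Illustration only: given any *grpc.ClientConn cc (in this patch it always
+// comes from session.Caller.Conn(), as in CredentialsFunc above), credentials
+// are fetched with:
+//
+//	resp, err := NewAuthClient(cc).Credentials(ctx, &CredentialsRequest{Host: host})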
+type AuthClient interface { + Credentials(ctx context.Context, in *CredentialsRequest, opts ...grpc.CallOption) (*CredentialsResponse, error) +} + +type authClient struct { + cc *grpc.ClientConn +} + +func NewAuthClient(cc *grpc.ClientConn) AuthClient { + return &authClient{cc} +} + +func (c *authClient) Credentials(ctx context.Context, in *CredentialsRequest, opts ...grpc.CallOption) (*CredentialsResponse, error) { + out := new(CredentialsResponse) + err := c.cc.Invoke(ctx, "/moby.filesync.v1.Auth/Credentials", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// AuthServer is the server API for Auth service. +type AuthServer interface { + Credentials(context.Context, *CredentialsRequest) (*CredentialsResponse, error) +} + +// UnimplementedAuthServer can be embedded to have forward compatible implementations. +type UnimplementedAuthServer struct { +} + +func (*UnimplementedAuthServer) Credentials(ctx context.Context, req *CredentialsRequest) (*CredentialsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Credentials not implemented") +} + +func RegisterAuthServer(s *grpc.Server, srv AuthServer) { + s.RegisterService(&_Auth_serviceDesc, srv) +} + +func _Auth_Credentials_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CredentialsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).Credentials(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/moby.filesync.v1.Auth/Credentials", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).Credentials(ctx, req.(*CredentialsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Auth_serviceDesc = grpc.ServiceDesc{ + ServiceName: "moby.filesync.v1.Auth", + HandlerType: (*AuthServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Credentials", + Handler: _Auth_Credentials_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "auth.proto", +} + +func (m *CredentialsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CredentialsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CredentialsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Host) > 0 { + i -= len(m.Host) + copy(dAtA[i:], m.Host) + i = encodeVarintAuth(dAtA, i, uint64(len(m.Host))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CredentialsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CredentialsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CredentialsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Secret) > 0 { + i -= len(m.Secret) + copy(dAtA[i:], m.Secret) + i = encodeVarintAuth(dAtA, i, uint64(len(m.Secret))) + i-- + dAtA[i] = 0x12 + } + if len(m.Username) > 0 { + i -= len(m.Username) + copy(dAtA[i:], m.Username) + i = 
encodeVarintAuth(dAtA, i, uint64(len(m.Username))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintAuth(dAtA []byte, offset int, v uint64) int { + offset -= sovAuth(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *CredentialsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Host) + if l > 0 { + n += 1 + l + sovAuth(uint64(l)) + } + return n +} + +func (m *CredentialsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Username) + if l > 0 { + n += 1 + l + sovAuth(uint64(l)) + } + l = len(m.Secret) + if l > 0 { + n += 1 + l + sovAuth(uint64(l)) + } + return n +} + +func sovAuth(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozAuth(x uint64) (n int) { + return sovAuth(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *CredentialsRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CredentialsRequest{`, + `Host:` + fmt.Sprintf("%v", this.Host) + `,`, + `}`, + }, "") + return s +} +func (this *CredentialsResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CredentialsResponse{`, + `Username:` + fmt.Sprintf("%v", this.Username) + `,`, + `Secret:` + fmt.Sprintf("%v", this.Secret) + `,`, + `}`, + }, "") + return s +} +func valueToStringAuth(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *CredentialsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CredentialsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CredentialsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthAuth + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Host = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAuth(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAuth + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthAuth + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CredentialsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CredentialsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CredentialsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthAuth + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Username = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthAuth + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Secret = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAuth(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAuth + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthAuth + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipAuth(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAuth + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAuth + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAuth + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthAuth + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupAuth + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthAuth + } + if depth == 0 { + return iNdEx, 
nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthAuth = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowAuth = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupAuth = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/moby/buildkit/session/auth/auth.proto b/vendor/github.com/moby/buildkit/session/auth/auth.proto new file mode 100644 index 00000000..59331274 --- /dev/null +++ b/vendor/github.com/moby/buildkit/session/auth/auth.proto @@ -0,0 +1,19 @@ +syntax = "proto3"; + +package moby.filesync.v1; + +option go_package = "auth"; + +service Auth{ + rpc Credentials(CredentialsRequest) returns (CredentialsResponse); +} + + +message CredentialsRequest { + string Host = 1; +} + +message CredentialsResponse { + string Username = 1; + string Secret = 2; +} diff --git a/vendor/github.com/moby/buildkit/session/auth/authprovider/authprovider.go b/vendor/github.com/moby/buildkit/session/auth/authprovider/authprovider.go new file mode 100644 index 00000000..a7eb6f22 --- /dev/null +++ b/vendor/github.com/moby/buildkit/session/auth/authprovider/authprovider.go @@ -0,0 +1,53 @@ +package authprovider + +import ( + "context" + "io" + "sync" + + "github.com/docker/cli/cli/config" + "github.com/docker/cli/cli/config/configfile" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/session/auth" + "google.golang.org/grpc" +) + +func NewDockerAuthProvider(stderr io.Writer) session.Attachable { + return &authProvider{ + config: config.LoadDefaultConfigFile(stderr), + } +} + +type authProvider struct { + config *configfile.ConfigFile + + // The need for this mutex is not well understood. + // Without it, the docker cli on OS X hangs when + // reading credentials from docker-credential-osxkeychain. + // See issue https://github.com/docker/cli/issues/1862 + mu sync.Mutex +} + +func (ap *authProvider) Register(server *grpc.Server) { + auth.RegisterAuthServer(server, ap) +} + +func (ap *authProvider) Credentials(ctx context.Context, req *auth.CredentialsRequest) (*auth.CredentialsResponse, error) { + ap.mu.Lock() + defer ap.mu.Unlock() + if req.Host == "registry-1.docker.io" { + req.Host = "https://index.docker.io/v1/" + } + ac, err := ap.config.GetAuthConfig(req.Host) + if err != nil { + return nil, err + } + res := &auth.CredentialsResponse{} + if ac.IdentityToken != "" { + res.Secret = ac.IdentityToken + } else { + res.Username = ac.Username + res.Secret = ac.Password + } + return res, nil +} diff --git a/vendor/github.com/moby/buildkit/session/auth/generate.go b/vendor/github.com/moby/buildkit/session/auth/generate.go new file mode 100644 index 00000000..687aa7cc --- /dev/null +++ b/vendor/github.com/moby/buildkit/session/auth/generate.go @@ -0,0 +1,3 @@ +package auth + +//go:generate protoc --gogoslick_out=plugins=grpc:. 
auth.proto diff --git a/vendor/github.com/moby/buildkit/session/content/attachable.go b/vendor/github.com/moby/buildkit/session/content/attachable.go new file mode 100644 index 00000000..253b37a2 --- /dev/null +++ b/vendor/github.com/moby/buildkit/session/content/attachable.go @@ -0,0 +1,132 @@ +package content + +import ( + "context" + + api "github.com/containerd/containerd/api/services/content/v1" + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/services/content/contentserver" + "github.com/moby/buildkit/session" + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" +) + +// GRPCHeaderID is a gRPC header for store ID +const GRPCHeaderID = "buildkit-attachable-store-id" + +type attachableContentStore struct { + stores map[string]content.Store +} + +func (cs *attachableContentStore) choose(ctx context.Context) (content.Store, error) { + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + return nil, errors.Wrap(errdefs.ErrInvalidArgument, "request lacks metadata") + } + + values := md[GRPCHeaderID] + if len(values) == 0 { + return nil, errors.Wrapf(errdefs.ErrInvalidArgument, "request lacks metadata %q", GRPCHeaderID) + } + id := values[0] + store, ok := cs.stores[id] + if !ok { + return nil, errors.Wrapf(errdefs.ErrNotFound, "unknown store %s", id) + } + return store, nil +} + +func (cs *attachableContentStore) Info(ctx context.Context, dgst digest.Digest) (content.Info, error) { + store, err := cs.choose(ctx) + if err != nil { + return content.Info{}, err + } + return store.Info(ctx, dgst) +} + +func (cs *attachableContentStore) Update(ctx context.Context, info content.Info, fieldpaths ...string) (content.Info, error) { + store, err := cs.choose(ctx) + if err != nil { + return content.Info{}, err + } + return store.Update(ctx, info, fieldpaths...) +} + +func (cs *attachableContentStore) Walk(ctx context.Context, fn content.WalkFunc, fs ...string) error { + store, err := cs.choose(ctx) + if err != nil { + return err + } + return store.Walk(ctx, fn, fs...) +} + +func (cs *attachableContentStore) Delete(ctx context.Context, dgst digest.Digest) error { + store, err := cs.choose(ctx) + if err != nil { + return err + } + return store.Delete(ctx, dgst) +} + +func (cs *attachableContentStore) ListStatuses(ctx context.Context, fs ...string) ([]content.Status, error) { + store, err := cs.choose(ctx) + if err != nil { + return nil, err + } + return store.ListStatuses(ctx, fs...) +} + +func (cs *attachableContentStore) Status(ctx context.Context, ref string) (content.Status, error) { + store, err := cs.choose(ctx) + if err != nil { + return content.Status{}, err + } + return store.Status(ctx, ref) +} + +func (cs *attachableContentStore) Abort(ctx context.Context, ref string) error { + store, err := cs.choose(ctx) + if err != nil { + return err + } + return store.Abort(ctx, ref) +} + +func (cs *attachableContentStore) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) { + store, err := cs.choose(ctx) + if err != nil { + return nil, err + } + return store.Writer(ctx, opts...) 
+} + +func (cs *attachableContentStore) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) { + store, err := cs.choose(ctx) + if err != nil { + return nil, err + } + return store.ReaderAt(ctx, desc) +} + +type attachable struct { + service api.ContentServer +} + +// NewAttachable creates session.Attachable from aggregated stores. +// A key of the store map is an ID string that is used for choosing underlying store. +func NewAttachable(stores map[string]content.Store) session.Attachable { + store := &attachableContentStore{stores: stores} + service := contentserver.New(store) + a := attachable{ + service: service, + } + return &a +} + +func (a *attachable) Register(server *grpc.Server) { + api.RegisterContentServer(server, a.service) +} diff --git a/vendor/github.com/moby/buildkit/session/content/caller.go b/vendor/github.com/moby/buildkit/session/content/caller.go new file mode 100644 index 00000000..70e82130 --- /dev/null +++ b/vendor/github.com/moby/buildkit/session/content/caller.go @@ -0,0 +1,91 @@ +package content + +import ( + "context" + + api "github.com/containerd/containerd/api/services/content/v1" + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/content/proxy" + "github.com/moby/buildkit/session" + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "google.golang.org/grpc/metadata" +) + +type callerContentStore struct { + store content.Store + storeID string +} + +func (cs *callerContentStore) choose(ctx context.Context) context.Context { + nsheader := metadata.Pairs(GRPCHeaderID, cs.storeID) + md, ok := metadata.FromOutgoingContext(ctx) // merge with outgoing context. + if !ok { + md = nsheader + } else { + // order ensures the latest is first in this list. + md = metadata.Join(nsheader, md) + } + return metadata.NewOutgoingContext(ctx, md) +} + +func (cs *callerContentStore) Info(ctx context.Context, dgst digest.Digest) (content.Info, error) { + ctx = cs.choose(ctx) + info, err := cs.store.Info(ctx, dgst) + return info, errors.WithStack(err) +} + +func (cs *callerContentStore) Update(ctx context.Context, info content.Info, fieldpaths ...string) (content.Info, error) { + ctx = cs.choose(ctx) + info, err := cs.store.Update(ctx, info, fieldpaths...) + return info, errors.WithStack(err) +} + +func (cs *callerContentStore) Walk(ctx context.Context, fn content.WalkFunc, fs ...string) error { + ctx = cs.choose(ctx) + return errors.WithStack(cs.store.Walk(ctx, fn, fs...)) +} + +func (cs *callerContentStore) Delete(ctx context.Context, dgst digest.Digest) error { + ctx = cs.choose(ctx) + return errors.WithStack(cs.store.Delete(ctx, dgst)) +} + +func (cs *callerContentStore) ListStatuses(ctx context.Context, fs ...string) ([]content.Status, error) { + ctx = cs.choose(ctx) + resp, err := cs.store.ListStatuses(ctx, fs...) + return resp, errors.WithStack(err) +} + +func (cs *callerContentStore) Status(ctx context.Context, ref string) (content.Status, error) { + ctx = cs.choose(ctx) + st, err := cs.store.Status(ctx, ref) + return st, errors.WithStack(err) +} + +func (cs *callerContentStore) Abort(ctx context.Context, ref string) error { + ctx = cs.choose(ctx) + return errors.WithStack(cs.store.Abort(ctx, ref)) +} + +func (cs *callerContentStore) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) { + ctx = cs.choose(ctx) + w, err := cs.store.Writer(ctx, opts...) 
+ return w, errors.WithStack(err) +} + +func (cs *callerContentStore) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) { + ctx = cs.choose(ctx) + ra, err := cs.store.ReaderAt(ctx, desc) + return ra, errors.WithStack(err) +} + +// NewCallerStore creates content.Store from session.Caller with specified storeID +func NewCallerStore(c session.Caller, storeID string) content.Store { + client := api.NewContentClient(c.Conn()) + return &callerContentStore{ + store: proxy.NewContentStore(client), + storeID: storeID, + } +} diff --git a/vendor/github.com/moby/buildkit/session/context.go b/vendor/github.com/moby/buildkit/session/context.go new file mode 100644 index 00000000..31a29f08 --- /dev/null +++ b/vendor/github.com/moby/buildkit/session/context.go @@ -0,0 +1,22 @@ +package session + +import "context" + +type contextKeyT string + +var contextKey = contextKeyT("buildkit/session-id") + +func NewContext(ctx context.Context, id string) context.Context { + if id != "" { + return context.WithValue(ctx, contextKey, id) + } + return ctx +} + +func FromContext(ctx context.Context) string { + v := ctx.Value(contextKey) + if v == nil { + return "" + } + return v.(string) +} diff --git a/vendor/github.com/moby/buildkit/session/filesync/diffcopy.go b/vendor/github.com/moby/buildkit/session/filesync/diffcopy.go new file mode 100644 index 00000000..f1d7d78e --- /dev/null +++ b/vendor/github.com/moby/buildkit/session/filesync/diffcopy.go @@ -0,0 +1,117 @@ +package filesync + +import ( + "bufio" + io "io" + "os" + "time" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/tonistiigi/fsutil" + fstypes "github.com/tonistiigi/fsutil/types" + "google.golang.org/grpc" +) + +func sendDiffCopy(stream grpc.Stream, fs fsutil.FS, progress progressCb) error { + return errors.WithStack(fsutil.Send(stream.Context(), stream, fs, progress)) +} + +func newStreamWriter(stream grpc.ClientStream) io.WriteCloser { + wc := &streamWriterCloser{ClientStream: stream} + return &bufferedWriteCloser{Writer: bufio.NewWriter(wc), Closer: wc} +} + +type bufferedWriteCloser struct { + *bufio.Writer + io.Closer +} + +func (bwc *bufferedWriteCloser) Close() error { + if err := bwc.Writer.Flush(); err != nil { + return errors.WithStack(err) + } + return bwc.Closer.Close() +} + +type streamWriterCloser struct { + grpc.ClientStream +} + +func (wc *streamWriterCloser) Write(dt []byte) (int, error) { + if err := wc.ClientStream.SendMsg(&BytesMessage{Data: dt}); err != nil { + // SendMsg return EOF on remote errors + if errors.Cause(err) == io.EOF { + if err := errors.WithStack(wc.ClientStream.RecvMsg(struct{}{})); err != nil { + return 0, err + } + } + return 0, errors.WithStack(err) + } + return len(dt), nil +} + +func (wc *streamWriterCloser) Close() error { + if err := wc.ClientStream.CloseSend(); err != nil { + return errors.WithStack(err) + } + // block until receiver is done + var bm BytesMessage + if err := wc.ClientStream.RecvMsg(&bm); err != io.EOF { + return errors.WithStack(err) + } + return nil +} + +func recvDiffCopy(ds grpc.Stream, dest string, cu CacheUpdater, progress progressCb, filter func(string, *fstypes.Stat) bool) error { + st := time.Now() + defer func() { + logrus.Debugf("diffcopy took: %v", time.Since(st)) + }() + var cf fsutil.ChangeFunc + var ch fsutil.ContentHasher + if cu != nil { + cu.MarkSupported(true) + cf = cu.HandleChange + ch = cu.ContentHasher() + } + return errors.WithStack(fsutil.Receive(ds.Context(), ds, dest, fsutil.ReceiveOpt{ + NotifyHashed: cf, + 
ContentHasher: ch, + ProgressCb: progress, + Filter: fsutil.FilterFunc(filter), + })) +} + +func syncTargetDiffCopy(ds grpc.Stream, dest string) error { + if err := os.MkdirAll(dest, 0700); err != nil { + return errors.Wrapf(err, "failed to create synctarget dest dir %s", dest) + } + return errors.WithStack(fsutil.Receive(ds.Context(), ds, dest, fsutil.ReceiveOpt{ + Merge: true, + Filter: func() func(string, *fstypes.Stat) bool { + uid := os.Getuid() + gid := os.Getgid() + return func(p string, st *fstypes.Stat) bool { + st.Uid = uint32(uid) + st.Gid = uint32(gid) + return true + } + }(), + })) +} + +func writeTargetFile(ds grpc.Stream, wc io.WriteCloser) error { + for { + bm := BytesMessage{} + if err := ds.RecvMsg(&bm); err != nil { + if errors.Cause(err) == io.EOF { + return nil + } + return errors.WithStack(err) + } + if _, err := wc.Write(bm.Data); err != nil { + return errors.WithStack(err) + } + } +} diff --git a/vendor/github.com/moby/buildkit/session/filesync/filesync.go b/vendor/github.com/moby/buildkit/session/filesync/filesync.go new file mode 100644 index 00000000..51dd3c53 --- /dev/null +++ b/vendor/github.com/moby/buildkit/session/filesync/filesync.go @@ -0,0 +1,326 @@ +package filesync + +import ( + "context" + "fmt" + io "io" + "os" + "strings" + + "github.com/moby/buildkit/session" + "github.com/pkg/errors" + "github.com/tonistiigi/fsutil" + fstypes "github.com/tonistiigi/fsutil/types" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +const ( + keyOverrideExcludes = "override-excludes" + keyIncludePatterns = "include-patterns" + keyExcludePatterns = "exclude-patterns" + keyFollowPaths = "followpaths" + keyDirName = "dir-name" + keyExporterMetaPrefix = "exporter-md-" +) + +type fsSyncProvider struct { + dirs map[string]SyncedDir + p progressCb + doneCh chan error +} + +type SyncedDir struct { + Name string + Dir string + Excludes []string + Map func(string, *fstypes.Stat) bool +} + +// NewFSSyncProvider creates a new provider for sending files from client +func NewFSSyncProvider(dirs []SyncedDir) session.Attachable { + p := &fsSyncProvider{ + dirs: map[string]SyncedDir{}, + } + for _, d := range dirs { + p.dirs[d.Name] = d + } + return p +} + +func (sp *fsSyncProvider) Register(server *grpc.Server) { + RegisterFileSyncServer(server, sp) +} + +func (sp *fsSyncProvider) DiffCopy(stream FileSync_DiffCopyServer) error { + return sp.handle("diffcopy", stream) +} +func (sp *fsSyncProvider) TarStream(stream FileSync_TarStreamServer) error { + return sp.handle("tarstream", stream) +} + +func (sp *fsSyncProvider) handle(method string, stream grpc.ServerStream) (retErr error) { + var pr *protocol + for _, p := range supportedProtocols { + if method == p.name && isProtoSupported(p.name) { + pr = &p + break + } + } + if pr == nil { + return errors.New("failed to negotiate protocol") + } + + opts, _ := metadata.FromIncomingContext(stream.Context()) // if no metadata continue with empty object + + dirName := "" + name, ok := opts[keyDirName] + if ok && len(name) > 0 { + dirName = name[0] + } + + dir, ok := sp.dirs[dirName] + if !ok { + return status.Errorf(codes.NotFound, "no access allowed to dir %q", dirName) + } + + excludes := opts[keyExcludePatterns] + if len(dir.Excludes) != 0 && (len(opts[keyOverrideExcludes]) == 0 || opts[keyOverrideExcludes][0] != "true") { + excludes = dir.Excludes + } + includes := opts[keyIncludePatterns] + + followPaths := opts[keyFollowPaths] + + var progress progressCb + 
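+	// Consume the one-shot progress callback and done channel that were
+	// installed via SetNextProgressCallback.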
if sp.p != nil { + progress = sp.p + sp.p = nil + } + + var doneCh chan error + if sp.doneCh != nil { + doneCh = sp.doneCh + sp.doneCh = nil + } + err := pr.sendFn(stream, fsutil.NewFS(dir.Dir, &fsutil.WalkOpt{ + ExcludePatterns: excludes, + IncludePatterns: includes, + FollowPaths: followPaths, + Map: dir.Map, + }), progress) + if doneCh != nil { + if err != nil { + doneCh <- err + } + close(doneCh) + } + return err +} + +func (sp *fsSyncProvider) SetNextProgressCallback(f func(int, bool), doneCh chan error) { + sp.p = f + sp.doneCh = doneCh +} + +type progressCb func(int, bool) + +type protocol struct { + name string + sendFn func(stream grpc.Stream, fs fsutil.FS, progress progressCb) error + recvFn func(stream grpc.Stream, destDir string, cu CacheUpdater, progress progressCb, mapFunc func(string, *fstypes.Stat) bool) error +} + +func isProtoSupported(p string) bool { + // TODO: this should be removed after testing if stability is confirmed + if override := os.Getenv("BUILD_STREAM_PROTOCOL"); override != "" { + return strings.EqualFold(p, override) + } + return true +} + +var supportedProtocols = []protocol{ + { + name: "diffcopy", + sendFn: sendDiffCopy, + recvFn: recvDiffCopy, + }, +} + +// FSSendRequestOpt defines options for FSSend request +type FSSendRequestOpt struct { + Name string + IncludePatterns []string + ExcludePatterns []string + FollowPaths []string + OverrideExcludes bool // deprecated: this is used by docker/cli for automatically loading .dockerignore from the directory + DestDir string + CacheUpdater CacheUpdater + ProgressCb func(int, bool) + Filter func(string, *fstypes.Stat) bool +} + +// CacheUpdater is an object capable of sending notifications for the cache hash changes +type CacheUpdater interface { + MarkSupported(bool) + HandleChange(fsutil.ChangeKind, string, os.FileInfo, error) error + ContentHasher() fsutil.ContentHasher +} + +// FSSync initializes a transfer of files +func FSSync(ctx context.Context, c session.Caller, opt FSSendRequestOpt) error { + var pr *protocol + for _, p := range supportedProtocols { + if isProtoSupported(p.name) && c.Supports(session.MethodURL(_FileSync_serviceDesc.ServiceName, p.name)) { + pr = &p + break + } + } + if pr == nil { + return errors.New("no local sources enabled") + } + + opts := make(map[string][]string) + if opt.OverrideExcludes { + opts[keyOverrideExcludes] = []string{"true"} + } + + if opt.IncludePatterns != nil { + opts[keyIncludePatterns] = opt.IncludePatterns + } + + if opt.ExcludePatterns != nil { + opts[keyExcludePatterns] = opt.ExcludePatterns + } + + if opt.FollowPaths != nil { + opts[keyFollowPaths] = opt.FollowPaths + } + + opts[keyDirName] = []string{opt.Name} + + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + client := NewFileSyncClient(c.Conn()) + + var stream grpc.ClientStream + + ctx = metadata.NewOutgoingContext(ctx, opts) + + switch pr.name { + case "tarstream": + cc, err := client.TarStream(ctx) + if err != nil { + return err + } + stream = cc + case "diffcopy": + cc, err := client.DiffCopy(ctx) + if err != nil { + return err + } + stream = cc + default: + panic(fmt.Sprintf("invalid protocol: %q", pr.name)) + } + + return pr.recvFn(stream, opt.DestDir, opt.CacheUpdater, opt.ProgressCb, opt.Filter) +} + +// NewFSSyncTargetDir allows writing into a directory +func NewFSSyncTargetDir(outdir string) session.Attachable { + p := &fsSyncTarget{ + outdir: outdir, + } + return p +} + +// NewFSSyncTarget allows writing into an io.WriteCloser +func NewFSSyncTarget(f func(map[string]string) 
(io.WriteCloser, error)) session.Attachable {
+	p := &fsSyncTarget{
+		f: f,
+	}
+	return p
+}
+
+type fsSyncTarget struct {
+	outdir string
+	f      func(map[string]string) (io.WriteCloser, error)
+}
+
+func (sp *fsSyncTarget) Register(server *grpc.Server) {
+	RegisterFileSendServer(server, sp)
+}
+
+func (sp *fsSyncTarget) DiffCopy(stream FileSend_DiffCopyServer) (err error) {
+	if sp.outdir != "" {
+		return syncTargetDiffCopy(stream, sp.outdir)
+	}
+
+	if sp.f == nil {
+		return errors.New("empty outfile and outdir")
+	}
+	opts, _ := metadata.FromIncomingContext(stream.Context()) // if no metadata continue with empty object
+	md := map[string]string{}
+	for k, v := range opts {
+		if strings.HasPrefix(k, keyExporterMetaPrefix) {
+			md[strings.TrimPrefix(k, keyExporterMetaPrefix)] = strings.Join(v, ",")
+		}
+	}
+	wc, err := sp.f(md)
+	if err != nil {
+		return err
+	}
+	if wc == nil {
+		return status.Errorf(codes.AlreadyExists, "target already exists")
+	}
+	defer func() {
+		err1 := wc.Close()
+		if err == nil { // keep the write error if there was one; otherwise surface the close error
+			err = err1
+		}
+	}()
+	return writeTargetFile(stream, wc)
+}
+
+func CopyToCaller(ctx context.Context, fs fsutil.FS, c session.Caller, progress func(int, bool)) error {
+	method := session.MethodURL(_FileSend_serviceDesc.ServiceName, "diffcopy")
+	if !c.Supports(method) {
+		return errors.Errorf("method %s not supported by the client", method)
+	}
+
+	client := NewFileSendClient(c.Conn())
+
+	cc, err := client.DiffCopy(ctx)
+	if err != nil {
+		return errors.WithStack(err)
+	}
+
+	return sendDiffCopy(cc, fs, progress)
+}
+
+func CopyFileWriter(ctx context.Context, md map[string]string, c session.Caller) (io.WriteCloser, error) {
+	method := session.MethodURL(_FileSend_serviceDesc.ServiceName, "diffcopy")
+	if !c.Supports(method) {
+		return nil, errors.Errorf("method %s not supported by the client", method)
+	}
+
+	client := NewFileSendClient(c.Conn())
+
+	opts := make(map[string][]string, len(md))
+	for k, v := range md {
+		opts[keyExporterMetaPrefix+k] = []string{v}
+	}
+
+	ctx = metadata.NewOutgoingContext(ctx, opts)
+
+	cc, err := client.DiffCopy(ctx)
+	if err != nil {
+		return nil, errors.WithStack(err)
+	}
+
+	return newStreamWriter(cc), nil
+}
diff --git a/vendor/github.com/moby/buildkit/session/filesync/filesync.pb.go b/vendor/github.com/moby/buildkit/session/filesync/filesync.pb.go
new file mode 100644
index 00000000..993834a6
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/session/filesync/filesync.pb.go
@@ -0,0 +1,675 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: filesync.proto
+
+package filesync
+
+import (
+	bytes "bytes"
+	context "context"
+	fmt "fmt"
+	proto "github.com/gogo/protobuf/proto"
+	grpc "google.golang.org/grpc"
+	codes "google.golang.org/grpc/codes"
+	status "google.golang.org/grpc/status"
+	io "io"
+	math "math"
+	math_bits "math/bits"
+	reflect "reflect"
+	strings "strings"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// BytesMessage contains a chunk of byte data +type BytesMessage struct { + Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` +} + +func (m *BytesMessage) Reset() { *m = BytesMessage{} } +func (*BytesMessage) ProtoMessage() {} +func (*BytesMessage) Descriptor() ([]byte, []int) { + return fileDescriptor_d1042549f1f24495, []int{0} +} +func (m *BytesMessage) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BytesMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BytesMessage.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BytesMessage) XXX_Merge(src proto.Message) { + xxx_messageInfo_BytesMessage.Merge(m, src) +} +func (m *BytesMessage) XXX_Size() int { + return m.Size() +} +func (m *BytesMessage) XXX_DiscardUnknown() { + xxx_messageInfo_BytesMessage.DiscardUnknown(m) +} + +var xxx_messageInfo_BytesMessage proto.InternalMessageInfo + +func (m *BytesMessage) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func init() { + proto.RegisterType((*BytesMessage)(nil), "moby.filesync.v1.BytesMessage") +} + +func init() { proto.RegisterFile("filesync.proto", fileDescriptor_d1042549f1f24495) } + +var fileDescriptor_d1042549f1f24495 = []byte{ + // 217 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x4b, 0xcb, 0xcc, 0x49, + 0x2d, 0xae, 0xcc, 0x4b, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0xc8, 0xcd, 0x4f, 0xaa, + 0xd4, 0x83, 0x0b, 0x96, 0x19, 0x2a, 0x29, 0x71, 0xf1, 0x38, 0x55, 0x96, 0xa4, 0x16, 0xfb, 0xa6, + 0x16, 0x17, 0x27, 0xa6, 0xa7, 0x0a, 0x09, 0x71, 0xb1, 0xa4, 0x24, 0x96, 0x24, 0x4a, 0x30, 0x2a, + 0x30, 0x6a, 0xf0, 0x04, 0x81, 0xd9, 0x46, 0xab, 0x19, 0xb9, 0x38, 0xdc, 0x32, 0x73, 0x52, 0x83, + 0x2b, 0xf3, 0x92, 0x85, 0xfc, 0xb8, 0x38, 0x5c, 0x32, 0xd3, 0xd2, 0x9c, 0xf3, 0x0b, 0x2a, 0x85, + 0xe4, 0xf4, 0xd0, 0xcd, 0xd3, 0x43, 0x36, 0x4c, 0x8a, 0x80, 0xbc, 0x06, 0xa3, 0x01, 0xa3, 0x90, + 0x3f, 0x17, 0x67, 0x48, 0x62, 0x51, 0x70, 0x49, 0x51, 0x6a, 0x62, 0x2e, 0x35, 0x0c, 0x34, 0x8a, + 0x82, 0x3a, 0x36, 0x35, 0x2f, 0x85, 0xda, 0x8e, 0x75, 0xb2, 0xbb, 0xf0, 0x50, 0x8e, 0xe1, 0xc6, + 0x43, 0x39, 0x86, 0x0f, 0x0f, 0xe5, 0x18, 0x1b, 0x1e, 0xc9, 0x31, 0xae, 0x78, 0x24, 0xc7, 0x78, + 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0xbe, 0x78, 0x24, 0xc7, + 0xf0, 0xe1, 0x91, 0x1c, 0xe3, 0x84, 0xc7, 0x72, 0x0c, 0x17, 0x1e, 0xcb, 0x31, 0xdc, 0x78, 0x2c, + 0xc7, 0x10, 0xc5, 0x01, 0x33, 0x33, 0x89, 0x0d, 0x1c, 0x0d, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, + 0xff, 0x5e, 0xce, 0x52, 0xb3, 0x98, 0x01, 0x00, 0x00, +} + +func (this *BytesMessage) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*BytesMessage) + if !ok { + that2, ok := that.(BytesMessage) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !bytes.Equal(this.Data, that1.Data) { + return false + } + return true +} +func (this *BytesMessage) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&filesync.BytesMessage{") + s = append(s, "Data: "+fmt.Sprintf("%#v", this.Data)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} 
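+
+// Usage sketch (illustrative only, not produced by protoc-gen-gogo): the
+// FileSync service below is consumed as a bidirectional stream of
+// BytesMessage chunks carrying fsutil's diffcopy wire format. Assuming an
+// already-established *grpc.ClientConn named conn, a minimal receive loop
+// could look like:
+//
+//	client := NewFileSyncClient(conn)
+//	stream, err := client.DiffCopy(ctx)
+//	if err != nil {
+//		return err
+//	}
+//	for {
+//		bm, err := stream.Recv()
+//		if err == io.EOF {
+//			break // sender closed the stream
+//		}
+//		if err != nil {
+//			return err
+//		}
+//		_ = bm.Data // one chunk of the transfer
+//	}
+//
+// In practice the FSSync/sendDiffCopy helpers in filesync.go drive this
+// stream through fsutil rather than handling chunks by hand.
+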
+func valueToGoStringFilesync(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// FileSyncClient is the client API for FileSync service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type FileSyncClient interface { + DiffCopy(ctx context.Context, opts ...grpc.CallOption) (FileSync_DiffCopyClient, error) + TarStream(ctx context.Context, opts ...grpc.CallOption) (FileSync_TarStreamClient, error) +} + +type fileSyncClient struct { + cc *grpc.ClientConn +} + +func NewFileSyncClient(cc *grpc.ClientConn) FileSyncClient { + return &fileSyncClient{cc} +} + +func (c *fileSyncClient) DiffCopy(ctx context.Context, opts ...grpc.CallOption) (FileSync_DiffCopyClient, error) { + stream, err := c.cc.NewStream(ctx, &_FileSync_serviceDesc.Streams[0], "/moby.filesync.v1.FileSync/DiffCopy", opts...) + if err != nil { + return nil, err + } + x := &fileSyncDiffCopyClient{stream} + return x, nil +} + +type FileSync_DiffCopyClient interface { + Send(*BytesMessage) error + Recv() (*BytesMessage, error) + grpc.ClientStream +} + +type fileSyncDiffCopyClient struct { + grpc.ClientStream +} + +func (x *fileSyncDiffCopyClient) Send(m *BytesMessage) error { + return x.ClientStream.SendMsg(m) +} + +func (x *fileSyncDiffCopyClient) Recv() (*BytesMessage, error) { + m := new(BytesMessage) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *fileSyncClient) TarStream(ctx context.Context, opts ...grpc.CallOption) (FileSync_TarStreamClient, error) { + stream, err := c.cc.NewStream(ctx, &_FileSync_serviceDesc.Streams[1], "/moby.filesync.v1.FileSync/TarStream", opts...) + if err != nil { + return nil, err + } + x := &fileSyncTarStreamClient{stream} + return x, nil +} + +type FileSync_TarStreamClient interface { + Send(*BytesMessage) error + Recv() (*BytesMessage, error) + grpc.ClientStream +} + +type fileSyncTarStreamClient struct { + grpc.ClientStream +} + +func (x *fileSyncTarStreamClient) Send(m *BytesMessage) error { + return x.ClientStream.SendMsg(m) +} + +func (x *fileSyncTarStreamClient) Recv() (*BytesMessage, error) { + m := new(BytesMessage) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// FileSyncServer is the server API for FileSync service. +type FileSyncServer interface { + DiffCopy(FileSync_DiffCopyServer) error + TarStream(FileSync_TarStreamServer) error +} + +// UnimplementedFileSyncServer can be embedded to have forward compatible implementations. 
+type UnimplementedFileSyncServer struct { +} + +func (*UnimplementedFileSyncServer) DiffCopy(srv FileSync_DiffCopyServer) error { + return status.Errorf(codes.Unimplemented, "method DiffCopy not implemented") +} +func (*UnimplementedFileSyncServer) TarStream(srv FileSync_TarStreamServer) error { + return status.Errorf(codes.Unimplemented, "method TarStream not implemented") +} + +func RegisterFileSyncServer(s *grpc.Server, srv FileSyncServer) { + s.RegisterService(&_FileSync_serviceDesc, srv) +} + +func _FileSync_DiffCopy_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(FileSyncServer).DiffCopy(&fileSyncDiffCopyServer{stream}) +} + +type FileSync_DiffCopyServer interface { + Send(*BytesMessage) error + Recv() (*BytesMessage, error) + grpc.ServerStream +} + +type fileSyncDiffCopyServer struct { + grpc.ServerStream +} + +func (x *fileSyncDiffCopyServer) Send(m *BytesMessage) error { + return x.ServerStream.SendMsg(m) +} + +func (x *fileSyncDiffCopyServer) Recv() (*BytesMessage, error) { + m := new(BytesMessage) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _FileSync_TarStream_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(FileSyncServer).TarStream(&fileSyncTarStreamServer{stream}) +} + +type FileSync_TarStreamServer interface { + Send(*BytesMessage) error + Recv() (*BytesMessage, error) + grpc.ServerStream +} + +type fileSyncTarStreamServer struct { + grpc.ServerStream +} + +func (x *fileSyncTarStreamServer) Send(m *BytesMessage) error { + return x.ServerStream.SendMsg(m) +} + +func (x *fileSyncTarStreamServer) Recv() (*BytesMessage, error) { + m := new(BytesMessage) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _FileSync_serviceDesc = grpc.ServiceDesc{ + ServiceName: "moby.filesync.v1.FileSync", + HandlerType: (*FileSyncServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "DiffCopy", + Handler: _FileSync_DiffCopy_Handler, + ServerStreams: true, + ClientStreams: true, + }, + { + StreamName: "TarStream", + Handler: _FileSync_TarStream_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "filesync.proto", +} + +// FileSendClient is the client API for FileSend service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type FileSendClient interface { + DiffCopy(ctx context.Context, opts ...grpc.CallOption) (FileSend_DiffCopyClient, error) +} + +type fileSendClient struct { + cc *grpc.ClientConn +} + +func NewFileSendClient(cc *grpc.ClientConn) FileSendClient { + return &fileSendClient{cc} +} + +func (c *fileSendClient) DiffCopy(ctx context.Context, opts ...grpc.CallOption) (FileSend_DiffCopyClient, error) { + stream, err := c.cc.NewStream(ctx, &_FileSend_serviceDesc.Streams[0], "/moby.filesync.v1.FileSend/DiffCopy", opts...) 
+ if err != nil { + return nil, err + } + x := &fileSendDiffCopyClient{stream} + return x, nil +} + +type FileSend_DiffCopyClient interface { + Send(*BytesMessage) error + Recv() (*BytesMessage, error) + grpc.ClientStream +} + +type fileSendDiffCopyClient struct { + grpc.ClientStream +} + +func (x *fileSendDiffCopyClient) Send(m *BytesMessage) error { + return x.ClientStream.SendMsg(m) +} + +func (x *fileSendDiffCopyClient) Recv() (*BytesMessage, error) { + m := new(BytesMessage) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// FileSendServer is the server API for FileSend service. +type FileSendServer interface { + DiffCopy(FileSend_DiffCopyServer) error +} + +// UnimplementedFileSendServer can be embedded to have forward compatible implementations. +type UnimplementedFileSendServer struct { +} + +func (*UnimplementedFileSendServer) DiffCopy(srv FileSend_DiffCopyServer) error { + return status.Errorf(codes.Unimplemented, "method DiffCopy not implemented") +} + +func RegisterFileSendServer(s *grpc.Server, srv FileSendServer) { + s.RegisterService(&_FileSend_serviceDesc, srv) +} + +func _FileSend_DiffCopy_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(FileSendServer).DiffCopy(&fileSendDiffCopyServer{stream}) +} + +type FileSend_DiffCopyServer interface { + Send(*BytesMessage) error + Recv() (*BytesMessage, error) + grpc.ServerStream +} + +type fileSendDiffCopyServer struct { + grpc.ServerStream +} + +func (x *fileSendDiffCopyServer) Send(m *BytesMessage) error { + return x.ServerStream.SendMsg(m) +} + +func (x *fileSendDiffCopyServer) Recv() (*BytesMessage, error) { + m := new(BytesMessage) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _FileSend_serviceDesc = grpc.ServiceDesc{ + ServiceName: "moby.filesync.v1.FileSend", + HandlerType: (*FileSendServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "DiffCopy", + Handler: _FileSend_DiffCopy_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "filesync.proto", +} + +func (m *BytesMessage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BytesMessage) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BytesMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Data) > 0 { + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintFilesync(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintFilesync(dAtA []byte, offset int, v uint64) int { + offset -= sovFilesync(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *BytesMessage) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Data) + if l > 0 { + n += 1 + l + sovFilesync(uint64(l)) + } + return n +} + +func sovFilesync(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozFilesync(x uint64) (n int) { + return sovFilesync(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *BytesMessage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BytesMessage{`, + `Data:` + 
fmt.Sprintf("%v", this.Data) + `,`, + `}`, + }, "") + return s +} +func valueToStringFilesync(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *BytesMessage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFilesync + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BytesMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BytesMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFilesync + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthFilesync + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthFilesync + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipFilesync(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthFilesync + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthFilesync + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipFilesync(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFilesync + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFilesync + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFilesync + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthFilesync + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupFilesync + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthFilesync + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthFilesync = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowFilesync = fmt.Errorf("proto: integer overflow") + 
ErrUnexpectedEndOfGroupFilesync = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/moby/buildkit/session/filesync/filesync.proto b/vendor/github.com/moby/buildkit/session/filesync/filesync.proto new file mode 100644 index 00000000..0ae29373 --- /dev/null +++ b/vendor/github.com/moby/buildkit/session/filesync/filesync.proto @@ -0,0 +1,20 @@ +syntax = "proto3"; + +package moby.filesync.v1; + +option go_package = "filesync"; + +service FileSync{ + rpc DiffCopy(stream BytesMessage) returns (stream BytesMessage); + rpc TarStream(stream BytesMessage) returns (stream BytesMessage); +} + +service FileSend{ + rpc DiffCopy(stream BytesMessage) returns (stream BytesMessage); +} + + +// BytesMessage contains a chunk of byte data +message BytesMessage{ + bytes data = 1; +} \ No newline at end of file diff --git a/vendor/github.com/moby/buildkit/session/filesync/generate.go b/vendor/github.com/moby/buildkit/session/filesync/generate.go new file mode 100644 index 00000000..261e8762 --- /dev/null +++ b/vendor/github.com/moby/buildkit/session/filesync/generate.go @@ -0,0 +1,3 @@ +package filesync + +//go:generate protoc --gogoslick_out=plugins=grpc:. filesync.proto diff --git a/vendor/github.com/moby/buildkit/session/grpc.go b/vendor/github.com/moby/buildkit/session/grpc.go new file mode 100644 index 00000000..2798b6ab --- /dev/null +++ b/vendor/github.com/moby/buildkit/session/grpc.go @@ -0,0 +1,81 @@ +package session + +import ( + "context" + "net" + "sync/atomic" + "time" + + "github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc" + opentracing "github.com/opentracing/opentracing-go" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/net/http2" + "google.golang.org/grpc" + "google.golang.org/grpc/health/grpc_health_v1" +) + +func serve(ctx context.Context, grpcServer *grpc.Server, conn net.Conn) { + go func() { + <-ctx.Done() + conn.Close() + }() + logrus.Debugf("serving grpc connection") + (&http2.Server{}).ServeConn(conn, &http2.ServeConnOpts{Handler: grpcServer}) +} + +func grpcClientConn(ctx context.Context, conn net.Conn) (context.Context, *grpc.ClientConn, error) { + var dialCount int64 + dialer := grpc.WithDialer(func(addr string, d time.Duration) (net.Conn, error) { + if c := atomic.AddInt64(&dialCount, 1); c > 1 { + return nil, errors.Errorf("only one connection allowed") + } + return conn, nil + }) + + dialOpts := []grpc.DialOption{ + dialer, + grpc.WithInsecure(), + } + + if span := opentracing.SpanFromContext(ctx); span != nil { + tracer := span.Tracer() + dialOpts = append(dialOpts, + grpc.WithUnaryInterceptor(otgrpc.OpenTracingClientInterceptor(tracer, traceFilter())), + grpc.WithStreamInterceptor(otgrpc.OpenTracingStreamClientInterceptor(tracer, traceFilter())), + ) + } + + cc, err := grpc.DialContext(ctx, "", dialOpts...) 
+ if err != nil { + return nil, nil, errors.Wrap(err, "failed to create grpc client") + } + + ctx, cancel := context.WithCancel(ctx) + go monitorHealth(ctx, cc, cancel) + + return ctx, cc, nil +} + +func monitorHealth(ctx context.Context, cc *grpc.ClientConn, cancelConn func()) { + defer cancelConn() + defer cc.Close() + + ticker := time.NewTicker(1 * time.Second) + defer ticker.Stop() + healthClient := grpc_health_v1.NewHealthClient(cc) + + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + ctx, cancel := context.WithTimeout(ctx, 10*time.Second) + _, err := healthClient.Check(ctx, &grpc_health_v1.HealthCheckRequest{}) + cancel() + if err != nil { + return + } + } + } +} diff --git a/vendor/github.com/moby/buildkit/session/grpchijack/dial.go b/vendor/github.com/moby/buildkit/session/grpchijack/dial.go new file mode 100644 index 00000000..2486563e --- /dev/null +++ b/vendor/github.com/moby/buildkit/session/grpchijack/dial.go @@ -0,0 +1,162 @@ +package grpchijack + +import ( + "context" + "io" + "net" + "strings" + "sync" + "time" + + controlapi "github.com/moby/buildkit/api/services/control" + "github.com/moby/buildkit/session" + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" +) + +func Dialer(api controlapi.ControlClient) session.Dialer { + return func(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error) { + + meta = lowerHeaders(meta) + + md := metadata.MD(meta) + + ctx = metadata.NewOutgoingContext(ctx, md) + + stream, err := api.Session(ctx) + if err != nil { + return nil, err + } + + c, _ := streamToConn(stream) + return c, nil + } +} + +func streamToConn(stream grpc.Stream) (net.Conn, <-chan struct{}) { + closeCh := make(chan struct{}) + c := &conn{stream: stream, buf: make([]byte, 32*1<<10), closeCh: closeCh} + return c, closeCh +} + +type conn struct { + stream grpc.Stream + buf []byte + lastBuf []byte + + closedOnce sync.Once + readMu sync.Mutex + writeMu sync.Mutex + err error + closeCh chan struct{} +} + +func (c *conn) Read(b []byte) (n int, err error) { + c.readMu.Lock() + defer c.readMu.Unlock() + + if c.lastBuf != nil { + n := copy(b, c.lastBuf) + c.lastBuf = c.lastBuf[n:] + if len(c.lastBuf) == 0 { + c.lastBuf = nil + } + return n, nil + } + m := new(controlapi.BytesMessage) + m.Data = c.buf + + if err := c.stream.RecvMsg(m); err != nil { + return 0, err + } + c.buf = m.Data[:cap(m.Data)] + + n = copy(b, m.Data) + if n < len(m.Data) { + c.lastBuf = m.Data[n:] + } + + return n, nil +} + +func (c *conn) Write(b []byte) (int, error) { + c.writeMu.Lock() + defer c.writeMu.Unlock() + m := &controlapi.BytesMessage{Data: b} + if err := c.stream.SendMsg(m); err != nil { + return 0, err + } + return len(b), nil +} + +func (c *conn) Close() (err error) { + c.closedOnce.Do(func() { + defer func() { + close(c.closeCh) + }() + + if cs, ok := c.stream.(grpc.ClientStream); ok { + c.writeMu.Lock() + err = cs.CloseSend() + c.writeMu.Unlock() + if err != nil { + return + } + } + + c.readMu.Lock() + for { + m := new(controlapi.BytesMessage) + m.Data = c.buf + err = c.stream.RecvMsg(m) + if err != nil { + if err != io.EOF { + c.readMu.Unlock() + return + } + err = nil + break + } + c.buf = m.Data[:cap(m.Data)] + c.lastBuf = append(c.lastBuf, c.buf...) 
+ } + c.readMu.Unlock() + + }) + return nil +} + +func (c *conn) LocalAddr() net.Addr { + return dummyAddr{} +} +func (c *conn) RemoteAddr() net.Addr { + return dummyAddr{} +} +func (c *conn) SetDeadline(t time.Time) error { + return nil +} +func (c *conn) SetReadDeadline(t time.Time) error { + return nil +} +func (c *conn) SetWriteDeadline(t time.Time) error { + return nil +} + +type dummyAddr struct { +} + +func (d dummyAddr) Network() string { + return "tcp" +} + +func (d dummyAddr) String() string { + return "localhost" +} + +func lowerHeaders(in map[string][]string) map[string][]string { + out := map[string][]string{} + for k := range in { + out[strings.ToLower(k)] = in[k] + } + return out +} diff --git a/vendor/github.com/moby/buildkit/session/grpchijack/hijack.go b/vendor/github.com/moby/buildkit/session/grpchijack/hijack.go new file mode 100644 index 00000000..096a9e80 --- /dev/null +++ b/vendor/github.com/moby/buildkit/session/grpchijack/hijack.go @@ -0,0 +1,15 @@ +package grpchijack + +import ( + "net" + + controlapi "github.com/moby/buildkit/api/services/control" + "google.golang.org/grpc/metadata" +) + +// Hijack hijacks session to a connection. +func Hijack(stream controlapi.Control_SessionServer) (net.Conn, <-chan struct{}, map[string][]string) { + md, _ := metadata.FromIncomingContext(stream.Context()) + c, closeCh := streamToConn(stream) + return c, closeCh, md +} diff --git a/vendor/github.com/moby/buildkit/session/manager.go b/vendor/github.com/moby/buildkit/session/manager.go new file mode 100644 index 00000000..e01b047e --- /dev/null +++ b/vendor/github.com/moby/buildkit/session/manager.go @@ -0,0 +1,220 @@ +package session + +import ( + "context" + "net" + "net/http" + "strings" + "sync" + + "github.com/pkg/errors" + "google.golang.org/grpc" +) + +// Caller can invoke requests on the session +type Caller interface { + Context() context.Context + Supports(method string) bool + Conn() *grpc.ClientConn + Name() string + SharedKey() string +} + +type client struct { + Session + cc *grpc.ClientConn + supported map[string]struct{} +} + +// Manager is a controller for accessing currently active sessions +type Manager struct { + sessions map[string]*client + mu sync.Mutex + updateCondition *sync.Cond +} + +// NewManager returns a new Manager +func NewManager() (*Manager, error) { + sm := &Manager{ + sessions: make(map[string]*client), + } + sm.updateCondition = sync.NewCond(&sm.mu) + return sm, nil +} + +// HandleHTTPRequest handles an incoming HTTP request +func (sm *Manager) HandleHTTPRequest(ctx context.Context, w http.ResponseWriter, r *http.Request) error { + hijacker, ok := w.(http.Hijacker) + if !ok { + return errors.New("handler does not support hijack") + } + + id := r.Header.Get(headerSessionID) + + proto := r.Header.Get("Upgrade") + + sm.mu.Lock() + if _, ok := sm.sessions[id]; ok { + sm.mu.Unlock() + return errors.Errorf("session %s already exists", id) + } + + if proto == "" { + sm.mu.Unlock() + return errors.New("no upgrade proto in request") + } + + if proto != "h2c" { + sm.mu.Unlock() + return errors.Errorf("protocol %s not supported", proto) + } + + conn, _, err := hijacker.Hijack() + if err != nil { + sm.mu.Unlock() + return errors.Wrap(err, "failed to hijack connection") + } + + resp := &http.Response{ + StatusCode: http.StatusSwitchingProtocols, + ProtoMajor: 1, + ProtoMinor: 1, + Header: http.Header{}, + } + resp.Header.Set("Connection", "Upgrade") + resp.Header.Set("Upgrade", proto) + + // set raw mode + conn.Write([]byte{}) + resp.Write(conn) + + return 
sm.handleConn(ctx, conn, r.Header) +} + +// HandleConn handles an incoming raw connection +func (sm *Manager) HandleConn(ctx context.Context, conn net.Conn, opts map[string][]string) error { + sm.mu.Lock() + return sm.handleConn(ctx, conn, opts) +} + +// caller needs to take lock, this function will release it +func (sm *Manager) handleConn(ctx context.Context, conn net.Conn, opts map[string][]string) error { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + opts = canonicalHeaders(opts) + + h := http.Header(opts) + id := h.Get(headerSessionID) + name := h.Get(headerSessionName) + sharedKey := h.Get(headerSessionSharedKey) + + ctx, cc, err := grpcClientConn(ctx, conn) + if err != nil { + sm.mu.Unlock() + return err + } + + c := &client{ + Session: Session{ + id: id, + name: name, + sharedKey: sharedKey, + ctx: ctx, + cancelCtx: cancel, + done: make(chan struct{}), + }, + cc: cc, + supported: make(map[string]struct{}), + } + + for _, m := range opts[headerSessionMethod] { + c.supported[strings.ToLower(m)] = struct{}{} + } + sm.sessions[id] = c + sm.updateCondition.Broadcast() + sm.mu.Unlock() + + defer func() { + sm.mu.Lock() + delete(sm.sessions, id) + sm.mu.Unlock() + }() + + <-c.ctx.Done() + conn.Close() + close(c.done) + + return nil +} + +// Get returns a session by ID +func (sm *Manager) Get(ctx context.Context, id string) (Caller, error) { + // session prefix is used to identify vertexes with different contexts so + // they would not collide, but for lookup we don't need the prefix + if p := strings.SplitN(id, ":", 2); len(p) == 2 && len(p[1]) > 0 { + id = p[1] + } + + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + go func() { + select { + case <-ctx.Done(): + sm.mu.Lock() + sm.updateCondition.Broadcast() + sm.mu.Unlock() + } + }() + + var c *client + + sm.mu.Lock() + for { + select { + case <-ctx.Done(): + sm.mu.Unlock() + return nil, errors.Wrapf(ctx.Err(), "no active session for %s", id) + default: + } + var ok bool + c, ok = sm.sessions[id] + if !ok || c.closed() { + sm.updateCondition.Wait() + continue + } + sm.mu.Unlock() + break + } + + return c, nil +} + +func (c *client) Context() context.Context { + return c.context() +} + +func (c *client) Name() string { + return c.name +} + +func (c *client) SharedKey() string { + return c.sharedKey +} + +func (c *client) Supports(url string) bool { + _, ok := c.supported[strings.ToLower(url)] + return ok +} +func (c *client) Conn() *grpc.ClientConn { + return c.cc +} + +func canonicalHeaders(in map[string][]string) map[string][]string { + out := map[string][]string{} + for k := range in { + out[http.CanonicalHeaderKey(k)] = in[k] + } + return out +} diff --git a/vendor/github.com/moby/buildkit/session/secrets/generate.go b/vendor/github.com/moby/buildkit/session/secrets/generate.go new file mode 100644 index 00000000..68716a95 --- /dev/null +++ b/vendor/github.com/moby/buildkit/session/secrets/generate.go @@ -0,0 +1,3 @@ +package secrets + +//go:generate protoc --gogoslick_out=plugins=grpc:. 
secrets.proto diff --git a/vendor/github.com/moby/buildkit/session/secrets/secrets.go b/vendor/github.com/moby/buildkit/session/secrets/secrets.go new file mode 100644 index 00000000..3f3bb644 --- /dev/null +++ b/vendor/github.com/moby/buildkit/session/secrets/secrets.go @@ -0,0 +1,30 @@ +package secrets + +import ( + "context" + + "github.com/moby/buildkit/session" + "github.com/pkg/errors" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +type SecretStore interface { + GetSecret(context.Context, string) ([]byte, error) +} + +var ErrNotFound = errors.Errorf("not found") + +func GetSecret(ctx context.Context, c session.Caller, id string) ([]byte, error) { + client := NewSecretsClient(c.Conn()) + resp, err := client.GetSecret(ctx, &GetSecretRequest{ + ID: id, + }) + if err != nil { + if st, ok := status.FromError(errors.Cause(err)); ok && (st.Code() == codes.Unimplemented || st.Code() == codes.NotFound) { + return nil, errors.Wrapf(ErrNotFound, "secret %s not found", id) + } + return nil, errors.WithStack(err) + } + return resp.Data, nil +} diff --git a/vendor/github.com/moby/buildkit/session/secrets/secrets.pb.go b/vendor/github.com/moby/buildkit/session/secrets/secrets.pb.go new file mode 100644 index 00000000..eb49bebe --- /dev/null +++ b/vendor/github.com/moby/buildkit/session/secrets/secrets.pb.go @@ -0,0 +1,886 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: secrets.proto + +package secrets + +import ( + bytes "bytes" + context "context" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type GetSecretRequest struct { + ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` + Annotations map[string]string `protobuf:"bytes,2,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *GetSecretRequest) Reset() { *m = GetSecretRequest{} } +func (*GetSecretRequest) ProtoMessage() {} +func (*GetSecretRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_d4bc6c625e214507, []int{0} +} +func (m *GetSecretRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetSecretRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetSecretRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GetSecretRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetSecretRequest.Merge(m, src) +} +func (m *GetSecretRequest) XXX_Size() int { + return m.Size() +} +func (m *GetSecretRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetSecretRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetSecretRequest proto.InternalMessageInfo + +func (m *GetSecretRequest) GetID() string { + if m != nil { + return m.ID + } + return "" +} + +func (m *GetSecretRequest) GetAnnotations() map[string]string { + if m != nil { + return m.Annotations + } + return nil +} + +type GetSecretResponse struct { + Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` +} + +func (m *GetSecretResponse) Reset() { *m = GetSecretResponse{} } +func (*GetSecretResponse) ProtoMessage() {} +func (*GetSecretResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_d4bc6c625e214507, []int{1} +} +func (m *GetSecretResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetSecretResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetSecretResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GetSecretResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetSecretResponse.Merge(m, src) +} +func (m *GetSecretResponse) XXX_Size() int { + return m.Size() +} +func (m *GetSecretResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetSecretResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetSecretResponse proto.InternalMessageInfo + +func (m *GetSecretResponse) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func init() { + proto.RegisterType((*GetSecretRequest)(nil), "moby.buildkit.secrets.v1.GetSecretRequest") + proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.secrets.v1.GetSecretRequest.AnnotationsEntry") + proto.RegisterType((*GetSecretResponse)(nil), "moby.buildkit.secrets.v1.GetSecretResponse") +} + +func init() { proto.RegisterFile("secrets.proto", fileDescriptor_d4bc6c625e214507) } + +var fileDescriptor_d4bc6c625e214507 = []byte{ + // 288 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2d, 0x4e, 0x4d, 0x2e, + 0x4a, 0x2d, 0x29, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x92, 0xc8, 0xcd, 0x4f, 0xaa, 0xd4, + 0x4b, 0x2a, 0xcd, 0xcc, 0x49, 0xc9, 0xce, 0x2c, 0xd1, 0x83, 0x49, 0x96, 
0x19, 0x2a, 0x1d, 0x64, + 0xe4, 0x12, 0x70, 0x4f, 0x2d, 0x09, 0x06, 0x8b, 0x04, 0xa5, 0x16, 0x96, 0xa6, 0x16, 0x97, 0x08, + 0xf1, 0x71, 0x31, 0x79, 0xba, 0x48, 0x30, 0x2a, 0x30, 0x6a, 0x70, 0x06, 0x31, 0x79, 0xba, 0x08, + 0xc5, 0x72, 0x71, 0x27, 0xe6, 0xe5, 0xe5, 0x97, 0x24, 0x96, 0x64, 0xe6, 0xe7, 0x15, 0x4b, 0x30, + 0x29, 0x30, 0x6b, 0x70, 0x1b, 0x59, 0xeb, 0xe1, 0x32, 0x54, 0x0f, 0xdd, 0x40, 0x3d, 0x47, 0x84, + 0x6e, 0xd7, 0xbc, 0x92, 0xa2, 0xca, 0x20, 0x64, 0xf3, 0xa4, 0xec, 0xb8, 0x04, 0xd0, 0x15, 0x08, + 0x09, 0x70, 0x31, 0x67, 0xa7, 0x56, 0x42, 0xdd, 0x00, 0x62, 0x0a, 0x89, 0x70, 0xb1, 0x96, 0x25, + 0xe6, 0x94, 0xa6, 0x4a, 0x30, 0x81, 0xc5, 0x20, 0x1c, 0x2b, 0x26, 0x0b, 0x46, 0x25, 0x75, 0x2e, + 0x41, 0x24, 0x1b, 0x8b, 0x0b, 0xf2, 0xf3, 0x8a, 0x53, 0x85, 0x84, 0xb8, 0x58, 0x52, 0x12, 0x4b, + 0x12, 0xc1, 0x26, 0xf0, 0x04, 0x81, 0xd9, 0x46, 0xf9, 0x5c, 0xec, 0x10, 0x55, 0xc5, 0x42, 0x29, + 0x5c, 0x9c, 0x70, 0x3d, 0x42, 0x5a, 0xc4, 0x7b, 0x45, 0x4a, 0x9b, 0x28, 0xb5, 0x10, 0x47, 0x38, + 0xd9, 0x5e, 0x78, 0x28, 0xc7, 0x70, 0xe3, 0xa1, 0x1c, 0xc3, 0x87, 0x87, 0x72, 0x8c, 0x0d, 0x8f, + 0xe4, 0x18, 0x57, 0x3c, 0x92, 0x63, 0x3c, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, + 0x8f, 0xe4, 0x18, 0x5f, 0x3c, 0x92, 0x63, 0xf8, 0xf0, 0x48, 0x8e, 0x71, 0xc2, 0x63, 0x39, 0x86, + 0x0b, 0x8f, 0xe5, 0x18, 0x6e, 0x3c, 0x96, 0x63, 0x88, 0x62, 0x87, 0x9a, 0x99, 0xc4, 0x06, 0x8e, + 0x3d, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x2c, 0x38, 0xec, 0x1f, 0xce, 0x01, 0x00, 0x00, +} + +func (this *GetSecretRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*GetSecretRequest) + if !ok { + that2, ok := that.(GetSecretRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.ID != that1.ID { + return false + } + if len(this.Annotations) != len(that1.Annotations) { + return false + } + for i := range this.Annotations { + if this.Annotations[i] != that1.Annotations[i] { + return false + } + } + return true +} +func (this *GetSecretResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*GetSecretResponse) + if !ok { + that2, ok := that.(GetSecretResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !bytes.Equal(this.Data, that1.Data) { + return false + } + return true +} +func (this *GetSecretRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&secrets.GetSecretRequest{") + s = append(s, "ID: "+fmt.Sprintf("%#v", this.ID)+",\n") + keysForAnnotations := make([]string, 0, len(this.Annotations)) + for k, _ := range this.Annotations { + keysForAnnotations = append(keysForAnnotations, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + mapStringForAnnotations := "map[string]string{" + for _, k := range keysForAnnotations { + mapStringForAnnotations += fmt.Sprintf("%#v: %#v,", k, this.Annotations[k]) + } + mapStringForAnnotations += "}" + if this.Annotations != nil { + s = append(s, "Annotations: "+mapStringForAnnotations+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *GetSecretResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&secrets.GetSecretResponse{") + s = append(s, "Data: "+fmt.Sprintf("%#v", this.Data)+",\n") + s = 
append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringSecrets(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// SecretsClient is the client API for Secrets service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type SecretsClient interface { + GetSecret(ctx context.Context, in *GetSecretRequest, opts ...grpc.CallOption) (*GetSecretResponse, error) +} + +type secretsClient struct { + cc *grpc.ClientConn +} + +func NewSecretsClient(cc *grpc.ClientConn) SecretsClient { + return &secretsClient{cc} +} + +func (c *secretsClient) GetSecret(ctx context.Context, in *GetSecretRequest, opts ...grpc.CallOption) (*GetSecretResponse, error) { + out := new(GetSecretResponse) + err := c.cc.Invoke(ctx, "/moby.buildkit.secrets.v1.Secrets/GetSecret", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// SecretsServer is the server API for Secrets service. +type SecretsServer interface { + GetSecret(context.Context, *GetSecretRequest) (*GetSecretResponse, error) +} + +// UnimplementedSecretsServer can be embedded to have forward compatible implementations. +type UnimplementedSecretsServer struct { +} + +func (*UnimplementedSecretsServer) GetSecret(ctx context.Context, req *GetSecretRequest) (*GetSecretResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetSecret not implemented") +} + +func RegisterSecretsServer(s *grpc.Server, srv SecretsServer) { + s.RegisterService(&_Secrets_serviceDesc, srv) +} + +func _Secrets_GetSecret_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetSecretRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SecretsServer).GetSecret(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/moby.buildkit.secrets.v1.Secrets/GetSecret", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SecretsServer).GetSecret(ctx, req.(*GetSecretRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Secrets_serviceDesc = grpc.ServiceDesc{ + ServiceName: "moby.buildkit.secrets.v1.Secrets", + HandlerType: (*SecretsServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetSecret", + Handler: _Secrets_GetSecret_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "secrets.proto", +} + +func (m *GetSecretRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetSecretRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetSecretRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Annotations) > 0 { + for k := range 
m.Annotations { + v := m.Annotations[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintSecrets(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintSecrets(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintSecrets(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.ID) > 0 { + i -= len(m.ID) + copy(dAtA[i:], m.ID) + i = encodeVarintSecrets(dAtA, i, uint64(len(m.ID))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GetSecretResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetSecretResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetSecretResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Data) > 0 { + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintSecrets(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintSecrets(dAtA []byte, offset int, v uint64) int { + offset -= sovSecrets(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *GetSecretRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovSecrets(uint64(l)) + } + if len(m.Annotations) > 0 { + for k, v := range m.Annotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovSecrets(uint64(len(k))) + 1 + len(v) + sovSecrets(uint64(len(v))) + n += mapEntrySize + 1 + sovSecrets(uint64(mapEntrySize)) + } + } + return n +} + +func (m *GetSecretResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Data) + if l > 0 { + n += 1 + l + sovSecrets(uint64(l)) + } + return n +} + +func sovSecrets(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozSecrets(x uint64) (n int) { + return sovSecrets(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *GetSecretRequest) String() string { + if this == nil { + return "nil" + } + keysForAnnotations := make([]string, 0, len(this.Annotations)) + for k, _ := range this.Annotations { + keysForAnnotations = append(keysForAnnotations, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + mapStringForAnnotations := "map[string]string{" + for _, k := range keysForAnnotations { + mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k]) + } + mapStringForAnnotations += "}" + s := strings.Join([]string{`&GetSecretRequest{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `Annotations:` + mapStringForAnnotations + `,`, + `}`, + }, "") + return s +} +func (this *GetSecretResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetSecretResponse{`, + `Data:` + fmt.Sprintf("%v", this.Data) + `,`, + `}`, + }, "") + return s +} +func valueToStringSecrets(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *GetSecretRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowSecrets + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetSecretRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetSecretRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSecrets + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSecrets + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthSecrets + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSecrets + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSecrets + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthSecrets + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Annotations == nil { + m.Annotations = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSecrets + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSecrets + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthSecrets + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthSecrets + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSecrets + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthSecrets + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthSecrets + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + 
iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipSecrets(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSecrets + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Annotations[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSecrets(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSecrets + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthSecrets + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetSecretResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSecrets + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetSecretResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetSecretResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSecrets + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSecrets + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthSecrets + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
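+			// append can leave Data nil for a zero-length payload; the check
+			// below normalizes it so an empty field decodes as a non-nil slice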
+			if m.Data == nil {
+				m.Data = []byte{}
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipSecrets(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthSecrets
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthSecrets
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipSecrets(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	depth := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowSecrets
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowSecrets
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+		case 1:
+			iNdEx += 8
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowSecrets
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if length < 0 {
+				return 0, ErrInvalidLengthSecrets
+			}
+			iNdEx += length
+		case 3:
+			depth++
+		case 4:
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupSecrets
+			}
+			depth--
+		case 5:
+			iNdEx += 4
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthSecrets
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
+	}
+	return 0, io.ErrUnexpectedEOF
+}
+
+var (
+	ErrInvalidLengthSecrets        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowSecrets          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupSecrets = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/vendor/github.com/moby/buildkit/session/secrets/secrets.proto b/vendor/github.com/moby/buildkit/session/secrets/secrets.proto
new file mode 100644
index 00000000..17d86245
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/session/secrets/secrets.proto
@@ -0,0 +1,19 @@
+syntax = "proto3";
+
+package moby.buildkit.secrets.v1;
+
+option go_package = "secrets";
+
+service Secrets{
+	rpc GetSecret(GetSecretRequest) returns (GetSecretResponse);
+}
+
+
+message GetSecretRequest {
+	string ID = 1;
+	map<string, string> annotations = 2;
+}
+
+message GetSecretResponse {
+	bytes data = 1;
+}
diff --git a/vendor/github.com/moby/buildkit/session/session.go b/vendor/github.com/moby/buildkit/session/session.go
new file mode 100644
index 00000000..5d04738e
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/session/session.go
@@ -0,0 +1,143 @@
+package session
+
+import (
+	"context"
+	"net"
+	"strings"
+
+	"github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc"
+	"github.com/moby/buildkit/identity"
+	opentracing "github.com/opentracing/opentracing-go"
+	"github.com/pkg/errors"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/health"
+	"google.golang.org/grpc/health/grpc_health_v1"
+)
+
+const (
+	headerSessionID        = "X-Docker-Expose-Session-Uuid"
+	headerSessionName      = "X-Docker-Expose-Session-Name"
+	headerSessionSharedKey = "X-Docker-Expose-Session-Sharedkey"
+	headerSessionMethod    = "X-Docker-Expose-Session-Grpc-Method"
+)
+
+// Dialer returns a connection that can be used by the
session +type Dialer func(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error) + +// Attachable defines a feature that can be exposed on a session +type Attachable interface { + Register(*grpc.Server) +} + +// Session is a long running connection between client and a daemon +type Session struct { + id string + name string + sharedKey string + ctx context.Context + cancelCtx func() + done chan struct{} + grpcServer *grpc.Server + conn net.Conn +} + +// NewSession returns a new long running session +func NewSession(ctx context.Context, name, sharedKey string) (*Session, error) { + id := identity.NewID() + + serverOpts := []grpc.ServerOption{} + if span := opentracing.SpanFromContext(ctx); span != nil { + tracer := span.Tracer() + serverOpts = []grpc.ServerOption{ + grpc.StreamInterceptor(otgrpc.OpenTracingStreamServerInterceptor(span.Tracer(), traceFilter())), + grpc.UnaryInterceptor(otgrpc.OpenTracingServerInterceptor(tracer, traceFilter())), + } + } + + s := &Session{ + id: id, + name: name, + sharedKey: sharedKey, + grpcServer: grpc.NewServer(serverOpts...), + } + + grpc_health_v1.RegisterHealthServer(s.grpcServer, health.NewServer()) + + return s, nil +} + +// Allow enables a given service to be reachable through the grpc session +func (s *Session) Allow(a Attachable) { + a.Register(s.grpcServer) +} + +// ID returns unique identifier for the session +func (s *Session) ID() string { + return s.id +} + +// Run activates the session +func (s *Session) Run(ctx context.Context, dialer Dialer) error { + ctx, cancel := context.WithCancel(ctx) + s.cancelCtx = cancel + s.done = make(chan struct{}) + + defer cancel() + defer close(s.done) + + meta := make(map[string][]string) + meta[headerSessionID] = []string{s.id} + meta[headerSessionName] = []string{s.name} + meta[headerSessionSharedKey] = []string{s.sharedKey} + + for name, svc := range s.grpcServer.GetServiceInfo() { + for _, method := range svc.Methods { + meta[headerSessionMethod] = append(meta[headerSessionMethod], MethodURL(name, method.Name)) + } + } + conn, err := dialer(ctx, "h2c", meta) + if err != nil { + return errors.Wrap(err, "failed to dial gRPC") + } + s.conn = conn + serve(ctx, s.grpcServer, conn) + return nil +} + +// Close closes the session +func (s *Session) Close() error { + if s.cancelCtx != nil && s.done != nil { + if s.conn != nil { + s.conn.Close() + } + s.grpcServer.Stop() + <-s.done + } + return nil +} + +func (s *Session) context() context.Context { + return s.ctx +} + +func (s *Session) closed() bool { + select { + case <-s.context().Done(): + return true + default: + return false + } +} + +// MethodURL returns a gRPC method URL for service and method name +func MethodURL(s, m string) string { + return "/" + s + "/" + m +} + +func traceFilter() otgrpc.Option { + return otgrpc.IncludingSpans(func(parentSpanCtx opentracing.SpanContext, + method string, + req, resp interface{}) bool { + return !strings.HasSuffix(method, "Health/Check") + }) +} diff --git a/vendor/github.com/moby/buildkit/session/sshforward/copy.go b/vendor/github.com/moby/buildkit/session/sshforward/copy.go new file mode 100644 index 00000000..85366f19 --- /dev/null +++ b/vendor/github.com/moby/buildkit/session/sshforward/copy.go @@ -0,0 +1,65 @@ +package sshforward + +import ( + io "io" + + "github.com/pkg/errors" + context "golang.org/x/net/context" + "golang.org/x/sync/errgroup" + "google.golang.org/grpc" +) + +func Copy(ctx context.Context, conn io.ReadWriteCloser, stream grpc.Stream, closeStream func() error) error { + 
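+ // Copy shuttles data in both directions until either side terminates:
+ // one goroutine writes received BytesMessage frames into conn, a second
+ // reads conn in 32KiB chunks and sends them on the stream, and the
+ // errgroup context tears the pair down together on error or EOF.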
g, ctx := errgroup.WithContext(ctx) + + g.Go(func() (retErr error) { + p := &BytesMessage{} + for { + if err := stream.RecvMsg(p); err != nil { + conn.Close() + if err == io.EOF { + return nil + } + return errors.WithStack(err) + } + select { + case <-ctx.Done(): + conn.Close() + return ctx.Err() + default: + } + if _, err := conn.Write(p.Data); err != nil { + conn.Close() + return errors.WithStack(err) + } + p.Data = p.Data[:0] + } + }) + + g.Go(func() (retErr error) { + for { + buf := make([]byte, 32*1024) + n, err := conn.Read(buf) + switch { + case err == io.EOF: + if closeStream != nil { + closeStream() + } + return nil + case err != nil: + return errors.WithStack(err) + } + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + p := &BytesMessage{Data: buf[:n]} + if err := stream.SendMsg(p); err != nil { + return errors.WithStack(err) + } + } + }) + + return g.Wait() +} diff --git a/vendor/github.com/moby/buildkit/session/sshforward/generate.go b/vendor/github.com/moby/buildkit/session/sshforward/generate.go new file mode 100644 index 00000000..feecc774 --- /dev/null +++ b/vendor/github.com/moby/buildkit/session/sshforward/generate.go @@ -0,0 +1,3 @@ +package sshforward + +//go:generate protoc --gogoslick_out=plugins=grpc:. ssh.proto diff --git a/vendor/github.com/moby/buildkit/session/sshforward/ssh.go b/vendor/github.com/moby/buildkit/session/sshforward/ssh.go new file mode 100644 index 00000000..a7a4c2e2 --- /dev/null +++ b/vendor/github.com/moby/buildkit/session/sshforward/ssh.go @@ -0,0 +1,118 @@ +package sshforward + +import ( + "io/ioutil" + "net" + "os" + "path/filepath" + + "github.com/moby/buildkit/session" + "github.com/pkg/errors" + context "golang.org/x/net/context" + "golang.org/x/sync/errgroup" + "google.golang.org/grpc/metadata" +) + +// DefaultID is the default ssh ID +const DefaultID = "default" + +const KeySSHID = "buildkit.ssh.id" + +type server struct { + caller session.Caller +} + +func (s *server) run(ctx context.Context, l net.Listener, id string) error { + eg, ctx := errgroup.WithContext(ctx) + + eg.Go(func() error { + <-ctx.Done() + return ctx.Err() + }) + + eg.Go(func() error { + for { + conn, err := l.Accept() + if err != nil { + return err + } + + client := NewSSHClient(s.caller.Conn()) + + opts := make(map[string][]string) + opts[KeySSHID] = []string{id} + ctx = metadata.NewOutgoingContext(ctx, opts) + + stream, err := client.ForwardAgent(ctx) + if err != nil { + conn.Close() + return err + } + + go Copy(ctx, conn, stream, stream.CloseSend) + } + }) + + return eg.Wait() +} + +type SocketOpt struct { + ID string + UID int + GID int + Mode int +} + +func MountSSHSocket(ctx context.Context, c session.Caller, opt SocketOpt) (sockPath string, closer func() error, err error) { + dir, err := ioutil.TempDir("", ".buildkit-ssh-sock") + if err != nil { + return "", nil, errors.WithStack(err) + } + + defer func() { + if err != nil { + os.RemoveAll(dir) + } + }() + + if err := os.Chmod(dir, 0711); err != nil { + return "", nil, errors.WithStack(err) + } + + sockPath = filepath.Join(dir, "ssh_auth_sock") + + l, err := net.Listen("unix", sockPath) + if err != nil { + return "", nil, errors.WithStack(err) + } + + if err := os.Chown(sockPath, opt.UID, opt.GID); err != nil { + l.Close() + return "", nil, errors.WithStack(err) + } + if err := os.Chmod(sockPath, os.FileMode(opt.Mode)); err != nil { + l.Close() + return "", nil, errors.WithStack(err) + } + + s := &server{caller: c} + + id := opt.ID + if id == "" { + id = DefaultID + } + + go s.run(ctx, l, id) // 
erroring per connection allowed + + return sockPath, func() error { + err := l.Close() + os.RemoveAll(sockPath) + return errors.WithStack(err) + }, nil +} + +func CheckSSHID(ctx context.Context, c session.Caller, id string) error { + client := NewSSHClient(c.Conn()) + _, err := client.CheckAgent(ctx, &CheckAgentRequest{ID: id}) + return errors.WithStack(err) +} diff --git a/vendor/github.com/moby/buildkit/session/sshforward/ssh.pb.go b/vendor/github.com/moby/buildkit/session/sshforward/ssh.pb.go new file mode 100644 index 00000000..62adbe6d --- /dev/null +++ b/vendor/github.com/moby/buildkit/session/sshforward/ssh.pb.go @@ -0,0 +1,918 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: ssh.proto + +package sshforward + +import ( + bytes "bytes" + context "context" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// BytesMessage contains a chunk of byte data +type BytesMessage struct { + Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` +} + +func (m *BytesMessage) Reset() { *m = BytesMessage{} } +func (*BytesMessage) ProtoMessage() {} +func (*BytesMessage) Descriptor() ([]byte, []int) { + return fileDescriptor_ef0eae71e2e883eb, []int{0} +} +func (m *BytesMessage) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BytesMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BytesMessage.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BytesMessage) XXX_Merge(src proto.Message) { + xxx_messageInfo_BytesMessage.Merge(m, src) +} +func (m *BytesMessage) XXX_Size() int { + return m.Size() +} +func (m *BytesMessage) XXX_DiscardUnknown() { + xxx_messageInfo_BytesMessage.DiscardUnknown(m) +} + +var xxx_messageInfo_BytesMessage proto.InternalMessageInfo + +func (m *BytesMessage) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +type CheckAgentRequest struct { + ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` +} + +func (m *CheckAgentRequest) Reset() { *m = CheckAgentRequest{} } +func (*CheckAgentRequest) ProtoMessage() {} +func (*CheckAgentRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_ef0eae71e2e883eb, []int{1} +} +func (m *CheckAgentRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CheckAgentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CheckAgentRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CheckAgentRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CheckAgentRequest.Merge(m, src) +} +func (m 
*CheckAgentRequest) XXX_Size() int { + return m.Size() +} +func (m *CheckAgentRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CheckAgentRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CheckAgentRequest proto.InternalMessageInfo + +func (m *CheckAgentRequest) GetID() string { + if m != nil { + return m.ID + } + return "" +} + +type CheckAgentResponse struct { +} + +func (m *CheckAgentResponse) Reset() { *m = CheckAgentResponse{} } +func (*CheckAgentResponse) ProtoMessage() {} +func (*CheckAgentResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_ef0eae71e2e883eb, []int{2} +} +func (m *CheckAgentResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CheckAgentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CheckAgentResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CheckAgentResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_CheckAgentResponse.Merge(m, src) +} +func (m *CheckAgentResponse) XXX_Size() int { + return m.Size() +} +func (m *CheckAgentResponse) XXX_DiscardUnknown() { + xxx_messageInfo_CheckAgentResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_CheckAgentResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*BytesMessage)(nil), "moby.sshforward.v1.BytesMessage") + proto.RegisterType((*CheckAgentRequest)(nil), "moby.sshforward.v1.CheckAgentRequest") + proto.RegisterType((*CheckAgentResponse)(nil), "moby.sshforward.v1.CheckAgentResponse") +} + +func init() { proto.RegisterFile("ssh.proto", fileDescriptor_ef0eae71e2e883eb) } + +var fileDescriptor_ef0eae71e2e883eb = []byte{ + // 252 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2c, 0x2e, 0xce, 0xd0, + 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0xca, 0xcd, 0x4f, 0xaa, 0xd4, 0x2b, 0x2e, 0xce, 0x48, + 0xcb, 0x2f, 0x2a, 0x4f, 0x2c, 0x4a, 0xd1, 0x2b, 0x33, 0x54, 0x52, 0xe2, 0xe2, 0x71, 0xaa, 0x2c, + 0x49, 0x2d, 0xf6, 0x4d, 0x2d, 0x2e, 0x4e, 0x4c, 0x4f, 0x15, 0x12, 0xe2, 0x62, 0x49, 0x49, 0x2c, + 0x49, 0x94, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x09, 0x02, 0xb3, 0x95, 0x94, 0xb9, 0x04, 0x9d, 0x33, + 0x52, 0x93, 0xb3, 0x1d, 0xd3, 0x53, 0xf3, 0x4a, 0x82, 0x52, 0x0b, 0x4b, 0x53, 0x8b, 0x4b, 0x84, + 0xf8, 0xb8, 0x98, 0x3c, 0x5d, 0xc0, 0xca, 0x38, 0x83, 0x98, 0x3c, 0x5d, 0x94, 0x44, 0xb8, 0x84, + 0x90, 0x15, 0x15, 0x17, 0xe4, 0xe7, 0x15, 0xa7, 0x1a, 0xed, 0x62, 0xe4, 0x62, 0x0e, 0x0e, 0xf6, + 0x10, 0x8a, 0xe6, 0xe2, 0x42, 0xc8, 0x0a, 0xa9, 0xea, 0x61, 0xba, 0x44, 0x0f, 0xc3, 0x0a, 0x29, + 0x35, 0x42, 0xca, 0x20, 0x96, 0x08, 0x85, 0x71, 0xf1, 0xb8, 0x41, 0x14, 0x40, 0x8c, 0x57, 0xc0, + 0xa6, 0x0f, 0xd9, 0x97, 0x52, 0x04, 0x55, 0x68, 0x30, 0x1a, 0x30, 0x3a, 0x39, 0x5c, 0x78, 0x28, + 0xc7, 0x70, 0xe3, 0xa1, 0x1c, 0xc3, 0x87, 0x87, 0x72, 0x8c, 0x0d, 0x8f, 0xe4, 0x18, 0x57, 0x3c, + 0x92, 0x63, 0x3c, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, 0x18, 0x5f, + 0x3c, 0x92, 0x63, 0xf8, 0xf0, 0x48, 0x8e, 0x71, 0xc2, 0x63, 0x39, 0x86, 0x0b, 0x8f, 0xe5, 0x18, + 0x6e, 0x3c, 0x96, 0x63, 0x88, 0xe2, 0x42, 0x98, 0x9a, 0xc4, 0x06, 0x0e, 0x78, 0x63, 0x40, 0x00, + 0x00, 0x00, 0xff, 0xff, 0x6c, 0xe6, 0x6d, 0xb7, 0x85, 0x01, 0x00, 0x00, +} + +func (this *BytesMessage) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*BytesMessage) + if !ok { + that2, ok := 
that.(BytesMessage) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !bytes.Equal(this.Data, that1.Data) { + return false + } + return true +} +func (this *CheckAgentRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*CheckAgentRequest) + if !ok { + that2, ok := that.(CheckAgentRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.ID != that1.ID { + return false + } + return true +} +func (this *CheckAgentResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*CheckAgentResponse) + if !ok { + that2, ok := that.(CheckAgentResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + return true +} +func (this *BytesMessage) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&sshforward.BytesMessage{") + s = append(s, "Data: "+fmt.Sprintf("%#v", this.Data)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *CheckAgentRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&sshforward.CheckAgentRequest{") + s = append(s, "ID: "+fmt.Sprintf("%#v", this.ID)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *CheckAgentResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 4) + s = append(s, "&sshforward.CheckAgentResponse{") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringSsh(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// SSHClient is the client API for SSH service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type SSHClient interface { + CheckAgent(ctx context.Context, in *CheckAgentRequest, opts ...grpc.CallOption) (*CheckAgentResponse, error) + ForwardAgent(ctx context.Context, opts ...grpc.CallOption) (SSH_ForwardAgentClient, error) +} + +type sSHClient struct { + cc *grpc.ClientConn +} + +func NewSSHClient(cc *grpc.ClientConn) SSHClient { + return &sSHClient{cc} +} + +func (c *sSHClient) CheckAgent(ctx context.Context, in *CheckAgentRequest, opts ...grpc.CallOption) (*CheckAgentResponse, error) { + out := new(CheckAgentResponse) + err := c.cc.Invoke(ctx, "/moby.sshforward.v1.SSH/CheckAgent", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *sSHClient) ForwardAgent(ctx context.Context, opts ...grpc.CallOption) (SSH_ForwardAgentClient, error) { + stream, err := c.cc.NewStream(ctx, &_SSH_serviceDesc.Streams[0], "/moby.sshforward.v1.SSH/ForwardAgent", opts...) 
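+ // The returned stream handle is bidirectional: _SSH_serviceDesc declares
+ // ForwardAgent with both ServerStreams and ClientStreams set, so Send and
+ // Recv of BytesMessage frames may interleave for the connection lifetime.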
+ if err != nil { + return nil, err + } + x := &sSHForwardAgentClient{stream} + return x, nil +} + +type SSH_ForwardAgentClient interface { + Send(*BytesMessage) error + Recv() (*BytesMessage, error) + grpc.ClientStream +} + +type sSHForwardAgentClient struct { + grpc.ClientStream +} + +func (x *sSHForwardAgentClient) Send(m *BytesMessage) error { + return x.ClientStream.SendMsg(m) +} + +func (x *sSHForwardAgentClient) Recv() (*BytesMessage, error) { + m := new(BytesMessage) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// SSHServer is the server API for SSH service. +type SSHServer interface { + CheckAgent(context.Context, *CheckAgentRequest) (*CheckAgentResponse, error) + ForwardAgent(SSH_ForwardAgentServer) error +} + +// UnimplementedSSHServer can be embedded to have forward compatible implementations. +type UnimplementedSSHServer struct { +} + +func (*UnimplementedSSHServer) CheckAgent(ctx context.Context, req *CheckAgentRequest) (*CheckAgentResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CheckAgent not implemented") +} +func (*UnimplementedSSHServer) ForwardAgent(srv SSH_ForwardAgentServer) error { + return status.Errorf(codes.Unimplemented, "method ForwardAgent not implemented") +} + +func RegisterSSHServer(s *grpc.Server, srv SSHServer) { + s.RegisterService(&_SSH_serviceDesc, srv) +} + +func _SSH_CheckAgent_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CheckAgentRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SSHServer).CheckAgent(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/moby.sshforward.v1.SSH/CheckAgent", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SSHServer).CheckAgent(ctx, req.(*CheckAgentRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SSH_ForwardAgent_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(SSHServer).ForwardAgent(&sSHForwardAgentServer{stream}) +} + +type SSH_ForwardAgentServer interface { + Send(*BytesMessage) error + Recv() (*BytesMessage, error) + grpc.ServerStream +} + +type sSHForwardAgentServer struct { + grpc.ServerStream +} + +func (x *sSHForwardAgentServer) Send(m *BytesMessage) error { + return x.ServerStream.SendMsg(m) +} + +func (x *sSHForwardAgentServer) Recv() (*BytesMessage, error) { + m := new(BytesMessage) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _SSH_serviceDesc = grpc.ServiceDesc{ + ServiceName: "moby.sshforward.v1.SSH", + HandlerType: (*SSHServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CheckAgent", + Handler: _SSH_CheckAgent_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "ForwardAgent", + Handler: _SSH_ForwardAgent_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "ssh.proto", +} + +func (m *BytesMessage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BytesMessage) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BytesMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if 
len(m.Data) > 0 { + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintSsh(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CheckAgentRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CheckAgentRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CheckAgentRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ID) > 0 { + i -= len(m.ID) + copy(dAtA[i:], m.ID) + i = encodeVarintSsh(dAtA, i, uint64(len(m.ID))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CheckAgentResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CheckAgentResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CheckAgentResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func encodeVarintSsh(dAtA []byte, offset int, v uint64) int { + offset -= sovSsh(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *BytesMessage) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Data) + if l > 0 { + n += 1 + l + sovSsh(uint64(l)) + } + return n +} + +func (m *CheckAgentRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovSsh(uint64(l)) + } + return n +} + +func (m *CheckAgentResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func sovSsh(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozSsh(x uint64) (n int) { + return sovSsh(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *BytesMessage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BytesMessage{`, + `Data:` + fmt.Sprintf("%v", this.Data) + `,`, + `}`, + }, "") + return s +} +func (this *CheckAgentRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CheckAgentRequest{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `}`, + }, "") + return s +} +func (this *CheckAgentResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CheckAgentResponse{`, + `}`, + }, "") + return s +} +func valueToStringSsh(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *BytesMessage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSsh + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BytesMessage: wiretype end group for non-group") + } + if 
fieldNum <= 0 { + return fmt.Errorf("proto: BytesMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSsh + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSsh + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthSsh + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSsh(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSsh + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthSsh + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CheckAgentRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSsh + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CheckAgentRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CheckAgentRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSsh + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSsh + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthSsh + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSsh(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSsh + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthSsh + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CheckAgentResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSsh + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CheckAgentResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CheckAgentResponse: illegal tag %d (wire type %d)", 
fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipSsh(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSsh + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthSsh + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipSsh(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSsh + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSsh + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSsh + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthSsh + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupSsh + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthSsh + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthSsh = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowSsh = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupSsh = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/moby/buildkit/session/sshforward/ssh.proto b/vendor/github.com/moby/buildkit/session/sshforward/ssh.proto new file mode 100644 index 00000000..99f63436 --- /dev/null +++ b/vendor/github.com/moby/buildkit/session/sshforward/ssh.proto @@ -0,0 +1,22 @@ +syntax = "proto3"; + +package moby.sshforward.v1; + +option go_package = "sshforward"; + +service SSH { + rpc CheckAgent(CheckAgentRequest) returns (CheckAgentResponse); + rpc ForwardAgent(stream BytesMessage) returns (stream BytesMessage); +} + +// BytesMessage contains a chunk of byte data +message BytesMessage{ + bytes data = 1; +} + +message CheckAgentRequest { + string ID = 1; +} + +message CheckAgentResponse { +} \ No newline at end of file diff --git a/vendor/github.com/moby/buildkit/session/testutil/testutil.go b/vendor/github.com/moby/buildkit/session/testutil/testutil.go new file mode 100644 index 00000000..168656df --- /dev/null +++ b/vendor/github.com/moby/buildkit/session/testutil/testutil.go @@ -0,0 +1,70 @@ +package testutil + +import ( + "context" + "io" + "net" + "time" + + "github.com/sirupsen/logrus" +) + +// Handler is function called to handle incoming connection +type Handler func(ctx context.Context, conn net.Conn, meta map[string][]string) error + +// Dialer is a function for dialing an outgoing connection +type Dialer func(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error) + +// TestStream creates an in memory session dialer for a handler function +func TestStream(handler Handler) Dialer { + s1, s2 := sockPair() + 
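+ // Each dial serves s1 with the handler in a fresh goroutine and hands s2
+ // back to the caller, so both ends of a test session share an in-memory
+ // pipe pair and no real listener or network is involved.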
return func(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error) { + go func() { + err := handler(context.TODO(), s1, meta) + if err != nil { + logrus.Error(err) + } + s1.Close() + }() + return s2, nil + } +} + +func sockPair() (*sock, *sock) { + pr1, pw1 := io.Pipe() + pr2, pw2 := io.Pipe() + return &sock{pw1, pr2, pw1}, &sock{pw2, pr1, pw2} +} + +type sock struct { + io.Writer + io.Reader + io.Closer +} + +func (s *sock) LocalAddr() net.Addr { + return dummyAddr{} +} +func (s *sock) RemoteAddr() net.Addr { + return dummyAddr{} +} +func (s *sock) SetDeadline(t time.Time) error { + return nil +} +func (s *sock) SetReadDeadline(t time.Time) error { + return nil +} +func (s *sock) SetWriteDeadline(t time.Time) error { + return nil +} + +type dummyAddr struct { +} + +func (d dummyAddr) Network() string { + return "tcp" +} + +func (d dummyAddr) String() string { + return "localhost" +} diff --git a/vendor/github.com/moby/buildkit/session/upload/generate.go b/vendor/github.com/moby/buildkit/session/upload/generate.go new file mode 100644 index 00000000..c498a920 --- /dev/null +++ b/vendor/github.com/moby/buildkit/session/upload/generate.go @@ -0,0 +1,3 @@ +package upload + +//go:generate protoc --gogoslick_out=plugins=grpc:. upload.proto diff --git a/vendor/github.com/moby/buildkit/session/upload/upload.go b/vendor/github.com/moby/buildkit/session/upload/upload.go new file mode 100644 index 00000000..c739b92d --- /dev/null +++ b/vendor/github.com/moby/buildkit/session/upload/upload.go @@ -0,0 +1,56 @@ +package upload + +import ( + "context" + io "io" + "net/url" + + "github.com/moby/buildkit/session" + "github.com/pkg/errors" + "google.golang.org/grpc/metadata" +) + +const ( + keyPath = "urlpath" + keyHost = "urlhost" +) + +func New(ctx context.Context, c session.Caller, url *url.URL) (*Upload, error) { + opts := map[string][]string{ + keyPath: {url.Path}, + keyHost: {url.Host}, + } + + client := NewUploadClient(c.Conn()) + + ctx = metadata.NewOutgoingContext(ctx, opts) + + cc, err := client.Pull(ctx) + if err != nil { + return nil, errors.WithStack(err) + } + + return &Upload{cc: cc}, nil +} + +type Upload struct { + cc Upload_PullClient +} + +func (u *Upload) WriteTo(w io.Writer) (int, error) { + n := 0 + for { + var bm BytesMessage + if err := u.cc.RecvMsg(&bm); err != nil { + if err == io.EOF { + return n, nil + } + return n, errors.WithStack(err) + } + nn, err := w.Write(bm.Data) + n += nn + if err != nil { + return n, errors.WithStack(err) + } + } +} diff --git a/vendor/github.com/moby/buildkit/session/upload/upload.pb.go b/vendor/github.com/moby/buildkit/session/upload/upload.pb.go new file mode 100644 index 00000000..6d774493 --- /dev/null +++ b/vendor/github.com/moby/buildkit/session/upload/upload.pb.go @@ -0,0 +1,501 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: upload.proto + +package upload + +import ( + bytes "bytes" + context "context" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// BytesMessage contains a chunk of byte data +type BytesMessage struct { + Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` +} + +func (m *BytesMessage) Reset() { *m = BytesMessage{} } +func (*BytesMessage) ProtoMessage() {} +func (*BytesMessage) Descriptor() ([]byte, []int) { + return fileDescriptor_91b94b655bd2a7e5, []int{0} +} +func (m *BytesMessage) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BytesMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BytesMessage.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BytesMessage) XXX_Merge(src proto.Message) { + xxx_messageInfo_BytesMessage.Merge(m, src) +} +func (m *BytesMessage) XXX_Size() int { + return m.Size() +} +func (m *BytesMessage) XXX_DiscardUnknown() { + xxx_messageInfo_BytesMessage.DiscardUnknown(m) +} + +var xxx_messageInfo_BytesMessage proto.InternalMessageInfo + +func (m *BytesMessage) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func init() { + proto.RegisterType((*BytesMessage)(nil), "moby.upload.v1.BytesMessage") +} + +func init() { proto.RegisterFile("upload.proto", fileDescriptor_91b94b655bd2a7e5) } + +var fileDescriptor_91b94b655bd2a7e5 = []byte{ + // 179 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x2d, 0xc8, 0xc9, + 0x4f, 0x4c, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0xcb, 0xcd, 0x4f, 0xaa, 0xd4, 0x83, + 0x0a, 0x95, 0x19, 0x2a, 0x29, 0x71, 0xf1, 0x38, 0x55, 0x96, 0xa4, 0x16, 0xfb, 0xa6, 0x16, 0x17, + 0x27, 0xa6, 0xa7, 0x0a, 0x09, 0x71, 0xb1, 0xa4, 0x24, 0x96, 0x24, 0x4a, 0x30, 0x2a, 0x30, 0x6a, + 0xf0, 0x04, 0x81, 0xd9, 0x46, 0x01, 0x5c, 0x6c, 0xa1, 0x60, 0x0d, 0x42, 0x6e, 0x5c, 0x2c, 0x01, + 0xa5, 0x39, 0x39, 0x42, 0x32, 0x7a, 0xa8, 0xc6, 0xe8, 0x21, 0x9b, 0x21, 0x85, 0x57, 0x56, 0x83, + 0xd1, 0x80, 0xd1, 0xc9, 0xe6, 0xc2, 0x43, 0x39, 0x86, 0x1b, 0x0f, 0xe5, 0x18, 0x3e, 0x3c, 0x94, + 0x63, 0x6c, 0x78, 0x24, 0xc7, 0xb8, 0xe2, 0x91, 0x1c, 0xe3, 0x89, 0x47, 0x72, 0x8c, 0x17, 0x1e, + 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0xf8, 0xe2, 0x91, 0x1c, 0xc3, 0x87, 0x47, 0x72, 0x8c, 0x13, + 0x1e, 0xcb, 0x31, 0x5c, 0x78, 0x2c, 0xc7, 0x70, 0xe3, 0xb1, 0x1c, 0x43, 0x14, 0x1b, 0xc4, 0xc4, + 0x24, 0x36, 0xb0, 0x57, 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0x12, 0xf2, 0xfc, 0xb4, 0xda, + 0x00, 0x00, 0x00, +} + +func (this *BytesMessage) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*BytesMessage) + if !ok { + that2, ok := that.(BytesMessage) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !bytes.Equal(this.Data, that1.Data) { + return false + } + return true +} +func (this *BytesMessage) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&upload.BytesMessage{") + s = append(s, "Data: "+fmt.Sprintf("%#v", this.Data)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringUpload(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv 
:= reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// UploadClient is the client API for Upload service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type UploadClient interface { + Pull(ctx context.Context, opts ...grpc.CallOption) (Upload_PullClient, error) +} + +type uploadClient struct { + cc *grpc.ClientConn +} + +func NewUploadClient(cc *grpc.ClientConn) UploadClient { + return &uploadClient{cc} +} + +func (c *uploadClient) Pull(ctx context.Context, opts ...grpc.CallOption) (Upload_PullClient, error) { + stream, err := c.cc.NewStream(ctx, &_Upload_serviceDesc.Streams[0], "/moby.upload.v1.Upload/Pull", opts...) + if err != nil { + return nil, err + } + x := &uploadPullClient{stream} + return x, nil +} + +type Upload_PullClient interface { + Send(*BytesMessage) error + Recv() (*BytesMessage, error) + grpc.ClientStream +} + +type uploadPullClient struct { + grpc.ClientStream +} + +func (x *uploadPullClient) Send(m *BytesMessage) error { + return x.ClientStream.SendMsg(m) +} + +func (x *uploadPullClient) Recv() (*BytesMessage, error) { + m := new(BytesMessage) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// UploadServer is the server API for Upload service. +type UploadServer interface { + Pull(Upload_PullServer) error +} + +// UnimplementedUploadServer can be embedded to have forward compatible implementations. 
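+// Embedding it means a server built against an older service definition
+// still compiles and reports codes.Unimplemented at runtime instead.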
+type UnimplementedUploadServer struct { +} + +func (*UnimplementedUploadServer) Pull(srv Upload_PullServer) error { + return status.Errorf(codes.Unimplemented, "method Pull not implemented") +} + +func RegisterUploadServer(s *grpc.Server, srv UploadServer) { + s.RegisterService(&_Upload_serviceDesc, srv) +} + +func _Upload_Pull_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(UploadServer).Pull(&uploadPullServer{stream}) +} + +type Upload_PullServer interface { + Send(*BytesMessage) error + Recv() (*BytesMessage, error) + grpc.ServerStream +} + +type uploadPullServer struct { + grpc.ServerStream +} + +func (x *uploadPullServer) Send(m *BytesMessage) error { + return x.ServerStream.SendMsg(m) +} + +func (x *uploadPullServer) Recv() (*BytesMessage, error) { + m := new(BytesMessage) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _Upload_serviceDesc = grpc.ServiceDesc{ + ServiceName: "moby.upload.v1.Upload", + HandlerType: (*UploadServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "Pull", + Handler: _Upload_Pull_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "upload.proto", +} + +func (m *BytesMessage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BytesMessage) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BytesMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Data) > 0 { + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintUpload(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintUpload(dAtA []byte, offset int, v uint64) int { + offset -= sovUpload(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *BytesMessage) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Data) + if l > 0 { + n += 1 + l + sovUpload(uint64(l)) + } + return n +} + +func sovUpload(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozUpload(x uint64) (n int) { + return sovUpload(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *BytesMessage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BytesMessage{`, + `Data:` + fmt.Sprintf("%v", this.Data) + `,`, + `}`, + }, "") + return s +} +func valueToStringUpload(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *BytesMessage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowUpload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BytesMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BytesMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch 
fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowUpload + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthUpload + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthUpload + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipUpload(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthUpload + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthUpload + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipUpload(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowUpload + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowUpload + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowUpload + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthUpload + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupUpload + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthUpload + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthUpload = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowUpload = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupUpload = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/moby/buildkit/session/upload/upload.proto b/vendor/github.com/moby/buildkit/session/upload/upload.proto new file mode 100644 index 00000000..ce254ba9 --- /dev/null +++ b/vendor/github.com/moby/buildkit/session/upload/upload.proto @@ -0,0 +1,14 @@ +syntax = "proto3"; + +package moby.upload.v1; + +option go_package = "upload"; + +service Upload { + rpc Pull(stream BytesMessage) returns (stream BytesMessage); +} + +// BytesMessage contains a chunk of byte data +message BytesMessage{ + bytes data = 1; +} diff --git a/vendor/github.com/moby/buildkit/snapshot/containerd/content.go b/vendor/github.com/moby/buildkit/snapshot/containerd/content.go new file mode 100644 index 00000000..8e42d425 --- /dev/null +++ b/vendor/github.com/moby/buildkit/snapshot/containerd/content.go @@ -0,0 +1,82 @@ +package containerd + +import ( + "context" + + 
"github.com/containerd/containerd/content" + "github.com/containerd/containerd/namespaces" + "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +func NewContentStore(store content.Store, ns string) content.Store { + return &nsContent{ns, store} +} + +type nsContent struct { + ns string + content.Store +} + +func (c *nsContent) Info(ctx context.Context, dgst digest.Digest) (content.Info, error) { + ctx = namespaces.WithNamespace(ctx, c.ns) + return c.Store.Info(ctx, dgst) +} + +func (c *nsContent) Update(ctx context.Context, info content.Info, fieldpaths ...string) (content.Info, error) { + ctx = namespaces.WithNamespace(ctx, c.ns) + return c.Store.Update(ctx, info, fieldpaths...) +} + +func (c *nsContent) Walk(ctx context.Context, fn content.WalkFunc, filters ...string) error { + ctx = namespaces.WithNamespace(ctx, c.ns) + return c.Store.Walk(ctx, fn, filters...) +} + +func (c *nsContent) Delete(ctx context.Context, dgst digest.Digest) error { + return errors.Errorf("contentstore.Delete usage is forbidden") +} + +func (c *nsContent) Status(ctx context.Context, ref string) (content.Status, error) { + ctx = namespaces.WithNamespace(ctx, c.ns) + return c.Store.Status(ctx, ref) +} + +func (c *nsContent) ListStatuses(ctx context.Context, filters ...string) ([]content.Status, error) { + ctx = namespaces.WithNamespace(ctx, c.ns) + return c.Store.ListStatuses(ctx, filters...) +} + +func (c *nsContent) Abort(ctx context.Context, ref string) error { + ctx = namespaces.WithNamespace(ctx, c.ns) + return c.Store.Abort(ctx, ref) +} + +func (c *nsContent) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) { + ctx = namespaces.WithNamespace(ctx, c.ns) + return c.Store.ReaderAt(ctx, desc) +} + +func (c *nsContent) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) { + return c.writer(ctx, 3, opts...) +} + +func (c *nsContent) writer(ctx context.Context, retries int, opts ...content.WriterOpt) (content.Writer, error) { + ctx = namespaces.WithNamespace(ctx, c.ns) + w, err := c.Store.Writer(ctx, opts...) + if err != nil { + return nil, err + } + return &nsWriter{Writer: w, ns: c.ns}, nil +} + +type nsWriter struct { + content.Writer + ns string +} + +func (w *nsWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error { + ctx = namespaces.WithNamespace(ctx, w.ns) + return w.Writer.Commit(ctx, size, expected, opts...) 
+} diff --git a/vendor/github.com/moby/buildkit/snapshot/containerd/snapshotter.go b/vendor/github.com/moby/buildkit/snapshot/containerd/snapshotter.go new file mode 100644 index 00000000..93e8b395 --- /dev/null +++ b/vendor/github.com/moby/buildkit/snapshot/containerd/snapshotter.go @@ -0,0 +1,63 @@ +package containerd + +import ( + "context" + + "github.com/containerd/containerd/mount" + "github.com/containerd/containerd/namespaces" + "github.com/containerd/containerd/snapshots" + "github.com/docker/docker/pkg/idtools" + "github.com/moby/buildkit/snapshot" + "github.com/pkg/errors" +) + +func NewSnapshotter(name string, snapshotter snapshots.Snapshotter, ns string, idmap *idtools.IdentityMapping) snapshot.Snapshotter { + return snapshot.FromContainerdSnapshotter(name, &nsSnapshotter{ns, snapshotter}, idmap) +} + +func NSSnapshotter(ns string, snapshotter snapshots.Snapshotter) snapshots.Snapshotter { + return &nsSnapshotter{ns: ns, Snapshotter: snapshotter} +} + +type nsSnapshotter struct { + ns string + snapshots.Snapshotter +} + +func (s *nsSnapshotter) Stat(ctx context.Context, key string) (snapshots.Info, error) { + ctx = namespaces.WithNamespace(ctx, s.ns) + return s.Snapshotter.Stat(ctx, key) +} + +func (s *nsSnapshotter) Update(ctx context.Context, info snapshots.Info, fieldpaths ...string) (snapshots.Info, error) { + ctx = namespaces.WithNamespace(ctx, s.ns) + return s.Snapshotter.Update(ctx, info, fieldpaths...) +} + +func (s *nsSnapshotter) Usage(ctx context.Context, key string) (snapshots.Usage, error) { + ctx = namespaces.WithNamespace(ctx, s.ns) + return s.Snapshotter.Usage(ctx, key) +} +func (s *nsSnapshotter) Mounts(ctx context.Context, key string) ([]mount.Mount, error) { + ctx = namespaces.WithNamespace(ctx, s.ns) + return s.Snapshotter.Mounts(ctx, key) +} +func (s *nsSnapshotter) Prepare(ctx context.Context, key, parent string, opts ...snapshots.Opt) ([]mount.Mount, error) { + ctx = namespaces.WithNamespace(ctx, s.ns) + return s.Snapshotter.Prepare(ctx, key, parent, opts...) +} +func (s *nsSnapshotter) View(ctx context.Context, key, parent string, opts ...snapshots.Opt) ([]mount.Mount, error) { + ctx = namespaces.WithNamespace(ctx, s.ns) + return s.Snapshotter.View(ctx, key, parent, opts...) +} +func (s *nsSnapshotter) Commit(ctx context.Context, name, key string, opts ...snapshots.Opt) error { + ctx = namespaces.WithNamespace(ctx, s.ns) + return s.Snapshotter.Commit(ctx, name, key, opts...) +} +func (s *nsSnapshotter) Remove(ctx context.Context, key string) error { + return errors.Errorf("calling snapshotter.Remove is forbidden") +} +func (s *nsSnapshotter) Walk(ctx context.Context, fn snapshots.WalkFunc, filters ...string) error { + ctx = namespaces.WithNamespace(ctx, s.ns) + return s.Snapshotter.Walk(ctx, fn, filters...) 
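+ // Both nsContent and nsSnapshotter follow the same wrapping pattern:
+ // every delegated call stamps the configured containerd namespace onto
+ // ctx before forwarding, while Delete and Remove deliberately return
+ // errors so that removal cannot happen through these namespaced wrappers.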
+} diff --git a/vendor/github.com/moby/buildkit/snapshot/imagerefchecker/checker.go b/vendor/github.com/moby/buildkit/snapshot/imagerefchecker/checker.go new file mode 100644 index 00000000..a8dd746e --- /dev/null +++ b/vendor/github.com/moby/buildkit/snapshot/imagerefchecker/checker.go @@ -0,0 +1,135 @@ +package imagerefchecker + +import ( + "context" + "encoding/json" + "strings" + "sync" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/images" + "github.com/moby/buildkit/cache" + digest "github.com/opencontainers/go-digest" + specs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +const ( + emptyGZLayer = digest.Digest("sha256:4f4fb700ef54461cfa02571ae0db9a0dc1e0cdb5577484a6d75e68dc38e8acc1") +) + +type Opt struct { + ImageStore images.Store + ContentStore content.Store +} + +// New creates new image reference checker that can be used to see if a reference +// is being used by any of the images in the image store +func New(opt Opt) cache.ExternalRefCheckerFunc { + return func() (cache.ExternalRefChecker, error) { + return &Checker{opt: opt}, nil + } +} + +type Checker struct { + opt Opt + once sync.Once + images map[string]struct{} + cache map[string]bool +} + +func (c *Checker) Exists(key string, blobs []digest.Digest) bool { + if c.opt.ImageStore == nil { + return false + } + + c.once.Do(c.init) + + if b, ok := c.cache[key]; ok { + return b + } + + _, ok := c.images[layerKey(blobs)] + c.cache[key] = ok + return ok +} + +func (c *Checker) init() { + c.images = map[string]struct{}{} + c.cache = map[string]bool{} + + imgs, err := c.opt.ImageStore.List(context.TODO()) + if err != nil { + return + } + + var mu sync.Mutex + + for _, img := range imgs { + if err := images.Dispatch(context.TODO(), images.Handlers(layersHandler(c.opt.ContentStore, func(layers []specs.Descriptor) { + mu.Lock() + c.registerLayers(layers) + mu.Unlock() + })), nil, img.Target); err != nil { + return + } + } +} + +func (c *Checker) registerLayers(l []specs.Descriptor) { + if k := layerKey(toDigests(l)); k != "" { + c.images[k] = struct{}{} + } +} + +func toDigests(layers []specs.Descriptor) []digest.Digest { + digests := make([]digest.Digest, len(layers)) + for i, l := range layers { + digests[i] = l.Digest + } + return digests +} + +func layerKey(layers []digest.Digest) string { + b := &strings.Builder{} + for _, l := range layers { + if l != emptyGZLayer { + b.Write([]byte(l)) + } + } + return b.String() +} + +func layersHandler(provider content.Provider, f func([]specs.Descriptor)) images.HandlerFunc { + return func(ctx context.Context, desc specs.Descriptor) ([]specs.Descriptor, error) { + switch desc.MediaType { + case images.MediaTypeDockerSchema2Manifest, specs.MediaTypeImageManifest: + p, err := content.ReadBlob(ctx, provider, desc) + if err != nil { + return nil, nil + } + + var manifest specs.Manifest + if err := json.Unmarshal(p, &manifest); err != nil { + return nil, err + } + + f(manifest.Layers) + return nil, nil + case images.MediaTypeDockerSchema2ManifestList, specs.MediaTypeImageIndex: + p, err := content.ReadBlob(ctx, provider, desc) + if err != nil { + return nil, nil + } + + var index specs.Index + if err := json.Unmarshal(p, &index); err != nil { + return nil, err + } + + return index.Manifests, nil + default: + return nil, errors.Errorf("encountered unknown type %v", desc.MediaType) + } + } +} diff --git a/vendor/github.com/moby/buildkit/snapshot/localmounter.go b/vendor/github.com/moby/buildkit/snapshot/localmounter.go new 
file mode 100644 index 00000000..545c66c5 --- /dev/null +++ b/vendor/github.com/moby/buildkit/snapshot/localmounter.go @@ -0,0 +1,74 @@ +package snapshot + +import ( + "io/ioutil" + "os" + "sync" + + "github.com/containerd/containerd/mount" + "github.com/pkg/errors" +) + +type Mounter interface { + Mount() (string, error) + Unmount() error +} + +// LocalMounter is a helper for mounting mountfactory to temporary path. In +// addition it can mount binds without privileges +func LocalMounter(mountable Mountable) Mounter { + return &localMounter{mountable: mountable} +} + +// LocalMounterWithMounts is a helper for mounting to temporary path. In +// addition it can mount binds without privileges +func LocalMounterWithMounts(mounts []mount.Mount) Mounter { + return &localMounter{mounts: mounts} +} + +type localMounter struct { + mu sync.Mutex + mounts []mount.Mount + mountable Mountable + target string + release func() error +} + +func (lm *localMounter) Mount() (string, error) { + lm.mu.Lock() + defer lm.mu.Unlock() + + if lm.mounts == nil { + mounts, release, err := lm.mountable.Mount() + if err != nil { + return "", err + } + lm.mounts = mounts + lm.release = release + } + + if len(lm.mounts) == 1 && (lm.mounts[0].Type == "bind" || lm.mounts[0].Type == "rbind") { + ro := false + for _, opt := range lm.mounts[0].Options { + if opt == "ro" { + ro = true + break + } + } + if !ro { + return lm.mounts[0].Source, nil + } + } + + dir, err := ioutil.TempDir("", "buildkit-mount") + if err != nil { + return "", errors.Wrap(err, "failed to create temp dir") + } + + if err := mount.All(lm.mounts, dir); err != nil { + os.RemoveAll(dir) + return "", errors.Wrapf(err, "failed to mount %s: %+v", dir, lm.mounts) + } + lm.target = dir + return dir, nil +} diff --git a/vendor/github.com/moby/buildkit/snapshot/localmounter_unix.go b/vendor/github.com/moby/buildkit/snapshot/localmounter_unix.go new file mode 100644 index 00000000..4e285c6c --- /dev/null +++ b/vendor/github.com/moby/buildkit/snapshot/localmounter_unix.go @@ -0,0 +1,29 @@ +// +build !windows + +package snapshot + +import ( + "os" + "syscall" + + "github.com/containerd/containerd/mount" +) + +func (lm *localMounter) Unmount() error { + lm.mu.Lock() + defer lm.mu.Unlock() + + if lm.target != "" { + if err := mount.Unmount(lm.target, syscall.MNT_DETACH); err != nil { + return err + } + os.RemoveAll(lm.target) + lm.target = "" + } + + if lm.release != nil { + return lm.release() + } + + return nil +} diff --git a/vendor/github.com/moby/buildkit/snapshot/localmounter_windows.go b/vendor/github.com/moby/buildkit/snapshot/localmounter_windows.go new file mode 100644 index 00000000..03e8f735 --- /dev/null +++ b/vendor/github.com/moby/buildkit/snapshot/localmounter_windows.go @@ -0,0 +1,26 @@ +package snapshot + +import ( + "os" + + "github.com/containerd/containerd/mount" +) + +func (lm *localMounter) Unmount() error { + lm.mu.Lock() + defer lm.mu.Unlock() + + if lm.target != "" { + if err := mount.Unmount(lm.target, 0); err != nil { + return err + } + os.RemoveAll(lm.target) + lm.target = "" + } + + if lm.release != nil { + return lm.release() + } + + return nil +} diff --git a/vendor/github.com/moby/buildkit/snapshot/snapshotter.go b/vendor/github.com/moby/buildkit/snapshot/snapshotter.go new file mode 100644 index 00000000..a4bf1c4f --- /dev/null +++ b/vendor/github.com/moby/buildkit/snapshot/snapshotter.go @@ -0,0 +1,151 @@ +package snapshot + +import ( + "context" + "os" + "sync" + "sync/atomic" + + "github.com/containerd/containerd/mount" + 
"github.com/containerd/containerd/snapshots" + "github.com/docker/docker/pkg/idtools" +) + +type Mountable interface { + // ID() string + Mount() ([]mount.Mount, func() error, error) + IdentityMapping() *idtools.IdentityMapping +} + +// Snapshotter defines interface that any snapshot implementation should satisfy +type Snapshotter interface { + Name() string + Mounts(ctx context.Context, key string) (Mountable, error) + Prepare(ctx context.Context, key, parent string, opts ...snapshots.Opt) error + View(ctx context.Context, key, parent string, opts ...snapshots.Opt) (Mountable, error) + + Stat(ctx context.Context, key string) (snapshots.Info, error) + Update(ctx context.Context, info snapshots.Info, fieldpaths ...string) (snapshots.Info, error) + Usage(ctx context.Context, key string) (snapshots.Usage, error) + Commit(ctx context.Context, name, key string, opts ...snapshots.Opt) error + Remove(ctx context.Context, key string) error + Walk(ctx context.Context, fn snapshots.WalkFunc, filters ...string) error + Close() error + IdentityMapping() *idtools.IdentityMapping +} + +func FromContainerdSnapshotter(name string, s snapshots.Snapshotter, idmap *idtools.IdentityMapping) Snapshotter { + return &fromContainerd{name: name, Snapshotter: s, idmap: idmap} +} + +type fromContainerd struct { + name string + snapshots.Snapshotter + idmap *idtools.IdentityMapping +} + +func (s *fromContainerd) Name() string { + return s.name +} + +func (s *fromContainerd) Mounts(ctx context.Context, key string) (Mountable, error) { + mounts, err := s.Snapshotter.Mounts(ctx, key) + if err != nil { + return nil, err + } + return &staticMountable{mounts: mounts, idmap: s.idmap, id: key}, nil +} +func (s *fromContainerd) Prepare(ctx context.Context, key, parent string, opts ...snapshots.Opt) error { + _, err := s.Snapshotter.Prepare(ctx, key, parent, opts...) + return err +} +func (s *fromContainerd) View(ctx context.Context, key, parent string, opts ...snapshots.Opt) (Mountable, error) { + mounts, err := s.Snapshotter.View(ctx, key, parent, opts...) 
+ if err != nil { + return nil, err + } + return &staticMountable{mounts: mounts, idmap: s.idmap, id: key}, nil +} +func (s *fromContainerd) IdentityMapping() *idtools.IdentityMapping { + return s.idmap +} + +type staticMountable struct { + count int32 + id string + mounts []mount.Mount + idmap *idtools.IdentityMapping +} + +func (cm *staticMountable) Mount() ([]mount.Mount, func() error, error) { + atomic.AddInt32(&cm.count, 1) + return cm.mounts, func() error { + if atomic.AddInt32(&cm.count, -1) < 0 { + if v := os.Getenv("BUILDKIT_DEBUG_PANIC_ON_ERROR"); v == "1" { + panic("release of released mount " + cm.id) + } + } + return nil + }, nil +} + +func (cm *staticMountable) IdentityMapping() *idtools.IdentityMapping { + return cm.idmap +} + +// NewContainerdSnapshotter converts snapshotter to containerd snapshotter +func NewContainerdSnapshotter(s Snapshotter) (snapshots.Snapshotter, func() error) { + cs := &containerdSnapshotter{Snapshotter: s} + return cs, cs.release +} + +type containerdSnapshotter struct { + mu sync.Mutex + releasers []func() error + Snapshotter +} + +func (cs *containerdSnapshotter) release() error { + cs.mu.Lock() + defer cs.mu.Unlock() + var err error + for _, f := range cs.releasers { + if err1 := f(); err1 != nil && err == nil { + err = err1 + } + } + return err +} + +func (cs *containerdSnapshotter) returnMounts(mf Mountable) ([]mount.Mount, error) { + mounts, release, err := mf.Mount() + if err != nil { + return nil, err + } + cs.mu.Lock() + cs.releasers = append(cs.releasers, release) + cs.mu.Unlock() + return mounts, nil +} + +func (cs *containerdSnapshotter) Mounts(ctx context.Context, key string) ([]mount.Mount, error) { + mf, err := cs.Snapshotter.Mounts(ctx, key) + if err != nil { + return nil, err + } + return cs.returnMounts(mf) +} + +func (cs *containerdSnapshotter) Prepare(ctx context.Context, key, parent string, opts ...snapshots.Opt) ([]mount.Mount, error) { + if err := cs.Snapshotter.Prepare(ctx, key, parent, opts...); err != nil { + return nil, err + } + return cs.Mounts(ctx, key) +} +func (cs *containerdSnapshotter) View(ctx context.Context, key, parent string, opts ...snapshots.Opt) ([]mount.Mount, error) { + mf, err := cs.Snapshotter.View(ctx, key, parent, opts...) 
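+	// returnMounts records the Mountable's release func so that containerdSnapshotter.release can run it when the wrapper is released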
+ if err != nil { + return nil, err + } + return cs.returnMounts(mf) +} diff --git a/vendor/github.com/moby/buildkit/solver/bboltcachestorage/storage.go b/vendor/github.com/moby/buildkit/solver/bboltcachestorage/storage.go new file mode 100644 index 00000000..19755816 --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/bboltcachestorage/storage.go @@ -0,0 +1,459 @@ +package bboltcachestorage + +import ( + "bytes" + "encoding/json" + "fmt" + + "github.com/moby/buildkit/solver" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + bolt "go.etcd.io/bbolt" +) + +const ( + resultBucket = "_result" + linksBucket = "_links" + byResultBucket = "_byresult" + backlinksBucket = "_backlinks" +) + +type Store struct { + db *bolt.DB +} + +func NewStore(dbPath string) (*Store, error) { + db, err := bolt.Open(dbPath, 0600, nil) + if err != nil { + return nil, errors.Wrapf(err, "failed to open database file %s", dbPath) + } + if err := db.Update(func(tx *bolt.Tx) error { + for _, b := range []string{resultBucket, linksBucket, byResultBucket, backlinksBucket} { + if _, err := tx.CreateBucketIfNotExists([]byte(b)); err != nil { + return err + } + } + return nil + }); err != nil { + return nil, err + } + db.NoSync = true + return &Store{db: db}, nil +} + +func (s *Store) Exists(id string) bool { + exists := false + err := s.db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte(linksBucket)).Bucket([]byte(id)) + exists = b != nil + return nil + }) + if err != nil { + return false + } + return exists +} + +func (s *Store) Walk(fn func(id string) error) error { + ids := make([]string, 0) + if err := s.db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte(linksBucket)) + c := b.Cursor() + for k, v := c.First(); k != nil; k, v = c.Next() { + if v == nil { + ids = append(ids, string(k)) + } + } + return nil + }); err != nil { + return err + } + for _, id := range ids { + if err := fn(id); err != nil { + return err + } + } + return nil +} + +func (s *Store) WalkResults(id string, fn func(solver.CacheResult) error) error { + var list []solver.CacheResult + if err := s.db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte(resultBucket)) + if b == nil { + return nil + } + b = b.Bucket([]byte(id)) + if b == nil { + return nil + } + + return b.ForEach(func(k, v []byte) error { + var res solver.CacheResult + if err := json.Unmarshal(v, &res); err != nil { + return err + } + list = append(list, res) + return nil + }) + }); err != nil { + return err + } + for _, res := range list { + if err := fn(res); err != nil { + return err + } + } + return nil +} + +func (s *Store) Load(id string, resultID string) (solver.CacheResult, error) { + var res solver.CacheResult + if err := s.db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte(resultBucket)) + if b == nil { + return errors.WithStack(solver.ErrNotFound) + } + b = b.Bucket([]byte(id)) + if b == nil { + return errors.WithStack(solver.ErrNotFound) + } + + v := b.Get([]byte(resultID)) + if v == nil { + return errors.WithStack(solver.ErrNotFound) + } + + return json.Unmarshal(v, &res) + }); err != nil { + return solver.CacheResult{}, err + } + return res, nil +} + +func (s *Store) AddResult(id string, res solver.CacheResult) error { + return s.db.Update(func(tx *bolt.Tx) error { + _, err := tx.Bucket([]byte(linksBucket)).CreateBucketIfNotExists([]byte(id)) + if err != nil { + return err + } + + b, err := tx.Bucket([]byte(resultBucket)).CreateBucketIfNotExists([]byte(id)) + if err != nil { + return err + } + dt, err := json.Marshal(res) 
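+		// bucket layout: resultBucket/<id>/<res.ID> holds the JSON-encoded CacheResult, and byResultBucket/<res.ID>/<id> is the reverse index used by Release and WalkIDsByResult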
+ if err != nil { + return err + } + if err := b.Put([]byte(res.ID), dt); err != nil { + return err + } + b, err = tx.Bucket([]byte(byResultBucket)).CreateBucketIfNotExists([]byte(res.ID)) + if err != nil { + return err + } + if err := b.Put([]byte(id), []byte{}); err != nil { + return err + } + + return nil + }) +} + +func (s *Store) WalkIDsByResult(resultID string, fn func(string) error) error { + ids := map[string]struct{}{} + if err := s.db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte(byResultBucket)) + if b == nil { + return nil + } + b = b.Bucket([]byte(resultID)) + if b == nil { + return nil + } + return b.ForEach(func(k, v []byte) error { + ids[string(k)] = struct{}{} + return nil + }) + }); err != nil { + return err + } + for id := range ids { + if err := fn(id); err != nil { + return err + } + } + return nil +} + +func (s *Store) Release(resultID string) error { + return s.db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte(byResultBucket)) + if b == nil { + return errors.WithStack(solver.ErrNotFound) + } + b = b.Bucket([]byte(resultID)) + if b == nil { + return errors.WithStack(solver.ErrNotFound) + } + if err := b.ForEach(func(k, v []byte) error { + return s.releaseHelper(tx, string(k), resultID) + }); err != nil { + return err + } + return nil + }) +} + +func (s *Store) releaseHelper(tx *bolt.Tx, id, resultID string) error { + results := tx.Bucket([]byte(resultBucket)).Bucket([]byte(id)) + if results == nil { + return nil + } + + if err := results.Delete([]byte(resultID)); err != nil { + return err + } + + ids := tx.Bucket([]byte(byResultBucket)) + + ids = ids.Bucket([]byte(resultID)) + if ids == nil { + return nil + } + + if err := ids.Delete([]byte(id)); err != nil { + return err + } + + if isEmptyBucket(ids) { + if err := tx.Bucket([]byte(byResultBucket)).DeleteBucket([]byte(resultID)); err != nil { + return err + } + } + + links := tx.Bucket([]byte(resultBucket)) + if results == nil { + return nil + } + links = links.Bucket([]byte(id)) + + return s.emptyBranchWithParents(tx, []byte(id)) +} + +func (s *Store) emptyBranchWithParents(tx *bolt.Tx, id []byte) error { + results := tx.Bucket([]byte(resultBucket)).Bucket(id) + if results == nil { + return nil + } + + isEmptyLinks := true + links := tx.Bucket([]byte(linksBucket)).Bucket(id) + if links != nil { + isEmptyLinks = isEmptyBucket(links) + } + + if !isEmptyBucket(results) || !isEmptyLinks { + return nil + } + + if backlinks := tx.Bucket([]byte(backlinksBucket)).Bucket(id); backlinks != nil { + if err := backlinks.ForEach(func(k, v []byte) error { + if subLinks := tx.Bucket([]byte(linksBucket)).Bucket(k); subLinks != nil { + if err := subLinks.ForEach(func(k, v []byte) error { + parts := bytes.Split(k, []byte("@")) + if len(parts) != 2 { + return errors.Errorf("invalid key %s", k) + } + if bytes.Equal(id, parts[1]) { + return subLinks.Delete(k) + } + return nil + }); err != nil { + return err + } + + if isEmptyBucket(subLinks) { + if err := tx.Bucket([]byte(linksBucket)).DeleteBucket(k); err != nil { + return err + } + } + } + return s.emptyBranchWithParents(tx, k) + }); err != nil { + return err + } + if err := tx.Bucket([]byte(backlinksBucket)).DeleteBucket(id); err != nil { + return err + } + } + + // intentionally ignoring errors + tx.Bucket([]byte(linksBucket)).DeleteBucket([]byte(id)) + tx.Bucket([]byte(resultBucket)).DeleteBucket([]byte(id)) + + return nil +} + +func (s *Store) AddLink(id string, link solver.CacheInfoLink, target string) error { + return s.db.Update(func(tx *bolt.Tx) error { + b, 
err := tx.Bucket([]byte(linksBucket)).CreateBucketIfNotExists([]byte(id)) + if err != nil { + return err + } + + dt, err := json.Marshal(link) + if err != nil { + return err + } + + if err := b.Put(bytes.Join([][]byte{dt, []byte(target)}, []byte("@")), []byte{}); err != nil { + return err + } + + b, err = tx.Bucket([]byte(backlinksBucket)).CreateBucketIfNotExists([]byte(target)) + if err != nil { + return err + } + + if err := b.Put([]byte(id), []byte{}); err != nil { + return err + } + + return nil + }) +} + +func (s *Store) WalkLinks(id string, link solver.CacheInfoLink, fn func(id string) error) error { + var links []string + if err := s.db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte(linksBucket)) + if b == nil { + return nil + } + b = b.Bucket([]byte(id)) + if b == nil { + return nil + } + + dt, err := json.Marshal(link) + if err != nil { + return err + } + index := bytes.Join([][]byte{dt, {}}, []byte("@")) + c := b.Cursor() + k, _ := c.Seek([]byte(index)) + for { + if k != nil && bytes.HasPrefix(k, index) { + target := bytes.TrimPrefix(k, index) + links = append(links, string(target)) + k, _ = c.Next() + } else { + break + } + } + + return nil + }); err != nil { + return err + } + for _, l := range links { + if err := fn(l); err != nil { + return err + } + } + return nil +} + +func (s *Store) HasLink(id string, link solver.CacheInfoLink, target string) bool { + var v bool + if err := s.db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte(linksBucket)) + if b == nil { + return nil + } + b = b.Bucket([]byte(id)) + if b == nil { + return nil + } + + dt, err := json.Marshal(link) + if err != nil { + return err + } + v = b.Get(bytes.Join([][]byte{dt, []byte(target)}, []byte("@"))) != nil + return nil + }); err != nil { + return false + } + return v +} + +func (s *Store) WalkBacklinks(id string, fn func(id string, link solver.CacheInfoLink) error) error { + var outIDs []string + var outLinks []solver.CacheInfoLink + + if err := s.db.View(func(tx *bolt.Tx) error { + links := tx.Bucket([]byte(linksBucket)) + if links == nil { + return nil + } + backLinks := tx.Bucket([]byte(backlinksBucket)) + if backLinks == nil { + return nil + } + b := backLinks.Bucket([]byte(id)) + if b == nil { + return nil + } + + if err := b.ForEach(func(bid, v []byte) error { + b = links.Bucket(bid) + if b == nil { + return nil + } + if err := b.ForEach(func(k, v []byte) error { + parts := bytes.Split(k, []byte("@")) + if len(parts) == 2 { + if string(parts[1]) != id { + return nil + } + var l solver.CacheInfoLink + if err := json.Unmarshal(parts[0], &l); err != nil { + return err + } + l.Digest = digest.FromBytes([]byte(fmt.Sprintf("%s@%d", l.Digest, l.Output))) + l.Output = 0 + outIDs = append(outIDs, string(bid)) + outLinks = append(outLinks, l) + } + return nil + }); err != nil { + return err + } + return nil + }); err != nil { + return err + } + + return nil + }); err != nil { + return err + } + + for i := range outIDs { + if err := fn(outIDs[i], outLinks[i]); err != nil { + return err + } + } + return nil +} + +func isEmptyBucket(b *bolt.Bucket) bool { + if b == nil { + return true + } + k, _ := b.Cursor().First() + return k == nil +} diff --git a/vendor/github.com/moby/buildkit/solver/cachekey.go b/vendor/github.com/moby/buildkit/solver/cachekey.go new file mode 100644 index 00000000..3749af0a --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/cachekey.go @@ -0,0 +1,66 @@ +package solver + +import ( + "sync" + + digest "github.com/opencontainers/go-digest" +) + +// NewCacheKey creates a 
new cache key for a specific output index +func NewCacheKey(dgst digest.Digest, output Index) *CacheKey { + return &CacheKey{ + ID: rootKey(dgst, output).String(), + digest: dgst, + output: output, + ids: map[*cacheManager]string{}, + } +} + +// CacheKeyWithSelector combines a cache key with an optional selector digest. +// Used to limit the matches for dependency cache key. +type CacheKeyWithSelector struct { + Selector digest.Digest + CacheKey ExportableCacheKey +} + +type CacheKey struct { + mu sync.RWMutex + + ID string + deps [][]CacheKeyWithSelector // only [][]*inMemoryCacheKey + digest digest.Digest + output Index + ids map[*cacheManager]string + + indexIDs []string +} + +func (ck *CacheKey) Deps() [][]CacheKeyWithSelector { + ck.mu.RLock() + defer ck.mu.RUnlock() + deps := make([][]CacheKeyWithSelector, len(ck.deps)) + for i := range ck.deps { + deps[i] = append([]CacheKeyWithSelector(nil), ck.deps[i]...) + } + return deps +} + +func (ck *CacheKey) Digest() digest.Digest { + return ck.digest +} +func (ck *CacheKey) Output() Index { + return ck.output +} + +func (ck *CacheKey) clone() *CacheKey { + nk := &CacheKey{ + ID: ck.ID, + digest: ck.digest, + output: ck.output, + ids: map[*cacheManager]string{}, + } + for cm, id := range ck.ids { + nk.ids[cm] = id + } + return nk +} diff --git a/vendor/github.com/moby/buildkit/solver/cachemanager.go b/vendor/github.com/moby/buildkit/solver/cachemanager.go new file mode 100644 index 00000000..b628b55a --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/cachemanager.go @@ -0,0 +1,358 @@ +package solver + +import ( + "context" + "fmt" + "strings" + "sync" + "time" + + "github.com/moby/buildkit/identity" + digest "github.com/opencontainers/go-digest" + "github.com/sirupsen/logrus" +) + +// NewInMemoryCacheManager creates a new in-memory cache manager +func NewInMemoryCacheManager() CacheManager { + return NewCacheManager(identity.NewID(), NewInMemoryCacheStorage(), NewInMemoryResultStorage()) +} + +// NewCacheManager creates a new cache manager with specific storage backend +func NewCacheManager(id string, storage CacheKeyStorage, results CacheResultStorage) CacheManager { + cm := &cacheManager{ + id: id, + backend: storage, + results: results, + } + + if err := cm.ReleaseUnreferenced(); err != nil { + logrus.Errorf("failed to release unreferenced cache metadata: %+v", err) + } + + return cm +} + +type cacheManager struct { + mu sync.RWMutex + id string + + backend CacheKeyStorage + results CacheResultStorage +} + +func (c *cacheManager) ReleaseUnreferenced() error { + return c.backend.Walk(func(id string) error { + return c.backend.WalkResults(id, func(cr CacheResult) error { + if !c.results.Exists(cr.ID) { + c.backend.Release(cr.ID) + } + return nil + }) + }) +} + +func (c *cacheManager) ID() string { + return c.id +} + +func (c *cacheManager) Query(deps []CacheKeyWithSelector, input Index, dgst digest.Digest, output Index) ([]*CacheKey, error) { + c.mu.RLock() + defer c.mu.RUnlock() + + type dep struct { + results map[string]struct{} + key CacheKeyWithSelector + } + + allDeps := make([]dep, 0, len(deps)) + for _, k := range deps { + allDeps = append(allDeps, dep{key: k, results: map[string]struct{}{}}) + } + + allRes := map[string]*CacheKey{} + for _, d := range allDeps { + if err := c.backend.WalkLinks(c.getID(d.key.CacheKey.CacheKey), CacheInfoLink{input, output, dgst, d.key.Selector}, func(id string) error { + d.results[id] = struct{}{} + if _, ok := allRes[id]; !ok { + allRes[id] = c.newKeyWithID(id, dgst, output) + } + return nil 
+ }); err != nil { + return nil, err + } + } + + // link the results against the keys that didn't exist + for id, key := range allRes { + for _, d := range allDeps { + if _, ok := d.results[id]; !ok { + if err := c.backend.AddLink(c.getID(d.key.CacheKey.CacheKey), CacheInfoLink{ + Input: input, + Output: output, + Digest: dgst, + Selector: d.key.Selector, + }, c.getID(key)); err != nil { + return nil, err + } + } + } + } + + if len(deps) == 0 { + if !c.backend.Exists(rootKey(dgst, output).String()) { + return nil, nil + } + return []*CacheKey{c.newRootKey(dgst, output)}, nil + } + + keys := make([]*CacheKey, 0, len(deps)) + for _, k := range allRes { + keys = append(keys, k) + } + return keys, nil +} + +func (c *cacheManager) Records(ck *CacheKey) ([]*CacheRecord, error) { + outs := make([]*CacheRecord, 0) + if err := c.backend.WalkResults(c.getID(ck), func(r CacheResult) error { + if c.results.Exists(r.ID) { + outs = append(outs, &CacheRecord{ + ID: r.ID, + cacheManager: c, + key: ck, + CreatedAt: r.CreatedAt, + }) + } else { + c.backend.Release(r.ID) + } + return nil + }); err != nil { + return nil, err + } + return outs, nil +} + +func (c *cacheManager) Load(ctx context.Context, rec *CacheRecord) (Result, error) { + c.mu.RLock() + defer c.mu.RUnlock() + + res, err := c.backend.Load(c.getID(rec.key), rec.ID) + if err != nil { + return nil, err + } + + return c.results.Load(ctx, res) +} + +type LoadedResult struct { + Result Result + CacheResult CacheResult + CacheKey *CacheKey +} + +func (c *cacheManager) filterResults(m map[string]Result, ck *CacheKey, visited map[string]struct{}) (results []LoadedResult, err error) { + id := c.getID(ck) + if _, ok := visited[id]; ok { + return nil, nil + } + visited[id] = struct{}{} + if err := c.backend.WalkResults(id, func(cr CacheResult) error { + res, ok := m[id] + if ok { + results = append(results, LoadedResult{ + Result: res, + CacheKey: ck, + CacheResult: cr, + }) + delete(m, id) + } + return nil + }); err != nil { + for _, r := range results { + r.Result.Release(context.TODO()) + } + } + for _, keys := range ck.Deps() { + for _, key := range keys { + res, err := c.filterResults(m, key.CacheKey.CacheKey, visited) + if err != nil { + for _, r := range results { + r.Result.Release(context.TODO()) + } + return nil, err + } + results = append(results, res...) 
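+			// depth-first over the dependency keys: results for parent keys are appended after the current key's results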
+ } + } + return +} + +func (c *cacheManager) LoadWithParents(ctx context.Context, rec *CacheRecord) ([]LoadedResult, error) { + lwp, ok := c.results.(interface { + LoadWithParents(context.Context, CacheResult) (map[string]Result, error) + }) + if !ok { + res, err := c.Load(ctx, rec) + if err != nil { + return nil, err + } + return []LoadedResult{{Result: res, CacheKey: rec.key, CacheResult: CacheResult{ID: c.getID(rec.key), CreatedAt: rec.CreatedAt}}}, nil + } + c.mu.RLock() + defer c.mu.RUnlock() + + cr, err := c.backend.Load(c.getID(rec.key), rec.ID) + if err != nil { + return nil, err + } + + m, err := lwp.LoadWithParents(ctx, cr) + if err != nil { + return nil, err + } + + results, err := c.filterResults(m, rec.key, map[string]struct{}{}) + if err != nil { + for _, r := range m { + r.Release(context.TODO()) + } + } + + return results, nil +} + +func (c *cacheManager) Save(k *CacheKey, r Result, createdAt time.Time) (*ExportableCacheKey, error) { + c.mu.Lock() + defer c.mu.Unlock() + + res, err := c.results.Save(r, createdAt) + if err != nil { + return nil, err + } + if err := c.backend.AddResult(c.getID(k), res); err != nil { + return nil, err + } + + if err := c.ensurePersistentKey(k); err != nil { + return nil, err + } + + rec := &CacheRecord{ + ID: res.ID, + cacheManager: c, + key: k, + CreatedAt: res.CreatedAt, + } + + return &ExportableCacheKey{ + CacheKey: k, + Exporter: &exporter{k: k, record: rec}, + }, nil +} + +func newKey() *CacheKey { + return &CacheKey{ids: map[*cacheManager]string{}} +} + +func (c *cacheManager) newKeyWithID(id string, dgst digest.Digest, output Index) *CacheKey { + k := newKey() + k.digest = dgst + k.output = output + k.ID = id + k.ids[c] = id + return k +} + +func (c *cacheManager) newRootKey(dgst digest.Digest, output Index) *CacheKey { + return c.newKeyWithID(rootKey(dgst, output).String(), dgst, output) +} + +func (c *cacheManager) getID(k *CacheKey) string { + k.mu.Lock() + id, ok := k.ids[c] + if ok { + k.mu.Unlock() + return id + } + if len(k.deps) == 0 { + k.ids[c] = k.ID + k.mu.Unlock() + return k.ID + } + id = c.getIDFromDeps(k) + k.ids[c] = id + k.mu.Unlock() + return id +} + +func (c *cacheManager) ensurePersistentKey(k *CacheKey) error { + id := c.getID(k) + for i, deps := range k.Deps() { + for _, ck := range deps { + l := CacheInfoLink{ + Input: Index(i), + Output: Index(k.Output()), + Digest: k.Digest(), + Selector: ck.Selector, + } + ckID := c.getID(ck.CacheKey.CacheKey) + if !c.backend.HasLink(ckID, l, id) { + if err := c.ensurePersistentKey(ck.CacheKey.CacheKey); err != nil { + return err + } + if err := c.backend.AddLink(ckID, l, id); err != nil { + return err + } + } + } + } + return nil +} + +func (c *cacheManager) getIDFromDeps(k *CacheKey) string { + matches := map[string]struct{}{} + + for i, deps := range k.deps { + if i == 0 || len(matches) > 0 { + for _, ck := range deps { + m2 := make(map[string]struct{}) + if err := c.backend.WalkLinks(c.getID(ck.CacheKey.CacheKey), CacheInfoLink{ + Input: Index(i), + Output: Index(k.Output()), + Digest: k.Digest(), + Selector: ck.Selector, + }, func(id string) error { + if i == 0 { + matches[id] = struct{}{} + } else { + m2[id] = struct{}{} + } + return nil + }); err != nil { + matches = map[string]struct{}{} + break + } + if i != 0 { + for id := range matches { + if _, ok := m2[id]; !ok { + delete(matches, id) + } + } + } + } + } + } + + for k := range matches { + return k + } + + return identity.NewID() +} + +func rootKey(dgst digest.Digest, output Index) digest.Digest { + if 
strings.HasPrefix(dgst.String(), "random:") { + return digest.Digest("random:" + strings.TrimPrefix(digest.FromBytes([]byte(fmt.Sprintf("%s@%d", dgst, output))).String(), digest.Canonical.String()+":")) + } + return digest.FromBytes([]byte(fmt.Sprintf("%s@%d", dgst, output))) +} diff --git a/vendor/github.com/moby/buildkit/solver/cachestorage.go b/vendor/github.com/moby/buildkit/solver/cachestorage.go new file mode 100644 index 00000000..cc1e2113 --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/cachestorage.go @@ -0,0 +1,51 @@ +package solver + +import ( + "context" + "time" + + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +var ErrNotFound = errors.Errorf("not found") + +// CacheKeyStorage is interface for persisting cache metadata +type CacheKeyStorage interface { + Exists(id string) bool + Walk(fn func(id string) error) error + + WalkResults(id string, fn func(CacheResult) error) error + Load(id string, resultID string) (CacheResult, error) + AddResult(id string, res CacheResult) error + Release(resultID string) error + WalkIDsByResult(resultID string, fn func(string) error) error + + AddLink(id string, link CacheInfoLink, target string) error + WalkLinks(id string, link CacheInfoLink, fn func(id string) error) error + HasLink(id string, link CacheInfoLink, target string) bool + WalkBacklinks(id string, fn func(id string, link CacheInfoLink) error) error +} + +// CacheResult is a record for a single solve result +type CacheResult struct { + CreatedAt time.Time + ID string +} + +// CacheInfoLink is a link between two cache keys +type CacheInfoLink struct { + Input Index `json:"Input,omitempty"` + Output Index `json:"Output,omitempty"` + Digest digest.Digest `json:"Digest,omitempty"` + Selector digest.Digest `json:"Selector,omitempty"` +} + +// CacheResultStorage is interface for converting cache metadata result to +// actual solve result +type CacheResultStorage interface { + Save(Result, time.Time) (CacheResult, error) + Load(ctx context.Context, res CacheResult) (Result, error) + LoadRemote(ctx context.Context, res CacheResult) (*Remote, error) + Exists(id string) bool +} diff --git a/vendor/github.com/moby/buildkit/solver/combinedcache.go b/vendor/github.com/moby/buildkit/solver/combinedcache.go new file mode 100644 index 00000000..89361bcc --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/combinedcache.go @@ -0,0 +1,143 @@ +package solver + +import ( + "context" + "strings" + "sync" + "time" + + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "golang.org/x/sync/errgroup" +) + +func NewCombinedCacheManager(cms []CacheManager, main CacheManager) CacheManager { + return &combinedCacheManager{cms: cms, main: main} +} + +type combinedCacheManager struct { + cms []CacheManager + main CacheManager + id string + idOnce sync.Once +} + +func (cm *combinedCacheManager) ID() string { + cm.idOnce.Do(func() { + ids := make([]string, len(cm.cms)) + for i, c := range cm.cms { + ids[i] = c.ID() + } + cm.id = digest.FromBytes([]byte(strings.Join(ids, ","))).String() + }) + return cm.id +} + +func (cm *combinedCacheManager) Query(inp []CacheKeyWithSelector, inputIndex Index, dgst digest.Digest, outputIndex Index) ([]*CacheKey, error) { + eg, _ := errgroup.WithContext(context.TODO()) + keys := make(map[string]*CacheKey, len(cm.cms)) + var mu sync.Mutex + for _, c := range cm.cms { + func(c CacheManager) { + eg.Go(func() error { + recs, err := c.Query(inp, inputIndex, dgst, outputIndex) + if err != nil { + return err + } + 
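+				// merge under the shared lock; on duplicate IDs the key from the main cache manager takes precedence (checked below)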
mu.Lock() + for _, r := range recs { + if _, ok := keys[r.ID]; !ok || c == cm.main { + keys[r.ID] = r + } + } + mu.Unlock() + return nil + }) + }(c) + } + + if err := eg.Wait(); err != nil { + return nil, err + } + + out := make([]*CacheKey, 0, len(keys)) + for _, k := range keys { + out = append(out, k) + } + return out, nil +} + +func (cm *combinedCacheManager) Load(ctx context.Context, rec *CacheRecord) (res Result, err error) { + results, err := rec.cacheManager.LoadWithParents(ctx, rec) + if err != nil { + return nil, err + } + defer func() { + for i, res := range results { + if err == nil && i == 0 { + continue + } + res.Result.Release(context.TODO()) + } + }() + if rec.cacheManager != cm.main && cm.main != nil { + for _, res := range results { + if _, err := cm.main.Save(res.CacheKey, res.Result, res.CacheResult.CreatedAt); err != nil { + return nil, err + } + } + } + if len(results) == 0 { // TODO: handle gracefully + return nil, errors.Errorf("failed to load deleted cache") + } + return results[0].Result, nil +} + +func (cm *combinedCacheManager) Save(key *CacheKey, s Result, createdAt time.Time) (*ExportableCacheKey, error) { + if cm.main == nil { + return nil, nil + } + return cm.main.Save(key, s, createdAt) +} + +func (cm *combinedCacheManager) Records(ck *CacheKey) ([]*CacheRecord, error) { + if len(ck.ids) == 0 { + return nil, errors.Errorf("no results") + } + + records := map[string]*CacheRecord{} + var mu sync.Mutex + + eg, _ := errgroup.WithContext(context.TODO()) + for c := range ck.ids { + func(c *cacheManager) { + eg.Go(func() error { + recs, err := c.Records(ck) + if err != nil { + return err + } + mu.Lock() + for _, rec := range recs { + if _, ok := records[rec.ID]; !ok || c == cm.main { + if c == cm.main { + rec.Priority = 1 + } + records[rec.ID] = rec + } + } + mu.Unlock() + return nil + }) + }(c) + } + + if err := eg.Wait(); err != nil { + return nil, err + } + + out := make([]*CacheRecord, 0, len(records)) + for _, rec := range records { + out = append(out, rec) + } + return out, nil +} diff --git a/vendor/github.com/moby/buildkit/solver/edge.go b/vendor/github.com/moby/buildkit/solver/edge.go new file mode 100644 index 00000000..52613adb --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/edge.go @@ -0,0 +1,950 @@ +package solver + +import ( + "context" + "sync" + "time" + + "github.com/moby/buildkit/solver/internal/pipe" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +type edgeStatusType int + +const ( + edgeStatusInitial edgeStatusType = iota + edgeStatusCacheFast + edgeStatusCacheSlow + edgeStatusComplete +) + +func (t edgeStatusType) String() string { + return []string{"initial", "cache-fast", "cache-slow", "complete"}[t] +} + +func newEdge(ed Edge, op activeOp, index *edgeIndex) *edge { + e := &edge{ + edge: ed, + op: op, + depRequests: map[pipe.Receiver]*dep{}, + keyMap: map[string]struct{}{}, + cacheRecords: map[string]*CacheRecord{}, + cacheRecordsLoaded: map[string]struct{}{}, + index: index, + } + return e +} + +type edge struct { + edge Edge + op activeOp + + edgeState + depRequests map[pipe.Receiver]*dep + deps []*dep + + cacheMapReq pipe.Receiver + cacheMapDone bool + cacheMapIndex int + cacheMapDigests []digest.Digest + execReq pipe.Receiver + execCacheLoad bool + err error + cacheRecords map[string]*CacheRecord + cacheRecordsLoaded map[string]struct{} + keyMap map[string]struct{} + + noCacheMatchPossible bool + allDepsCompletedCacheFast bool + allDepsCompletedCacheSlow bool + 
allDepsStateCacheSlow bool + allDepsCompleted bool + hasActiveOutgoing bool + + releaserCount int + keysDidChange bool + index *edgeIndex + + secondaryExporters []expDep +} + +// dep holds state for a dependent edge +type dep struct { + req pipe.Receiver + edgeState + index Index + keyMap map[string]*CacheKey + desiredState edgeStatusType + e *edge + slowCacheReq pipe.Receiver + slowCacheComplete bool + slowCacheFoundKey bool + slowCacheKey *ExportableCacheKey + err error +} + +// expDep holds secondary exporter info for a dependency +type expDep struct { + index int + cacheKey CacheKeyWithSelector +} + +func newDep(i Index) *dep { + return &dep{index: i, keyMap: map[string]*CacheKey{}} +} + +// edgePipe is a pipe for requests between two edges +type edgePipe struct { + *pipe.Pipe + From, Target *edge + mu sync.Mutex +} + +// edgeState holds basic mutable state info for an edge +type edgeState struct { + state edgeStatusType + result *SharedCachedResult + cacheMap *CacheMap + keys []ExportableCacheKey +} + +type edgeRequest struct { + desiredState edgeStatusType + currentState edgeState + currentKeys int +} + +// incrementReferenceCount increases the number of times release needs to be +// called to release the edge. Called on merging edges. +func (e *edge) incrementReferenceCount() { + e.releaserCount += 1 +} + +// release releases the edge resources +func (e *edge) release() { + if e.releaserCount > 0 { + e.releaserCount-- + return + } + e.index.Release(e) + if e.result != nil { + go e.result.Release(context.TODO()) + } +} + +// commitOptions returns parameters for the op execution +func (e *edge) commitOptions() ([]*CacheKey, []CachedResult) { + k := NewCacheKey(e.cacheMap.Digest, e.edge.Index) + if len(e.deps) == 0 { + keys := make([]*CacheKey, 0, len(e.cacheMapDigests)) + for _, dgst := range e.cacheMapDigests { + keys = append(keys, NewCacheKey(dgst, e.edge.Index)) + } + return keys, nil + } + + inputs := make([][]CacheKeyWithSelector, len(e.deps)) + results := make([]CachedResult, len(e.deps)) + for i, dep := range e.deps { + for _, k := range dep.result.CacheKeys() { + inputs[i] = append(inputs[i], CacheKeyWithSelector{CacheKey: k, Selector: e.cacheMap.Deps[i].Selector}) + } + if dep.slowCacheKey != nil { + inputs[i] = append(inputs[i], CacheKeyWithSelector{CacheKey: *dep.slowCacheKey}) + } + results[i] = dep.result + } + + k.deps = inputs + return []*CacheKey{k}, results +} + +// isComplete returns true if edge state is final and will never change +func (e *edge) isComplete() bool { + return e.err != nil || e.result != nil +} + +// finishIncoming finalizes the incoming pipe request +func (e *edge) finishIncoming(req pipe.Sender) { + err := e.err + if req.Request().Canceled && err == nil { + err = context.Canceled + } + if debugScheduler { + logrus.Debugf("finishIncoming %s %v %#v desired=%s", e.edge.Vertex.Name(), err, e.edgeState, req.Request().Payload.(*edgeRequest).desiredState) + } + req.Finalize(&e.edgeState, err) +} + +// updateIncoming updates the current value of the incoming pipe request +func (e *edge) updateIncoming(req pipe.Sender) { + if debugScheduler { + logrus.Debugf("updateIncoming %s %#v desired=%s", e.edge.Vertex.Name(), e.edgeState, req.Request().Payload.(*edgeRequest).desiredState) + } + req.Update(&e.edgeState) +} + +// probeCache is called with unprocessed cache keys for a dependency. +// If a key could match the edge, the cacheRecords for the dependency are filled. +func (e *edge) probeCache(d *dep, depKeys []CacheKeyWithSelector) bool { + if len(depKeys) == 0 { +
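+		// nothing to probe yet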
return false + } + if e.op.IgnoreCache() { + return false + } + keys, err := e.op.Cache().Query(depKeys, d.index, e.cacheMap.Digest, e.edge.Index) + if err != nil { + e.err = errors.Wrap(err, "error on cache query") + } + found := false + for _, k := range keys { + if _, ok := d.keyMap[k.ID]; !ok { + d.keyMap[k.ID] = k + found = true + } + } + return found +} + +// checkDepMatchPossible checks if any cache matches are possible past this point +func (e *edge) checkDepMatchPossible(dep *dep) { + depHasSlowCache := e.cacheMap.Deps[dep.index].ComputeDigestFunc != nil + if !e.noCacheMatchPossible && (((!dep.slowCacheFoundKey && dep.slowCacheComplete && depHasSlowCache) || (!depHasSlowCache && dep.state >= edgeStatusCacheSlow)) && len(dep.keyMap) == 0) { + e.noCacheMatchPossible = true + } +} + +// slowCacheFunc returns the result-based cache func for a dependency if it exists +func (e *edge) slowCacheFunc(dep *dep) ResultBasedCacheFunc { + if e.cacheMap == nil { + return nil + } + return e.cacheMap.Deps[int(dep.index)].ComputeDigestFunc +} + +// allDepsHaveKeys checks if all dependencies have at least one key. Used for +// determining if there is enough data for combining a cache key for the edge +func (e *edge) allDepsHaveKeys(matching bool) bool { + if e.cacheMap == nil { + return false + } + for _, d := range e.deps { + cond := len(d.keys) == 0 + if matching { + cond = len(d.keyMap) == 0 + } + if cond && d.slowCacheKey == nil && d.result == nil { + return false + } + } + return true +} + +// currentIndexKey returns a cache key assembled from all current dependency cache keys +func (e *edge) currentIndexKey() *CacheKey { + if e.cacheMap == nil { + return nil + } + + keys := make([][]CacheKeyWithSelector, len(e.deps)) + for i, d := range e.deps { + if len(d.keys) == 0 && d.result == nil { + return nil + } + for _, k := range d.keys { + keys[i] = append(keys[i], CacheKeyWithSelector{Selector: e.cacheMap.Deps[i].Selector, CacheKey: k}) + } + if d.result != nil { + for _, rk := range d.result.CacheKeys() { + keys[i] = append(keys[i], CacheKeyWithSelector{Selector: e.cacheMap.Deps[i].Selector, CacheKey: rk}) + } + if d.slowCacheKey != nil { + keys[i] = append(keys[i], CacheKeyWithSelector{CacheKey: ExportableCacheKey{CacheKey: d.slowCacheKey.CacheKey, Exporter: &exporter{k: d.slowCacheKey.CacheKey}}}) + } + } + } + + k := NewCacheKey(e.cacheMap.Digest, e.edge.Index) + k.deps = keys + + return k +} + +// slow cache keys can be computed in 2 phases if there are multiple deps. +// First evaluate the ones that didn't match any definition-based keys +func (e *edge) skipPhase2SlowCache(dep *dep) bool { + isPhase1 := false + for _, dep := range e.deps { + if (!dep.slowCacheComplete && e.slowCacheFunc(dep) != nil || dep.state < edgeStatusCacheSlow) && len(dep.keyMap) == 0 { + isPhase1 = true + break + } + } + + if isPhase1 && !dep.slowCacheComplete && e.slowCacheFunc(dep) != nil && len(dep.keyMap) > 0 { + return true + } + return false +} + +func (e *edge) skipPhase2FastCache(dep *dep) bool { + isPhase1 := false + for _, dep := range e.deps { + if e.cacheMap == nil || len(dep.keyMap) == 0 && ((!dep.slowCacheComplete && e.slowCacheFunc(dep) != nil) || (dep.state < edgeStatusComplete && e.slowCacheFunc(dep) == nil)) { + isPhase1 = true + break + } + } + + if isPhase1 && len(dep.keyMap) > 0 { + return true + } + return false +} + +// unpark is called by the scheduler with incoming requests and updates for +// previous calls.
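+// All edge state transitions happen from inside this call: processUpdate, +// recalcCurrentState, respondToIncoming and createInputRequests below are +// only ever invoked from here.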
+// To avoid deadlocks and resource leaks this function needs to follow the +// following rules: +// 1) this function needs to return unclosed outgoing requests if some incoming +// requests were not completed +// 2) this function may not return outgoing requests if it has completed all +// incoming requests +func (e *edge) unpark(incoming []pipe.Sender, updates, allPipes []pipe.Receiver, f *pipeFactory) { + // process all incoming changes + depChanged := false + for _, upt := range updates { + if changed := e.processUpdate(upt); changed { + depChanged = true + } + } + + if depChanged { + // the dep responses had changes. need to reevaluate edge state + e.recalcCurrentState() + } + + desiredState, done := e.respondToIncoming(incoming, allPipes) + if done { + return + } + + cacheMapReq := false + // set up new outgoing requests if needed + if e.cacheMapReq == nil && (e.cacheMap == nil || len(e.cacheRecords) == 0) { + index := e.cacheMapIndex + e.cacheMapReq = f.NewFuncRequest(func(ctx context.Context) (interface{}, error) { + cm, err := e.op.CacheMap(ctx, index) + return cm, errors.Wrap(err, "failed to load cache key") + }) + cacheMapReq = true + } + + // execute op + if e.execReq == nil && desiredState == edgeStatusComplete { + if ok := e.execIfPossible(f); ok { + return + } + } + + if e.execReq == nil { + if added := e.createInputRequests(desiredState, f, false); !added && !e.hasActiveOutgoing && !cacheMapReq { + logrus.Errorf("buildkit scheduling error: leaving incoming open. forcing solve. Please report this with BUILDKIT_SCHEDULER_DEBUG=1") + e.createInputRequests(desiredState, f, true) + } + } + +} + +func (e *edge) makeExportable(k *CacheKey, records []*CacheRecord) ExportableCacheKey { + return ExportableCacheKey{ + CacheKey: k, + Exporter: &exporter{k: k, records: records, override: e.edge.Vertex.Options().ExportCache}, + } +} + +func (e *edge) markFailed(f *pipeFactory, err error) { + e.err = err + e.postpone(f) +} + +// processUpdate is called by unpark for every updated pipe request +func (e *edge) processUpdate(upt pipe.Receiver) (depChanged bool) { + // response for cachemap request + if upt == e.cacheMapReq && upt.Status().Completed { + if err := upt.Status().Err; err != nil { + e.cacheMapReq = nil + if !upt.Status().Canceled && e.err == nil { + e.err = err + } + } else { + resp := upt.Status().Value.(*cacheMapResp) + e.cacheMap = resp.CacheMap + e.cacheMapDone = resp.complete + e.cacheMapIndex++ + if len(e.deps) == 0 { + e.cacheMapDigests = append(e.cacheMapDigests, e.cacheMap.Digest) + if !e.op.IgnoreCache() { + keys, err := e.op.Cache().Query(nil, 0, e.cacheMap.Digest, e.edge.Index) + if err != nil { + logrus.Error(errors.Wrap(err, "invalid query response")) // make the build fail for this error + } else { + for _, k := range keys { + records, err := e.op.Cache().Records(k) + if err != nil { + logrus.Errorf("error receiving cache records: %v", err) + continue + } + + for _, r := range records { + e.cacheRecords[r.ID] = r + } + + e.keys = append(e.keys, e.makeExportable(k, records)) + } + } + } + e.state = edgeStatusCacheSlow + } + if e.allDepsHaveKeys(false) { + e.keysDidChange = true + } + // probe keys that were loaded before cache map + for i, dep := range e.deps { + e.probeCache(dep, withSelector(dep.keys, e.cacheMap.Deps[i].Selector)) + e.checkDepMatchPossible(dep) + } + if !e.cacheMapDone { + e.cacheMapReq = nil + } + } + return true + } + + // response for exec request + if upt == e.execReq && upt.Status().Completed { + if err := upt.Status().Err; err != nil { +
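+			// the exec (or cache-load) request failed: clear it so unpark can retry; for a failed cache load the attempted records are dropped below so the next attempt can fall back to executing the op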
e.execReq = nil + if e.execCacheLoad { + for k := range e.cacheRecordsLoaded { + delete(e.cacheRecords, k) + } + } else if !upt.Status().Canceled && e.err == nil { + e.err = err + } + } else { + e.result = NewSharedCachedResult(upt.Status().Value.(CachedResult)) + e.state = edgeStatusComplete + } + return true + } + + // response for requests to dependencies + if dep, ok := e.depRequests[upt]; ok { + if err := upt.Status().Err; !upt.Status().Canceled && upt.Status().Completed && err != nil { + if e.err == nil { + e.err = err + } + dep.err = err + } + + state := upt.Status().Value.(*edgeState) + + if len(dep.keys) < len(state.keys) { + newKeys := state.keys[len(dep.keys):] + if e.cacheMap != nil { + e.probeCache(dep, withSelector(newKeys, e.cacheMap.Deps[dep.index].Selector)) + dep.edgeState.keys = state.keys + if e.allDepsHaveKeys(false) { + e.keysDidChange = true + } + } + depChanged = true + } + if dep.state != edgeStatusComplete && state.state == edgeStatusComplete { + e.keysDidChange = true + } + + recheck := state.state != dep.state + + dep.edgeState = *state + + if recheck && e.cacheMap != nil { + e.checkDepMatchPossible(dep) + depChanged = true + } + + return + } + + // response for result based cache function + for i, dep := range e.deps { + if upt == dep.slowCacheReq && upt.Status().Completed { + if err := upt.Status().Err; err != nil { + dep.slowCacheReq = nil + if !upt.Status().Canceled && e.err == nil { + e.err = upt.Status().Err + } + } else if !dep.slowCacheComplete { + k := NewCacheKey(upt.Status().Value.(digest.Digest), -1) + dep.slowCacheKey = &ExportableCacheKey{CacheKey: k, Exporter: &exporter{k: k}} + slowKeyExp := CacheKeyWithSelector{CacheKey: *dep.slowCacheKey} + defKeys := make([]CacheKeyWithSelector, 0, len(dep.result.CacheKeys())) + for _, dk := range dep.result.CacheKeys() { + defKeys = append(defKeys, CacheKeyWithSelector{CacheKey: dk, Selector: e.cacheMap.Deps[i].Selector}) + } + dep.slowCacheFoundKey = e.probeCache(dep, []CacheKeyWithSelector{slowKeyExp}) + + // connect def key to slow key + e.op.Cache().Query(append(defKeys, slowKeyExp), dep.index, e.cacheMap.Digest, e.edge.Index) + + dep.slowCacheComplete = true + e.keysDidChange = true + e.checkDepMatchPossible(dep) // not matching key here doesn't set nocachematch possible to true + } + return true + } + } + + return +} + +// recalcCurrentState is called by unpark to recompute internal state after +// the state of dependencies has changed +func (e *edge) recalcCurrentState() { + // TODO: fast pass to detect incomplete results + newKeys := map[string]*CacheKey{} + + for i, dep := range e.deps { + if i == 0 { + for id, k := range dep.keyMap { + if _, ok := e.keyMap[id]; ok { + continue + } + newKeys[id] = k + } + } else { + for id := range newKeys { + if _, ok := dep.keyMap[id]; !ok { + delete(newKeys, id) + } + } + } + if len(newKeys) == 0 { + break + } + } + + for key := range newKeys { + e.keyMap[key] = struct{}{} + } + + for _, r := range newKeys { + // TODO: add all deps automatically + mergedKey := r.clone() + mergedKey.deps = make([][]CacheKeyWithSelector, len(e.deps)) + for i, dep := range e.deps { + if dep.result != nil { + for _, dk := range dep.result.CacheKeys() { + mergedKey.deps[i] = append(mergedKey.deps[i], CacheKeyWithSelector{Selector: e.cacheMap.Deps[i].Selector, CacheKey: dk}) + } + if dep.slowCacheKey != nil { + mergedKey.deps[i] = append(mergedKey.deps[i], CacheKeyWithSelector{CacheKey: *dep.slowCacheKey}) + } + } else { + for _, k := range dep.keys { + mergedKey.deps[i] = 
append(mergedKey.deps[i], CacheKeyWithSelector{Selector: e.cacheMap.Deps[i].Selector, CacheKey: k}) + } + } + } + + records, err := e.op.Cache().Records(mergedKey) + if err != nil { + logrus.Errorf("error receiving cache records: %v", err) + continue + } + + for _, r := range records { + if _, ok := e.cacheRecordsLoaded[r.ID]; !ok { + e.cacheRecords[r.ID] = r + } + } + + e.keys = append(e.keys, e.makeExportable(mergedKey, records)) + } + + // detect lower/upper bound for current state + allDepsCompletedCacheFast := e.cacheMap != nil + allDepsCompletedCacheSlow := e.cacheMap != nil + allDepsStateCacheSlow := true + allDepsCompleted := true + stLow := edgeStatusInitial // minimal possible state + stHigh := edgeStatusCacheSlow // maximum possible state + if e.cacheMap != nil { + for _, dep := range e.deps { + isSlowIncomplete := e.slowCacheFunc(dep) != nil && (dep.state == edgeStatusCacheSlow || (dep.state == edgeStatusComplete && !dep.slowCacheComplete)) + + if dep.state > stLow && len(dep.keyMap) == 0 && !isSlowIncomplete { + stLow = dep.state + if stLow > edgeStatusCacheSlow { + stLow = edgeStatusCacheSlow + } + } + effectiveState := dep.state + if dep.state == edgeStatusCacheSlow && isSlowIncomplete { + effectiveState = edgeStatusCacheFast + } + if dep.state == edgeStatusComplete && isSlowIncomplete { + effectiveState = edgeStatusCacheFast + } + if effectiveState < stHigh { + stHigh = effectiveState + } + if isSlowIncomplete || dep.state < edgeStatusComplete { + allDepsCompleted = false + } + if dep.state < edgeStatusCacheFast { + allDepsCompletedCacheFast = false + } + if isSlowIncomplete || dep.state < edgeStatusCacheSlow { + allDepsCompletedCacheSlow = false + } + if dep.state < edgeStatusCacheSlow && len(dep.keyMap) == 0 { + allDepsStateCacheSlow = false + } + } + if stLow > e.state { + e.state = stLow + } + if stHigh > e.state { + e.state = stHigh + } + if !e.cacheMapDone && len(e.keys) == 0 { + e.state = edgeStatusInitial + } + + e.allDepsCompletedCacheFast = e.cacheMapDone && allDepsCompletedCacheFast + e.allDepsCompletedCacheSlow = e.cacheMapDone && allDepsCompletedCacheSlow + e.allDepsStateCacheSlow = e.cacheMapDone && allDepsStateCacheSlow + e.allDepsCompleted = e.cacheMapDone && allDepsCompleted + + if e.allDepsStateCacheSlow && len(e.cacheRecords) > 0 && e.state == edgeStatusCacheFast { + openKeys := map[string]struct{}{} + for _, dep := range e.deps { + isSlowIncomplete := e.slowCacheFunc(dep) != nil && (dep.state == edgeStatusCacheSlow || (dep.state == edgeStatusComplete && !dep.slowCacheComplete)) + if !isSlowIncomplete { + openDepKeys := map[string]struct{}{} + for key := range dep.keyMap { + if _, ok := e.keyMap[key]; !ok { + openDepKeys[key] = struct{}{} + } + } + if len(openKeys) != 0 { + for k := range openKeys { + if _, ok := openDepKeys[k]; !ok { + delete(openKeys, k) + } + } + } else { + openKeys = openDepKeys + } + if len(openKeys) == 0 { + e.state = edgeStatusCacheSlow + if debugScheduler { + logrus.Debugf("upgrade to cache-slow because no open keys") + } + } + } + } + } + } +} + +// respondToIncoming responds to all incoming requests. 
completing or +// updating them when possible +func (e *edge) respondToIncoming(incoming []pipe.Sender, allPipes []pipe.Receiver) (edgeStatusType, bool) { + // detect the result state for the requests + allIncomingCanComplete := true + desiredState := e.state + allCanceled := true + + // check incoming requests + // check if all requests can be either answered or canceled + if !e.isComplete() { + for _, req := range incoming { + if !req.Request().Canceled { + allCanceled = false + if r := req.Request().Payload.(*edgeRequest); desiredState < r.desiredState { + desiredState = r.desiredState + if e.hasActiveOutgoing || r.desiredState == edgeStatusComplete || r.currentKeys == len(e.keys) { + allIncomingCanComplete = false + } + } + } + } + } + + // do not set allIncomingCanComplete if active ongoing can modify the state + if !allCanceled && e.state < edgeStatusComplete && len(e.keys) == 0 && e.hasActiveOutgoing { + allIncomingCanComplete = false + } + + if debugScheduler { + logrus.Debugf("status state=%s cancomplete=%v hasouts=%v noPossibleCache=%v depsCacheFast=%v keys=%d cacheRecords=%d", e.state, allIncomingCanComplete, e.hasActiveOutgoing, e.noCacheMatchPossible, e.allDepsCompletedCacheFast, len(e.keys), len(e.cacheRecords)) + } + + if allIncomingCanComplete && e.hasActiveOutgoing { + // cancel all current requests + for _, p := range allPipes { + p.Cancel() + } + + // can close all but one requests + var leaveOpen pipe.Sender + for _, req := range incoming { + if !req.Request().Canceled { + leaveOpen = req + break + } + } + for _, req := range incoming { + if leaveOpen == nil || leaveOpen == req { + leaveOpen = req + continue + } + e.finishIncoming(req) + } + return desiredState, true + } + + // can complete, finish and return + if allIncomingCanComplete && !e.hasActiveOutgoing { + for _, req := range incoming { + e.finishIncoming(req) + } + return desiredState, true + } + + // update incoming based on current state + for _, req := range incoming { + r := req.Request().Payload.(*edgeRequest) + if req.Request().Canceled { + e.finishIncoming(req) + } else if !e.hasActiveOutgoing && e.state >= r.desiredState { + e.finishIncoming(req) + } else if !isEqualState(r.currentState, e.edgeState) && !req.Request().Canceled { + e.updateIncoming(req) + } + } + return desiredState, false +} + +// createInputRequests creates new requests for dependencies or async functions +// that need to complete to continue processing the edge +func (e *edge) createInputRequests(desiredState edgeStatusType, f *pipeFactory, force bool) bool { + addedNew := false + + // initialize deps state + if e.deps == nil { + e.depRequests = make(map[pipe.Receiver]*dep) + e.deps = make([]*dep, 0, len(e.edge.Vertex.Inputs())) + for i := range e.edge.Vertex.Inputs() { + e.deps = append(e.deps, newDep(Index(i))) + } + } + + // cycle all dependencies. 
set up outgoing requests if needed + for _, dep := range e.deps { + desiredStateDep := dep.state + + if e.noCacheMatchPossible || force { + desiredStateDep = edgeStatusComplete + } else if dep.state == edgeStatusInitial && desiredState > dep.state { + desiredStateDep = edgeStatusCacheFast + } else if dep.state == edgeStatusCacheFast && desiredState > dep.state { + // wait all deps to complete cache fast before continuing with slow cache + if (e.allDepsCompletedCacheFast && len(e.keys) == 0) || len(dep.keyMap) == 0 || e.allDepsHaveKeys(true) { + if !e.skipPhase2FastCache(dep) && e.cacheMap != nil { + desiredStateDep = edgeStatusCacheSlow + } + } + } else if e.cacheMap != nil && dep.state == edgeStatusCacheSlow && desiredState == edgeStatusComplete { + // if all deps have completed cache-slow or content based cache for input is available + if (len(dep.keyMap) == 0 || e.allDepsCompletedCacheSlow || (!e.skipPhase2FastCache(dep) && e.slowCacheFunc(dep) != nil)) && (len(e.cacheRecords) == 0) { + if len(dep.keyMap) == 0 || !e.skipPhase2SlowCache(dep) { + desiredStateDep = edgeStatusComplete + } + } + } else if e.cacheMap != nil && dep.state == edgeStatusCacheSlow && e.slowCacheFunc(dep) != nil && desiredState == edgeStatusCacheSlow { + if len(dep.keyMap) == 0 || !e.skipPhase2SlowCache(dep) { + desiredStateDep = edgeStatusComplete + } + } + + // outgoing request is needed + if dep.state < desiredStateDep { + addNew := true + if dep.req != nil && !dep.req.Status().Completed { + if dep.req.Request().(*edgeRequest).desiredState != desiredStateDep { + dep.req.Cancel() + } else { + addNew = false + } + } + if addNew { + req := f.NewInputRequest(e.edge.Vertex.Inputs()[int(dep.index)], &edgeRequest{ + currentState: dep.edgeState, + desiredState: desiredStateDep, + currentKeys: len(dep.keys), + }) + e.depRequests[req] = dep + dep.req = req + addedNew = true + } + } + // initialize function to compute cache key based on dependency result + if dep.state == edgeStatusComplete && dep.slowCacheReq == nil && e.slowCacheFunc(dep) != nil && e.cacheMap != nil { + fn := e.slowCacheFunc(dep) + res := dep.result + func(fn ResultBasedCacheFunc, res Result, index Index) { + dep.slowCacheReq = f.NewFuncRequest(func(ctx context.Context) (interface{}, error) { + v, err := e.op.CalcSlowCache(ctx, index, fn, res) + return v, errors.Wrap(err, "failed to compute cache key") + }) + }(fn, res, dep.index) + addedNew = true + } + } + return addedNew +} + +// execIfPossible creates a request for getting the edge result if there is +// enough state +func (e *edge) execIfPossible(f *pipeFactory) bool { + if len(e.cacheRecords) > 0 { + if e.keysDidChange { + e.postpone(f) + return true + } + e.execReq = f.NewFuncRequest(e.loadCache) + e.execCacheLoad = true + for req := range e.depRequests { + req.Cancel() + } + return true + } else if e.allDepsCompleted { + if e.keysDidChange { + e.postpone(f) + return true + } + e.execReq = f.NewFuncRequest(e.execOp) + e.execCacheLoad = false + return true + } + return false +} + +// postpone delays exec to next unpark invocation if we have unprocessed keys +func (e *edge) postpone(f *pipeFactory) { + f.NewFuncRequest(func(context.Context) (interface{}, error) { + return nil, nil + }) +} + +// loadCache creates a request to load edge result from cache +func (e *edge) loadCache(ctx context.Context) (interface{}, error) { + recs := make([]*CacheRecord, 0, len(e.cacheRecords)) + for _, r := range e.cacheRecords { + recs = append(recs, r) + } + + rec := getBestResult(recs) + 
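+	// remember which record was attempted; if the load fails, processUpdate removes it from cacheRecords so it is not retried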
e.cacheRecordsLoaded[rec.ID] = struct{}{} + + logrus.Debugf("load cache for %s with %s", e.edge.Vertex.Name(), rec.ID) + res, err := e.op.LoadCache(ctx, rec) + if err != nil { + return nil, errors.Wrap(err, "failed to load cache") + } + + return NewCachedResult(res, []ExportableCacheKey{{CacheKey: rec.key, Exporter: &exporter{k: rec.key, record: rec, edge: e}}}), nil +} + +// execOp creates a request to execute the vertex operation +func (e *edge) execOp(ctx context.Context) (interface{}, error) { + cacheKeys, inputs := e.commitOptions() + results, subExporters, err := e.op.Exec(ctx, toResultSlice(inputs)) + if err != nil { + return nil, errors.WithStack(err) + } + + index := e.edge.Index + if len(results) <= int(index) { + return nil, errors.Errorf("invalid response from exec need %d index but %d results received", index, len(results)) + } + + res := results[int(index)] + + for i := range results { + if i != int(index) { + go results[i].Release(context.TODO()) + } + } + + var exporters []CacheExporter + + for _, cacheKey := range cacheKeys { + ck, err := e.op.Cache().Save(cacheKey, res, time.Now()) + if err != nil { + return nil, err + } + + if exp, ok := ck.Exporter.(*exporter); ok { + exp.edge = e + } + + exps := make([]CacheExporter, 0, len(subExporters)) + for _, exp := range subExporters { + exps = append(exps, exp.Exporter) + } + + exporters = append(exporters, ck.Exporter) + exporters = append(exporters, exps...) + } + + ek := make([]ExportableCacheKey, 0, len(cacheKeys)) + for _, ck := range cacheKeys { + ek = append(ek, ExportableCacheKey{ + CacheKey: ck, + Exporter: &mergedExporter{exporters: exporters}, + }) + } + + return NewCachedResult(res, ek), nil +} + +func toResultSlice(cres []CachedResult) (out []Result) { + out = make([]Result, len(cres)) + for i := range cres { + out[i] = cres[i].(Result) + } + return out +} + +func isEqualState(s1, s2 edgeState) bool { + if s1.state != s2.state || s1.result != s2.result || s1.cacheMap != s2.cacheMap || len(s1.keys) != len(s2.keys) { + return false + } + return true +} + +func withSelector(keys []ExportableCacheKey, selector digest.Digest) []CacheKeyWithSelector { + out := make([]CacheKeyWithSelector, len(keys)) + for i, k := range keys { + out[i] = CacheKeyWithSelector{Selector: selector, CacheKey: k} + } + return out +} diff --git a/vendor/github.com/moby/buildkit/solver/exporter.go b/vendor/github.com/moby/buildkit/solver/exporter.go new file mode 100644 index 00000000..6a073960 --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/exporter.go @@ -0,0 +1,211 @@ +package solver + +import ( + "context" + + digest "github.com/opencontainers/go-digest" +) + +type exporter struct { + k *CacheKey + records []*CacheRecord + record *CacheRecord + + res []CacheExporterRecord + edge *edge // for secondaryExporters + override *bool +} + +func addBacklinks(t CacheExporterTarget, rec CacheExporterRecord, cm *cacheManager, id string, bkm map[string]CacheExporterRecord) (CacheExporterRecord, error) { + if rec == nil { + var ok bool + rec, ok = bkm[id] + if ok && rec != nil { + return rec, nil + } + _ = ok + } + bkm[id] = nil + if err := cm.backend.WalkBacklinks(id, func(id string, link CacheInfoLink) error { + if rec == nil { + rec = t.Add(link.Digest) + } + r, ok := bkm[id] + if !ok { + var err error + r, err = addBacklinks(t, nil, cm, id, bkm) + if err != nil { + return err + } + } + if r != nil { + rec.LinkFrom(r, int(link.Input), link.Selector.String()) + } + return nil + }); err != nil { + return nil, err + } + if rec == nil { + rec = 
t.Add(digest.Digest(id)) + } + bkm[id] = rec + return rec, nil +} + +type backlinkT struct{} + +var backlinkKey = backlinkT{} + +func (e *exporter) ExportTo(ctx context.Context, t CacheExporterTarget, opt CacheExportOpt) ([]CacheExporterRecord, error) { + var bkm map[string]CacheExporterRecord + + if bk := ctx.Value(backlinkKey); bk == nil { + bkm = map[string]CacheExporterRecord{} + ctx = context.WithValue(ctx, backlinkKey, bkm) + } else { + bkm = bk.(map[string]CacheExporterRecord) + } + + if t.Visited(e) { + return e.res, nil + } + t.Visit(e) + + deps := e.k.Deps() + + type expr struct { + r CacheExporterRecord + selector digest.Digest + } + + rec := t.Add(rootKey(e.k.Digest(), e.k.Output())) + allRec := []CacheExporterRecord{rec} + + addRecord := true + + if e.override != nil { + addRecord = *e.override + } + + if e.record == nil && len(e.k.Deps()) > 0 { + e.record = getBestResult(e.records) + } + + var remote *Remote + if v := e.record; v != nil && len(e.k.Deps()) > 0 && addRecord { + cm := v.cacheManager + key := cm.getID(v.key) + res, err := cm.backend.Load(key, v.ID) + if err != nil { + return nil, err + } + + remote, err = cm.results.LoadRemote(ctx, res) + if err != nil { + return nil, err + } + + if remote == nil && opt.Mode != CacheExportModeRemoteOnly { + res, err := cm.results.Load(ctx, res) + if err != nil { + return nil, err + } + remote, err = opt.Convert(ctx, res) + if err != nil { + return nil, err + } + res.Release(context.TODO()) + } + + if remote != nil { + for _, rec := range allRec { + rec.AddResult(v.CreatedAt, remote) + } + } + } + + if remote != nil && opt.Mode == CacheExportModeMin { + opt.Mode = CacheExportModeRemoteOnly + } + + srcs := make([][]expr, len(deps)) + + for i, deps := range deps { + for _, dep := range deps { + recs, err := dep.CacheKey.Exporter.ExportTo(ctx, t, opt) + if err != nil { + return nil, nil + } + for _, r := range recs { + srcs[i] = append(srcs[i], expr{r: r, selector: dep.Selector}) + } + } + } + + if e.edge != nil { + for _, de := range e.edge.secondaryExporters { + recs, err := de.cacheKey.CacheKey.Exporter.ExportTo(ctx, t, opt) + if err != nil { + return nil, nil + } + for _, r := range recs { + srcs[de.index] = append(srcs[de.index], expr{r: r, selector: de.cacheKey.Selector}) + } + } + } + + for i, srcs := range srcs { + for _, src := range srcs { + rec.LinkFrom(src.r, i, src.selector.String()) + } + } + + for cm, id := range e.k.ids { + if _, err := addBacklinks(t, rec, cm, id, bkm); err != nil { + return nil, err + } + } + + if v := e.record; v != nil && len(deps) == 0 { + cm := v.cacheManager + key := cm.getID(v.key) + if err := cm.backend.WalkIDsByResult(v.ID, func(id string) error { + if id == key { + return nil + } + allRec = append(allRec, t.Add(digest.Digest(id))) + return nil + }); err != nil { + return nil, err + } + } + + e.res = allRec + + return e.res, nil +} + +func getBestResult(records []*CacheRecord) *CacheRecord { + var rec *CacheRecord + for _, r := range records { + if rec == nil || rec.CreatedAt.Before(r.CreatedAt) || (rec.CreatedAt.Equal(r.CreatedAt) && rec.Priority < r.Priority) { + rec = r + } + } + return rec +} + +type mergedExporter struct { + exporters []CacheExporter +} + +func (e *mergedExporter) ExportTo(ctx context.Context, t CacheExporterTarget, opt CacheExportOpt) (er []CacheExporterRecord, err error) { + for _, e := range e.exporters { + r, err := e.ExportTo(ctx, t, opt) + if err != nil { + return nil, err + } + er = append(er, r...) 
+ } + return +} diff --git a/vendor/github.com/moby/buildkit/solver/index.go b/vendor/github.com/moby/buildkit/solver/index.go new file mode 100644 index 00000000..78a2cca2 --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/index.go @@ -0,0 +1,245 @@ +package solver + +import ( + "sync" + + "github.com/moby/buildkit/identity" +) + +// edgeIndex is a synchronous map for detecting edge collisions. +type edgeIndex struct { + mu sync.Mutex + + items map[string]*indexItem + backRefs map[*edge]map[string]struct{} +} + +type indexItem struct { + edge *edge + links map[CacheInfoLink]map[string]struct{} + deps map[string]struct{} +} + +func newEdgeIndex() *edgeIndex { + return &edgeIndex{ + items: map[string]*indexItem{}, + backRefs: map[*edge]map[string]struct{}{}, + } +} + +func (ei *edgeIndex) Release(e *edge) { + ei.mu.Lock() + defer ei.mu.Unlock() + + for id := range ei.backRefs[e] { + ei.releaseEdge(id, e) + } + delete(ei.backRefs, e) +} + +func (ei *edgeIndex) releaseEdge(id string, e *edge) { + item, ok := ei.items[id] + if !ok { + return + } + + item.edge = nil + + if len(item.links) == 0 { + for d := range item.deps { + ei.releaseLink(d, id) + } + delete(ei.items, id) + } +} + +func (ei *edgeIndex) releaseLink(id, target string) { + item, ok := ei.items[id] + if !ok { + return + } + + for lid, links := range item.links { + for check := range links { + if check == target { + delete(links, check) + } + } + if len(links) == 0 { + delete(item.links, lid) + } + } + + if item.edge == nil && len(item.links) == 0 { + for d := range item.deps { + ei.releaseLink(d, id) + } + delete(ei.items, id) + } +} + +func (ei *edgeIndex) LoadOrStore(k *CacheKey, e *edge) *edge { + ei.mu.Lock() + defer ei.mu.Unlock() + + // get all current edges that match the cachekey + ids := ei.getAllMatches(k) + + var oldID string + var old *edge + + for _, id := range ids { + if item, ok := ei.items[id]; ok { + if item.edge != e { + oldID = id + old = item.edge + } + } + } + + if old != nil && !(!isIgnoreCache(old) && isIgnoreCache(e)) { + ei.enforceLinked(oldID, k) + return old + } + + id := identity.NewID() + if len(ids) > 0 { + id = ids[0] + } + + ei.enforceLinked(id, k) + + ei.items[id].edge = e + backRefs, ok := ei.backRefs[e] + if !ok { + backRefs = map[string]struct{}{} + ei.backRefs[e] = backRefs + } + backRefs[id] = struct{}{} + + return nil +} + +// enforceLinked adds links from current ID to all dep keys +func (er *edgeIndex) enforceLinked(id string, k *CacheKey) { + main, ok := er.items[id] + if !ok { + main = &indexItem{ + links: map[CacheInfoLink]map[string]struct{}{}, + deps: map[string]struct{}{}, + } + er.items[id] = main + } + + deps := k.Deps() + + for i, dd := range deps { + for _, d := range dd { + ck := d.CacheKey.CacheKey + er.enforceIndexID(ck) + ll := CacheInfoLink{Input: Index(i), Digest: k.Digest(), Output: k.Output(), Selector: d.Selector} + for _, ckID := range ck.indexIDs { + if item, ok := er.items[ckID]; ok { + links, ok := item.links[ll] + if !ok { + links = map[string]struct{}{} + item.links[ll] = links + } + links[id] = struct{}{} + main.deps[ckID] = struct{}{} + } + } + } + } +} + +func (ei *edgeIndex) enforceIndexID(k *CacheKey) { + if len(k.indexIDs) > 0 { + return + } + + matches := ei.getAllMatches(k) + + if len(matches) > 0 { + k.indexIDs = matches + } else { + k.indexIDs = []string{identity.NewID()} + } + + for _, id := range k.indexIDs { + ei.enforceLinked(id, k) + } +} + +func (ei *edgeIndex) getAllMatches(k *CacheKey) []string { + deps := k.Deps() + + if len(deps) == 0 { 
+ return []string{rootKey(k.Digest(), k.Output()).String()} + } + + for _, dd := range deps { + for _, k := range dd { + ei.enforceIndexID(k.CacheKey.CacheKey) + } + } + + matches := map[string]struct{}{} + + for i, dd := range deps { + if i == 0 { + for _, d := range dd { + ll := CacheInfoLink{Input: Index(i), Digest: k.Digest(), Output: k.Output(), Selector: d.Selector} + for _, ckID := range d.CacheKey.CacheKey.indexIDs { + item, ok := ei.items[ckID] + if ok { + for l := range item.links[ll] { + matches[l] = struct{}{} + } + } + } + } + continue + } + + if len(matches) == 0 { + break + } + + for m := range matches { + found := false + for _, d := range dd { + ll := CacheInfoLink{Input: Index(i), Digest: k.Digest(), Output: k.Output(), Selector: d.Selector} + for _, ckID := range d.CacheKey.CacheKey.indexIDs { + if item, ok := ei.items[ckID]; ok { + if l, ok := item.links[ll]; ok { + if _, ok := l[m]; ok { + found = true + break + } + } + } + } + } + + if !found { + delete(matches, m) + } + } + } + + out := make([]string, 0, len(matches)) + + for m := range matches { + out = append(out, m) + } + + return out +} + +func isIgnoreCache(e *edge) bool { + if e.edge.Vertex == nil { + return false + } + return e.edge.Vertex.Options().IgnoreCache +} diff --git a/vendor/github.com/moby/buildkit/solver/internal/pipe/pipe.go b/vendor/github.com/moby/buildkit/solver/internal/pipe/pipe.go new file mode 100644 index 00000000..81702f52 --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/internal/pipe/pipe.go @@ -0,0 +1,199 @@ +package pipe + +import ( + "context" + "sync" + "sync/atomic" + + "github.com/pkg/errors" +) + +type channel struct { + OnSendCompletion func() + value atomic.Value + lastValue *wrappedValue +} + +type wrappedValue struct { + value interface{} +} + +func (c *channel) Send(v interface{}) { + c.value.Store(&wrappedValue{value: v}) + if c.OnSendCompletion != nil { + c.OnSendCompletion() + } +} + +func (c *channel) Receive() (interface{}, bool) { + v := c.value.Load() + if v == nil || v.(*wrappedValue) == c.lastValue { + return nil, false + } + c.lastValue = v.(*wrappedValue) + return v.(*wrappedValue).value, true +} + +type Pipe struct { + Sender Sender + Receiver Receiver + OnReceiveCompletion func() + OnSendCompletion func() +} + +type Request struct { + Payload interface{} + Canceled bool +} + +type Sender interface { + Request() Request + Update(v interface{}) + Finalize(v interface{}, err error) + Status() Status +} + +type Receiver interface { + Receive() bool + Cancel() + Status() Status + Request() interface{} +} + +type Status struct { + Canceled bool + Completed bool + Err error + Value interface{} +} + +func NewWithFunction(f func(context.Context) (interface{}, error)) (*Pipe, func()) { + p := New(Request{}) + + ctx, cancel := context.WithCancel(context.TODO()) + + p.OnReceiveCompletion = func() { + if req := p.Sender.Request(); req.Canceled { + cancel() + } + } + + return p, func() { + res, err := f(ctx) + if err != nil { + p.Sender.Finalize(nil, err) + return + } + p.Sender.Finalize(res, nil) + } +} + +func New(req Request) *Pipe { + cancelCh := &channel{} + roundTripCh := &channel{} + pw := &sender{ + req: req, + sendChannel: roundTripCh, + } + pr := &receiver{ + req: req, + recvChannel: roundTripCh, + sendChannel: cancelCh, + } + + p := &Pipe{ + Sender: pw, + Receiver: pr, + } + + cancelCh.OnSendCompletion = func() { + v, ok := cancelCh.Receive() + if ok { + pw.setRequest(v.(Request)) + } + if p.OnReceiveCompletion != nil { + p.OnReceiveCompletion() + } + } + + 
roundTripCh.OnSendCompletion = func() { + if p.OnSendCompletion != nil { + p.OnSendCompletion() + } + } + + return p +} + +type sender struct { + status Status + req Request + sendChannel *channel + mu sync.Mutex +} + +func (pw *sender) Status() Status { + return pw.status +} + +func (pw *sender) Request() Request { + pw.mu.Lock() + defer pw.mu.Unlock() + return pw.req +} + +func (pw *sender) setRequest(req Request) { + pw.mu.Lock() + defer pw.mu.Unlock() + pw.req = req +} + +func (pw *sender) Update(v interface{}) { + pw.status.Value = v + pw.sendChannel.Send(pw.status) +} + +func (pw *sender) Finalize(v interface{}, err error) { + if v != nil { + pw.status.Value = v + } + pw.status.Err = err + pw.status.Completed = true + if errors.Cause(err) == context.Canceled && pw.req.Canceled { + pw.status.Canceled = true + } + pw.sendChannel.Send(pw.status) +} + +type receiver struct { + status Status + req Request + recvChannel *channel + sendChannel *channel +} + +func (pr *receiver) Request() interface{} { + return pr.req.Payload +} + +func (pr *receiver) Receive() bool { + v, ok := pr.recvChannel.Receive() + if !ok { + return false + } + pr.status = v.(Status) + return true +} + +func (pr *receiver) Cancel() { + req := pr.req + if req.Canceled { + return + } + req.Canceled = true + pr.sendChannel.Send(req) +} + +func (pr *receiver) Status() Status { + return pr.status +} diff --git a/vendor/github.com/moby/buildkit/solver/jobs.go b/vendor/github.com/moby/buildkit/solver/jobs.go new file mode 100644 index 00000000..925ae5c5 --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/jobs.go @@ -0,0 +1,806 @@ +package solver + +import ( + "context" + "fmt" + "strings" + "sync" + "time" + + "github.com/moby/buildkit/client" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/util/flightcontrol" + "github.com/moby/buildkit/util/progress" + "github.com/moby/buildkit/util/tracing" + digest "github.com/opencontainers/go-digest" + opentracing "github.com/opentracing/opentracing-go" + "github.com/pkg/errors" +) + +// ResolveOpFunc finds an Op implementation for a Vertex +type ResolveOpFunc func(Vertex, Builder) (Op, error) + +type Builder interface { + Build(ctx context.Context, e Edge) (CachedResult, error) + Context(ctx context.Context) context.Context + EachValue(ctx context.Context, key string, fn func(interface{}) error) error +} + +// Solver provides a shared graph of all the vertexes currently being +// processed. Every vertex that is being solved needs to be loaded into job +// first. Vertex operations are invoked and progress tracking happens through +// jobs. 
+type Solver struct { + mu sync.RWMutex + jobs map[string]*Job + actives map[digest.Digest]*state + opts SolverOpt + + updateCond *sync.Cond + s *scheduler + index *edgeIndex +} + +type state struct { + jobs map[*Job]struct{} + parents map[digest.Digest]struct{} + childVtx map[digest.Digest]struct{} + + mpw *progress.MultiWriter + allPw map[progress.Writer]struct{} + mspan *tracing.MultiSpan + allSpan map[opentracing.Span]struct{} + + vtx Vertex + clientVertex client.Vertex + + mu sync.Mutex + op *sharedOp + edges map[Index]*edge + opts SolverOpt + index *edgeIndex + + cache map[string]CacheManager + mainCache CacheManager + solver *Solver +} + +func (s *state) getSessionID() string { + // TODO: connect with sessionmanager to avoid getting dropped sessions + s.mu.Lock() + for j := range s.jobs { + if j.SessionID != "" { + s.mu.Unlock() + return j.SessionID + } + } + parents := map[digest.Digest]struct{}{} + for p := range s.parents { + parents[p] = struct{}{} + } + s.mu.Unlock() + + for p := range parents { + s.solver.mu.Lock() + pst, ok := s.solver.actives[p] + s.solver.mu.Unlock() + if ok { + if sessionID := pst.getSessionID(); sessionID != "" { + return sessionID + } + } + } + return "" +} + +func (s *state) builder() *subBuilder { + return &subBuilder{state: s} +} + +func (s *state) getEdge(index Index) *edge { + s.mu.Lock() + defer s.mu.Unlock() + if e, ok := s.edges[index]; ok { + return e + } + + if s.op == nil { + s.op = newSharedOp(s.opts.ResolveOpFunc, s.opts.DefaultCache, s) + } + + e := newEdge(Edge{Index: index, Vertex: s.vtx}, s.op, s.index) + s.edges[index] = e + return e +} + +func (s *state) setEdge(index Index, newEdge *edge) { + s.mu.Lock() + defer s.mu.Unlock() + e, ok := s.edges[index] + if ok { + if e == newEdge { + return + } + e.release() + } + + newEdge.incrementReferenceCount() + s.edges[index] = newEdge +} + +func (s *state) combinedCacheManager() CacheManager { + s.mu.Lock() + cms := make([]CacheManager, 0, len(s.cache)+1) + cms = append(cms, s.mainCache) + for _, cm := range s.cache { + cms = append(cms, cm) + } + s.mu.Unlock() + + if len(cms) == 1 { + return s.mainCache + } + + return NewCombinedCacheManager(cms, s.mainCache) +} + +func (s *state) Release() { + for _, e := range s.edges { + e.release() + } + if s.op != nil { + s.op.release() + } +} + +type subBuilder struct { + *state + mu sync.Mutex + exporters []ExportableCacheKey +} + +func (sb *subBuilder) Build(ctx context.Context, e Edge) (CachedResult, error) { + res, err := sb.solver.subBuild(ctx, e, sb.vtx) + if err != nil { + return nil, err + } + sb.mu.Lock() + sb.exporters = append(sb.exporters, res.CacheKeys()[0]) // all keys already have full export chain + sb.mu.Unlock() + return res, nil +} + +func (sb *subBuilder) Context(ctx context.Context) context.Context { + ctx = session.NewContext(ctx, sb.state.getSessionID()) + return opentracing.ContextWithSpan(progress.WithProgress(ctx, sb.mpw), sb.mspan) +} + +func (sb *subBuilder) EachValue(ctx context.Context, key string, fn func(interface{}) error) error { + sb.mu.Lock() + defer sb.mu.Unlock() + for j := range sb.jobs { + if err := j.EachValue(ctx, key, fn); err != nil { + return err + } + } + return nil +} + +type Job struct { + list *Solver + pr *progress.MultiReader + pw progress.Writer + span opentracing.Span + values sync.Map + + progressCloser func() + SessionID string +} + +type SolverOpt struct { + ResolveOpFunc ResolveOpFunc + DefaultCache CacheManager +} + +func NewSolver(opts SolverOpt) *Solver { + if opts.DefaultCache == nil { + 
opts.DefaultCache = NewInMemoryCacheManager() + } + jl := &Solver{ + jobs: make(map[string]*Job), + actives: make(map[digest.Digest]*state), + opts: opts, + index: newEdgeIndex(), + } + jl.s = newScheduler(jl) + jl.updateCond = sync.NewCond(jl.mu.RLocker()) + return jl +} + +func (jl *Solver) setEdge(e Edge, newEdge *edge) { + jl.mu.RLock() + defer jl.mu.RUnlock() + + st, ok := jl.actives[e.Vertex.Digest()] + if !ok { + return + } + + st.setEdge(e.Index, newEdge) +} + +func (jl *Solver) getEdge(e Edge) *edge { + jl.mu.RLock() + defer jl.mu.RUnlock() + + st, ok := jl.actives[e.Vertex.Digest()] + if !ok { + return nil + } + return st.getEdge(e.Index) +} + +func (jl *Solver) subBuild(ctx context.Context, e Edge, parent Vertex) (CachedResult, error) { + v, err := jl.load(e.Vertex, parent, nil) + if err != nil { + return nil, err + } + e.Vertex = v + return jl.s.build(ctx, e) +} + +func (jl *Solver) Close() { + jl.s.Stop() +} + +func (jl *Solver) load(v, parent Vertex, j *Job) (Vertex, error) { + jl.mu.Lock() + defer jl.mu.Unlock() + + cache := map[Vertex]Vertex{} + + return jl.loadUnlocked(v, parent, j, cache) +} + +func (jl *Solver) loadUnlocked(v, parent Vertex, j *Job, cache map[Vertex]Vertex) (Vertex, error) { + if v, ok := cache[v]; ok { + return v, nil + } + origVtx := v + + inputs := make([]Edge, len(v.Inputs())) + for i, e := range v.Inputs() { + v, err := jl.loadUnlocked(e.Vertex, parent, j, cache) + if err != nil { + return nil, err + } + inputs[i] = Edge{Index: e.Index, Vertex: v} + } + + dgst := v.Digest() + + dgstWithoutCache := digest.FromBytes([]byte(fmt.Sprintf("%s-ignorecache", dgst))) + + // if same vertex is already loaded without cache just use that + st, ok := jl.actives[dgstWithoutCache] + + if !ok { + st, ok = jl.actives[dgst] + + // !ignorecache merges with ignorecache but ignorecache doesn't merge with !ignorecache + if ok && !st.vtx.Options().IgnoreCache && v.Options().IgnoreCache { + dgst = dgstWithoutCache + } + + v = &vertexWithCacheOptions{ + Vertex: v, + dgst: dgst, + inputs: inputs, + } + + st, ok = jl.actives[dgst] + } + + if !ok { + st = &state{ + opts: jl.opts, + jobs: map[*Job]struct{}{}, + parents: map[digest.Digest]struct{}{}, + childVtx: map[digest.Digest]struct{}{}, + allPw: map[progress.Writer]struct{}{}, + allSpan: map[opentracing.Span]struct{}{}, + mpw: progress.NewMultiWriter(progress.WithMetadata("vertex", dgst)), + mspan: tracing.NewMultiSpan(), + vtx: v, + clientVertex: initClientVertex(v), + edges: map[Index]*edge{}, + index: jl.index, + mainCache: jl.opts.DefaultCache, + cache: map[string]CacheManager{}, + solver: jl, + } + jl.actives[dgst] = st + } + + st.mu.Lock() + for _, cache := range v.Options().CacheSources { + if cache.ID() != st.mainCache.ID() { + if _, ok := st.cache[cache.ID()]; !ok { + st.cache[cache.ID()] = cache + } + } + } + + if j != nil { + if _, ok := st.jobs[j]; !ok { + st.jobs[j] = struct{}{} + } + } + st.mu.Unlock() + + if parent != nil { + if _, ok := st.parents[parent.Digest()]; !ok { + st.parents[parent.Digest()] = struct{}{} + parentState, ok := jl.actives[parent.Digest()] + if !ok { + return nil, errors.Errorf("inactive parent %s", parent.Digest()) + } + parentState.childVtx[dgst] = struct{}{} + + for id, c := range parentState.cache { + st.cache[id] = c + } + } + } + + jl.connectProgressFromState(st, st) + cache[origVtx] = v + return v, nil +} + +func (jl *Solver) connectProgressFromState(target, src *state) { + for j := range src.jobs { + if _, ok := target.allPw[j.pw]; !ok { + target.mpw.Add(j.pw) + 
target.allPw[j.pw] = struct{}{} + j.pw.Write(target.clientVertex.Digest.String(), target.clientVertex) + target.mspan.Add(j.span) + target.allSpan[j.span] = struct{}{} + } + } + for p := range src.parents { + jl.connectProgressFromState(target, jl.actives[p]) + } +} + +func (jl *Solver) NewJob(id string) (*Job, error) { + jl.mu.Lock() + defer jl.mu.Unlock() + + if _, ok := jl.jobs[id]; ok { + return nil, errors.Errorf("job ID %s exists", id) + } + + pr, ctx, progressCloser := progress.NewContext(context.Background()) + pw, _, _ := progress.FromContext(ctx) // TODO: expose progress.Pipe() + + j := &Job{ + list: jl, + pr: progress.NewMultiReader(pr), + pw: pw, + progressCloser: progressCloser, + span: (&opentracing.NoopTracer{}).StartSpan(""), + } + jl.jobs[id] = j + + jl.updateCond.Broadcast() + + return j, nil +} + +func (jl *Solver) Get(id string) (*Job, error) { + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + defer cancel() + + go func() { + <-ctx.Done() + jl.mu.Lock() + jl.updateCond.Broadcast() + jl.mu.Unlock() + }() + + jl.mu.RLock() + defer jl.mu.RUnlock() + for { + select { + case <-ctx.Done(): + return nil, errors.Errorf("no such job %s", id) + default: + } + j, ok := jl.jobs[id] + if !ok { + jl.updateCond.Wait() + continue + } + return j, nil + } +} + +// called with solver lock +func (jl *Solver) deleteIfUnreferenced(k digest.Digest, st *state) { + if len(st.jobs) == 0 && len(st.parents) == 0 { + for chKey := range st.childVtx { + chState := jl.actives[chKey] + delete(chState.parents, k) + jl.deleteIfUnreferenced(chKey, chState) + } + st.Release() + delete(jl.actives, k) + } +} + +func (j *Job) Build(ctx context.Context, e Edge) (CachedResult, error) { + if span := opentracing.SpanFromContext(ctx); span != nil { + j.span = span + } + + v, err := j.list.load(e.Vertex, nil, j) + if err != nil { + return nil, err + } + e.Vertex = v + return j.list.s.build(ctx, e) +} + +func (j *Job) Discard() error { + defer j.progressCloser() + + j.list.mu.Lock() + defer j.list.mu.Unlock() + + j.pw.Close() + + for k, st := range j.list.actives { + st.mu.Lock() + if _, ok := st.jobs[j]; ok { + delete(st.jobs, j) + j.list.deleteIfUnreferenced(k, st) + } + if _, ok := st.allPw[j.pw]; ok { + delete(st.allPw, j.pw) + } + if _, ok := st.allSpan[j.span]; ok { + delete(st.allSpan, j.span) + } + st.mu.Unlock() + } + return nil +} + +func (j *Job) Context(ctx context.Context) context.Context { + ctx = session.NewContext(ctx, j.SessionID) + return progress.WithProgress(ctx, j.pw) +} + +func (j *Job) SetValue(key string, v interface{}) { + j.values.Store(key, v) +} + +func (j *Job) EachValue(ctx context.Context, key string, fn func(interface{}) error) error { + v, ok := j.values.Load(key) + if ok { + return fn(v) + } + return nil +} + +type cacheMapResp struct { + *CacheMap + complete bool +} + +type activeOp interface { + CacheMap(context.Context, int) (*cacheMapResp, error) + LoadCache(ctx context.Context, rec *CacheRecord) (Result, error) + Exec(ctx context.Context, inputs []Result) (outputs []Result, exporters []ExportableCacheKey, err error) + IgnoreCache() bool + Cache() CacheManager + CalcSlowCache(context.Context, Index, ResultBasedCacheFunc, Result) (digest.Digest, error) +} + +func newSharedOp(resolver ResolveOpFunc, cacheManager CacheManager, st *state) *sharedOp { + so := &sharedOp{ + resolver: resolver, + st: st, + slowCacheRes: map[Index]digest.Digest{}, + slowCacheErr: map[Index]error{}, + } + return so +} + +type execRes struct { + execRes []*SharedResult + 
execExporters []ExportableCacheKey +} + +type sharedOp struct { + resolver ResolveOpFunc + st *state + g flightcontrol.Group + + opOnce sync.Once + op Op + subBuilder *subBuilder + err error + + execRes *execRes + execErr error + + cacheRes []*CacheMap + cacheDone bool + cacheErr error + + slowMu sync.Mutex + slowCacheRes map[Index]digest.Digest + slowCacheErr map[Index]error +} + +func (s *sharedOp) IgnoreCache() bool { + return s.st.vtx.Options().IgnoreCache +} + +func (s *sharedOp) Cache() CacheManager { + return s.st.combinedCacheManager() +} + +func (s *sharedOp) LoadCache(ctx context.Context, rec *CacheRecord) (Result, error) { + ctx = opentracing.ContextWithSpan(progress.WithProgress(ctx, s.st.mpw), s.st.mspan) + // no cache hit. start evaluating the node + span, ctx := tracing.StartSpan(ctx, "load cache: "+s.st.vtx.Name()) + notifyStarted(ctx, &s.st.clientVertex, true) + res, err := s.Cache().Load(ctx, rec) + tracing.FinishWithError(span, err) + notifyCompleted(ctx, &s.st.clientVertex, err, true) + return res, err +} + +func (s *sharedOp) CalcSlowCache(ctx context.Context, index Index, f ResultBasedCacheFunc, res Result) (digest.Digest, error) { + key, err := s.g.Do(ctx, fmt.Sprintf("slow-compute-%d", index), func(ctx context.Context) (interface{}, error) { + s.slowMu.Lock() + // TODO: add helpers for these stored values + if res := s.slowCacheRes[index]; res != "" { + s.slowMu.Unlock() + return res, nil + } + if err := s.slowCacheErr[index]; err != nil { + s.slowMu.Unlock() + return err, nil + } + s.slowMu.Unlock() + ctx = opentracing.ContextWithSpan(progress.WithProgress(ctx, s.st.mpw), s.st.mspan) + key, err := f(ctx, res) + complete := true + if err != nil { + select { + case <-ctx.Done(): + if strings.Contains(err.Error(), context.Canceled.Error()) { + complete = false + err = errors.Wrap(ctx.Err(), err.Error()) + } + default: + } + } + s.slowMu.Lock() + defer s.slowMu.Unlock() + if complete { + if err == nil { + s.slowCacheRes[index] = key + } + s.slowCacheErr[index] = err + } + return key, err + }) + if err != nil { + ctx = opentracing.ContextWithSpan(progress.WithProgress(ctx, s.st.mpw), s.st.mspan) + notifyStarted(ctx, &s.st.clientVertex, false) + notifyCompleted(ctx, &s.st.clientVertex, err, false) + return "", err + } + return key.(digest.Digest), nil +} + +func (s *sharedOp) CacheMap(ctx context.Context, index int) (*cacheMapResp, error) { + op, err := s.getOp() + if err != nil { + return nil, err + } + res, err := s.g.Do(ctx, "cachemap", func(ctx context.Context) (ret interface{}, retErr error) { + if s.cacheRes != nil && s.cacheDone || index < len(s.cacheRes) { + return s.cacheRes, nil + } + if s.cacheErr != nil { + return nil, s.cacheErr + } + ctx = opentracing.ContextWithSpan(progress.WithProgress(ctx, s.st.mpw), s.st.mspan) + ctx = session.NewContext(ctx, s.st.getSessionID()) + if len(s.st.vtx.Inputs()) == 0 { + // no cache hit. 
start evaluating the node + span, ctx := tracing.StartSpan(ctx, "cache request: "+s.st.vtx.Name()) + notifyStarted(ctx, &s.st.clientVertex, false) + defer func() { + tracing.FinishWithError(span, retErr) + notifyCompleted(ctx, &s.st.clientVertex, retErr, false) + }() + } + res, done, err := op.CacheMap(ctx, len(s.cacheRes)) + complete := true + if err != nil { + select { + case <-ctx.Done(): + if strings.Contains(err.Error(), context.Canceled.Error()) { + complete = false + err = errors.Wrap(ctx.Err(), err.Error()) + } + default: + } + } + if complete { + if err == nil { + s.cacheRes = append(s.cacheRes, res) + s.cacheDone = done + } + s.cacheErr = err + } + return s.cacheRes, err + }) + if err != nil { + return nil, err + } + + if len(res.([]*CacheMap)) <= index { + return s.CacheMap(ctx, index) + } + + return &cacheMapResp{CacheMap: res.([]*CacheMap)[index], complete: s.cacheDone}, nil +} + +func (s *sharedOp) Exec(ctx context.Context, inputs []Result) (outputs []Result, exporters []ExportableCacheKey, err error) { + op, err := s.getOp() + if err != nil { + return nil, nil, err + } + res, err := s.g.Do(ctx, "exec", func(ctx context.Context) (ret interface{}, retErr error) { + if s.execRes != nil || s.execErr != nil { + return s.execRes, s.execErr + } + + ctx = opentracing.ContextWithSpan(progress.WithProgress(ctx, s.st.mpw), s.st.mspan) + ctx = session.NewContext(ctx, s.st.getSessionID()) + + // no cache hit. start evaluating the node + span, ctx := tracing.StartSpan(ctx, s.st.vtx.Name()) + notifyStarted(ctx, &s.st.clientVertex, false) + defer func() { + tracing.FinishWithError(span, retErr) + notifyCompleted(ctx, &s.st.clientVertex, retErr, false) + }() + + res, err := op.Exec(ctx, inputs) + complete := true + if err != nil { + select { + case <-ctx.Done(): + if strings.Contains(err.Error(), context.Canceled.Error()) { + complete = false + err = errors.Wrap(ctx.Err(), err.Error()) + } + default: + } + } + if complete { + if res != nil { + var subExporters []ExportableCacheKey + s.subBuilder.mu.Lock() + if len(s.subBuilder.exporters) > 0 { + subExporters = append(subExporters, s.subBuilder.exporters...) 
+ } + s.subBuilder.mu.Unlock() + + s.execRes = &execRes{execRes: wrapShared(res), execExporters: subExporters} + } + s.execErr = err + } + return s.execRes, err + }) + if err != nil { + return nil, nil, err + } + r := res.(*execRes) + return unwrapShared(r.execRes), r.execExporters, nil +} + +func (s *sharedOp) getOp() (Op, error) { + s.opOnce.Do(func() { + s.subBuilder = s.st.builder() + s.op, s.err = s.resolver(s.st.vtx, s.subBuilder) + }) + if s.err != nil { + return nil, s.err + } + return s.op, nil +} + +func (s *sharedOp) release() { + if s.execRes != nil { + for _, r := range s.execRes.execRes { + go r.Release(context.TODO()) + } + } +} + +func initClientVertex(v Vertex) client.Vertex { + inputDigests := make([]digest.Digest, 0, len(v.Inputs())) + for _, inp := range v.Inputs() { + inputDigests = append(inputDigests, inp.Vertex.Digest()) + } + return client.Vertex{ + Inputs: inputDigests, + Name: v.Name(), + Digest: v.Digest(), + } +} + +func wrapShared(inp []Result) []*SharedResult { + out := make([]*SharedResult, len(inp)) + for i, r := range inp { + out[i] = NewSharedResult(r) + } + return out +} + +func unwrapShared(inp []*SharedResult) []Result { + out := make([]Result, len(inp)) + for i, r := range inp { + out[i] = r.Clone() + } + return out +} + +type vertexWithCacheOptions struct { + Vertex + inputs []Edge + dgst digest.Digest +} + +func (v *vertexWithCacheOptions) Digest() digest.Digest { + return v.dgst +} + +func (v *vertexWithCacheOptions) Inputs() []Edge { + return v.inputs +} + +func notifyStarted(ctx context.Context, v *client.Vertex, cached bool) { + pw, _, _ := progress.FromContext(ctx) + defer pw.Close() + now := time.Now() + v.Started = &now + v.Completed = nil + v.Cached = cached + pw.Write(v.Digest.String(), *v) +} + +func notifyCompleted(ctx context.Context, v *client.Vertex, err error, cached bool) { + pw, _, _ := progress.FromContext(ctx) + defer pw.Close() + now := time.Now() + if v.Started == nil { + v.Started = &now + } + v.Completed = &now + v.Cached = cached + if err != nil { + v.Error = err.Error() + } + pw.Write(v.Digest.String(), *v) +} diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/bridge.go b/vendor/github.com/moby/buildkit/solver/llbsolver/bridge.go new file mode 100644 index 00000000..c2d9ad2b --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/bridge.go @@ -0,0 +1,324 @@ +package llbsolver + +import ( + "context" + "fmt" + "io" + "strings" + "sync" + "time" + + "github.com/containerd/containerd/platforms" + "github.com/mitchellh/hashstructure" + "github.com/moby/buildkit/cache" + "github.com/moby/buildkit/cache/remotecache" + "github.com/moby/buildkit/client/llb" + "github.com/moby/buildkit/executor" + "github.com/moby/buildkit/frontend" + gw "github.com/moby/buildkit/frontend/gateway/client" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/solver/pb" + "github.com/moby/buildkit/util/flightcontrol" + "github.com/moby/buildkit/util/tracing" + "github.com/moby/buildkit/worker" + digest "github.com/opencontainers/go-digest" + specs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +type llbBridge struct { + builder solver.Builder + frontends map[string]frontend.Frontend + resolveWorker func() (worker.Worker, error) + eachWorker func(func(worker.Worker) error) error + resolveCacheImporterFuncs map[string]remotecache.ResolveCacheImporterFunc + cms map[string]solver.CacheManager + cmsMu sync.Mutex + 
platforms []specs.Platform + sm *session.Manager +} + +func (b *llbBridge) loadResult(ctx context.Context, def *pb.Definition, cacheImports []gw.CacheOptionsEntry) (solver.CachedResult, error) { + w, err := b.resolveWorker() + if err != nil { + return nil, err + } + ent, err := loadEntitlements(b.builder) + if err != nil { + return nil, err + } + var cms []solver.CacheManager + for _, im := range cacheImports { + cmID, err := cmKey(im) + if err != nil { + return nil, err + } + b.cmsMu.Lock() + var cm solver.CacheManager + if prevCm, ok := b.cms[cmID]; !ok { + func(cmID string, im gw.CacheOptionsEntry) { + cm = newLazyCacheManager(cmID, func() (solver.CacheManager, error) { + var cmNew solver.CacheManager + if err := inVertexContext(b.builder.Context(context.TODO()), "importing cache manifest from "+cmID, "", func(ctx context.Context) error { + resolveCI, ok := b.resolveCacheImporterFuncs[im.Type] + if !ok { + return errors.Errorf("unknown cache importer: %s", im.Type) + } + ci, desc, err := resolveCI(ctx, im.Attrs) + if err != nil { + return err + } + cmNew, err = ci.Resolve(ctx, desc, cmID, w) + return err + }); err != nil { + logrus.Debugf("error while importing cache manifest from cmId=%s: %v", cmID, err) + return nil, err + } + return cmNew, nil + }) + }(cmID, im) + b.cms[cmID] = cm + } else { + cm = prevCm + } + cms = append(cms, cm) + b.cmsMu.Unlock() + } + dpc := &detectPrunedCacheID{} + + edge, err := Load(def, dpc.Load, ValidateEntitlements(ent), WithCacheSources(cms), RuntimePlatforms(b.platforms), WithValidateCaps()) + if err != nil { + return nil, errors.Wrap(err, "failed to load LLB") + } + + if len(dpc.ids) > 0 { + ids := make([]string, 0, len(dpc.ids)) + for id := range dpc.ids { + ids = append(ids, id) + } + if err := b.eachWorker(func(w worker.Worker) error { + return w.PruneCacheMounts(ctx, ids) + }); err != nil { + return nil, err + } + } + + res, err := b.builder.Build(ctx, edge) + if err != nil { + return nil, err + } + wr, ok := res.Sys().(*worker.WorkerRef) + if !ok { + return nil, errors.Errorf("invalid reference for exporting: %T", res.Sys()) + } + if wr.ImmutableRef != nil { + if err := wr.ImmutableRef.Finalize(ctx, false); err != nil { + return nil, err + } + } + return res, err +} + +func (b *llbBridge) Solve(ctx context.Context, req frontend.SolveRequest) (res *frontend.Result, err error) { + if req.Definition != nil && req.Definition.Def != nil && req.Frontend != "" { + return nil, errors.New("cannot solve with both Definition and Frontend specified") + } + + if req.Definition != nil && req.Definition.Def != nil { + res = &frontend.Result{Ref: newResultProxy(b, req)} + } else if req.Frontend != "" { + f, ok := b.frontends[req.Frontend] + if !ok { + return nil, errors.Errorf("invalid frontend: %s", req.Frontend) + } + res, err = f.Solve(ctx, b, req.FrontendOpt, req.FrontendInputs) + if err != nil { + return nil, errors.Wrapf(err, "failed to solve with frontend %s", req.Frontend) + } + } else { + return &frontend.Result{}, nil + } + + return +} + +type resultProxy struct { + cb func(context.Context) (solver.CachedResult, error) + def *pb.Definition + g flightcontrol.Group + mu sync.Mutex + released bool + v solver.CachedResult + err error +} + +func newResultProxy(b *llbBridge, req frontend.SolveRequest) *resultProxy { + return &resultProxy{ + def: req.Definition, + cb: func(ctx context.Context) (solver.CachedResult, error) { + return b.loadResult(ctx, req.Definition, req.CacheImports) + }, + } +} + +func (rp *resultProxy) Definition() *pb.Definition { + return 
rp.def +} + +func (rp *resultProxy) Release(ctx context.Context) error { + rp.mu.Lock() + defer rp.mu.Unlock() + if rp.v != nil { + if rp.released { + logrus.Warnf("release of already released result") + } + if err := rp.v.Release(ctx); err != nil { + return err + } + } + rp.released = true + return nil +} + +func (rp *resultProxy) Result(ctx context.Context) (solver.CachedResult, error) { + r, err := rp.g.Do(ctx, "result", func(ctx context.Context) (interface{}, error) { + rp.mu.Lock() + if rp.released { + rp.mu.Unlock() + return nil, errors.Errorf("accessing released result") + } + if rp.v != nil || rp.err != nil { + rp.mu.Unlock() + return rp.v, rp.err + } + rp.mu.Unlock() + v, err := rp.cb(ctx) + if err != nil { + select { + case <-ctx.Done(): + if strings.Contains(err.Error(), context.Canceled.Error()) { + return v, err + } + default: + } + } + rp.mu.Lock() + if rp.released { + if v != nil { + v.Release(context.TODO()) + } + rp.mu.Unlock() + return nil, errors.Errorf("evaluating released result") + } + rp.v = v + rp.err = err + rp.mu.Unlock() + return v, err + }) + if r != nil { + return r.(solver.CachedResult), nil + } + return nil, err +} + +func (s *llbBridge) Exec(ctx context.Context, meta executor.Meta, root cache.ImmutableRef, stdin io.ReadCloser, stdout, stderr io.WriteCloser) (err error) { + w, err := s.resolveWorker() + if err != nil { + return err + } + span, ctx := tracing.StartSpan(ctx, strings.Join(meta.Args, " ")) + err = w.Exec(ctx, meta, root, stdin, stdout, stderr) + tracing.FinishWithError(span, err) + return err +} + +func (s *llbBridge) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt) (dgst digest.Digest, config []byte, err error) { + w, err := s.resolveWorker() + if err != nil { + return "", nil, err + } + if opt.LogName == "" { + opt.LogName = fmt.Sprintf("resolve image config for %s", ref) + } + id := ref // make a deterministic ID for avoiding duplicates + if platform := opt.Platform; platform == nil { + id += platforms.Format(platforms.DefaultSpec()) + } else { + id += platforms.Format(*platform) + } + err = inVertexContext(s.builder.Context(ctx), opt.LogName, id, func(ctx context.Context) error { + dgst, config, err = w.ResolveImageConfig(ctx, ref, opt, s.sm) + return err + }) + return dgst, config, err +} + +type lazyCacheManager struct { + id string + main solver.CacheManager + + waitCh chan struct{} + err error +} + +func (lcm *lazyCacheManager) ID() string { + return lcm.id +} +func (lcm *lazyCacheManager) Query(inp []solver.CacheKeyWithSelector, inputIndex solver.Index, dgst digest.Digest, outputIndex solver.Index) ([]*solver.CacheKey, error) { + lcm.wait() + if lcm.main == nil { + return nil, nil + } + return lcm.main.Query(inp, inputIndex, dgst, outputIndex) +} +func (lcm *lazyCacheManager) Records(ck *solver.CacheKey) ([]*solver.CacheRecord, error) { + lcm.wait() + if lcm.main == nil { + return nil, nil + } + return lcm.main.Records(ck) +} +func (lcm *lazyCacheManager) Load(ctx context.Context, rec *solver.CacheRecord) (solver.Result, error) { + if err := lcm.wait(); err != nil { + return nil, err + } + return lcm.main.Load(ctx, rec) +} +func (lcm *lazyCacheManager) Save(key *solver.CacheKey, s solver.Result, createdAt time.Time) (*solver.ExportableCacheKey, error) { + if err := lcm.wait(); err != nil { + return nil, err + } + return lcm.main.Save(key, s, createdAt) +} + +func (lcm *lazyCacheManager) wait() error { + <-lcm.waitCh + return lcm.err +} + +func newLazyCacheManager(id string, fn func() 
(solver.CacheManager, error)) solver.CacheManager {
+	lcm := &lazyCacheManager{id: id, waitCh: make(chan struct{})}
+	go func() {
+		defer close(lcm.waitCh)
+		cm, err := fn()
+		if err != nil {
+			lcm.err = err
+			return
+		}
+		lcm.main = cm
+	}()
+	return lcm
+}
+
+func cmKey(im gw.CacheOptionsEntry) (string, error) {
+	if im.Type == "registry" && im.Attrs["ref"] != "" {
+		return im.Attrs["ref"], nil
+	}
+	i, err := hashstructure.Hash(im, nil)
+	if err != nil {
+		return "", err
+	}
+	return fmt.Sprintf("%s:%d", im.Type, i), nil
+}
diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/file/backend.go b/vendor/github.com/moby/buildkit/solver/llbsolver/file/backend.go
new file mode 100644
index 00000000..98da375a
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/solver/llbsolver/file/backend.go
@@ -0,0 +1,342 @@
+package file
+
+import (
+	"context"
+	"io/ioutil"
+	"log"
+	"os"
+	"path/filepath"
+	"strings"
+	"time"
+
+	"github.com/containerd/continuity/fs"
+	"github.com/docker/docker/pkg/idtools"
+	"github.com/moby/buildkit/snapshot"
+	"github.com/moby/buildkit/solver/llbsolver/ops/fileoptypes"
+	"github.com/moby/buildkit/solver/pb"
+	"github.com/pkg/errors"
+	copy "github.com/tonistiigi/fsutil/copy"
+)
+
+func timestampToTime(ts int64) *time.Time {
+	if ts == -1 {
+		return nil
+	}
+	tm := time.Unix(ts/1e9, ts%1e9)
+	return &tm
+}
+
+func mapUserToChowner(user *copy.User, idmap *idtools.IdentityMapping) (copy.Chowner, error) {
+	if user == nil {
+		return func(old *copy.User) (*copy.User, error) {
+			if old == nil {
+				if idmap == nil {
+					return nil, nil
+				}
+				old = &copy.User{} // root
+				// non-nil old is already mapped
+				if idmap != nil {
+					identity, err := idmap.ToHost(idtools.Identity{
+						UID: old.Uid,
+						GID: old.Gid,
+					})
+					if err != nil {
+						return nil, err
+					}
+					return &copy.User{Uid: identity.UID, Gid: identity.GID}, nil
+				}
+			}
+			return old, nil
+		}, nil
+	}
+	u := *user
+	if idmap != nil {
+		identity, err := idmap.ToHost(idtools.Identity{
+			UID: user.Uid,
+			GID: user.Gid,
+		})
+		if err != nil {
+			return nil, err
+		}
+		u.Uid = identity.UID
+		u.Gid = identity.GID
+	}
+	return func(*copy.User) (*copy.User, error) {
+		return &u, nil
+	}, nil
+}
+
+func mkdir(ctx context.Context, d string, action pb.FileActionMkDir, user *copy.User, idmap *idtools.IdentityMapping) error {
+	p, err := fs.RootPath(d, filepath.Join(filepath.Join("/", action.Path)))
+	if err != nil {
+		return err
+	}
+
+	ch, err := mapUserToChowner(user, idmap)
+	if err != nil {
+		return err
+	}
+
+	if action.MakeParents {
+		if err := copy.MkdirAll(p, os.FileMode(action.Mode)&0777, ch, timestampToTime(action.Timestamp)); err != nil {
+			return err
+		}
+	} else {
+		if err := os.Mkdir(p, os.FileMode(action.Mode)&0777); err != nil {
+			if os.IsExist(err) {
+				return nil
+			}
+			return err
+		}
+		if err := copy.Chown(p, nil, ch); err != nil {
+			return err
+		}
+		if err := copy.Utimes(p, timestampToTime(action.Timestamp)); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func mkfile(ctx context.Context, d string, action pb.FileActionMkFile, user *copy.User, idmap *idtools.IdentityMapping) error {
+	p, err := fs.RootPath(d, filepath.Join(filepath.Join("/", action.Path)))
+	if err != nil {
+		return err
+	}
+
+	ch, err := mapUserToChowner(user, idmap)
+	if err != nil {
+		return err
+	}
+
+	if err := ioutil.WriteFile(p, action.Data, os.FileMode(action.Mode)&0777); err != nil {
+		return err
+	}
+
+	if err := copy.Chown(p, nil, ch); err != nil {
+		return err
+	}
+
+	if err := copy.Utimes(p, timestampToTime(action.Timestamp)); err != nil {
return err + } + + return nil +} + +func rm(ctx context.Context, d string, action pb.FileActionRm) error { + if action.AllowWildcard { + src := cleanPath(action.Path) + m, err := copy.ResolveWildcards(d, src, false) + if err != nil { + return err + } + + for _, s := range m { + if err := rmPath(d, s, action.AllowNotFound); err != nil { + return err + } + } + + return nil + } + + return rmPath(d, action.Path, action.AllowNotFound) +} + +func rmPath(root, src string, allowNotFound bool) error { + p, err := fs.RootPath(root, filepath.Join(filepath.Join("/", src))) + if err != nil { + return err + } + + if err := os.RemoveAll(p); err != nil { + if os.IsNotExist(errors.Cause(err)) && allowNotFound { + return nil + } + return err + } + + return nil +} + +func docopy(ctx context.Context, src, dest string, action pb.FileActionCopy, u *copy.User, idmap *idtools.IdentityMapping) error { + srcPath := cleanPath(action.Src) + destPath := cleanPath(action.Dest) + + if !action.CreateDestPath { + p, err := fs.RootPath(dest, filepath.Join(filepath.Join("/", action.Dest))) + if err != nil { + return err + } + if _, err := os.Lstat(filepath.Dir(p)); err != nil { + return errors.Wrapf(err, "failed to stat %s", action.Dest) + } + } + + xattrErrorHandler := func(dst, src, key string, err error) error { + log.Println(err) + return nil + } + + ch, err := mapUserToChowner(u, idmap) + if err != nil { + return err + } + + opt := []copy.Opt{ + func(ci *copy.CopyInfo) { + ci.Chown = ch + ci.Utime = timestampToTime(action.Timestamp) + if m := int(action.Mode); m != -1 { + ci.Mode = &m + } + ci.CopyDirContents = action.DirCopyContents + ci.FollowLinks = action.FollowSymlink + }, + copy.WithXAttrErrorHandler(xattrErrorHandler), + } + + if !action.AllowWildcard { + if action.AttemptUnpackDockerCompatibility { + if ok, err := unpack(ctx, src, srcPath, dest, destPath, ch, timestampToTime(action.Timestamp)); err != nil { + return err + } else if ok { + return nil + } + } + return copy.Copy(ctx, src, srcPath, dest, destPath, opt...) + } + + m, err := copy.ResolveWildcards(src, srcPath, action.FollowSymlink) + if err != nil { + return err + } + + if len(m) == 0 { + if action.AllowEmptyWildcard { + return nil + } + return errors.Errorf("%s not found", srcPath) + } + + for _, s := range m { + if action.AttemptUnpackDockerCompatibility { + if ok, err := unpack(ctx, src, s, dest, destPath, ch, timestampToTime(action.Timestamp)); err != nil { + return err + } else if ok { + continue + } + } + if err := copy.Copy(ctx, src, s, dest, destPath, opt...); err != nil { + return err + } + } + + return nil +} + +func cleanPath(s string) string { + s2 := filepath.Join("/", s) + if strings.HasSuffix(s, "/.") { + if s2 != "/" { + s2 += "/" + } + s2 += "." 
+ } else if strings.HasSuffix(s, "/") && s2 != "/" { + s2 += "/" + } + return s2 +} + +type Backend struct { +} + +func (fb *Backend) Mkdir(ctx context.Context, m, user, group fileoptypes.Mount, action pb.FileActionMkDir) error { + mnt, ok := m.(*Mount) + if !ok { + return errors.Errorf("invalid mount type %T", m) + } + + lm := snapshot.LocalMounter(mnt.m) + dir, err := lm.Mount() + if err != nil { + return err + } + defer lm.Unmount() + + u, err := readUser(action.Owner, user, group) + if err != nil { + return err + } + + return mkdir(ctx, dir, action, u, mnt.m.IdentityMapping()) +} + +func (fb *Backend) Mkfile(ctx context.Context, m, user, group fileoptypes.Mount, action pb.FileActionMkFile) error { + mnt, ok := m.(*Mount) + if !ok { + return errors.Errorf("invalid mount type %T", m) + } + + lm := snapshot.LocalMounter(mnt.m) + dir, err := lm.Mount() + if err != nil { + return err + } + defer lm.Unmount() + + u, err := readUser(action.Owner, user, group) + if err != nil { + return err + } + + return mkfile(ctx, dir, action, u, mnt.m.IdentityMapping()) +} +func (fb *Backend) Rm(ctx context.Context, m fileoptypes.Mount, action pb.FileActionRm) error { + mnt, ok := m.(*Mount) + if !ok { + return errors.Errorf("invalid mount type %T", m) + } + + lm := snapshot.LocalMounter(mnt.m) + dir, err := lm.Mount() + if err != nil { + return err + } + defer lm.Unmount() + + return rm(ctx, dir, action) +} +func (fb *Backend) Copy(ctx context.Context, m1, m2, user, group fileoptypes.Mount, action pb.FileActionCopy) error { + mnt1, ok := m1.(*Mount) + if !ok { + return errors.Errorf("invalid mount type %T", m1) + } + mnt2, ok := m2.(*Mount) + if !ok { + return errors.Errorf("invalid mount type %T", m2) + } + + lm := snapshot.LocalMounter(mnt1.m) + src, err := lm.Mount() + if err != nil { + return err + } + defer lm.Unmount() + + lm2 := snapshot.LocalMounter(mnt2.m) + dest, err := lm2.Mount() + if err != nil { + return err + } + defer lm2.Unmount() + + u, err := readUser(action.Owner, user, group) + if err != nil { + return err + } + + return docopy(ctx, src, dest, action, u, mnt2.m.IdentityMapping()) +} diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/file/refmanager.go b/vendor/github.com/moby/buildkit/solver/llbsolver/file/refmanager.go new file mode 100644 index 00000000..faa4cdbf --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/file/refmanager.go @@ -0,0 +1,70 @@ +package file + +import ( + "context" + + "github.com/moby/buildkit/cache" + "github.com/moby/buildkit/snapshot" + "github.com/moby/buildkit/solver/llbsolver/ops/fileoptypes" + "github.com/pkg/errors" +) + +func NewRefManager(cm cache.Manager) *RefManager { + return &RefManager{cm: cm} +} + +type RefManager struct { + cm cache.Manager +} + +func (rm *RefManager) Prepare(ctx context.Context, ref fileoptypes.Ref, readonly bool) (fileoptypes.Mount, error) { + ir, ok := ref.(cache.ImmutableRef) + if !ok && ref != nil { + return nil, errors.Errorf("invalid ref type: %T", ref) + } + + if ir != nil && readonly { + m, err := ir.Mount(ctx, readonly) + if err != nil { + return nil, err + } + return &Mount{m: m}, nil + } + + mr, err := rm.cm.New(ctx, ir, cache.WithDescription("fileop target"), cache.CachePolicyRetain) + if err != nil { + return nil, err + } + m, err := mr.Mount(ctx, readonly) + if err != nil { + return nil, err + } + return &Mount{m: m, mr: mr}, nil +} + +func (rm *RefManager) Commit(ctx context.Context, mount fileoptypes.Mount) (fileoptypes.Ref, error) { + m, ok := mount.(*Mount) + if !ok { + return 
nil, errors.Errorf("invalid mount type %T", mount) + } + if m.mr == nil { + return nil, errors.Errorf("invalid mount without active ref for commit") + } + defer func() { + m.mr = nil + }() + return m.mr.Commit(ctx) +} + +type Mount struct { + m snapshot.Mountable + mr cache.MutableRef +} + +func (m *Mount) Release(ctx context.Context) error { + if m.mr != nil { + return m.mr.Release(ctx) + } + return nil +} +func (m *Mount) IsFileOpMount() {} diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/file/unpack.go b/vendor/github.com/moby/buildkit/solver/llbsolver/file/unpack.go new file mode 100644 index 00000000..8e530526 --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/file/unpack.go @@ -0,0 +1,61 @@ +package file + +import ( + "archive/tar" + "context" + "os" + "time" + + "github.com/containerd/continuity/fs" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + copy "github.com/tonistiigi/fsutil/copy" +) + +func unpack(ctx context.Context, srcRoot string, src string, destRoot string, dest string, ch copy.Chowner, tm *time.Time) (bool, error) { + src, err := fs.RootPath(srcRoot, src) + if err != nil { + return false, err + } + if !isArchivePath(src) { + return false, nil + } + + dest, err = fs.RootPath(destRoot, dest) + if err != nil { + return false, err + } + if err := copy.MkdirAll(dest, 0755, ch, tm); err != nil { + return false, err + } + + file, err := os.Open(src) + if err != nil { + return false, err + } + defer file.Close() + + return true, chrootarchive.Untar(file, dest, nil) +} + +func isArchivePath(path string) bool { + fi, err := os.Lstat(path) + if err != nil { + return false + } + if fi.Mode()&os.ModeType != 0 { + return false + } + file, err := os.Open(path) + if err != nil { + return false + } + defer file.Close() + rdr, err := archive.DecompressStream(file) + if err != nil { + return false + } + r := tar.NewReader(rdr) + _, err = r.Next() + return err == nil +} diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/file/user_linux.go b/vendor/github.com/moby/buildkit/solver/llbsolver/file/user_linux.go new file mode 100644 index 00000000..93b32362 --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/file/user_linux.go @@ -0,0 +1,119 @@ +package file + +import ( + "os" + + "github.com/containerd/continuity/fs" + "github.com/moby/buildkit/snapshot" + "github.com/moby/buildkit/solver/llbsolver/ops/fileoptypes" + "github.com/moby/buildkit/solver/pb" + "github.com/opencontainers/runc/libcontainer/user" + "github.com/pkg/errors" + copy "github.com/tonistiigi/fsutil/copy" +) + +func readUser(chopt *pb.ChownOpt, mu, mg fileoptypes.Mount) (*copy.User, error) { + if chopt == nil { + return nil, nil + } + var us copy.User + if chopt.User != nil { + switch u := chopt.User.User.(type) { + case *pb.UserOpt_ByName: + if mu == nil { + return nil, errors.Errorf("invalid missing user mount") + } + mmu, ok := mu.(*Mount) + if !ok { + return nil, errors.Errorf("invalid mount type %T", mu) + } + lm := snapshot.LocalMounter(mmu.m) + dir, err := lm.Mount() + if err != nil { + return nil, err + } + defer lm.Unmount() + + passwdPath, err := user.GetPasswdPath() + if err != nil { + return nil, err + } + + passwdPath, err = fs.RootPath(dir, passwdPath) + if err != nil { + return nil, err + } + + ufile, err := os.Open(passwdPath) + if err != nil { + return nil, err + } + defer ufile.Close() + + users, err := user.ParsePasswdFilter(ufile, func(uu user.User) bool { + return uu.Name == u.ByName.Name + }) + if err != 
nil { + return nil, err + } + + if len(users) > 0 { + us.Uid = users[0].Uid + us.Gid = users[0].Gid + } + case *pb.UserOpt_ByID: + us.Uid = int(u.ByID) + us.Gid = int(u.ByID) + } + } + + if chopt.Group != nil { + switch u := chopt.Group.User.(type) { + case *pb.UserOpt_ByName: + if mg == nil { + return nil, errors.Errorf("invalid missing group mount") + } + mmg, ok := mg.(*Mount) + if !ok { + return nil, errors.Errorf("invalid mount type %T", mg) + } + lm := snapshot.LocalMounter(mmg.m) + dir, err := lm.Mount() + if err != nil { + return nil, err + } + defer lm.Unmount() + + groupPath, err := user.GetGroupPath() + if err != nil { + return nil, err + } + + groupPath, err = fs.RootPath(dir, groupPath) + if err != nil { + return nil, err + } + + gfile, err := os.Open(groupPath) + if err != nil { + return nil, err + } + defer gfile.Close() + + groups, err := user.ParseGroupFilter(gfile, func(g user.Group) bool { + return g.Name == u.ByName.Name + }) + if err != nil { + return nil, err + } + + if len(groups) > 0 { + us.Gid = groups[0].Gid + } + case *pb.UserOpt_ByID: + us.Gid = int(u.ByID) + } + } + + return &us, nil +} diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/file/user_nolinux.go b/vendor/github.com/moby/buildkit/solver/llbsolver/file/user_nolinux.go new file mode 100644 index 00000000..780b5595 --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/file/user_nolinux.go @@ -0,0 +1,14 @@ +// +build !linux + +package file + +import ( + "github.com/moby/buildkit/solver/llbsolver/ops/fileoptypes" + "github.com/moby/buildkit/solver/pb" + "github.com/pkg/errors" + copy "github.com/tonistiigi/fsutil/copy" +) + +func readUser(chopt *pb.ChownOpt, mu, mg fileoptypes.Mount) (*copy.User, error) { + return nil, errors.New("only implemented in linux") +} diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/ops/build.go b/vendor/github.com/moby/buildkit/solver/llbsolver/ops/build.go new file mode 100644 index 00000000..d4121f08 --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/ops/build.go @@ -0,0 +1,141 @@ +package ops + +import ( + "context" + "encoding/json" + "os" + + "github.com/containerd/continuity/fs" + "github.com/moby/buildkit/client/llb" + "github.com/moby/buildkit/frontend" + "github.com/moby/buildkit/snapshot" + "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/solver/llbsolver" + "github.com/moby/buildkit/solver/pb" + "github.com/moby/buildkit/worker" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +const buildCacheType = "buildkit.build.v0" + +type buildOp struct { + op *pb.BuildOp + b frontend.FrontendLLBBridge + v solver.Vertex +} + +func NewBuildOp(v solver.Vertex, op *pb.Op_Build, b frontend.FrontendLLBBridge, _ worker.Worker) (solver.Op, error) { + if err := llbsolver.ValidateOp(&pb.Op{Op: op}); err != nil { + return nil, err + } + return &buildOp{ + op: op.Build, + b: b, + v: v, + }, nil +} + +func (b *buildOp) CacheMap(ctx context.Context, index int) (*solver.CacheMap, bool, error) { + dt, err := json.Marshal(struct { + Type string + Exec *pb.BuildOp + }{ + Type: buildCacheType, + Exec: b.op, + }) + if err != nil { + return nil, false, err + } + + return &solver.CacheMap{ + Digest: digest.FromBytes(dt), + Deps: make([]struct { + Selector digest.Digest + ComputeDigestFunc solver.ResultBasedCacheFunc + }, len(b.v.Inputs())), + }, true, nil +} + +func (b *buildOp) Exec(ctx context.Context, inputs []solver.Result) (outputs []solver.Result, retErr error) { + if b.op.Builder != 
pb.LLBBuilder { + return nil, errors.Errorf("only LLB builder is currently allowed") + } + + builderInputs := b.op.Inputs + llbDef, ok := builderInputs[pb.LLBDefinitionInput] + if !ok { + return nil, errors.Errorf("no llb definition input %s found", pb.LLBDefinitionInput) + } + + i := int(llbDef.Input) + if i >= len(inputs) { + return nil, errors.Errorf("invalid index %v", i) // TODO: this should be validated before + } + inp := inputs[i] + + ref, ok := inp.Sys().(*worker.WorkerRef) + if !ok { + return nil, errors.Errorf("invalid reference for build %T", inp.Sys()) + } + + mount, err := ref.ImmutableRef.Mount(ctx, true) + if err != nil { + return nil, err + } + + lm := snapshot.LocalMounter(mount) + + root, err := lm.Mount() + if err != nil { + return nil, err + } + + defer func() { + if retErr != nil && lm != nil { + lm.Unmount() + } + }() + + fn := pb.LLBDefaultDefinitionFile + if override, ok := b.op.Attrs[pb.AttrLLBDefinitionFilename]; ok { + fn = override + } + + newfn, err := fs.RootPath(root, fn) + if err != nil { + return nil, errors.Wrapf(err, "working dir %s points to invalid target", fn) + } + + f, err := os.Open(newfn) + if err != nil { + return nil, errors.Wrapf(err, "failed to open %s", newfn) + } + + def, err := llb.ReadFrom(f) + if err != nil { + f.Close() + return nil, err + } + f.Close() + lm.Unmount() + lm = nil + + newRes, err := b.b.Solve(ctx, frontend.SolveRequest{ + Definition: def.ToPB(), + }) + if err != nil { + return nil, err + } + + for _, r := range newRes.Refs { + r.Release(context.TODO()) + } + + r, err := newRes.Ref.Result(ctx) + if err != nil { + return nil, err + } + + return []solver.Result{r}, err +} diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/ops/exec.go b/vendor/github.com/moby/buildkit/solver/llbsolver/ops/exec.go new file mode 100644 index 00000000..04355e54 --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/ops/exec.go @@ -0,0 +1,905 @@ +package ops + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net" + "os" + "path" + "path/filepath" + "sort" + "strings" + "sync" + "time" + + "github.com/containerd/containerd/mount" + "github.com/containerd/containerd/platforms" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/locker" + "github.com/moby/buildkit/cache" + "github.com/moby/buildkit/cache/metadata" + "github.com/moby/buildkit/client" + "github.com/moby/buildkit/executor" + "github.com/moby/buildkit/identity" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/session/secrets" + "github.com/moby/buildkit/session/sshforward" + "github.com/moby/buildkit/snapshot" + "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/solver/llbsolver" + "github.com/moby/buildkit/solver/pb" + "github.com/moby/buildkit/util/progress/logs" + utilsystem "github.com/moby/buildkit/util/system" + "github.com/moby/buildkit/worker" + digest "github.com/opencontainers/go-digest" + specs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/opencontainers/runc/libcontainer/system" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + bolt "go.etcd.io/bbolt" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +const execCacheType = "buildkit.exec.v0" + +type execOp struct { + op *pb.ExecOp + cm cache.Manager + sm *session.Manager + md *metadata.Store + exec executor.Executor + w worker.Worker + platform *pb.Platform + numInputs int + + cacheMounts map[string]*cacheRefShare + cacheMountsMu sync.Mutex +} + +func NewExecOp(v solver.Vertex, 
op *pb.Op_Exec, platform *pb.Platform, cm cache.Manager, sm *session.Manager, md *metadata.Store, exec executor.Executor, w worker.Worker) (solver.Op, error) {
+	if err := llbsolver.ValidateOp(&pb.Op{Op: op}); err != nil {
+		return nil, err
+	}
+	return &execOp{
+		op:          op.Exec,
+		cm:          cm,
+		sm:          sm,
+		md:          md,
+		exec:        exec,
+		numInputs:   len(v.Inputs()),
+		w:           w,
+		platform:    platform,
+		cacheMounts: map[string]*cacheRefShare{},
+	}, nil
+}
+
+func cloneExecOp(old *pb.ExecOp) pb.ExecOp {
+	n := *old
+	meta := *n.Meta
+	meta.ExtraHosts = nil
+	for i := range n.Meta.ExtraHosts {
+		h := *n.Meta.ExtraHosts[i]
+		meta.ExtraHosts = append(meta.ExtraHosts, &h)
+	}
+	n.Meta = &meta
+	n.Mounts = nil
+	// deep-copy the mounts from the original op (n.Mounts was just reset to
+	// nil, so ranging over it would never execute) so that CacheMap can clear
+	// per-mount selectors without mutating the shared ExecOp
+	for i := range old.Mounts {
+		m := *old.Mounts[i]
+		n.Mounts = append(n.Mounts, &m)
+	}
+	return n
+}
+
+func (e *execOp) CacheMap(ctx context.Context, index int) (*solver.CacheMap, bool, error) {
+	op := cloneExecOp(e.op)
+	for i := range op.Meta.ExtraHosts {
+		h := op.Meta.ExtraHosts[i]
+		h.IP = ""
+		op.Meta.ExtraHosts[i] = h
+	}
+	for i := range op.Mounts {
+		op.Mounts[i].Selector = ""
+	}
+	op.Meta.ProxyEnv = nil
+
+	p := platforms.DefaultSpec()
+	if e.platform != nil {
+		p = specs.Platform{
+			OS:           e.platform.OS,
+			Architecture: e.platform.Architecture,
+			Variant:      e.platform.Variant,
+		}
+	}
+
+	dt, err := json.Marshal(struct {
+		Type    string
+		Exec    *pb.ExecOp
+		OS      string
+		Arch    string
+		Variant string `json:",omitempty"`
+	}{
+		Type:    execCacheType,
+		Exec:    &op,
+		OS:      p.OS,
+		Arch:    p.Architecture,
+		Variant: p.Variant,
+	})
+	if err != nil {
+		return nil, false, err
+	}
+
+	cm := &solver.CacheMap{
+		Digest: digest.FromBytes(dt),
+		Deps: make([]struct {
+			Selector          digest.Digest
+			ComputeDigestFunc solver.ResultBasedCacheFunc
+		}, e.numInputs),
+	}
+
+	deps, err := e.getMountDeps()
+	if err != nil {
+		return nil, false, err
+	}
+
+	for i, dep := range deps {
+		if len(dep.Selectors) != 0 {
+			dgsts := make([][]byte, 0, len(dep.Selectors))
+			for _, p := range dep.Selectors {
+				dgsts = append(dgsts, []byte(p))
+			}
+			cm.Deps[i].Selector = digest.FromBytes(bytes.Join(dgsts, []byte{0}))
+		}
+		if !dep.NoContentBasedHash {
+			cm.Deps[i].ComputeDigestFunc = llbsolver.NewContentHashFunc(toSelectors(dedupePaths(dep.Selectors)))
+		}
+	}
+
+	return cm, true, nil
+}
+
+func dedupePaths(inp []string) []string {
+	old := make(map[string]struct{}, len(inp))
+	for _, p := range inp {
+		old[p] = struct{}{}
+	}
+	paths := make([]string, 0, len(old))
+	for p1 := range old {
+		var skip bool
+		for p2 := range old {
+			if p1 != p2 && strings.HasPrefix(p1, p2+"/") {
+				skip = true
+				break
+			}
+		}
+		if !skip {
+			paths = append(paths, p1)
+		}
+	}
+	sort.Slice(paths, func(i, j int) bool {
+		return paths[i] < paths[j]
+	})
+	return paths
+}
+
+func toSelectors(p []string) []llbsolver.Selector {
+	sel := make([]llbsolver.Selector, 0, len(p))
+	for _, p := range p {
+		sel = append(sel, llbsolver.Selector{Path: p, FollowLinks: true})
+	}
+	return sel
+}
+
+type dep struct {
+	Selectors          []string
+	NoContentBasedHash bool
+}
+
+func (e *execOp) getMountDeps() ([]dep, error) {
+	deps := make([]dep, e.numInputs)
+	for _, m := range e.op.Mounts {
+		if m.Input == pb.Empty {
+			continue
+		}
+		if int(m.Input) >= len(deps) {
+			return nil, errors.Errorf("invalid mount input %v", m)
+		}
+
+		sel := m.Selector
+		if sel != "" {
+			sel = path.Join("/", sel)
+			deps[m.Input].Selectors = append(deps[m.Input].Selectors, sel)
+		}
+
+		if (!m.Readonly || m.Dest == pb.RootMount) && m.Output != -1 { // exclude read-only rootfs && read-write mounts
+
deps[m.Input].NoContentBasedHash = true + } + } + return deps, nil +} + +func (e *execOp) getRefCacheDir(ctx context.Context, ref cache.ImmutableRef, id string, m *pb.Mount, sharing pb.CacheSharingOpt) (mref cache.MutableRef, err error) { + g := &cacheRefGetter{ + locker: &e.cacheMountsMu, + cacheMounts: e.cacheMounts, + cm: e.cm, + md: e.md, + globalCacheRefs: sharedCacheRefs, + name: fmt.Sprintf("cached mount %s from exec %s", m.Dest, strings.Join(e.op.Meta.Args, " ")), + } + return g.getRefCacheDir(ctx, ref, id, sharing) +} + +type cacheRefGetter struct { + locker sync.Locker + cacheMounts map[string]*cacheRefShare + cm cache.Manager + md *metadata.Store + globalCacheRefs *cacheRefs + name string +} + +func (g *cacheRefGetter) getRefCacheDir(ctx context.Context, ref cache.ImmutableRef, id string, sharing pb.CacheSharingOpt) (mref cache.MutableRef, err error) { + key := "cache-dir:" + id + if ref != nil { + key += ":" + ref.ID() + } + mu := g.locker + mu.Lock() + defer mu.Unlock() + + if ref, ok := g.cacheMounts[key]; ok { + return ref.clone(), nil + } + defer func() { + if err == nil { + share := &cacheRefShare{MutableRef: mref, refs: map[*cacheRef]struct{}{}} + g.cacheMounts[key] = share + mref = share.clone() + } + }() + + switch sharing { + case pb.CacheSharingOpt_SHARED: + return g.globalCacheRefs.get(key, func() (cache.MutableRef, error) { + return g.getRefCacheDirNoCache(ctx, key, ref, id, false) + }) + case pb.CacheSharingOpt_PRIVATE: + return g.getRefCacheDirNoCache(ctx, key, ref, id, false) + case pb.CacheSharingOpt_LOCKED: + return g.getRefCacheDirNoCache(ctx, key, ref, id, true) + default: + return nil, errors.Errorf("invalid cache sharing option: %s", sharing.String()) + } +} + +func (g *cacheRefGetter) getRefCacheDirNoCache(ctx context.Context, key string, ref cache.ImmutableRef, id string, block bool) (cache.MutableRef, error) { + makeMutable := func(ref cache.ImmutableRef) (cache.MutableRef, error) { + return g.cm.New(ctx, ref, cache.WithRecordType(client.UsageRecordTypeCacheMount), cache.WithDescription(g.name), cache.CachePolicyRetain) + } + + cacheRefsLocker.Lock(key) + defer cacheRefsLocker.Unlock(key) + for { + sis, err := g.md.Search(key) + if err != nil { + return nil, err + } + locked := false + for _, si := range sis { + if mRef, err := g.cm.GetMutable(ctx, si.ID()); err == nil { + logrus.Debugf("reusing ref for cache dir: %s", mRef.ID()) + return mRef, nil + } else if errors.Cause(err) == cache.ErrLocked { + locked = true + } + } + if block && locked { + cacheRefsLocker.Unlock(key) + select { + case <-ctx.Done(): + cacheRefsLocker.Lock(key) + return nil, ctx.Err() + case <-time.After(100 * time.Millisecond): + cacheRefsLocker.Lock(key) + } + } else { + break + } + } + mRef, err := makeMutable(ref) + if err != nil { + return nil, err + } + + si, _ := g.md.Get(mRef.ID()) + v, err := metadata.NewValue(key) + if err != nil { + mRef.Release(context.TODO()) + return nil, err + } + v.Index = key + if err := si.Update(func(b *bolt.Bucket) error { + return si.SetValue(b, key, v) + }); err != nil { + mRef.Release(context.TODO()) + return nil, err + } + return mRef, nil +} + +func (e *execOp) getSSHMountable(ctx context.Context, m *pb.Mount) (cache.Mountable, error) { + sessionID := session.FromContext(ctx) + if sessionID == "" { + return nil, errors.New("could not access local files without session") + } + + timeoutCtx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + + caller, err := e.sm.Get(timeoutCtx, sessionID) + if err != nil { + return nil, err 
+	}
+
+	if err := sshforward.CheckSSHID(ctx, caller, m.SSHOpt.ID); err != nil {
+		if m.SSHOpt.Optional {
+			return nil, nil
+		}
+		if st, ok := status.FromError(errors.Cause(err)); ok && st.Code() == codes.Unimplemented {
+			return nil, errors.Errorf("no SSH key %q forwarded from the client", m.SSHOpt.ID)
+		}
+		return nil, err
+	}
+
+	return &sshMount{mount: m, caller: caller, idmap: e.cm.IdentityMapping()}, nil
+}
+
+type sshMount struct {
+	mount  *pb.Mount
+	caller session.Caller
+	idmap  *idtools.IdentityMapping
+}
+
+func (sm *sshMount) Mount(ctx context.Context, readonly bool) (snapshot.Mountable, error) {
+	return &sshMountInstance{sm: sm, idmap: sm.idmap}, nil
+}
+
+type sshMountInstance struct {
+	sm    *sshMount
+	idmap *idtools.IdentityMapping
+}
+
+func (sm *sshMountInstance) Mount() ([]mount.Mount, func() error, error) {
+	ctx, cancel := context.WithCancel(context.TODO())
+
+	uid := int(sm.sm.mount.SSHOpt.Uid)
+	gid := int(sm.sm.mount.SSHOpt.Gid)
+
+	if sm.idmap != nil {
+		identity, err := sm.idmap.ToHost(idtools.Identity{
+			UID: uid,
+			GID: gid,
+		})
+		if err != nil {
+			return nil, nil, err
+		}
+		uid = identity.UID
+		gid = identity.GID
+	}
+
+	sock, cleanup, err := sshforward.MountSSHSocket(ctx, sm.sm.caller, sshforward.SocketOpt{
+		ID:   sm.sm.mount.SSHOpt.ID,
+		UID:  uid,
+		GID:  gid,
+		Mode: int(sm.sm.mount.SSHOpt.Mode & 0777),
+	})
+	if err != nil {
+		cancel()
+		return nil, nil, err
+	}
+	release := func() error {
+		var err error
+		if cleanup != nil {
+			err = cleanup()
+		}
+		cancel()
+		return err
+	}
+
+	return []mount.Mount{{
+		Type:    "bind",
+		Source:  sock,
+		Options: []string{"rbind"},
+	}}, release, nil
+}
+
+func (sm *sshMountInstance) IdentityMapping() *idtools.IdentityMapping {
+	return sm.idmap
+}
+
+func (e *execOp) getSecretMountable(ctx context.Context, m *pb.Mount) (cache.Mountable, error) {
+	if m.SecretOpt == nil {
+		return nil, errors.Errorf("invalid secret mount options")
+	}
+	sopt := *m.SecretOpt
+
+	id := sopt.ID
+	if id == "" {
+		return nil, errors.Errorf("secret ID missing from mount options")
+	}
+
+	sessionID := session.FromContext(ctx)
+	if sessionID == "" {
+		return nil, errors.New("could not access local files without session")
+	}
+
+	timeoutCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
+	defer cancel()
+
+	caller, err := e.sm.Get(timeoutCtx, sessionID)
+	if err != nil {
+		return nil, err
+	}
+
+	dt, err := secrets.GetSecret(ctx, caller, id)
+	if err != nil {
+		if errors.Cause(err) == secrets.ErrNotFound && m.SecretOpt.Optional {
+			return nil, nil
+		}
+		return nil, err
+	}
+
+	return &secretMount{mount: m, data: dt, idmap: e.cm.IdentityMapping()}, nil
+}
+
+type secretMount struct {
+	mount *pb.Mount
+	data  []byte
+	idmap *idtools.IdentityMapping
+}
+
+func (sm *secretMount) Mount(ctx context.Context, readonly bool) (snapshot.Mountable, error) {
+	return &secretMountInstance{sm: sm, idmap: sm.idmap}, nil
+}
+
+type secretMountInstance struct {
+	sm    *secretMount
+	root  string
+	idmap *idtools.IdentityMapping
+}
+
+func (sm *secretMountInstance) Mount() ([]mount.Mount, func() error, error) {
+	dir, err := ioutil.TempDir("", "buildkit-secrets")
+	if err != nil {
+		return nil, nil, errors.Wrap(err, "failed to create temp dir")
+	}
+	cleanupDir := func() error {
+		return os.RemoveAll(dir)
+	}
+
+	if err := os.Chmod(dir, 0711); err != nil {
+		cleanupDir()
+		return nil, nil, err
+	}
+
+	tmpMount := mount.Mount{
+		Type:    "tmpfs",
+		Source:  "tmpfs",
+		Options: []string{"nodev", "nosuid", "noexec", fmt.Sprintf("uid=%d,gid=%d", os.Geteuid(), os.Getegid())},
+	}
+
+	if system.RunningInUserNS() {
+		tmpMount.Options = nil
+	}
+
+	if err := mount.All([]mount.Mount{tmpMount}, dir); err != nil {
+		cleanupDir()
+		return nil, nil, errors.Wrap(err, "unable to set up secret mount")
+	}
+	sm.root = dir
+
+	cleanup := func() error {
+		if err := mount.Unmount(dir, 0); err != nil {
+			return err
+		}
+		return cleanupDir()
+	}
+
+	randID := identity.NewID()
+	fp := filepath.Join(dir, randID)
+	if err := ioutil.WriteFile(fp, sm.sm.data, 0600); err != nil {
+		cleanup()
+		return nil, nil, err
+	}
+
+	uid := int(sm.sm.mount.SecretOpt.Uid)
+	gid := int(sm.sm.mount.SecretOpt.Gid)
+
+	if sm.idmap != nil {
+		identity, err := sm.idmap.ToHost(idtools.Identity{
+			UID: uid,
+			GID: gid,
+		})
+		if err != nil {
+			cleanup()
+			return nil, nil, err
+		}
+		uid = identity.UID
+		gid = identity.GID
+	}
+
+	if err := os.Chown(fp, uid, gid); err != nil {
+		cleanup()
+		return nil, nil, err
+	}
+
+	if err := os.Chmod(fp, os.FileMode(sm.sm.mount.SecretOpt.Mode&0777)); err != nil {
+		cleanup()
+		return nil, nil, err
+	}
+
+	return []mount.Mount{{
+		Type:    "bind",
+		Source:  fp,
+		Options: []string{"ro", "rbind", "nodev", "nosuid", "noexec"},
+	}}, cleanup, nil
+}
+
+func (sm *secretMountInstance) IdentityMapping() *idtools.IdentityMapping {
+	return sm.idmap
+}
+
+func addDefaultEnvvar(env []string, k, v string) []string {
+	for _, e := range env {
+		if strings.HasPrefix(e, k+"=") {
+			return env
+		}
+	}
+	return append(env, k+"="+v)
+}
+
+func (e *execOp) Exec(ctx context.Context, inputs []solver.Result) ([]solver.Result, error) {
+	var mounts []executor.Mount
+	var root cache.Mountable
+	var readonlyRootFS bool
+
+	var outputs []cache.Ref
+
+	defer func() {
+		for _, o := range outputs {
+			if o != nil {
+				go o.Release(context.TODO())
+			}
+		}
+	}()
+
+	// loop over all mounts, fill in mounts, root and outputs
+	for _, m := range e.op.Mounts {
+		var mountable cache.Mountable
+		var ref cache.ImmutableRef
+
+		if m.Dest == pb.RootMount && m.MountType != pb.MountType_BIND {
+			return nil, errors.Errorf("invalid mount type %s for %s", m.MountType.String(), m.Dest)
+		}
+
+		// if the mount is based on an input, validate and load it
+		if m.Input != pb.Empty {
+			// >= guards the index into inputs below
+			if int(m.Input) >= len(inputs) {
+				return nil, errors.Errorf("missing input %d", m.Input)
+			}
+			inp := inputs[int(m.Input)]
+			workerRef, ok := inp.Sys().(*worker.WorkerRef)
+			if !ok {
+				return nil, errors.Errorf("invalid reference for exec %T", inp.Sys())
+			}
+			ref = workerRef.ImmutableRef
+			mountable = ref
+		}
+
+		makeMutable := func(ref cache.ImmutableRef) (cache.MutableRef, error) {
+			desc := fmt.Sprintf("mount %s from exec %s", m.Dest, strings.Join(e.op.Meta.Args, " "))
+			return e.cm.New(ctx, ref, cache.WithDescription(desc))
+		}
+
+		switch m.MountType {
+		case pb.MountType_BIND:
+			// if the mount creates an output
+			if m.Output != pb.SkipOutput {
+				// if it is readonly and not root then the output is the input
+				if m.Readonly && ref != nil && m.Dest != pb.RootMount {
+					outputs = append(outputs, ref.Clone())
+				} else {
+					// otherwise the output and the mount are the mutable child
+					active, err := makeMutable(ref)
+					if err != nil {
+						return nil, err
+					}
+					outputs = append(outputs, active)
+					mountable = active
+				}
+			} else if (!m.Readonly || ref == nil) && m.Dest != pb.RootMount {
+				// an empty readonly scratch mount with no output: not useful
+				// for much, but not an error either
+				active, err := makeMutable(ref)
+				if err != nil {
+					return nil, err
+				}
+				defer active.Release(context.TODO())
+				mountable = active
+			}
+
+		case pb.MountType_CACHE:
+			if m.CacheOpt ==
nil { + return nil, errors.Errorf("missing cache mount options") + } + mRef, err := e.getRefCacheDir(ctx, ref, m.CacheOpt.ID, m, m.CacheOpt.Sharing) + if err != nil { + return nil, err + } + mountable = mRef + defer func() { + go mRef.Release(context.TODO()) + }() + if m.Output != pb.SkipOutput && ref != nil { + outputs = append(outputs, ref.Clone()) + } + + case pb.MountType_TMPFS: + mountable = newTmpfs(e.cm.IdentityMapping()) + + case pb.MountType_SECRET: + secretMount, err := e.getSecretMountable(ctx, m) + if err != nil { + return nil, err + } + if secretMount == nil { + continue + } + mountable = secretMount + + case pb.MountType_SSH: + sshMount, err := e.getSSHMountable(ctx, m) + if err != nil { + return nil, err + } + if sshMount == nil { + continue + } + mountable = sshMount + + default: + return nil, errors.Errorf("mount type %s not implemented", m.MountType) + } + + // validate that there is a mount + if mountable == nil { + return nil, errors.Errorf("mount %s has no input", m.Dest) + } + + // if dest is root we need mutable ref even if there is no output + if m.Dest == pb.RootMount { + root = mountable + readonlyRootFS = m.Readonly + if m.Output == pb.SkipOutput && readonlyRootFS { + active, err := makeMutable(ref) + if err != nil { + return nil, err + } + defer func() { + go active.Release(context.TODO()) + }() + root = active + } + } else { + mounts = append(mounts, executor.Mount{Src: mountable, Dest: m.Dest, Readonly: m.Readonly, Selector: m.Selector}) + } + } + + // sort mounts so parents are mounted first + sort.Slice(mounts, func(i, j int) bool { + return mounts[i].Dest < mounts[j].Dest + }) + + extraHosts, err := parseExtraHosts(e.op.Meta.ExtraHosts) + if err != nil { + return nil, err + } + + meta := executor.Meta{ + Args: e.op.Meta.Args, + Env: e.op.Meta.Env, + Cwd: e.op.Meta.Cwd, + User: e.op.Meta.User, + ReadonlyRootFS: readonlyRootFS, + ExtraHosts: extraHosts, + NetMode: e.op.Network, + SecurityMode: e.op.Security, + } + + if e.op.Meta.ProxyEnv != nil { + meta.Env = append(meta.Env, proxyEnvList(e.op.Meta.ProxyEnv)...) 
+ } + meta.Env = addDefaultEnvvar(meta.Env, "PATH", utilsystem.DefaultPathEnv) + + stdout, stderr := logs.NewLogStreams(ctx, os.Getenv("BUILDKIT_DEBUG_EXEC_OUTPUT") == "1") + defer stdout.Close() + defer stderr.Close() + + if err := e.exec.Exec(ctx, meta, root, mounts, nil, stdout, stderr); err != nil { + return nil, errors.Wrapf(err, "executor failed running %v", meta.Args) + } + + refs := []solver.Result{} + for i, out := range outputs { + if mutable, ok := out.(cache.MutableRef); ok { + ref, err := mutable.Commit(ctx) + if err != nil { + return nil, errors.Wrapf(err, "error committing %s", mutable.ID()) + } + refs = append(refs, worker.NewWorkerRefResult(ref, e.w)) + } else { + refs = append(refs, worker.NewWorkerRefResult(out.(cache.ImmutableRef), e.w)) + } + outputs[i] = nil + } + return refs, nil +} + +func proxyEnvList(p *pb.ProxyEnv) []string { + out := []string{} + if v := p.HttpProxy; v != "" { + out = append(out, "HTTP_PROXY="+v, "http_proxy="+v) + } + if v := p.HttpsProxy; v != "" { + out = append(out, "HTTPS_PROXY="+v, "https_proxy="+v) + } + if v := p.FtpProxy; v != "" { + out = append(out, "FTP_PROXY="+v, "ftp_proxy="+v) + } + if v := p.NoProxy; v != "" { + out = append(out, "NO_PROXY="+v, "no_proxy="+v) + } + return out +} + +func newTmpfs(idmap *idtools.IdentityMapping) cache.Mountable { + return &tmpfs{idmap: idmap} +} + +type tmpfs struct { + idmap *idtools.IdentityMapping +} + +func (f *tmpfs) Mount(ctx context.Context, readonly bool) (snapshot.Mountable, error) { + return &tmpfsMount{readonly: readonly, idmap: f.idmap}, nil +} + +type tmpfsMount struct { + readonly bool + idmap *idtools.IdentityMapping +} + +func (m *tmpfsMount) Mount() ([]mount.Mount, func() error, error) { + opt := []string{"nosuid"} + if m.readonly { + opt = append(opt, "ro") + } + return []mount.Mount{{ + Type: "tmpfs", + Source: "tmpfs", + Options: opt, + }}, func() error { return nil }, nil +} + +func (m *tmpfsMount) IdentityMapping() *idtools.IdentityMapping { + return m.idmap +} + +var cacheRefsLocker = locker.New() +var sharedCacheRefs = &cacheRefs{} + +type cacheRefs struct { + mu sync.Mutex + shares map[string]*cacheRefShare +} + +// ClearActiveCacheMounts clears shared cache mounts currently in use. 
+// Caller needs to hold CacheMountsLocker before calling +func ClearActiveCacheMounts() { + sharedCacheRefs.shares = nil +} + +func CacheMountsLocker() sync.Locker { + return &sharedCacheRefs.mu +} + +func (r *cacheRefs) get(key string, fn func() (cache.MutableRef, error)) (cache.MutableRef, error) { + r.mu.Lock() + defer r.mu.Unlock() + + if r.shares == nil { + r.shares = map[string]*cacheRefShare{} + } + + share, ok := r.shares[key] + if ok { + return share.clone(), nil + } + + mref, err := fn() + if err != nil { + return nil, err + } + + share = &cacheRefShare{MutableRef: mref, main: r, key: key, refs: map[*cacheRef]struct{}{}} + r.shares[key] = share + return share.clone(), nil +} + +type cacheRefShare struct { + cache.MutableRef + mu sync.Mutex + refs map[*cacheRef]struct{} + main *cacheRefs + key string +} + +func (r *cacheRefShare) clone() cache.MutableRef { + cacheRef := &cacheRef{cacheRefShare: r} + if cacheRefCloneHijack != nil { + cacheRefCloneHijack() + } + r.mu.Lock() + r.refs[cacheRef] = struct{}{} + r.mu.Unlock() + return cacheRef +} + +func (r *cacheRefShare) release(ctx context.Context) error { + if r.main != nil { + delete(r.main.shares, r.key) + } + return r.MutableRef.Release(ctx) +} + +var cacheRefReleaseHijack func() +var cacheRefCloneHijack func() + +type cacheRef struct { + *cacheRefShare +} + +func (r *cacheRef) Release(ctx context.Context) error { + if r.main != nil { + r.main.mu.Lock() + defer r.main.mu.Unlock() + } + r.mu.Lock() + defer r.mu.Unlock() + delete(r.refs, r) + if len(r.refs) == 0 { + if cacheRefReleaseHijack != nil { + cacheRefReleaseHijack() + } + return r.release(ctx) + } + return nil +} + +func parseExtraHosts(ips []*pb.HostIP) ([]executor.HostIP, error) { + out := make([]executor.HostIP, len(ips)) + for i, hip := range ips { + ip := net.ParseIP(hip.IP) + if ip == nil { + return nil, errors.Errorf("failed to parse IP %s", hip.IP) + } + out[i] = executor.HostIP{ + IP: ip, + Host: hip.Host, + } + } + return out, nil +} diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/ops/file.go b/vendor/github.com/moby/buildkit/solver/llbsolver/ops/file.go new file mode 100644 index 00000000..5a37411a --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/ops/file.go @@ -0,0 +1,583 @@ +package ops + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "path" + "runtime" + "sort" + "sync" + + "github.com/moby/buildkit/cache" + "github.com/moby/buildkit/cache/metadata" + "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/solver/llbsolver" + "github.com/moby/buildkit/solver/llbsolver/file" + "github.com/moby/buildkit/solver/llbsolver/ops/fileoptypes" + "github.com/moby/buildkit/solver/pb" + "github.com/moby/buildkit/util/flightcontrol" + "github.com/moby/buildkit/worker" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "golang.org/x/sync/errgroup" +) + +const fileCacheType = "buildkit.file.v0" + +type fileOp struct { + op *pb.FileOp + md *metadata.Store + w worker.Worker + solver *FileOpSolver + numInputs int +} + +func NewFileOp(v solver.Vertex, op *pb.Op_File, cm cache.Manager, md *metadata.Store, w worker.Worker) (solver.Op, error) { + if err := llbsolver.ValidateOp(&pb.Op{Op: op}); err != nil { + return nil, err + } + return &fileOp{ + op: op.File, + md: md, + numInputs: len(v.Inputs()), + w: w, + solver: NewFileOpSolver(&file.Backend{}, file.NewRefManager(cm)), + }, nil +} + +func (f *fileOp) CacheMap(ctx context.Context, index int) (*solver.CacheMap, bool, error) { + selectors := 
map[int]map[llbsolver.Selector]struct{}{}
+	invalidSelectors := map[int]struct{}{}
+
+	actions := make([][]byte, 0, len(f.op.Actions))
+
+	markInvalid := func(idx pb.InputIndex) {
+		if idx != -1 {
+			invalidSelectors[int(idx)] = struct{}{}
+		}
+	}
+
+	for _, action := range f.op.Actions {
+		var dt []byte
+		var err error
+		switch a := action.Action.(type) {
+		case *pb.FileAction_Mkdir:
+			p := *a.Mkdir
+			markInvalid(action.Input)
+			if err := processOwner(p.Owner, selectors); err != nil {
+				return nil, false, err
+			}
+			dt, err = json.Marshal(p)
+			if err != nil {
+				return nil, false, err
+			}
+		case *pb.FileAction_Mkfile:
+			p := *a.Mkfile
+			markInvalid(action.Input)
+			if err := processOwner(p.Owner, selectors); err != nil {
+				return nil, false, err
+			}
+			dt, err = json.Marshal(p)
+			if err != nil {
+				return nil, false, err
+			}
+		case *pb.FileAction_Rm:
+			p := *a.Rm
+			markInvalid(action.Input)
+			dt, err = json.Marshal(p)
+			if err != nil {
+				return nil, false, err
+			}
+		case *pb.FileAction_Copy:
+			p := *a.Copy
+			markInvalid(action.Input)
+			if err := processOwner(p.Owner, selectors); err != nil {
+				return nil, false, err
+			}
+			if action.SecondaryInput != -1 && int(action.SecondaryInput) < f.numInputs {
+				addSelector(selectors, int(action.SecondaryInput), p.Src, p.AllowWildcard, p.FollowSymlink)
+				p.Src = path.Base(p.Src)
+			}
+			dt, err = json.Marshal(p)
+			if err != nil {
+				return nil, false, err
+			}
+		}
+
+		actions = append(actions, dt)
+	}
+
+	dt, err := json.Marshal(struct {
+		Type    string
+		Actions [][]byte
+	}{
+		Type:    fileCacheType,
+		Actions: actions,
+	})
+	if err != nil {
+		return nil, false, err
+	}
+
+	cm := &solver.CacheMap{
+		Digest: digest.FromBytes(dt),
+		Deps: make([]struct {
+			Selector          digest.Digest
+			ComputeDigestFunc solver.ResultBasedCacheFunc
+		}, f.numInputs),
+	}
+
+	for idx, m := range selectors {
+		if _, ok := invalidSelectors[idx]; ok {
+			continue
+		}
+		dgsts := make([][]byte, 0, len(m))
+		for k := range m {
+			dgsts = append(dgsts, []byte(k.Path))
+		}
+		sort.Slice(dgsts, func(i, j int) bool {
+			return bytes.Compare(dgsts[i], dgsts[j]) > 0
+		})
+		cm.Deps[idx].Selector = digest.FromBytes(bytes.Join(dgsts, []byte{0}))
+
+		cm.Deps[idx].ComputeDigestFunc = llbsolver.NewContentHashFunc(dedupeSelectors(m))
+	}
+
+	return cm, true, nil
+}
+
+func (f *fileOp) Exec(ctx context.Context, inputs []solver.Result) ([]solver.Result, error) {
+	inpRefs := make([]fileoptypes.Ref, 0, len(inputs))
+	for _, inp := range inputs {
+		workerRef, ok := inp.Sys().(*worker.WorkerRef)
+		if !ok {
+			return nil, errors.Errorf("invalid reference for exec %T", inp.Sys())
+		}
+		inpRefs = append(inpRefs, workerRef.ImmutableRef)
+	}
+
+	outs, err := f.solver.Solve(ctx, inpRefs, f.op.Actions)
+	if err != nil {
+		return nil, err
+	}
+
+	outResults := make([]solver.Result, 0, len(outs))
+	for _, out := range outs {
+		outResults = append(outResults, worker.NewWorkerRefResult(out.(cache.ImmutableRef), f.w))
+	}
+
+	return outResults, nil
+}
+
+func addSelector(m map[int]map[llbsolver.Selector]struct{}, idx int, sel string, wildcard, followLinks bool) {
+	mm, ok := m[idx]
+	if !ok {
+		mm = map[llbsolver.Selector]struct{}{}
+		m[idx] = mm
+	}
+	s := llbsolver.Selector{Path: sel}
+
+	if wildcard && containsWildcards(sel) {
+		s.Wildcard = true
+	}
+	if followLinks {
+		s.FollowLinks = true
+	}
+	mm[s] = struct{}{}
+}
+
+func containsWildcards(name string) bool {
+	isWindows := runtime.GOOS == "windows"
+	for i := 0; i < len(name); i++ {
+		ch := name[i]
+		if ch == '\\' && !isWindows {
+			i++
+		} else if ch == '*' || ch == '?' || ch == '[' {
+			return true
+		}
+	}
+	return false
+}
+
+func dedupeSelectors(m map[llbsolver.Selector]struct{}) []llbsolver.Selector {
+	paths := make([]string, 0, len(m))
+	pathsFollow := make([]string, 0, len(m))
+	for sel := range m {
+		if !sel.Wildcard {
+			if sel.FollowLinks {
+				pathsFollow = append(pathsFollow, sel.Path)
+			} else {
+				paths = append(paths, sel.Path)
+			}
+		}
+	}
+	paths = dedupePaths(paths)
+	pathsFollow = dedupePaths(pathsFollow)
+	selectors := make([]llbsolver.Selector, 0, len(m))
+
+	for _, p := range paths {
+		selectors = append(selectors, llbsolver.Selector{Path: p})
+	}
+	for _, p := range pathsFollow {
+		selectors = append(selectors, llbsolver.Selector{Path: p, FollowLinks: true})
+	}
+
+	for sel := range m {
+		if sel.Wildcard {
+			selectors = append(selectors, sel)
+		}
+	}
+
+	sort.Slice(selectors, func(i, j int) bool {
+		return selectors[i].Path < selectors[j].Path
+	})
+
+	return selectors
+}
+
+func processOwner(chopt *pb.ChownOpt, selectors map[int]map[llbsolver.Selector]struct{}) error {
+	if chopt == nil {
+		return nil
+	}
+	if chopt.User != nil {
+		if u, ok := chopt.User.User.(*pb.UserOpt_ByName); ok {
+			if u.ByName.Input < 0 {
+				return errors.Errorf("invalid user index %d", u.ByName.Input)
+			}
+			addSelector(selectors, int(u.ByName.Input), "/etc/passwd", false, true)
+		}
+	}
+	if chopt.Group != nil {
+		if u, ok := chopt.Group.User.(*pb.UserOpt_ByName); ok {
+			if u.ByName.Input < 0 {
+				return errors.Errorf("invalid user index %d", u.ByName.Input)
+			}
+			addSelector(selectors, int(u.ByName.Input), "/etc/group", false, true)
+		}
+	}
+	return nil
+}
+
+func NewFileOpSolver(b fileoptypes.Backend, r fileoptypes.RefManager) *FileOpSolver {
+	return &FileOpSolver{
+		b:    b,
+		r:    r,
+		outs: map[int]int{},
+		ins:  map[int]input{},
+	}
+}
+
+type FileOpSolver struct {
+	b fileoptypes.Backend
+	r fileoptypes.RefManager
+
+	mu   sync.Mutex
+	outs map[int]int
+	ins  map[int]input
+	g    flightcontrol.Group
+}
+
+type input struct {
+	requiresCommit bool
+	mount          fileoptypes.Mount
+	ref            fileoptypes.Ref
+}
+
+func (s *FileOpSolver) Solve(ctx context.Context, inputs []fileoptypes.Ref, actions []*pb.FileAction) ([]fileoptypes.Ref, error) {
+	for i, a := range actions {
+		if int(a.Input) < -1 || int(a.Input) >= len(inputs)+len(actions) {
+			return nil, errors.Errorf("invalid input index %d, %d provided", a.Input, len(inputs)+len(actions))
+		}
+		if int(a.SecondaryInput) < -1 || int(a.SecondaryInput) >= len(inputs)+len(actions) {
+			return nil, errors.Errorf("invalid secondary input index %d, %d provided", a.SecondaryInput, len(inputs)+len(actions))
+		}
+
+		inp, ok := s.ins[int(a.Input)]
+		if ok {
+			inp.requiresCommit = true
+		}
+		s.ins[int(a.Input)] = inp
+
+		inp, ok = s.ins[int(a.SecondaryInput)]
+		if ok {
+			inp.requiresCommit = true
+		}
+		s.ins[int(a.SecondaryInput)] = inp
+
+		if a.Output != -1 {
+			if _, ok := s.outs[int(a.Output)]; ok {
+				return nil, errors.Errorf("duplicate output %d", a.Output)
+			}
+			idx := len(inputs) + i
+			s.outs[int(a.Output)] = idx
+			s.ins[idx] = input{requiresCommit: true}
+		}
+	}
+
+	if len(s.outs) == 0 {
+		return nil, errors.Errorf("no outputs specified")
+	}
+
+	for i := 0; i < len(s.outs); i++ {
+		if _, ok := s.outs[i]; !ok {
+			return nil, errors.Errorf("missing output index %d", i)
+		}
+	}
+
+	defer func() {
+		for _, in := range s.ins {
+			if in.ref == nil && in.mount != nil {
+				in.mount.Release(context.TODO())
+			}
+		}
+	}()
+
+	outs := make([]fileoptypes.Ref, len(s.outs))
+
+	eg, ctx := errgroup.WithContext(ctx)
+	for i, idx := range s.outs {
+		func(i, idx int) {
+
eg.Go(func() error { + if err := s.validate(idx, inputs, actions, nil); err != nil { + return err + } + inp, err := s.getInput(ctx, idx, inputs, actions) + if err != nil { + return err + } + outs[i] = inp.ref + return nil + }) + }(i, idx) + } + + if err := eg.Wait(); err != nil { + for _, r := range outs { + if r != nil { + r.Release(context.TODO()) + } + } + return nil, err + } + + return outs, nil +} + +func (s *FileOpSolver) validate(idx int, inputs []fileoptypes.Ref, actions []*pb.FileAction, loaded []int) error { + for _, check := range loaded { + if idx == check { + return errors.Errorf("loop from index %d", idx) + } + } + if idx < len(inputs) { + return nil + } + loaded = append(loaded, idx) + action := actions[idx-len(inputs)] + for _, inp := range []int{int(action.Input), int(action.SecondaryInput)} { + if err := s.validate(inp, inputs, actions, loaded); err != nil { + return err + } + } + return nil +} + +func (s *FileOpSolver) getInput(ctx context.Context, idx int, inputs []fileoptypes.Ref, actions []*pb.FileAction) (input, error) { + inp, err := s.g.Do(ctx, fmt.Sprintf("inp-%d", idx), func(ctx context.Context) (_ interface{}, err error) { + s.mu.Lock() + inp := s.ins[idx] + s.mu.Unlock() + if inp.mount != nil || inp.ref != nil { + return inp, nil + } + + if idx < len(inputs) { + inp.ref = inputs[idx] + s.mu.Lock() + s.ins[idx] = inp + s.mu.Unlock() + return inp, nil + } + + var inpMount, inpMountSecondary fileoptypes.Mount + var toRelease []fileoptypes.Mount + var inpMountPrepared bool + defer func() { + for _, m := range toRelease { + m.Release(context.TODO()) + } + if err != nil && inpMount != nil && inpMountPrepared { + inpMount.Release(context.TODO()) + } + }() + + action := actions[idx-len(inputs)] + + loadInput := func(ctx context.Context) func() error { + return func() error { + inp, err := s.getInput(ctx, int(action.Input), inputs, actions) + if err != nil { + return err + } + if inp.ref != nil { + m, err := s.r.Prepare(ctx, inp.ref, false) + if err != nil { + return err + } + inpMount = m + inpMountPrepared = true + return nil + } + inpMount = inp.mount + return nil + } + } + + loadSecondaryInput := func(ctx context.Context) func() error { + return func() error { + inp, err := s.getInput(ctx, int(action.SecondaryInput), inputs, actions) + if err != nil { + return err + } + if inp.ref != nil { + m, err := s.r.Prepare(ctx, inp.ref, true) + if err != nil { + return err + } + inpMountSecondary = m + toRelease = append(toRelease, m) + return nil + } + inpMountSecondary = inp.mount + return nil + } + } + + loadUser := func(ctx context.Context, uopt *pb.UserOpt) (fileoptypes.Mount, error) { + if uopt == nil { + return nil, nil + } + switch u := uopt.User.(type) { + case *pb.UserOpt_ByName: + var m fileoptypes.Mount + if u.ByName.Input < 0 { + return nil, errors.Errorf("invalid user index: %d", u.ByName.Input) + } + inp, err := s.getInput(ctx, int(u.ByName.Input), inputs, actions) + if err != nil { + return nil, err + } + if inp.ref != nil { + mm, err := s.r.Prepare(ctx, inp.ref, true) + if err != nil { + return nil, err + } + toRelease = append(toRelease, mm) + m = mm + } else { + m = inp.mount + } + return m, nil + default: + return nil, nil + } + } + + loadOwner := func(ctx context.Context, chopt *pb.ChownOpt) (fileoptypes.Mount, fileoptypes.Mount, error) { + if chopt == nil { + return nil, nil, nil + } + um, err := loadUser(ctx, chopt.User) + if err != nil { + return nil, nil, err + } + gm, err := loadUser(ctx, chopt.Group) + if err != nil { + return nil, nil, err + } + 
return um, gm, nil + } + + if action.Input != -1 && action.SecondaryInput != -1 { + eg, ctx := errgroup.WithContext(ctx) + eg.Go(loadInput(ctx)) + eg.Go(loadSecondaryInput(ctx)) + if err := eg.Wait(); err != nil { + return nil, err + } + } else { + if action.Input != -1 { + if err := loadInput(ctx)(); err != nil { + return nil, err + } + } + if action.SecondaryInput != -1 { + if err := loadSecondaryInput(ctx)(); err != nil { + return nil, err + } + } + } + + if inpMount == nil { + m, err := s.r.Prepare(ctx, nil, false) + if err != nil { + return nil, err + } + inpMount = m + inpMountPrepared = true + } + + switch a := action.Action.(type) { + case *pb.FileAction_Mkdir: + user, group, err := loadOwner(ctx, a.Mkdir.Owner) + if err != nil { + return nil, err + } + if err := s.b.Mkdir(ctx, inpMount, user, group, *a.Mkdir); err != nil { + return nil, err + } + case *pb.FileAction_Mkfile: + user, group, err := loadOwner(ctx, a.Mkfile.Owner) + if err != nil { + return nil, err + } + if err := s.b.Mkfile(ctx, inpMount, user, group, *a.Mkfile); err != nil { + return nil, err + } + case *pb.FileAction_Rm: + if err := s.b.Rm(ctx, inpMount, *a.Rm); err != nil { + return nil, err + } + case *pb.FileAction_Copy: + if inpMountSecondary == nil { + m, err := s.r.Prepare(ctx, nil, true) + if err != nil { + return nil, err + } + inpMountSecondary = m + } + user, group, err := loadOwner(ctx, a.Copy.Owner) + if err != nil { + return nil, err + } + if err := s.b.Copy(ctx, inpMountSecondary, inpMount, user, group, *a.Copy); err != nil { + return nil, err + } + default: + return nil, errors.Errorf("invalid action type %T", action.Action) + } + + if inp.requiresCommit { + ref, err := s.r.Commit(ctx, inpMount) + if err != nil { + return nil, err + } + inp.ref = ref + } else { + inp.mount = inpMount + } + s.mu.Lock() + s.ins[idx] = inp + s.mu.Unlock() + return inp, nil + }) + if err != nil { + return input{}, err + } + return inp.(input), err +} diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/ops/fileoptypes/types.go b/vendor/github.com/moby/buildkit/solver/llbsolver/ops/fileoptypes/types.go new file mode 100644 index 00000000..67aab026 --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/ops/fileoptypes/types.go @@ -0,0 +1,28 @@ +package fileoptypes + +import ( + "context" + + "github.com/moby/buildkit/solver/pb" +) + +type Ref interface { + Release(context.Context) error +} + +type Mount interface { + IsFileOpMount() + Release(context.Context) error +} + +type Backend interface { + Mkdir(context.Context, Mount, Mount, Mount, pb.FileActionMkDir) error + Mkfile(context.Context, Mount, Mount, Mount, pb.FileActionMkFile) error + Rm(context.Context, Mount, pb.FileActionRm) error + Copy(context.Context, Mount, Mount, Mount, Mount, pb.FileActionCopy) error +} + +type RefManager interface { + Prepare(ctx context.Context, ref Ref, readonly bool) (Mount, error) + Commit(ctx context.Context, mount Mount) (Ref, error) +} diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/ops/source.go b/vendor/github.com/moby/buildkit/solver/llbsolver/ops/source.go new file mode 100644 index 00000000..f0a8cf8a --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/ops/source.go @@ -0,0 +1,92 @@ +package ops + +import ( + "context" + "strings" + "sync" + + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/solver/llbsolver" + "github.com/moby/buildkit/solver/pb" + "github.com/moby/buildkit/source" + "github.com/moby/buildkit/worker" + digest 
"github.com/opencontainers/go-digest" +) + +const sourceCacheType = "buildkit.source.v0" + +type sourceOp struct { + mu sync.Mutex + op *pb.Op_Source + platform *pb.Platform + sm *source.Manager + src source.SourceInstance + sessM *session.Manager + w worker.Worker +} + +func NewSourceOp(_ solver.Vertex, op *pb.Op_Source, platform *pb.Platform, sm *source.Manager, sessM *session.Manager, w worker.Worker) (solver.Op, error) { + if err := llbsolver.ValidateOp(&pb.Op{Op: op}); err != nil { + return nil, err + } + return &sourceOp{ + op: op, + sm: sm, + w: w, + sessM: sessM, + platform: platform, + }, nil +} + +func (s *sourceOp) instance(ctx context.Context) (source.SourceInstance, error) { + s.mu.Lock() + defer s.mu.Unlock() + if s.src != nil { + return s.src, nil + } + id, err := source.FromLLB(s.op, s.platform) + if err != nil { + return nil, err + } + src, err := s.sm.Resolve(ctx, id, s.sessM) + if err != nil { + return nil, err + } + s.src = src + return s.src, nil +} + +func (s *sourceOp) CacheMap(ctx context.Context, index int) (*solver.CacheMap, bool, error) { + src, err := s.instance(ctx) + if err != nil { + return nil, false, err + } + k, done, err := src.CacheKey(ctx, index) + if err != nil { + return nil, false, err + } + + dgst := digest.FromBytes([]byte(sourceCacheType + ":" + k)) + + if strings.HasPrefix(k, "session:") { + dgst = digest.Digest("random:" + strings.TrimPrefix(dgst.String(), dgst.Algorithm().String()+":")) + } + + return &solver.CacheMap{ + // TODO: add os/arch + Digest: dgst, + }, done, nil +} + +func (s *sourceOp) Exec(ctx context.Context, _ []solver.Result) (outputs []solver.Result, err error) { + src, err := s.instance(ctx) + if err != nil { + return nil, err + } + ref, err := src.Snapshot(ctx) + if err != nil { + return nil, err + } + return []solver.Result{worker.NewWorkerRefResult(ref, s.w)}, nil +} diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/result.go b/vendor/github.com/moby/buildkit/solver/llbsolver/result.go new file mode 100644 index 00000000..dc96e4d0 --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/result.go @@ -0,0 +1,74 @@ +package llbsolver + +import ( + "bytes" + "context" + "path" + + "github.com/moby/buildkit/cache/contenthash" + "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/worker" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "golang.org/x/sync/errgroup" +) + +type Selector struct { + Path string + Wildcard bool + FollowLinks bool +} + +func NewContentHashFunc(selectors []Selector) solver.ResultBasedCacheFunc { + return func(ctx context.Context, res solver.Result) (digest.Digest, error) { + ref, ok := res.Sys().(*worker.WorkerRef) + if !ok { + return "", errors.Errorf("invalid reference: %T", res) + } + + if len(selectors) == 0 { + selectors = []Selector{{}} + } + + dgsts := make([][]byte, len(selectors)) + + eg, ctx := errgroup.WithContext(ctx) + + for i, sel := range selectors { + // FIXME(tonistiigi): enabling this parallelization seems to create wrong results for some big inputs(like gobuild) + // func(i int) { + // eg.Go(func() error { + if !sel.Wildcard { + dgst, err := contenthash.Checksum(ctx, ref.ImmutableRef, path.Join("/", sel.Path), sel.FollowLinks) + if err != nil { + return "", err + } + dgsts[i] = []byte(dgst) + } else { + dgst, err := contenthash.ChecksumWildcard(ctx, ref.ImmutableRef, path.Join("/", sel.Path), sel.FollowLinks) + if err != nil { + return "", err + } + dgsts[i] = []byte(dgst) + } + // return nil + // }) + // }(i) + } + + if err 
:= eg.Wait(); err != nil { + return "", err + } + + return digest.FromBytes(bytes.Join(dgsts, []byte{0})), nil + } +} + +func workerRefConverter(ctx context.Context, res solver.Result) (*solver.Remote, error) { + ref, ok := res.Sys().(*worker.WorkerRef) + if !ok { + return nil, errors.Errorf("invalid result: %T", res.Sys()) + } + + return ref.Worker.GetRemote(ctx, ref.ImmutableRef, true) +} diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/solver.go b/vendor/github.com/moby/buildkit/solver/llbsolver/solver.go new file mode 100644 index 00000000..640a9738 --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/solver.go @@ -0,0 +1,418 @@ +package llbsolver + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/moby/buildkit/cache" + "github.com/moby/buildkit/cache/remotecache" + "github.com/moby/buildkit/client" + controlgateway "github.com/moby/buildkit/control/gateway" + "github.com/moby/buildkit/exporter" + "github.com/moby/buildkit/exporter/containerimage/exptypes" + "github.com/moby/buildkit/frontend" + "github.com/moby/buildkit/frontend/gateway" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/util/entitlements" + "github.com/moby/buildkit/util/progress" + "github.com/moby/buildkit/worker" + digest "github.com/opencontainers/go-digest" + specs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "golang.org/x/sync/errgroup" +) + +const keyEntitlements = "llb.entitlements" + +type ExporterRequest struct { + Exporter exporter.ExporterInstance + CacheExporter remotecache.Exporter + CacheExportMode solver.CacheExportMode +} + +// ResolveWorkerFunc returns default worker for the temporary default non-distributed use cases +type ResolveWorkerFunc func() (worker.Worker, error) + +type Solver struct { + workerController *worker.Controller + solver *solver.Solver + resolveWorker ResolveWorkerFunc + eachWorker func(func(worker.Worker) error) error + frontends map[string]frontend.Frontend + resolveCacheImporterFuncs map[string]remotecache.ResolveCacheImporterFunc + platforms []specs.Platform + gatewayForwarder *controlgateway.GatewayForwarder + sm *session.Manager + entitlements []string +} + +func New(wc *worker.Controller, f map[string]frontend.Frontend, cache solver.CacheManager, resolveCI map[string]remotecache.ResolveCacheImporterFunc, gatewayForwarder *controlgateway.GatewayForwarder, sm *session.Manager, ents []string) (*Solver, error) { + s := &Solver{ + workerController: wc, + resolveWorker: defaultResolver(wc), + eachWorker: allWorkers(wc), + frontends: f, + resolveCacheImporterFuncs: resolveCI, + gatewayForwarder: gatewayForwarder, + sm: sm, + entitlements: ents, + } + + // executing is currently only allowed on default worker + w, err := wc.GetDefault() + if err != nil { + return nil, err + } + s.platforms = w.Platforms(false) + + s.solver = solver.NewSolver(solver.SolverOpt{ + ResolveOpFunc: s.resolver(), + DefaultCache: cache, + }) + return s, nil +} + +func (s *Solver) resolver() solver.ResolveOpFunc { + return func(v solver.Vertex, b solver.Builder) (solver.Op, error) { + w, err := s.resolveWorker() + if err != nil { + return nil, err + } + return w.ResolveOp(v, s.Bridge(b), s.sm) + } +} + +func (s *Solver) Bridge(b solver.Builder) frontend.FrontendLLBBridge { + return &llbBridge{ + builder: b, + frontends: s.frontends, + resolveWorker: s.resolveWorker, + eachWorker: s.eachWorker, + resolveCacheImporterFuncs: s.resolveCacheImporterFuncs, + cms: 
map[string]solver.CacheManager{}, + platforms: s.platforms, + sm: s.sm, + } +} + +func (s *Solver) Solve(ctx context.Context, id string, req frontend.SolveRequest, exp ExporterRequest, ent []entitlements.Entitlement) (*client.SolveResponse, error) { + j, err := s.solver.NewJob(id) + if err != nil { + return nil, err + } + + defer j.Discard() + + set, err := entitlements.WhiteList(ent, supportedEntitlements(s.entitlements)) + if err != nil { + return nil, err + } + j.SetValue(keyEntitlements, set) + + j.SessionID = session.FromContext(ctx) + + var res *frontend.Result + if s.gatewayForwarder != nil && req.Definition == nil && req.Frontend == "" { + fwd := gateway.NewBridgeForwarder(ctx, s.Bridge(j), s.workerController, req.FrontendInputs) + defer fwd.Discard() + if err := s.gatewayForwarder.RegisterBuild(ctx, id, fwd); err != nil { + return nil, err + } + defer s.gatewayForwarder.UnregisterBuild(ctx, id) + + var err error + select { + case <-fwd.Done(): + res, err = fwd.Result() + case <-ctx.Done(): + err = ctx.Err() + } + if err != nil { + return nil, err + } + } else { + res, err = s.Bridge(j).Solve(ctx, req) + if err != nil { + return nil, err + } + } + + defer func() { + res.EachRef(func(ref solver.ResultProxy) error { + go ref.Release(context.TODO()) + return nil + }) + }() + + eg, ctx2 := errgroup.WithContext(ctx) + res.EachRef(func(ref solver.ResultProxy) error { + eg.Go(func() error { + _, err := ref.Result(ctx2) + return err + }) + return nil + }) + if err := eg.Wait(); err != nil { + return nil, err + } + + var exporterResponse map[string]string + if e := exp.Exporter; e != nil { + inp := exporter.Source{ + Metadata: res.Metadata, + } + if inp.Metadata == nil { + inp.Metadata = make(map[string][]byte) + } + if res := res.Ref; res != nil { + r, err := res.Result(ctx) + if err != nil { + return nil, err + } + workerRef, ok := r.Sys().(*worker.WorkerRef) + if !ok { + return nil, errors.Errorf("invalid reference: %T", r.Sys()) + } + inp.Ref = workerRef.ImmutableRef + + dt, err := inlineCache(ctx, exp.CacheExporter, r) + if err != nil { + return nil, err + } + if dt != nil { + inp.Metadata[exptypes.ExporterInlineCache] = dt + } + } + if res.Refs != nil { + m := make(map[string]cache.ImmutableRef, len(res.Refs)) + for k, res := range res.Refs { + if res == nil { + m[k] = nil + } else { + r, err := res.Result(ctx) + if err != nil { + return nil, err + } + workerRef, ok := r.Sys().(*worker.WorkerRef) + if !ok { + return nil, errors.Errorf("invalid reference: %T", r.Sys()) + } + m[k] = workerRef.ImmutableRef + + dt, err := inlineCache(ctx, exp.CacheExporter, r) + if err != nil { + return nil, err + } + if dt != nil { + inp.Metadata[fmt.Sprintf("%s/%s", exptypes.ExporterInlineCache, k)] = dt + } + } + } + inp.Refs = m + } + + if err := inVertexContext(j.Context(ctx), e.Name(), "", func(ctx context.Context) error { + exporterResponse, err = e.Export(ctx, inp) + return err + }); err != nil { + return nil, err + } + } + + var cacheExporterResponse map[string]string + if e := exp.CacheExporter; e != nil { + if err := inVertexContext(j.Context(ctx), "exporting cache", "", func(ctx context.Context) error { + prepareDone := oneOffProgress(ctx, "preparing build cache for export") + if err := res.EachRef(func(res solver.ResultProxy) error { + r, err := res.Result(ctx) + if err != nil { + return err + } + // all keys have same export chain so exporting others is not needed + _, err = r.CacheKeys()[0].Exporter.ExportTo(ctx, e, solver.CacheExportOpt{ + Convert: workerRefConverter, + Mode: 
exp.CacheExportMode, + }) + return err + }); err != nil { + return prepareDone(err) + } + prepareDone(nil) + cacheExporterResponse, err = e.Finalize(ctx) + return err + }); err != nil { + return nil, err + } + } + + if exporterResponse == nil { + exporterResponse = make(map[string]string) + } + + for k, v := range res.Metadata { + if strings.HasPrefix(k, "frontend.") { + exporterResponse[k] = string(v) + } + } + for k, v := range cacheExporterResponse { + if strings.HasPrefix(k, "cache.") { + exporterResponse[k] = v + } + } + + return &client.SolveResponse{ + ExporterResponse: exporterResponse, + }, nil +} + +func inlineCache(ctx context.Context, e remotecache.Exporter, res solver.CachedResult) ([]byte, error) { + if efl, ok := e.(interface { + ExportForLayers([]digest.Digest) ([]byte, error) + }); ok { + workerRef, ok := res.Sys().(*worker.WorkerRef) + if !ok { + return nil, errors.Errorf("invalid reference: %T", res.Sys()) + } + + remote, err := workerRef.Worker.GetRemote(ctx, workerRef.ImmutableRef, true) + if err != nil || remote == nil { + return nil, nil + } + + digests := make([]digest.Digest, 0, len(remote.Descriptors)) + for _, desc := range remote.Descriptors { + digests = append(digests, desc.Digest) + } + + if _, err := res.CacheKeys()[0].Exporter.ExportTo(ctx, e, solver.CacheExportOpt{ + Convert: workerRefConverter, + Mode: solver.CacheExportModeMin, + }); err != nil { + return nil, err + } + + return efl.ExportForLayers(digests) + } + return nil, nil +} + +func (s *Solver) Status(ctx context.Context, id string, statusChan chan *client.SolveStatus) error { + j, err := s.solver.Get(id) + if err != nil { + close(statusChan) + return err + } + return j.Status(ctx, statusChan) +} + +func defaultResolver(wc *worker.Controller) ResolveWorkerFunc { + return func() (worker.Worker, error) { + return wc.GetDefault() + } +} +func allWorkers(wc *worker.Controller) func(func(w worker.Worker) error) error { + return func(f func(worker.Worker) error) error { + all, err := wc.List() + if err != nil { + return err + } + for _, w := range all { + if err := f(w); err != nil { + return err + } + } + return nil + } +} + +func oneOffProgress(ctx context.Context, id string) func(err error) error { + pw, _, _ := progress.FromContext(ctx) + now := time.Now() + st := progress.Status{ + Started: &now, + } + pw.Write(id, st) + return func(err error) error { + // TODO: set error on status + now := time.Now() + st.Completed = &now + pw.Write(id, st) + pw.Close() + return err + } +} + +func inVertexContext(ctx context.Context, name, id string, f func(ctx context.Context) error) error { + if id == "" { + id = name + } + v := client.Vertex{ + Digest: digest.FromBytes([]byte(id)), + Name: name, + } + pw, _, ctx := progress.FromContext(ctx, progress.WithMetadata("vertex", v.Digest)) + notifyStarted(ctx, &v, false) + defer pw.Close() + err := f(ctx) + notifyCompleted(ctx, &v, err, false) + return err +} + +func notifyStarted(ctx context.Context, v *client.Vertex, cached bool) { + pw, _, _ := progress.FromContext(ctx) + defer pw.Close() + now := time.Now() + v.Started = &now + v.Completed = nil + v.Cached = cached + pw.Write(v.Digest.String(), *v) +} + +func notifyCompleted(ctx context.Context, v *client.Vertex, err error, cached bool) { + pw, _, _ := progress.FromContext(ctx) + defer pw.Close() + now := time.Now() + if v.Started == nil { + v.Started = &now + } + v.Completed = &now + v.Cached = cached + if err != nil { + v.Error = err.Error() + } + pw.Write(v.Digest.String(), *v) +} + +func 
supportedEntitlements(ents []string) []entitlements.Entitlement { + out := []entitlements.Entitlement{} // nil means no filter + for _, e := range ents { + if e == string(entitlements.EntitlementNetworkHost) { + out = append(out, entitlements.EntitlementNetworkHost) + } + if e == string(entitlements.EntitlementSecurityInsecure) { + out = append(out, entitlements.EntitlementSecurityInsecure) + } + } + return out +} + +func loadEntitlements(b solver.Builder) (entitlements.Set, error) { + var ent entitlements.Set = map[entitlements.Entitlement]struct{}{} + err := b.EachValue(context.TODO(), keyEntitlements, func(v interface{}) error { + set, ok := v.(entitlements.Set) + if !ok { + return errors.Errorf("invalid entitlements %T", v) + } + for k := range set { + ent[k] = struct{}{} + } + return nil + }) + if err != nil { + return nil, err + } + return ent, nil +} diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/vertex.go b/vendor/github.com/moby/buildkit/solver/llbsolver/vertex.go new file mode 100644 index 00000000..2dbded12 --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/vertex.go @@ -0,0 +1,353 @@ +package llbsolver + +import ( + "fmt" + "strings" + + "github.com/containerd/containerd/platforms" + "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/solver/pb" + "github.com/moby/buildkit/source" + "github.com/moby/buildkit/util/binfmt_misc" + "github.com/moby/buildkit/util/entitlements" + digest "github.com/opencontainers/go-digest" + specs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +type vertex struct { + sys interface{} + options solver.VertexOptions + inputs []solver.Edge + digest digest.Digest + name string +} + +func (v *vertex) Digest() digest.Digest { + return v.digest +} + +func (v *vertex) Sys() interface{} { + return v.sys +} + +func (v *vertex) Options() solver.VertexOptions { + return v.options +} + +func (v *vertex) Inputs() []solver.Edge { + return v.inputs +} + +func (v *vertex) Name() string { + if name, ok := v.options.Description["llb.customname"]; ok { + return name + } + return v.name +} + +type LoadOpt func(*pb.Op, *pb.OpMetadata, *solver.VertexOptions) error + +func WithValidateCaps() LoadOpt { + cs := pb.Caps.CapSet(pb.Caps.All()) + return func(_ *pb.Op, md *pb.OpMetadata, opt *solver.VertexOptions) error { + if md != nil { + for c := range md.Caps { + if err := cs.Supports(c); err != nil { + return err + } + } + } + return nil + } +} + +func WithCacheSources(cms []solver.CacheManager) LoadOpt { + return func(_ *pb.Op, _ *pb.OpMetadata, opt *solver.VertexOptions) error { + opt.CacheSources = cms + return nil + } +} + +func RuntimePlatforms(p []specs.Platform) LoadOpt { + var defaultPlatform *pb.Platform + pp := make([]specs.Platform, len(p)) + for i := range p { + pp[i] = platforms.Normalize(p[i]) + } + return func(op *pb.Op, _ *pb.OpMetadata, opt *solver.VertexOptions) error { + if op.Platform == nil { + if defaultPlatform == nil { + p := platforms.DefaultSpec() + defaultPlatform = &pb.Platform{ + OS: p.OS, + Architecture: p.Architecture, + Variant: p.Variant, + } + } + op.Platform = defaultPlatform + } + platform := specs.Platform{OS: op.Platform.OS, Architecture: op.Platform.Architecture, Variant: op.Platform.Variant} + normalizedPlatform := platforms.Normalize(platform) + + op.Platform = &pb.Platform{ + OS: normalizedPlatform.OS, + Architecture: normalizedPlatform.Architecture, + Variant: normalizedPlatform.Variant, + } + + if _, ok := op.Op.(*pb.Op_Exec); ok { + var found bool + for 
_, pp := range pp { + if pp.OS == op.Platform.OS && pp.Architecture == op.Platform.Architecture && pp.Variant == op.Platform.Variant { + found = true + break + } + } + if !found { + if !binfmt_misc.Check(normalizedPlatform) { + return errors.Errorf("runtime execution on platform %s not supported", platforms.Format(specs.Platform{OS: op.Platform.OS, Architecture: op.Platform.Architecture, Variant: op.Platform.Variant})) + } else { + pp = append(pp, normalizedPlatform) + } + } + } + return nil + } +} + +func ValidateEntitlements(ent entitlements.Set) LoadOpt { + return func(op *pb.Op, _ *pb.OpMetadata, opt *solver.VertexOptions) error { + switch op := op.Op.(type) { + case *pb.Op_Exec: + if op.Exec.Network == pb.NetMode_HOST { + if !ent.Allowed(entitlements.EntitlementNetworkHost) { + return errors.Errorf("%s is not allowed", entitlements.EntitlementNetworkHost) + } + } + + if op.Exec.Security == pb.SecurityMode_INSECURE { + if !ent.Allowed(entitlements.EntitlementSecurityInsecure) { + return errors.Errorf("%s is not allowed", entitlements.EntitlementSecurityInsecure) + } + } + } + return nil + } +} + +type detectPrunedCacheID struct { + ids map[string]struct{} +} + +func (dpc *detectPrunedCacheID) Load(op *pb.Op, md *pb.OpMetadata, opt *solver.VertexOptions) error { + if md == nil || !md.IgnoreCache { + return nil + } + switch op := op.Op.(type) { + case *pb.Op_Exec: + for _, m := range op.Exec.GetMounts() { + if m.MountType == pb.MountType_CACHE { + if m.CacheOpt != nil { + id := m.CacheOpt.ID + if id == "" { + id = m.Dest + } + if dpc.ids == nil { + dpc.ids = map[string]struct{}{} + } + dpc.ids[id] = struct{}{} + } + } + } + } + return nil +} + +func Load(def *pb.Definition, opts ...LoadOpt) (solver.Edge, error) { + return loadLLB(def, func(dgst digest.Digest, pbOp *pb.Op, load func(digest.Digest) (solver.Vertex, error)) (solver.Vertex, error) { + opMetadata := def.Metadata[dgst] + vtx, err := newVertex(dgst, pbOp, &opMetadata, load, opts...) + if err != nil { + return nil, err + } + return vtx, nil + }) +} + +func newVertex(dgst digest.Digest, op *pb.Op, opMeta *pb.OpMetadata, load func(digest.Digest) (solver.Vertex, error), opts ...LoadOpt) (*vertex, error) { + opt := solver.VertexOptions{} + if opMeta != nil { + opt.IgnoreCache = opMeta.IgnoreCache + opt.Description = opMeta.Description + if opMeta.ExportCache != nil { + opt.ExportCache = &opMeta.ExportCache.Value + } + } + for _, fn := range opts { + if err := fn(op, opMeta, &opt); err != nil { + return nil, err + } + } + + vtx := &vertex{sys: op, options: opt, digest: dgst, name: llbOpName(op)} + for _, in := range op.Inputs { + sub, err := load(in.Digest) + if err != nil { + return nil, err + } + vtx.inputs = append(vtx.inputs, solver.Edge{Index: solver.Index(in.Index), Vertex: sub}) + } + return vtx, nil +} + +// loadLLB loads LLB. +// fn is executed sequentially. 
+func loadLLB(def *pb.Definition, fn func(digest.Digest, *pb.Op, func(digest.Digest) (solver.Vertex, error)) (solver.Vertex, error)) (solver.Edge, error) { + if len(def.Def) == 0 { + return solver.Edge{}, errors.New("invalid empty definition") + } + + allOps := make(map[digest.Digest]*pb.Op) + + var dgst digest.Digest + + for _, dt := range def.Def { + var op pb.Op + if err := (&op).Unmarshal(dt); err != nil { + return solver.Edge{}, errors.Wrap(err, "failed to parse llb proto op") + } + dgst = digest.FromBytes(dt) + allOps[dgst] = &op + } + + if len(allOps) < 2 { + return solver.Edge{}, errors.Errorf("invalid LLB with %d vertexes", len(allOps)) + } + + lastOp := allOps[dgst] + delete(allOps, dgst) + if len(lastOp.Inputs) == 0 { + return solver.Edge{}, errors.Errorf("invalid LLB with no inputs on last vertex") + } + dgst = lastOp.Inputs[0].Digest + + cache := make(map[digest.Digest]solver.Vertex) + + var rec func(dgst digest.Digest) (solver.Vertex, error) + rec = func(dgst digest.Digest) (solver.Vertex, error) { + if v, ok := cache[dgst]; ok { + return v, nil + } + op, ok := allOps[dgst] + if !ok { + return nil, errors.Errorf("invalid missing input digest %s", dgst) + } + + if err := ValidateOp(op); err != nil { + return nil, err + } + + v, err := fn(dgst, op, rec) + if err != nil { + return nil, err + } + cache[dgst] = v + return v, nil + } + + v, err := rec(dgst) + if err != nil { + return solver.Edge{}, err + } + return solver.Edge{Vertex: v, Index: solver.Index(lastOp.Inputs[0].Index)}, nil +} + +func llbOpName(op *pb.Op) string { + switch op := op.Op.(type) { + case *pb.Op_Source: + if id, err := source.FromLLB(op, nil); err == nil { + if id, ok := id.(*source.LocalIdentifier); ok { + if len(id.IncludePatterns) == 1 { + return op.Source.Identifier + " (" + id.IncludePatterns[0] + ")" + } + } + } + return op.Source.Identifier + case *pb.Op_Exec: + return strings.Join(op.Exec.Meta.Args, " ") + case *pb.Op_File: + return fileOpName(op.File.Actions) + case *pb.Op_Build: + return "build" + default: + return "unknown" + } +} + +func ValidateOp(op *pb.Op) error { + if op == nil { + return errors.Errorf("invalid nil op") + } + + switch op := op.Op.(type) { + case *pb.Op_Source: + if op.Source == nil { + return errors.Errorf("invalid nil source op") + } + case *pb.Op_Exec: + if op.Exec == nil { + return errors.Errorf("invalid nil exec op") + } + if op.Exec.Meta == nil { + return errors.Errorf("invalid exec op with no meta") + } + if len(op.Exec.Meta.Args) == 0 { + return errors.Errorf("invalid exec op with no args") + } + if len(op.Exec.Mounts) == 0 { + return errors.Errorf("invalid exec op with no mounts") + } + + isRoot := false + for _, m := range op.Exec.Mounts { + if m.Dest == pb.RootMount { + isRoot = true + break + } + } + if !isRoot { + return errors.Errorf("invalid exec op with no rootfs") + } + case *pb.Op_File: + if op.File == nil { + return errors.Errorf("invalid nil file op") + } + if len(op.File.Actions) == 0 { + return errors.Errorf("invalid file op with no actions") + } + case *pb.Op_Build: + if op.Build == nil { + return errors.Errorf("invalid nil build op") + } + } + return nil +} + +func fileOpName(actions []*pb.FileAction) string { + names := make([]string, 0, len(actions)) + for _, action := range actions { + switch a := action.Action.(type) { + case *pb.FileAction_Mkdir: + names = append(names, fmt.Sprintf("mkdir %s", a.Mkdir.Path)) + case *pb.FileAction_Mkfile: + names = append(names, fmt.Sprintf("mkfile %s", a.Mkfile.Path)) + case *pb.FileAction_Rm: + names = 
append(names, fmt.Sprintf("rm %s", a.Rm.Path)) + case *pb.FileAction_Copy: + names = append(names, fmt.Sprintf("copy %s %s", a.Copy.Src, a.Copy.Dest)) + } + } + + return strings.Join(names, ", ") +} diff --git a/vendor/github.com/moby/buildkit/solver/memorycachestorage.go b/vendor/github.com/moby/buildkit/solver/memorycachestorage.go new file mode 100644 index 00000000..e0e58f0a --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/memorycachestorage.go @@ -0,0 +1,307 @@ +package solver + +import ( + "context" + "sync" + "time" + + "github.com/pkg/errors" +) + +func NewInMemoryCacheStorage() CacheKeyStorage { + return &inMemoryStore{ + byID: map[string]*inMemoryKey{}, + byResult: map[string]map[string]struct{}{}, + } +} + +type inMemoryStore struct { + mu sync.RWMutex + byID map[string]*inMemoryKey + byResult map[string]map[string]struct{} +} + +type inMemoryKey struct { + id string + results map[string]CacheResult + links map[CacheInfoLink]map[string]struct{} + backlinks map[string]struct{} +} + +func (s *inMemoryStore) Exists(id string) bool { + s.mu.RLock() + defer s.mu.RUnlock() + if k, ok := s.byID[id]; ok { + return len(k.links) > 0 || len(k.results) > 0 + } + return false +} + +func newInMemoryKey(id string) *inMemoryKey { + return &inMemoryKey{ + results: map[string]CacheResult{}, + links: map[CacheInfoLink]map[string]struct{}{}, + backlinks: map[string]struct{}{}, + id: id, + } +} + +func (s *inMemoryStore) Walk(fn func(string) error) error { + s.mu.RLock() + ids := make([]string, 0, len(s.byID)) + for id := range s.byID { + ids = append(ids, id) + } + s.mu.RUnlock() + + for _, id := range ids { + if err := fn(id); err != nil { + return err + } + } + return nil +} + +func (s *inMemoryStore) WalkResults(id string, fn func(CacheResult) error) error { + s.mu.RLock() + + k, ok := s.byID[id] + if !ok { + s.mu.RUnlock() + return nil + } + copy := make([]CacheResult, 0, len(k.results)) + for _, res := range k.results { + copy = append(copy, res) + } + s.mu.RUnlock() + + for _, res := range copy { + if err := fn(res); err != nil { + return err + } + } + return nil +} + +func (s *inMemoryStore) Load(id string, resultID string) (CacheResult, error) { + s.mu.RLock() + defer s.mu.RUnlock() + k, ok := s.byID[id] + if !ok { + return CacheResult{}, errors.Wrapf(ErrNotFound, "no such key %s", id) + } + r, ok := k.results[resultID] + if !ok { + return CacheResult{}, errors.WithStack(ErrNotFound) + } + return r, nil +} + +func (s *inMemoryStore) AddResult(id string, res CacheResult) error { + s.mu.Lock() + defer s.mu.Unlock() + k, ok := s.byID[id] + if !ok { + k = newInMemoryKey(id) + s.byID[id] = k + } + k.results[res.ID] = res + m, ok := s.byResult[res.ID] + if !ok { + m = map[string]struct{}{} + s.byResult[res.ID] = m + } + m[id] = struct{}{} + return nil +} + +func (s *inMemoryStore) WalkIDsByResult(resultID string, fn func(string) error) error { + s.mu.Lock() + + ids := map[string]struct{}{} + for id := range s.byResult[resultID] { + ids[id] = struct{}{} + } + s.mu.Unlock() + + for id := range ids { + if err := fn(id); err != nil { + return err + } + } + + return nil +} + +func (s *inMemoryStore) Release(resultID string) error { + s.mu.Lock() + defer s.mu.Unlock() + + ids, ok := s.byResult[resultID] + if !ok { + return nil + } + + for id := range ids { + k, ok := s.byID[id] + if !ok { + continue + } + + delete(k.results, resultID) + delete(s.byResult[resultID], id) + if len(s.byResult[resultID]) == 0 { + delete(s.byResult, resultID) + } + + s.emptyBranchWithParents(k) + } + + return nil 
+} + +func (s *inMemoryStore) emptyBranchWithParents(k *inMemoryKey) { + if len(k.results) != 0 || len(k.links) != 0 { + return + } + for id := range k.backlinks { + p, ok := s.byID[id] + if !ok { + continue + } + for l := range p.links { + delete(p.links[l], k.id) + if len(p.links[l]) == 0 { + delete(p.links, l) + } + } + s.emptyBranchWithParents(p) + } + + delete(s.byID, k.id) +} + +func (s *inMemoryStore) AddLink(id string, link CacheInfoLink, target string) error { + s.mu.Lock() + defer s.mu.Unlock() + k, ok := s.byID[id] + if !ok { + k = newInMemoryKey(id) + s.byID[id] = k + } + k2, ok := s.byID[target] + if !ok { + k2 = newInMemoryKey(target) + s.byID[target] = k2 + } + m, ok := k.links[link] + if !ok { + m = map[string]struct{}{} + k.links[link] = m + } + + k2.backlinks[id] = struct{}{} + m[target] = struct{}{} + return nil +} + +func (s *inMemoryStore) WalkLinks(id string, link CacheInfoLink, fn func(id string) error) error { + s.mu.RLock() + k, ok := s.byID[id] + if !ok { + s.mu.RUnlock() + return nil + } + var links []string + for target := range k.links[link] { + links = append(links, target) + } + s.mu.RUnlock() + + for _, t := range links { + if err := fn(t); err != nil { + return err + } + } + return nil +} + +func (s *inMemoryStore) HasLink(id string, link CacheInfoLink, target string) bool { + s.mu.RLock() + defer s.mu.RUnlock() + if k, ok := s.byID[id]; ok { + if v, ok := k.links[link]; ok { + if _, ok := v[target]; ok { + return true + } + } + } + return false +} + +func (s *inMemoryStore) WalkBacklinks(id string, fn func(id string, link CacheInfoLink) error) error { + s.mu.RLock() + k, ok := s.byID[id] + if !ok { + s.mu.RUnlock() + return nil + } + var outIDs []string + var outLinks []CacheInfoLink + for bid := range k.backlinks { + b, ok := s.byID[bid] + if !ok { + continue + } + for l, m := range b.links { + if _, ok := m[id]; !ok { + continue + } + outIDs = append(outIDs, bid) + outLinks = append(outLinks, CacheInfoLink{ + Digest: rootKey(l.Digest, l.Output), + Input: l.Input, + Selector: l.Selector, + }) + } + } + s.mu.RUnlock() + + for i := range outIDs { + if err := fn(outIDs[i], outLinks[i]); err != nil { + return err + } + } + return nil +} + +func NewInMemoryResultStorage() CacheResultStorage { + return &inMemoryResultStore{m: &sync.Map{}} +} + +type inMemoryResultStore struct { + m *sync.Map +} + +func (s *inMemoryResultStore) Save(r Result, createdAt time.Time) (CacheResult, error) { + s.m.Store(r.ID(), r) + return CacheResult{ID: r.ID(), CreatedAt: createdAt}, nil +} + +func (s *inMemoryResultStore) Load(ctx context.Context, res CacheResult) (Result, error) { + v, ok := s.m.Load(res.ID) + if !ok { + return nil, errors.WithStack(ErrNotFound) + } + return v.(Result), nil +} + +func (s *inMemoryResultStore) LoadRemote(ctx context.Context, res CacheResult) (*Remote, error) { + return nil, nil +} + +func (s *inMemoryResultStore) Exists(id string) bool { + _, ok := s.m.Load(id) + return ok +} diff --git a/vendor/github.com/moby/buildkit/solver/pb/attr.go b/vendor/github.com/moby/buildkit/solver/pb/attr.go new file mode 100644 index 00000000..97d2971c --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/pb/attr.go @@ -0,0 +1,25 @@ +package pb + +const AttrKeepGitDir = "git.keepgitdir" +const AttrFullRemoteURL = "git.fullurl" +const AttrLocalSessionID = "local.session" +const AttrLocalUniqueID = "local.unique" +const AttrIncludePatterns = "local.includepattern" +const AttrFollowPaths = "local.followpaths" +const AttrExcludePatterns = "local.excludepatterns" 
+const AttrSharedKeyHint = "local.sharedkeyhint" +const AttrLLBDefinitionFilename = "llbbuild.filename" + +const AttrHTTPChecksum = "http.checksum" +const AttrHTTPFilename = "http.filename" +const AttrHTTPPerm = "http.perm" +const AttrHTTPUID = "http.uid" +const AttrHTTPGID = "http.gid" + +const AttrImageResolveMode = "image.resolvemode" +const AttrImageResolveModeDefault = "default" +const AttrImageResolveModeForcePull = "pull" +const AttrImageResolveModePreferLocal = "local" +const AttrImageRecordType = "image.recordtype" + +type IsFileAction = isFileAction_Action diff --git a/vendor/github.com/moby/buildkit/solver/pb/caps.go b/vendor/github.com/moby/buildkit/solver/pb/caps.go new file mode 100644 index 00000000..93c77b3e --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/pb/caps.go @@ -0,0 +1,299 @@ +package pb + +import "github.com/moby/buildkit/util/apicaps" + +var Caps apicaps.CapList + +// Every backwards or forwards non-compatible change needs to add a new capability row. +// By default new capabilities should be experimental. After merge a capability is +// considered immutable. After a capability is marked stable it should not be disabled. + +const ( + CapSourceImage apicaps.CapID = "source.image" + CapSourceImageResolveMode apicaps.CapID = "source.image.resolvemode" + CapSourceLocal apicaps.CapID = "source.local" + CapSourceLocalUnique apicaps.CapID = "source.local.unique" + CapSourceLocalSessionID apicaps.CapID = "source.local.sessionid" + CapSourceLocalIncludePatterns apicaps.CapID = "source.local.includepatterns" + CapSourceLocalFollowPaths apicaps.CapID = "source.local.followpaths" + CapSourceLocalExcludePatterns apicaps.CapID = "source.local.excludepatterns" + CapSourceLocalSharedKeyHint apicaps.CapID = "source.local.sharedkeyhint" + + CapSourceGit apicaps.CapID = "source.git" + CapSourceGitKeepDir apicaps.CapID = "source.git.keepgitdir" + CapSourceGitFullURL apicaps.CapID = "source.git.fullurl" + + CapSourceHTTP apicaps.CapID = "source.http" + CapSourceHTTPChecksum apicaps.CapID = "source.http.checksum" + CapSourceHTTPPerm apicaps.CapID = "source.http.perm" + CapSourceHTTPUIDGID apicaps.CapID = "soruce.http.uidgid" + + CapBuildOpLLBFileName apicaps.CapID = "source.buildop.llbfilename" + + CapExecMetaBase apicaps.CapID = "exec.meta.base" + CapExecMetaProxy apicaps.CapID = "exec.meta.proxyenv" + CapExecMetaNetwork apicaps.CapID = "exec.meta.network" + CapExecMetaSecurity apicaps.CapID = "exec.meta.security" + CapExecMetaSetsDefaultPath apicaps.CapID = "exec.meta.setsdefaultpath" + CapExecMountBind apicaps.CapID = "exec.mount.bind" + CapExecMountBindReadWriteNoOuput apicaps.CapID = "exec.mount.bind.readwrite-nooutput" + CapExecMountCache apicaps.CapID = "exec.mount.cache" + CapExecMountCacheSharing apicaps.CapID = "exec.mount.cache.sharing" + CapExecMountSelector apicaps.CapID = "exec.mount.selector" + CapExecMountTmpfs apicaps.CapID = "exec.mount.tmpfs" + CapExecMountSecret apicaps.CapID = "exec.mount.secret" + CapExecMountSSH apicaps.CapID = "exec.mount.ssh" + CapExecCgroupsMounted apicaps.CapID = "exec.cgroup" + + CapExecMetaSecurityDeviceWhitelistV1 apicaps.CapID = "exec.meta.security.devices.v1" + + CapFileBase apicaps.CapID = "file.base" + CapFileRmWildcard apicaps.CapID = "file.rm.wildcard" + + CapConstraints apicaps.CapID = "constraints" + CapPlatform apicaps.CapID = "platform" + + CapMetaIgnoreCache apicaps.CapID = "meta.ignorecache" + CapMetaDescription apicaps.CapID = "meta.description" + CapMetaExportCache apicaps.CapID = "meta.exportcache" +) + +func 
init() { + + Caps.Init(apicaps.Cap{ + ID: CapSourceImage, + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + + Caps.Init(apicaps.Cap{ + ID: CapSourceImageResolveMode, + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + + Caps.Init(apicaps.Cap{ + ID: CapSourceLocal, + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + + Caps.Init(apicaps.Cap{ + ID: CapSourceLocalUnique, + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + + Caps.Init(apicaps.Cap{ + ID: CapSourceLocalSessionID, + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + + Caps.Init(apicaps.Cap{ + ID: CapSourceLocalIncludePatterns, + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + + Caps.Init(apicaps.Cap{ + ID: CapSourceLocalFollowPaths, + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + + Caps.Init(apicaps.Cap{ + ID: CapSourceLocalExcludePatterns, + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + + Caps.Init(apicaps.Cap{ + ID: CapSourceLocalSharedKeyHint, + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + Caps.Init(apicaps.Cap{ + ID: CapSourceGit, + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + + Caps.Init(apicaps.Cap{ + ID: CapSourceGitKeepDir, + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + + Caps.Init(apicaps.Cap{ + ID: CapSourceGitFullURL, + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + + Caps.Init(apicaps.Cap{ + ID: CapSourceHTTP, + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + + Caps.Init(apicaps.Cap{ + ID: CapSourceHTTPChecksum, + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + + Caps.Init(apicaps.Cap{ + ID: CapSourceHTTPPerm, + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + + Caps.Init(apicaps.Cap{ + ID: CapSourceHTTPUIDGID, + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + + Caps.Init(apicaps.Cap{ + ID: CapBuildOpLLBFileName, + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + + Caps.Init(apicaps.Cap{ + ID: CapExecMetaBase, + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + + Caps.Init(apicaps.Cap{ + ID: CapExecMetaProxy, + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + + Caps.Init(apicaps.Cap{ + ID: CapExecMetaNetwork, + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + + Caps.Init(apicaps.Cap{ + ID: CapExecMetaSetsDefaultPath, + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + + Caps.Init(apicaps.Cap{ + ID: CapExecMetaSecurity, + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + + Caps.Init(apicaps.Cap{ + ID: CapExecMetaSecurityDeviceWhitelistV1, + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + + Caps.Init(apicaps.Cap{ + ID: CapExecMountBind, + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + + Caps.Init(apicaps.Cap{ + ID: CapExecMountBindReadWriteNoOuput, + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + + Caps.Init(apicaps.Cap{ + ID: CapExecMountCache, + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + + Caps.Init(apicaps.Cap{ + ID: CapExecMountCacheSharing, + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + + Caps.Init(apicaps.Cap{ + ID: CapExecMountSelector, + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + + Caps.Init(apicaps.Cap{ + ID: CapExecMountTmpfs, + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + + Caps.Init(apicaps.Cap{ + ID: CapExecMountSecret, + Enabled: true, + Status: 
apicaps.CapStatusExperimental, + }) + + Caps.Init(apicaps.Cap{ + ID: CapExecMountSSH, + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + + Caps.Init(apicaps.Cap{ + ID: CapExecCgroupsMounted, + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + + Caps.Init(apicaps.Cap{ + ID: CapFileBase, + Enabled: true, + Status: apicaps.CapStatusPrerelease, + SupportedHint: map[string]string{ + "docker": "Docker v19.03", + "buildkit": "BuildKit v0.5.0", + }, + }) + + Caps.Init(apicaps.Cap{ + ID: CapFileRmWildcard, + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + + Caps.Init(apicaps.Cap{ + ID: CapConstraints, + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + + Caps.Init(apicaps.Cap{ + ID: CapPlatform, + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + + Caps.Init(apicaps.Cap{ + ID: CapMetaIgnoreCache, + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + + Caps.Init(apicaps.Cap{ + ID: CapMetaDescription, + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + + Caps.Init(apicaps.Cap{ + ID: CapMetaExportCache, + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) +} diff --git a/vendor/github.com/moby/buildkit/solver/pb/const.go b/vendor/github.com/moby/buildkit/solver/pb/const.go new file mode 100644 index 00000000..c2d20b29 --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/pb/const.go @@ -0,0 +1,25 @@ +package pb + +// InputIndex is incrementing index to the input vertex +type InputIndex int64 + +// OutputIndex is incrementing index that another vertex can depend on +type OutputIndex int64 + +// RootMount is a base mountpoint +const RootMount = "/" + +// SkipOutput marks a disabled output index +const SkipOutput OutputIndex = -1 + +// Empty marks an input with no content +const Empty InputIndex = -1 + +// LLBBuilder is a special builder for BuildOp that directly builds LLB +const LLBBuilder InputIndex = -1 + +// LLBDefinitionInput marks an input that contains LLB definition for BuildOp +const LLBDefinitionInput = "buildkit.llb.definition" + +// LLBDefaultDefinitionFile is a filename containing the definition in LLBBuilder +const LLBDefaultDefinitionFile = LLBDefinitionInput diff --git a/vendor/github.com/moby/buildkit/solver/pb/generate.go b/vendor/github.com/moby/buildkit/solver/pb/generate.go new file mode 100644 index 00000000..c31e148f --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/pb/generate.go @@ -0,0 +1,3 @@ +package pb + +//go:generate protoc -I=. -I=../../vendor/ --gogofaster_out=. ops.proto diff --git a/vendor/github.com/moby/buildkit/solver/pb/ops.pb.go b/vendor/github.com/moby/buildkit/solver/pb/ops.pb.go new file mode 100644 index 00000000..bfab9e37 --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/pb/ops.pb.go @@ -0,0 +1,9629 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: ops.proto + +// Package pb provides the protobuf definition of LLB: low-level builder instruction. +// LLB is DAG-structured; Op represents a vertex, and Definition represents a graph. + +package pb + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + github_com_moby_buildkit_util_apicaps "github.com/moby/buildkit/util/apicaps" + github_com_opencontainers_go_digest "github.com/opencontainers/go-digest" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type NetMode int32 + +const ( + NetMode_UNSET NetMode = 0 + NetMode_HOST NetMode = 1 + NetMode_NONE NetMode = 2 +) + +var NetMode_name = map[int32]string{ + 0: "UNSET", + 1: "HOST", + 2: "NONE", +} + +var NetMode_value = map[string]int32{ + "UNSET": 0, + "HOST": 1, + "NONE": 2, +} + +func (x NetMode) String() string { + return proto.EnumName(NetMode_name, int32(x)) +} + +func (NetMode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_8de16154b2733812, []int{0} +} + +type SecurityMode int32 + +const ( + SecurityMode_SANDBOX SecurityMode = 0 + SecurityMode_INSECURE SecurityMode = 1 +) + +var SecurityMode_name = map[int32]string{ + 0: "SANDBOX", + 1: "INSECURE", +} + +var SecurityMode_value = map[string]int32{ + "SANDBOX": 0, + "INSECURE": 1, +} + +func (x SecurityMode) String() string { + return proto.EnumName(SecurityMode_name, int32(x)) +} + +func (SecurityMode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_8de16154b2733812, []int{1} +} + +// MountType defines a type of a mount from a supported set +type MountType int32 + +const ( + MountType_BIND MountType = 0 + MountType_SECRET MountType = 1 + MountType_SSH MountType = 2 + MountType_CACHE MountType = 3 + MountType_TMPFS MountType = 4 +) + +var MountType_name = map[int32]string{ + 0: "BIND", + 1: "SECRET", + 2: "SSH", + 3: "CACHE", + 4: "TMPFS", +} + +var MountType_value = map[string]int32{ + "BIND": 0, + "SECRET": 1, + "SSH": 2, + "CACHE": 3, + "TMPFS": 4, +} + +func (x MountType) String() string { + return proto.EnumName(MountType_name, int32(x)) +} + +func (MountType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_8de16154b2733812, []int{2} +} + +// CacheSharingOpt defines different sharing modes for cache mount +type CacheSharingOpt int32 + +const ( + // SHARED cache mount can be used concurrently by multiple writers + CacheSharingOpt_SHARED CacheSharingOpt = 0 + // PRIVATE creates a new mount if there are multiple writers + CacheSharingOpt_PRIVATE CacheSharingOpt = 1 + // LOCKED pauses second writer until first one releases the mount + CacheSharingOpt_LOCKED CacheSharingOpt = 2 +) + +var CacheSharingOpt_name = map[int32]string{ + 0: "SHARED", + 1: "PRIVATE", + 2: "LOCKED", +} + +var CacheSharingOpt_value = map[string]int32{ + "SHARED": 0, + "PRIVATE": 1, + "LOCKED": 2, +} + +func (x CacheSharingOpt) String() string { + return proto.EnumName(CacheSharingOpt_name, int32(x)) +} + +func (CacheSharingOpt) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_8de16154b2733812, []int{3} +} + +// Op represents a vertex of the LLB DAG. +type Op struct { + // inputs is a set of input edges. 
+ Inputs []*Input `protobuf:"bytes,1,rep,name=inputs,proto3" json:"inputs,omitempty"` + // Types that are valid to be assigned to Op: + // *Op_Exec + // *Op_Source + // *Op_File + // *Op_Build + Op isOp_Op `protobuf_oneof:"op"` + Platform *Platform `protobuf:"bytes,10,opt,name=platform,proto3" json:"platform,omitempty"` + Constraints *WorkerConstraints `protobuf:"bytes,11,opt,name=constraints,proto3" json:"constraints,omitempty"` +} + +func (m *Op) Reset() { *m = Op{} } +func (m *Op) String() string { return proto.CompactTextString(m) } +func (*Op) ProtoMessage() {} +func (*Op) Descriptor() ([]byte, []int) { + return fileDescriptor_8de16154b2733812, []int{0} +} +func (m *Op) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Op) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Op) XXX_Merge(src proto.Message) { + xxx_messageInfo_Op.Merge(m, src) +} +func (m *Op) XXX_Size() int { + return m.Size() +} +func (m *Op) XXX_DiscardUnknown() { + xxx_messageInfo_Op.DiscardUnknown(m) +} + +var xxx_messageInfo_Op proto.InternalMessageInfo + +type isOp_Op interface { + isOp_Op() + MarshalTo([]byte) (int, error) + Size() int +} + +type Op_Exec struct { + Exec *ExecOp `protobuf:"bytes,2,opt,name=exec,proto3,oneof" json:"exec,omitempty"` +} +type Op_Source struct { + Source *SourceOp `protobuf:"bytes,3,opt,name=source,proto3,oneof" json:"source,omitempty"` +} +type Op_File struct { + File *FileOp `protobuf:"bytes,4,opt,name=file,proto3,oneof" json:"file,omitempty"` +} +type Op_Build struct { + Build *BuildOp `protobuf:"bytes,5,opt,name=build,proto3,oneof" json:"build,omitempty"` +} + +func (*Op_Exec) isOp_Op() {} +func (*Op_Source) isOp_Op() {} +func (*Op_File) isOp_Op() {} +func (*Op_Build) isOp_Op() {} + +func (m *Op) GetOp() isOp_Op { + if m != nil { + return m.Op + } + return nil +} + +func (m *Op) GetInputs() []*Input { + if m != nil { + return m.Inputs + } + return nil +} + +func (m *Op) GetExec() *ExecOp { + if x, ok := m.GetOp().(*Op_Exec); ok { + return x.Exec + } + return nil +} + +func (m *Op) GetSource() *SourceOp { + if x, ok := m.GetOp().(*Op_Source); ok { + return x.Source + } + return nil +} + +func (m *Op) GetFile() *FileOp { + if x, ok := m.GetOp().(*Op_File); ok { + return x.File + } + return nil +} + +func (m *Op) GetBuild() *BuildOp { + if x, ok := m.GetOp().(*Op_Build); ok { + return x.Build + } + return nil +} + +func (m *Op) GetPlatform() *Platform { + if m != nil { + return m.Platform + } + return nil +} + +func (m *Op) GetConstraints() *WorkerConstraints { + if m != nil { + return m.Constraints + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. 
+func (*Op) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Op_Exec)(nil), + (*Op_Source)(nil), + (*Op_File)(nil), + (*Op_Build)(nil), + } +} + +// Platform is github.com/opencontainers/image-spec/specs-go/v1.Platform +type Platform struct { + Architecture string `protobuf:"bytes,1,opt,name=Architecture,proto3" json:"Architecture,omitempty"` + OS string `protobuf:"bytes,2,opt,name=OS,proto3" json:"OS,omitempty"` + Variant string `protobuf:"bytes,3,opt,name=Variant,proto3" json:"Variant,omitempty"` + OSVersion string `protobuf:"bytes,4,opt,name=OSVersion,proto3" json:"OSVersion,omitempty"` + OSFeatures []string `protobuf:"bytes,5,rep,name=OSFeatures,proto3" json:"OSFeatures,omitempty"` +} + +func (m *Platform) Reset() { *m = Platform{} } +func (m *Platform) String() string { return proto.CompactTextString(m) } +func (*Platform) ProtoMessage() {} +func (*Platform) Descriptor() ([]byte, []int) { + return fileDescriptor_8de16154b2733812, []int{1} +} +func (m *Platform) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Platform) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Platform) XXX_Merge(src proto.Message) { + xxx_messageInfo_Platform.Merge(m, src) +} +func (m *Platform) XXX_Size() int { + return m.Size() +} +func (m *Platform) XXX_DiscardUnknown() { + xxx_messageInfo_Platform.DiscardUnknown(m) +} + +var xxx_messageInfo_Platform proto.InternalMessageInfo + +func (m *Platform) GetArchitecture() string { + if m != nil { + return m.Architecture + } + return "" +} + +func (m *Platform) GetOS() string { + if m != nil { + return m.OS + } + return "" +} + +func (m *Platform) GetVariant() string { + if m != nil { + return m.Variant + } + return "" +} + +func (m *Platform) GetOSVersion() string { + if m != nil { + return m.OSVersion + } + return "" +} + +func (m *Platform) GetOSFeatures() []string { + if m != nil { + return m.OSFeatures + } + return nil +} + +// Input represents an input edge for an Op. +type Input struct { + // digest of the marshaled input Op + Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,1,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"` + // output index of the input Op + Index OutputIndex `protobuf:"varint,2,opt,name=index,proto3,customtype=OutputIndex" json:"index"` +} + +func (m *Input) Reset() { *m = Input{} } +func (m *Input) String() string { return proto.CompactTextString(m) } +func (*Input) ProtoMessage() {} +func (*Input) Descriptor() ([]byte, []int) { + return fileDescriptor_8de16154b2733812, []int{2} +} +func (m *Input) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Input) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Input) XXX_Merge(src proto.Message) { + xxx_messageInfo_Input.Merge(m, src) +} +func (m *Input) XXX_Size() int { + return m.Size() +} +func (m *Input) XXX_DiscardUnknown() { + xxx_messageInfo_Input.DiscardUnknown(m) +} + +var xxx_messageInfo_Input proto.InternalMessageInfo + +// ExecOp executes a command in a container. 
+type ExecOp struct { + Meta *Meta `protobuf:"bytes,1,opt,name=meta,proto3" json:"meta,omitempty"` + Mounts []*Mount `protobuf:"bytes,2,rep,name=mounts,proto3" json:"mounts,omitempty"` + Network NetMode `protobuf:"varint,3,opt,name=network,proto3,enum=pb.NetMode" json:"network,omitempty"` + Security SecurityMode `protobuf:"varint,4,opt,name=security,proto3,enum=pb.SecurityMode" json:"security,omitempty"` +} + +func (m *ExecOp) Reset() { *m = ExecOp{} } +func (m *ExecOp) String() string { return proto.CompactTextString(m) } +func (*ExecOp) ProtoMessage() {} +func (*ExecOp) Descriptor() ([]byte, []int) { + return fileDescriptor_8de16154b2733812, []int{3} +} +func (m *ExecOp) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExecOp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExecOp) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExecOp.Merge(m, src) +} +func (m *ExecOp) XXX_Size() int { + return m.Size() +} +func (m *ExecOp) XXX_DiscardUnknown() { + xxx_messageInfo_ExecOp.DiscardUnknown(m) +} + +var xxx_messageInfo_ExecOp proto.InternalMessageInfo + +func (m *ExecOp) GetMeta() *Meta { + if m != nil { + return m.Meta + } + return nil +} + +func (m *ExecOp) GetMounts() []*Mount { + if m != nil { + return m.Mounts + } + return nil +} + +func (m *ExecOp) GetNetwork() NetMode { + if m != nil { + return m.Network + } + return NetMode_UNSET +} + +func (m *ExecOp) GetSecurity() SecurityMode { + if m != nil { + return m.Security + } + return SecurityMode_SANDBOX +} + +// Meta is a set of arguments for ExecOp. +// Meta is unrelated to LLB metadata. +// FIXME: rename (ExecContext? ExecArgs?) +type Meta struct { + Args []string `protobuf:"bytes,1,rep,name=args,proto3" json:"args,omitempty"` + Env []string `protobuf:"bytes,2,rep,name=env,proto3" json:"env,omitempty"` + Cwd string `protobuf:"bytes,3,opt,name=cwd,proto3" json:"cwd,omitempty"` + User string `protobuf:"bytes,4,opt,name=user,proto3" json:"user,omitempty"` + ProxyEnv *ProxyEnv `protobuf:"bytes,5,opt,name=proxy_env,json=proxyEnv,proto3" json:"proxy_env,omitempty"` + ExtraHosts []*HostIP `protobuf:"bytes,6,rep,name=extraHosts,proto3" json:"extraHosts,omitempty"` +} + +func (m *Meta) Reset() { *m = Meta{} } +func (m *Meta) String() string { return proto.CompactTextString(m) } +func (*Meta) ProtoMessage() {} +func (*Meta) Descriptor() ([]byte, []int) { + return fileDescriptor_8de16154b2733812, []int{4} +} +func (m *Meta) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Meta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Meta) XXX_Merge(src proto.Message) { + xxx_messageInfo_Meta.Merge(m, src) +} +func (m *Meta) XXX_Size() int { + return m.Size() +} +func (m *Meta) XXX_DiscardUnknown() { + xxx_messageInfo_Meta.DiscardUnknown(m) +} + +var xxx_messageInfo_Meta proto.InternalMessageInfo + +func (m *Meta) GetArgs() []string { + if m != nil { + return m.Args + } + return nil +} + +func (m *Meta) GetEnv() []string { + if m != nil { + return m.Env + } + return nil +} + +func (m *Meta) GetCwd() string { + if m != nil { + return m.Cwd + } + return "" +} + +func (m *Meta) GetUser() string { + if m != nil { + return m.User + } + return "" +} + +func (m *Meta) GetProxyEnv() *ProxyEnv { + if m != nil { + return m.ProxyEnv + } + return nil 
+} + +func (m *Meta) GetExtraHosts() []*HostIP { + if m != nil { + return m.ExtraHosts + } + return nil +} + +// Mount specifies how to mount an input Op as a filesystem. +type Mount struct { + Input InputIndex `protobuf:"varint,1,opt,name=input,proto3,customtype=InputIndex" json:"input"` + Selector string `protobuf:"bytes,2,opt,name=selector,proto3" json:"selector,omitempty"` + Dest string `protobuf:"bytes,3,opt,name=dest,proto3" json:"dest,omitempty"` + Output OutputIndex `protobuf:"varint,4,opt,name=output,proto3,customtype=OutputIndex" json:"output"` + Readonly bool `protobuf:"varint,5,opt,name=readonly,proto3" json:"readonly,omitempty"` + MountType MountType `protobuf:"varint,6,opt,name=mountType,proto3,enum=pb.MountType" json:"mountType,omitempty"` + CacheOpt *CacheOpt `protobuf:"bytes,20,opt,name=cacheOpt,proto3" json:"cacheOpt,omitempty"` + SecretOpt *SecretOpt `protobuf:"bytes,21,opt,name=secretOpt,proto3" json:"secretOpt,omitempty"` + SSHOpt *SSHOpt `protobuf:"bytes,22,opt,name=SSHOpt,proto3" json:"SSHOpt,omitempty"` +} + +func (m *Mount) Reset() { *m = Mount{} } +func (m *Mount) String() string { return proto.CompactTextString(m) } +func (*Mount) ProtoMessage() {} +func (*Mount) Descriptor() ([]byte, []int) { + return fileDescriptor_8de16154b2733812, []int{5} +} +func (m *Mount) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Mount) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Mount) XXX_Merge(src proto.Message) { + xxx_messageInfo_Mount.Merge(m, src) +} +func (m *Mount) XXX_Size() int { + return m.Size() +} +func (m *Mount) XXX_DiscardUnknown() { + xxx_messageInfo_Mount.DiscardUnknown(m) +} + +var xxx_messageInfo_Mount proto.InternalMessageInfo + +func (m *Mount) GetSelector() string { + if m != nil { + return m.Selector + } + return "" +} + +func (m *Mount) GetDest() string { + if m != nil { + return m.Dest + } + return "" +} + +func (m *Mount) GetReadonly() bool { + if m != nil { + return m.Readonly + } + return false +} + +func (m *Mount) GetMountType() MountType { + if m != nil { + return m.MountType + } + return MountType_BIND +} + +func (m *Mount) GetCacheOpt() *CacheOpt { + if m != nil { + return m.CacheOpt + } + return nil +} + +func (m *Mount) GetSecretOpt() *SecretOpt { + if m != nil { + return m.SecretOpt + } + return nil +} + +func (m *Mount) GetSSHOpt() *SSHOpt { + if m != nil { + return m.SSHOpt + } + return nil +} + +// CacheOpt defines options specific to cache mounts +type CacheOpt struct { + // ID is an optional namespace for the mount + ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` + // Sharing is the sharing mode for the mount + Sharing CacheSharingOpt `protobuf:"varint,2,opt,name=sharing,proto3,enum=pb.CacheSharingOpt" json:"sharing,omitempty"` +} + +func (m *CacheOpt) Reset() { *m = CacheOpt{} } +func (m *CacheOpt) String() string { return proto.CompactTextString(m) } +func (*CacheOpt) ProtoMessage() {} +func (*CacheOpt) Descriptor() ([]byte, []int) { + return fileDescriptor_8de16154b2733812, []int{6} +} +func (m *CacheOpt) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CacheOpt) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CacheOpt) XXX_Merge(src proto.Message) { + xxx_messageInfo_CacheOpt.Merge(m, src) +} 
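+
+// exampleCacheMount is an illustrative sketch only: a cache mount
+// declaration combining Mount with CacheOpt (the ID and destination
+// below are arbitrary). SHARED permits concurrent writers, PRIVATE
+// creates a new mount when there are multiple concurrent writers, and
+// LOCKED pauses a second writer until the first one releases the mount.
+var exampleCacheMount = &Mount{
+	Input:     Empty,      // not backed by an input vertex
+	Dest:      "/cache",   // arbitrary example destination
+	Output:    SkipOutput, // cache mounts typically do not produce an output snapshot
+	MountType: MountType_CACHE,
+	CacheOpt:  &CacheOpt{ID: "example-cache", Sharing: CacheSharingOpt_LOCKED},
+}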
+func (m *CacheOpt) XXX_Size() int { + return m.Size() +} +func (m *CacheOpt) XXX_DiscardUnknown() { + xxx_messageInfo_CacheOpt.DiscardUnknown(m) +} + +var xxx_messageInfo_CacheOpt proto.InternalMessageInfo + +func (m *CacheOpt) GetID() string { + if m != nil { + return m.ID + } + return "" +} + +func (m *CacheOpt) GetSharing() CacheSharingOpt { + if m != nil { + return m.Sharing + } + return CacheSharingOpt_SHARED +} + +// SecretOpt defines options describing secret mounts +type SecretOpt struct { + // ID of the secret. Used for querying the value. + ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` + // UID of secret file + Uid uint32 `protobuf:"varint,2,opt,name=uid,proto3" json:"uid,omitempty"` + // GID of secret file + Gid uint32 `protobuf:"varint,3,opt,name=gid,proto3" json:"gid,omitempty"` + // Mode is the filesystem mode of secret file + Mode uint32 `protobuf:"varint,4,opt,name=mode,proto3" json:"mode,omitempty"` + // Optional defines whether the secret value is required. An error is produced + // if the value is not found and optional is false. + Optional bool `protobuf:"varint,5,opt,name=optional,proto3" json:"optional,omitempty"` +} + +func (m *SecretOpt) Reset() { *m = SecretOpt{} } +func (m *SecretOpt) String() string { return proto.CompactTextString(m) } +func (*SecretOpt) ProtoMessage() {} +func (*SecretOpt) Descriptor() ([]byte, []int) { + return fileDescriptor_8de16154b2733812, []int{7} +} +func (m *SecretOpt) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SecretOpt) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SecretOpt) XXX_Merge(src proto.Message) { + xxx_messageInfo_SecretOpt.Merge(m, src) +} +func (m *SecretOpt) XXX_Size() int { + return m.Size() +} +func (m *SecretOpt) XXX_DiscardUnknown() { + xxx_messageInfo_SecretOpt.DiscardUnknown(m) +} + +var xxx_messageInfo_SecretOpt proto.InternalMessageInfo + +func (m *SecretOpt) GetID() string { + if m != nil { + return m.ID + } + return "" +} + +func (m *SecretOpt) GetUid() uint32 { + if m != nil { + return m.Uid + } + return 0 +} + +func (m *SecretOpt) GetGid() uint32 { + if m != nil { + return m.Gid + } + return 0 +} + +func (m *SecretOpt) GetMode() uint32 { + if m != nil { + return m.Mode + } + return 0 +} + +func (m *SecretOpt) GetOptional() bool { + if m != nil { + return m.Optional + } + return false +} + +// SSHOpt defines options describing ssh mounts +type SSHOpt struct { + // ID of exposed ssh rule. Used for querying the value. + ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` + // UID of agent socket + Uid uint32 `protobuf:"varint,2,opt,name=uid,proto3" json:"uid,omitempty"` + // GID of agent socket + Gid uint32 `protobuf:"varint,3,opt,name=gid,proto3" json:"gid,omitempty"` + // Mode is the filesystem mode of agent socket + Mode uint32 `protobuf:"varint,4,opt,name=mode,proto3" json:"mode,omitempty"` + // Optional defines whether the ssh socket is required. An error is produced + // if the client does not expose ssh.
+ Optional bool `protobuf:"varint,5,opt,name=optional,proto3" json:"optional,omitempty"` +} + +func (m *SSHOpt) Reset() { *m = SSHOpt{} } +func (m *SSHOpt) String() string { return proto.CompactTextString(m) } +func (*SSHOpt) ProtoMessage() {} +func (*SSHOpt) Descriptor() ([]byte, []int) { + return fileDescriptor_8de16154b2733812, []int{8} +} +func (m *SSHOpt) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SSHOpt) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SSHOpt) XXX_Merge(src proto.Message) { + xxx_messageInfo_SSHOpt.Merge(m, src) +} +func (m *SSHOpt) XXX_Size() int { + return m.Size() +} +func (m *SSHOpt) XXX_DiscardUnknown() { + xxx_messageInfo_SSHOpt.DiscardUnknown(m) +} + +var xxx_messageInfo_SSHOpt proto.InternalMessageInfo + +func (m *SSHOpt) GetID() string { + if m != nil { + return m.ID + } + return "" +} + +func (m *SSHOpt) GetUid() uint32 { + if m != nil { + return m.Uid + } + return 0 +} + +func (m *SSHOpt) GetGid() uint32 { + if m != nil { + return m.Gid + } + return 0 +} + +func (m *SSHOpt) GetMode() uint32 { + if m != nil { + return m.Mode + } + return 0 +} + +func (m *SSHOpt) GetOptional() bool { + if m != nil { + return m.Optional + } + return false +} + +// SourceOp specifies a source such as build contexts and images. +type SourceOp struct { + // TODO: use source type or any type instead of URL protocol. + // identifier e.g. local://, docker-image://, git://, https://... + Identifier string `protobuf:"bytes,1,opt,name=identifier,proto3" json:"identifier,omitempty"` + // attrs are defined in attr.go + Attrs map[string]string `protobuf:"bytes,2,rep,name=attrs,proto3" json:"attrs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *SourceOp) Reset() { *m = SourceOp{} } +func (m *SourceOp) String() string { return proto.CompactTextString(m) } +func (*SourceOp) ProtoMessage() {} +func (*SourceOp) Descriptor() ([]byte, []int) { + return fileDescriptor_8de16154b2733812, []int{9} +} +func (m *SourceOp) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SourceOp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SourceOp) XXX_Merge(src proto.Message) { + xxx_messageInfo_SourceOp.Merge(m, src) +} +func (m *SourceOp) XXX_Size() int { + return m.Size() +} +func (m *SourceOp) XXX_DiscardUnknown() { + xxx_messageInfo_SourceOp.DiscardUnknown(m) +} + +var xxx_messageInfo_SourceOp proto.InternalMessageInfo + +func (m *SourceOp) GetIdentifier() string { + if m != nil { + return m.Identifier + } + return "" +} + +func (m *SourceOp) GetAttrs() map[string]string { + if m != nil { + return m.Attrs + } + return nil +} + +// BuildOp is used for nested build invocation. 
+// BuildOp is experimental and can break without backwards compatibility +type BuildOp struct { + Builder InputIndex `protobuf:"varint,1,opt,name=builder,proto3,customtype=InputIndex" json:"builder"` + Inputs map[string]*BuildInput `protobuf:"bytes,2,rep,name=inputs,proto3" json:"inputs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Def *Definition `protobuf:"bytes,3,opt,name=def,proto3" json:"def,omitempty"` + Attrs map[string]string `protobuf:"bytes,4,rep,name=attrs,proto3" json:"attrs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *BuildOp) Reset() { *m = BuildOp{} } +func (m *BuildOp) String() string { return proto.CompactTextString(m) } +func (*BuildOp) ProtoMessage() {} +func (*BuildOp) Descriptor() ([]byte, []int) { + return fileDescriptor_8de16154b2733812, []int{10} +} +func (m *BuildOp) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildOp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildOp) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildOp.Merge(m, src) +} +func (m *BuildOp) XXX_Size() int { + return m.Size() +} +func (m *BuildOp) XXX_DiscardUnknown() { + xxx_messageInfo_BuildOp.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildOp proto.InternalMessageInfo + +func (m *BuildOp) GetInputs() map[string]*BuildInput { + if m != nil { + return m.Inputs + } + return nil +} + +func (m *BuildOp) GetDef() *Definition { + if m != nil { + return m.Def + } + return nil +} + +func (m *BuildOp) GetAttrs() map[string]string { + if m != nil { + return m.Attrs + } + return nil +} + +// BuildInput is used for BuildOp. +type BuildInput struct { + Input InputIndex `protobuf:"varint,1,opt,name=input,proto3,customtype=InputIndex" json:"input"` +} + +func (m *BuildInput) Reset() { *m = BuildInput{} } +func (m *BuildInput) String() string { return proto.CompactTextString(m) } +func (*BuildInput) ProtoMessage() {} +func (*BuildInput) Descriptor() ([]byte, []int) { + return fileDescriptor_8de16154b2733812, []int{11} +} +func (m *BuildInput) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildInput) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildInput) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildInput.Merge(m, src) +} +func (m *BuildInput) XXX_Size() int { + return m.Size() +} +func (m *BuildInput) XXX_DiscardUnknown() { + xxx_messageInfo_BuildInput.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildInput proto.InternalMessageInfo + +// OpMetadata is a per-vertex metadata entry, which can be defined for an arbitrary Op vertex and overridden at run time. +type OpMetadata struct { + // ignore_cache specifies that the cache should be ignored for this Op.
+ IgnoreCache bool `protobuf:"varint,1,opt,name=ignore_cache,json=ignoreCache,proto3" json:"ignore_cache,omitempty"` + // Description can be used for keeping any text fields that builder doesn't parse + Description map[string]string `protobuf:"bytes,2,rep,name=description,proto3" json:"description,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // index 3 reserved for WorkerConstraint in previous versions + // WorkerConstraint worker_constraint = 3; + ExportCache *ExportCache `protobuf:"bytes,4,opt,name=export_cache,json=exportCache,proto3" json:"export_cache,omitempty"` + Caps map[github_com_moby_buildkit_util_apicaps.CapID]bool `protobuf:"bytes,5,rep,name=caps,proto3,castkey=github.com/moby/buildkit/util/apicaps.CapID" json:"caps" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` +} + +func (m *OpMetadata) Reset() { *m = OpMetadata{} } +func (m *OpMetadata) String() string { return proto.CompactTextString(m) } +func (*OpMetadata) ProtoMessage() {} +func (*OpMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_8de16154b2733812, []int{12} +} +func (m *OpMetadata) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OpMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *OpMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_OpMetadata.Merge(m, src) +} +func (m *OpMetadata) XXX_Size() int { + return m.Size() +} +func (m *OpMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_OpMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_OpMetadata proto.InternalMessageInfo + +func (m *OpMetadata) GetIgnoreCache() bool { + if m != nil { + return m.IgnoreCache + } + return false +} + +func (m *OpMetadata) GetDescription() map[string]string { + if m != nil { + return m.Description + } + return nil +} + +func (m *OpMetadata) GetExportCache() *ExportCache { + if m != nil { + return m.ExportCache + } + return nil +} + +func (m *OpMetadata) GetCaps() map[github_com_moby_buildkit_util_apicaps.CapID]bool { + if m != nil { + return m.Caps + } + return nil +} + +type ExportCache struct { + Value bool `protobuf:"varint,1,opt,name=Value,proto3" json:"Value,omitempty"` +} + +func (m *ExportCache) Reset() { *m = ExportCache{} } +func (m *ExportCache) String() string { return proto.CompactTextString(m) } +func (*ExportCache) ProtoMessage() {} +func (*ExportCache) Descriptor() ([]byte, []int) { + return fileDescriptor_8de16154b2733812, []int{13} +} +func (m *ExportCache) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExportCache) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExportCache) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExportCache.Merge(m, src) +} +func (m *ExportCache) XXX_Size() int { + return m.Size() +} +func (m *ExportCache) XXX_DiscardUnknown() { + xxx_messageInfo_ExportCache.DiscardUnknown(m) +} + +var xxx_messageInfo_ExportCache proto.InternalMessageInfo + +func (m *ExportCache) GetValue() bool { + if m != nil { + return m.Value + } + return false +} + +type ProxyEnv struct { + HttpProxy string `protobuf:"bytes,1,opt,name=http_proxy,json=httpProxy,proto3" json:"http_proxy,omitempty"` + HttpsProxy string 
`protobuf:"bytes,2,opt,name=https_proxy,json=httpsProxy,proto3" json:"https_proxy,omitempty"` + FtpProxy string `protobuf:"bytes,3,opt,name=ftp_proxy,json=ftpProxy,proto3" json:"ftp_proxy,omitempty"` + NoProxy string `protobuf:"bytes,4,opt,name=no_proxy,json=noProxy,proto3" json:"no_proxy,omitempty"` +} + +func (m *ProxyEnv) Reset() { *m = ProxyEnv{} } +func (m *ProxyEnv) String() string { return proto.CompactTextString(m) } +func (*ProxyEnv) ProtoMessage() {} +func (*ProxyEnv) Descriptor() ([]byte, []int) { + return fileDescriptor_8de16154b2733812, []int{14} +} +func (m *ProxyEnv) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ProxyEnv) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ProxyEnv) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProxyEnv.Merge(m, src) +} +func (m *ProxyEnv) XXX_Size() int { + return m.Size() +} +func (m *ProxyEnv) XXX_DiscardUnknown() { + xxx_messageInfo_ProxyEnv.DiscardUnknown(m) +} + +var xxx_messageInfo_ProxyEnv proto.InternalMessageInfo + +func (m *ProxyEnv) GetHttpProxy() string { + if m != nil { + return m.HttpProxy + } + return "" +} + +func (m *ProxyEnv) GetHttpsProxy() string { + if m != nil { + return m.HttpsProxy + } + return "" +} + +func (m *ProxyEnv) GetFtpProxy() string { + if m != nil { + return m.FtpProxy + } + return "" +} + +func (m *ProxyEnv) GetNoProxy() string { + if m != nil { + return m.NoProxy + } + return "" +} + +// WorkerConstraints defines conditions for the worker +type WorkerConstraints struct { + Filter []string `protobuf:"bytes,1,rep,name=filter,proto3" json:"filter,omitempty"` +} + +func (m *WorkerConstraints) Reset() { *m = WorkerConstraints{} } +func (m *WorkerConstraints) String() string { return proto.CompactTextString(m) } +func (*WorkerConstraints) ProtoMessage() {} +func (*WorkerConstraints) Descriptor() ([]byte, []int) { + return fileDescriptor_8de16154b2733812, []int{15} +} +func (m *WorkerConstraints) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WorkerConstraints) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *WorkerConstraints) XXX_Merge(src proto.Message) { + xxx_messageInfo_WorkerConstraints.Merge(m, src) +} +func (m *WorkerConstraints) XXX_Size() int { + return m.Size() +} +func (m *WorkerConstraints) XXX_DiscardUnknown() { + xxx_messageInfo_WorkerConstraints.DiscardUnknown(m) +} + +var xxx_messageInfo_WorkerConstraints proto.InternalMessageInfo + +func (m *WorkerConstraints) GetFilter() []string { + if m != nil { + return m.Filter + } + return nil +} + +// Definition is the LLB definition structure with per-vertex metadata entries +type Definition struct { + // def is a list of marshaled Op messages + Def [][]byte `protobuf:"bytes,1,rep,name=def,proto3" json:"def,omitempty"` + // metadata contains metadata for each of the Op messages. + // A key must be an LLB op digest string. Currently, an empty string is not expected as a key, but this may change in the future.
+ Metadata map[github_com_opencontainers_go_digest.Digest]OpMetadata `protobuf:"bytes,2,rep,name=metadata,proto3,castkey=github.com/opencontainers/go-digest.Digest" json:"metadata" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *Definition) Reset() { *m = Definition{} } +func (m *Definition) String() string { return proto.CompactTextString(m) } +func (*Definition) ProtoMessage() {} +func (*Definition) Descriptor() ([]byte, []int) { + return fileDescriptor_8de16154b2733812, []int{16} +} +func (m *Definition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Definition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Definition) XXX_Merge(src proto.Message) { + xxx_messageInfo_Definition.Merge(m, src) +} +func (m *Definition) XXX_Size() int { + return m.Size() +} +func (m *Definition) XXX_DiscardUnknown() { + xxx_messageInfo_Definition.DiscardUnknown(m) +} + +var xxx_messageInfo_Definition proto.InternalMessageInfo + +func (m *Definition) GetDef() [][]byte { + if m != nil { + return m.Def + } + return nil +} + +func (m *Definition) GetMetadata() map[github_com_opencontainers_go_digest.Digest]OpMetadata { + if m != nil { + return m.Metadata + } + return nil +} + +type HostIP struct { + Host string `protobuf:"bytes,1,opt,name=Host,proto3" json:"Host,omitempty"` + IP string `protobuf:"bytes,2,opt,name=IP,proto3" json:"IP,omitempty"` +} + +func (m *HostIP) Reset() { *m = HostIP{} } +func (m *HostIP) String() string { return proto.CompactTextString(m) } +func (*HostIP) ProtoMessage() {} +func (*HostIP) Descriptor() ([]byte, []int) { + return fileDescriptor_8de16154b2733812, []int{17} +} +func (m *HostIP) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HostIP) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HostIP) XXX_Merge(src proto.Message) { + xxx_messageInfo_HostIP.Merge(m, src) +} +func (m *HostIP) XXX_Size() int { + return m.Size() +} +func (m *HostIP) XXX_DiscardUnknown() { + xxx_messageInfo_HostIP.DiscardUnknown(m) +} + +var xxx_messageInfo_HostIP proto.InternalMessageInfo + +func (m *HostIP) GetHost() string { + if m != nil { + return m.Host + } + return "" +} + +func (m *HostIP) GetIP() string { + if m != nil { + return m.IP + } + return "" +} + +type FileOp struct { + Actions []*FileAction `protobuf:"bytes,2,rep,name=actions,proto3" json:"actions,omitempty"` +} + +func (m *FileOp) Reset() { *m = FileOp{} } +func (m *FileOp) String() string { return proto.CompactTextString(m) } +func (*FileOp) ProtoMessage() {} +func (*FileOp) Descriptor() ([]byte, []int) { + return fileDescriptor_8de16154b2733812, []int{18} +} +func (m *FileOp) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FileOp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FileOp) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileOp.Merge(m, src) +} +func (m *FileOp) XXX_Size() int { + return m.Size() +} +func (m *FileOp) XXX_DiscardUnknown() { + xxx_messageInfo_FileOp.DiscardUnknown(m) +} + +var xxx_messageInfo_FileOp proto.InternalMessageInfo + +func (m *FileOp) GetActions() []*FileAction { + if m 
!= nil { + return m.Actions + } + return nil +} + +type FileAction struct { + Input InputIndex `protobuf:"varint,1,opt,name=input,proto3,customtype=InputIndex" json:"input"` + SecondaryInput InputIndex `protobuf:"varint,2,opt,name=secondaryInput,proto3,customtype=InputIndex" json:"secondaryInput"` + Output OutputIndex `protobuf:"varint,3,opt,name=output,proto3,customtype=OutputIndex" json:"output"` + // Types that are valid to be assigned to Action: + // *FileAction_Copy + // *FileAction_Mkfile + // *FileAction_Mkdir + // *FileAction_Rm + Action isFileAction_Action `protobuf_oneof:"action"` +} + +func (m *FileAction) Reset() { *m = FileAction{} } +func (m *FileAction) String() string { return proto.CompactTextString(m) } +func (*FileAction) ProtoMessage() {} +func (*FileAction) Descriptor() ([]byte, []int) { + return fileDescriptor_8de16154b2733812, []int{19} +} +func (m *FileAction) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FileAction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FileAction) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileAction.Merge(m, src) +} +func (m *FileAction) XXX_Size() int { + return m.Size() +} +func (m *FileAction) XXX_DiscardUnknown() { + xxx_messageInfo_FileAction.DiscardUnknown(m) +} + +var xxx_messageInfo_FileAction proto.InternalMessageInfo + +type isFileAction_Action interface { + isFileAction_Action() + MarshalTo([]byte) (int, error) + Size() int +} + +type FileAction_Copy struct { + Copy *FileActionCopy `protobuf:"bytes,4,opt,name=copy,proto3,oneof" json:"copy,omitempty"` +} +type FileAction_Mkfile struct { + Mkfile *FileActionMkFile `protobuf:"bytes,5,opt,name=mkfile,proto3,oneof" json:"mkfile,omitempty"` +} +type FileAction_Mkdir struct { + Mkdir *FileActionMkDir `protobuf:"bytes,6,opt,name=mkdir,proto3,oneof" json:"mkdir,omitempty"` +} +type FileAction_Rm struct { + Rm *FileActionRm `protobuf:"bytes,7,opt,name=rm,proto3,oneof" json:"rm,omitempty"` +} + +func (*FileAction_Copy) isFileAction_Action() {} +func (*FileAction_Mkfile) isFileAction_Action() {} +func (*FileAction_Mkdir) isFileAction_Action() {} +func (*FileAction_Rm) isFileAction_Action() {} + +func (m *FileAction) GetAction() isFileAction_Action { + if m != nil { + return m.Action + } + return nil +} + +func (m *FileAction) GetCopy() *FileActionCopy { + if x, ok := m.GetAction().(*FileAction_Copy); ok { + return x.Copy + } + return nil +} + +func (m *FileAction) GetMkfile() *FileActionMkFile { + if x, ok := m.GetAction().(*FileAction_Mkfile); ok { + return x.Mkfile + } + return nil +} + +func (m *FileAction) GetMkdir() *FileActionMkDir { + if x, ok := m.GetAction().(*FileAction_Mkdir); ok { + return x.Mkdir + } + return nil +} + +func (m *FileAction) GetRm() *FileActionRm { + if x, ok := m.GetAction().(*FileAction_Rm); ok { + return x.Rm + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. 
+func (*FileAction) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*FileAction_Copy)(nil), + (*FileAction_Mkfile)(nil), + (*FileAction_Mkdir)(nil), + (*FileAction_Rm)(nil), + } +} + +type FileActionCopy struct { + // src is the source path + Src string `protobuf:"bytes,1,opt,name=src,proto3" json:"src,omitempty"` + // dest path + Dest string `protobuf:"bytes,2,opt,name=dest,proto3" json:"dest,omitempty"` + // optional owner override + Owner *ChownOpt `protobuf:"bytes,3,opt,name=owner,proto3" json:"owner,omitempty"` + // optional permission bits override + Mode int32 `protobuf:"varint,4,opt,name=mode,proto3" json:"mode,omitempty"` + // followSymlink resolves symlinks in src + FollowSymlink bool `protobuf:"varint,5,opt,name=followSymlink,proto3" json:"followSymlink,omitempty"` + // dirCopyContents only copies contents if src is a directory + DirCopyContents bool `protobuf:"varint,6,opt,name=dirCopyContents,proto3" json:"dirCopyContents,omitempty"` + // attemptUnpackDockerCompatibility detects if src is an archive to unpack it instead + AttemptUnpackDockerCompatibility bool `protobuf:"varint,7,opt,name=attemptUnpackDockerCompatibility,proto3" json:"attemptUnpackDockerCompatibility,omitempty"` + // createDestPath creates dest path directories if needed + CreateDestPath bool `protobuf:"varint,8,opt,name=createDestPath,proto3" json:"createDestPath,omitempty"` + // allowWildcard allows filepath.Match wildcards in src path + AllowWildcard bool `protobuf:"varint,9,opt,name=allowWildcard,proto3" json:"allowWildcard,omitempty"` + // allowEmptyWildcard doesn't fail the whole copy if wildcard doesn't resolve to files + AllowEmptyWildcard bool `protobuf:"varint,10,opt,name=allowEmptyWildcard,proto3" json:"allowEmptyWildcard,omitempty"` + // optional created time override + Timestamp int64 `protobuf:"varint,11,opt,name=timestamp,proto3" json:"timestamp,omitempty"` +} + +func (m *FileActionCopy) Reset() { *m = FileActionCopy{} } +func (m *FileActionCopy) String() string { return proto.CompactTextString(m) } +func (*FileActionCopy) ProtoMessage() {} +func (*FileActionCopy) Descriptor() ([]byte, []int) { + return fileDescriptor_8de16154b2733812, []int{20} +} +func (m *FileActionCopy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FileActionCopy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FileActionCopy) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileActionCopy.Merge(m, src) +} +func (m *FileActionCopy) XXX_Size() int { + return m.Size() +} +func (m *FileActionCopy) XXX_DiscardUnknown() { + xxx_messageInfo_FileActionCopy.DiscardUnknown(m) +} + +var xxx_messageInfo_FileActionCopy proto.InternalMessageInfo + +func (m *FileActionCopy) GetSrc() string { + if m != nil { + return m.Src + } + return "" +} + +func (m *FileActionCopy) GetDest() string { + if m != nil { + return m.Dest + } + return "" +} + +func (m *FileActionCopy) GetOwner() *ChownOpt { + if m != nil { + return m.Owner + } + return nil +} + +func (m *FileActionCopy) GetMode() int32 { + if m != nil { + return m.Mode + } + return 0 +} + +func (m *FileActionCopy) GetFollowSymlink() bool { + if m != nil { + return m.FollowSymlink + } + return false +} + +func (m *FileActionCopy) GetDirCopyContents() bool { + if m != nil { + return m.DirCopyContents + } + return false +} + +func (m *FileActionCopy) GetAttemptUnpackDockerCompatibility() bool { + if m != nil { + return 
m.AttemptUnpackDockerCompatibility + } + return false +} + +func (m *FileActionCopy) GetCreateDestPath() bool { + if m != nil { + return m.CreateDestPath + } + return false +} + +func (m *FileActionCopy) GetAllowWildcard() bool { + if m != nil { + return m.AllowWildcard + } + return false +} + +func (m *FileActionCopy) GetAllowEmptyWildcard() bool { + if m != nil { + return m.AllowEmptyWildcard + } + return false +} + +func (m *FileActionCopy) GetTimestamp() int64 { + if m != nil { + return m.Timestamp + } + return 0 +} + +type FileActionMkFile struct { + // path for the new file + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + // permission bits + Mode int32 `protobuf:"varint,2,opt,name=mode,proto3" json:"mode,omitempty"` + // data is the new file contents + Data []byte `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"` + // optional owner for the new file + Owner *ChownOpt `protobuf:"bytes,4,opt,name=owner,proto3" json:"owner,omitempty"` + // optional created time override + Timestamp int64 `protobuf:"varint,5,opt,name=timestamp,proto3" json:"timestamp,omitempty"` +} + +func (m *FileActionMkFile) Reset() { *m = FileActionMkFile{} } +func (m *FileActionMkFile) String() string { return proto.CompactTextString(m) } +func (*FileActionMkFile) ProtoMessage() {} +func (*FileActionMkFile) Descriptor() ([]byte, []int) { + return fileDescriptor_8de16154b2733812, []int{21} +} +func (m *FileActionMkFile) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FileActionMkFile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FileActionMkFile) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileActionMkFile.Merge(m, src) +} +func (m *FileActionMkFile) XXX_Size() int { + return m.Size() +} +func (m *FileActionMkFile) XXX_DiscardUnknown() { + xxx_messageInfo_FileActionMkFile.DiscardUnknown(m) +} + +var xxx_messageInfo_FileActionMkFile proto.InternalMessageInfo + +func (m *FileActionMkFile) GetPath() string { + if m != nil { + return m.Path + } + return "" +} + +func (m *FileActionMkFile) GetMode() int32 { + if m != nil { + return m.Mode + } + return 0 +} + +func (m *FileActionMkFile) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func (m *FileActionMkFile) GetOwner() *ChownOpt { + if m != nil { + return m.Owner + } + return nil +} + +func (m *FileActionMkFile) GetTimestamp() int64 { + if m != nil { + return m.Timestamp + } + return 0 +} + +type FileActionMkDir struct { + // path for the new directory + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + // permission bits + Mode int32 `protobuf:"varint,2,opt,name=mode,proto3" json:"mode,omitempty"` + // makeParents creates parent directories as well if needed + MakeParents bool `protobuf:"varint,3,opt,name=makeParents,proto3" json:"makeParents,omitempty"` + // optional owner for the new directory + Owner *ChownOpt `protobuf:"bytes,4,opt,name=owner,proto3" json:"owner,omitempty"` + // optional created time override + Timestamp int64 `protobuf:"varint,5,opt,name=timestamp,proto3" json:"timestamp,omitempty"` +} + +func (m *FileActionMkDir) Reset() { *m = FileActionMkDir{} } +func (m *FileActionMkDir) String() string { return proto.CompactTextString(m) } +func (*FileActionMkDir) ProtoMessage() {} +func (*FileActionMkDir) Descriptor() ([]byte, []int) { + return fileDescriptor_8de16154b2733812, []int{22} +} +func 
(m *FileActionMkDir) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FileActionMkDir) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FileActionMkDir) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileActionMkDir.Merge(m, src) +} +func (m *FileActionMkDir) XXX_Size() int { + return m.Size() +} +func (m *FileActionMkDir) XXX_DiscardUnknown() { + xxx_messageInfo_FileActionMkDir.DiscardUnknown(m) +} + +var xxx_messageInfo_FileActionMkDir proto.InternalMessageInfo + +func (m *FileActionMkDir) GetPath() string { + if m != nil { + return m.Path + } + return "" +} + +func (m *FileActionMkDir) GetMode() int32 { + if m != nil { + return m.Mode + } + return 0 +} + +func (m *FileActionMkDir) GetMakeParents() bool { + if m != nil { + return m.MakeParents + } + return false +} + +func (m *FileActionMkDir) GetOwner() *ChownOpt { + if m != nil { + return m.Owner + } + return nil +} + +func (m *FileActionMkDir) GetTimestamp() int64 { + if m != nil { + return m.Timestamp + } + return 0 +} + +type FileActionRm struct { + // path to remove + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + // allowNotFound doesn't fail the rm if file is not found + AllowNotFound bool `protobuf:"varint,2,opt,name=allowNotFound,proto3" json:"allowNotFound,omitempty"` + // allowWildcard allows filepath.Match wildcards in path + AllowWildcard bool `protobuf:"varint,3,opt,name=allowWildcard,proto3" json:"allowWildcard,omitempty"` +} + +func (m *FileActionRm) Reset() { *m = FileActionRm{} } +func (m *FileActionRm) String() string { return proto.CompactTextString(m) } +func (*FileActionRm) ProtoMessage() {} +func (*FileActionRm) Descriptor() ([]byte, []int) { + return fileDescriptor_8de16154b2733812, []int{23} +} +func (m *FileActionRm) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FileActionRm) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FileActionRm) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileActionRm.Merge(m, src) +} +func (m *FileActionRm) XXX_Size() int { + return m.Size() +} +func (m *FileActionRm) XXX_DiscardUnknown() { + xxx_messageInfo_FileActionRm.DiscardUnknown(m) +} + +var xxx_messageInfo_FileActionRm proto.InternalMessageInfo + +func (m *FileActionRm) GetPath() string { + if m != nil { + return m.Path + } + return "" +} + +func (m *FileActionRm) GetAllowNotFound() bool { + if m != nil { + return m.AllowNotFound + } + return false +} + +func (m *FileActionRm) GetAllowWildcard() bool { + if m != nil { + return m.AllowWildcard + } + return false +} + +type ChownOpt struct { + User *UserOpt `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"` + Group *UserOpt `protobuf:"bytes,2,opt,name=group,proto3" json:"group,omitempty"` +} + +func (m *ChownOpt) Reset() { *m = ChownOpt{} } +func (m *ChownOpt) String() string { return proto.CompactTextString(m) } +func (*ChownOpt) ProtoMessage() {} +func (*ChownOpt) Descriptor() ([]byte, []int) { + return fileDescriptor_8de16154b2733812, []int{24} +} +func (m *ChownOpt) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ChownOpt) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return 
b[:n], nil +} +func (m *ChownOpt) XXX_Merge(src proto.Message) { + xxx_messageInfo_ChownOpt.Merge(m, src) +} +func (m *ChownOpt) XXX_Size() int { + return m.Size() +} +func (m *ChownOpt) XXX_DiscardUnknown() { + xxx_messageInfo_ChownOpt.DiscardUnknown(m) +} + +var xxx_messageInfo_ChownOpt proto.InternalMessageInfo + +func (m *ChownOpt) GetUser() *UserOpt { + if m != nil { + return m.User + } + return nil +} + +func (m *ChownOpt) GetGroup() *UserOpt { + if m != nil { + return m.Group + } + return nil +} + +type UserOpt struct { + // Types that are valid to be assigned to User: + // *UserOpt_ByName + // *UserOpt_ByID + User isUserOpt_User `protobuf_oneof:"user"` +} + +func (m *UserOpt) Reset() { *m = UserOpt{} } +func (m *UserOpt) String() string { return proto.CompactTextString(m) } +func (*UserOpt) ProtoMessage() {} +func (*UserOpt) Descriptor() ([]byte, []int) { + return fileDescriptor_8de16154b2733812, []int{25} +} +func (m *UserOpt) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UserOpt) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *UserOpt) XXX_Merge(src proto.Message) { + xxx_messageInfo_UserOpt.Merge(m, src) +} +func (m *UserOpt) XXX_Size() int { + return m.Size() +} +func (m *UserOpt) XXX_DiscardUnknown() { + xxx_messageInfo_UserOpt.DiscardUnknown(m) +} + +var xxx_messageInfo_UserOpt proto.InternalMessageInfo + +type isUserOpt_User interface { + isUserOpt_User() + MarshalTo([]byte) (int, error) + Size() int +} + +type UserOpt_ByName struct { + ByName *NamedUserOpt `protobuf:"bytes,1,opt,name=byName,proto3,oneof" json:"byName,omitempty"` +} +type UserOpt_ByID struct { + ByID uint32 `protobuf:"varint,2,opt,name=byID,proto3,oneof" json:"byID,omitempty"` +} + +func (*UserOpt_ByName) isUserOpt_User() {} +func (*UserOpt_ByID) isUserOpt_User() {} + +func (m *UserOpt) GetUser() isUserOpt_User { + if m != nil { + return m.User + } + return nil +} + +func (m *UserOpt) GetByName() *NamedUserOpt { + if x, ok := m.GetUser().(*UserOpt_ByName); ok { + return x.ByName + } + return nil +} + +func (m *UserOpt) GetByID() uint32 { + if x, ok := m.GetUser().(*UserOpt_ByID); ok { + return x.ByID + } + return 0 +} + +// XXX_OneofWrappers is for the internal use of the proto package. 
+func (*UserOpt) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*UserOpt_ByName)(nil), + (*UserOpt_ByID)(nil), + } +} + +type NamedUserOpt struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Input InputIndex `protobuf:"varint,2,opt,name=input,proto3,customtype=InputIndex" json:"input"` +} + +func (m *NamedUserOpt) Reset() { *m = NamedUserOpt{} } +func (m *NamedUserOpt) String() string { return proto.CompactTextString(m) } +func (*NamedUserOpt) ProtoMessage() {} +func (*NamedUserOpt) Descriptor() ([]byte, []int) { + return fileDescriptor_8de16154b2733812, []int{26} +} +func (m *NamedUserOpt) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NamedUserOpt) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NamedUserOpt) XXX_Merge(src proto.Message) { + xxx_messageInfo_NamedUserOpt.Merge(m, src) +} +func (m *NamedUserOpt) XXX_Size() int { + return m.Size() +} +func (m *NamedUserOpt) XXX_DiscardUnknown() { + xxx_messageInfo_NamedUserOpt.DiscardUnknown(m) +} + +var xxx_messageInfo_NamedUserOpt proto.InternalMessageInfo + +func (m *NamedUserOpt) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func init() { + proto.RegisterEnum("pb.NetMode", NetMode_name, NetMode_value) + proto.RegisterEnum("pb.SecurityMode", SecurityMode_name, SecurityMode_value) + proto.RegisterEnum("pb.MountType", MountType_name, MountType_value) + proto.RegisterEnum("pb.CacheSharingOpt", CacheSharingOpt_name, CacheSharingOpt_value) + proto.RegisterType((*Op)(nil), "pb.Op") + proto.RegisterType((*Platform)(nil), "pb.Platform") + proto.RegisterType((*Input)(nil), "pb.Input") + proto.RegisterType((*ExecOp)(nil), "pb.ExecOp") + proto.RegisterType((*Meta)(nil), "pb.Meta") + proto.RegisterType((*Mount)(nil), "pb.Mount") + proto.RegisterType((*CacheOpt)(nil), "pb.CacheOpt") + proto.RegisterType((*SecretOpt)(nil), "pb.SecretOpt") + proto.RegisterType((*SSHOpt)(nil), "pb.SSHOpt") + proto.RegisterType((*SourceOp)(nil), "pb.SourceOp") + proto.RegisterMapType((map[string]string)(nil), "pb.SourceOp.AttrsEntry") + proto.RegisterType((*BuildOp)(nil), "pb.BuildOp") + proto.RegisterMapType((map[string]string)(nil), "pb.BuildOp.AttrsEntry") + proto.RegisterMapType((map[string]*BuildInput)(nil), "pb.BuildOp.InputsEntry") + proto.RegisterType((*BuildInput)(nil), "pb.BuildInput") + proto.RegisterType((*OpMetadata)(nil), "pb.OpMetadata") + proto.RegisterMapType((map[github_com_moby_buildkit_util_apicaps.CapID]bool)(nil), "pb.OpMetadata.CapsEntry") + proto.RegisterMapType((map[string]string)(nil), "pb.OpMetadata.DescriptionEntry") + proto.RegisterType((*ExportCache)(nil), "pb.ExportCache") + proto.RegisterType((*ProxyEnv)(nil), "pb.ProxyEnv") + proto.RegisterType((*WorkerConstraints)(nil), "pb.WorkerConstraints") + proto.RegisterType((*Definition)(nil), "pb.Definition") + proto.RegisterMapType((map[github_com_opencontainers_go_digest.Digest]OpMetadata)(nil), "pb.Definition.MetadataEntry") + proto.RegisterType((*HostIP)(nil), "pb.HostIP") + proto.RegisterType((*FileOp)(nil), "pb.FileOp") + proto.RegisterType((*FileAction)(nil), "pb.FileAction") + proto.RegisterType((*FileActionCopy)(nil), "pb.FileActionCopy") + proto.RegisterType((*FileActionMkFile)(nil), "pb.FileActionMkFile") + proto.RegisterType((*FileActionMkDir)(nil), "pb.FileActionMkDir") + proto.RegisterType((*FileActionRm)(nil), "pb.FileActionRm") + 
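+	// Registration under the fully-qualified "pb.<Message>" name makes each type
+	// resolvable at runtime; as an illustrative sketch (assuming the golang/protobuf
+	// v1 registry), proto.MessageType("pb.FileAction") would return the
+	// reflect.Type of *FileAction.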
proto.RegisterType((*ChownOpt)(nil), "pb.ChownOpt") + proto.RegisterType((*UserOpt)(nil), "pb.UserOpt") + proto.RegisterType((*NamedUserOpt)(nil), "pb.NamedUserOpt") +} + +func init() { proto.RegisterFile("ops.proto", fileDescriptor_8de16154b2733812) } + +var fileDescriptor_8de16154b2733812 = []byte{ + // 1978 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0x5f, 0x6f, 0x1b, 0xc7, + 0x11, 0xd7, 0x1d, 0xff, 0xde, 0x50, 0x92, 0xd9, 0x8d, 0x93, 0xb2, 0xaa, 0x2b, 0x29, 0x97, 0x34, + 0x90, 0x65, 0x5b, 0x02, 0x14, 0x20, 0x09, 0xf2, 0x50, 0x54, 0xfc, 0x63, 0x88, 0x49, 0x2c, 0x0a, + 0x4b, 0xdb, 0xe9, 0x9b, 0x71, 0xbc, 0x5b, 0x52, 0x07, 0xf2, 0x6e, 0x0f, 0x7b, 0x4b, 0x5b, 0x7c, + 0xe9, 0x83, 0x3f, 0x41, 0x80, 0x02, 0x7d, 0x6b, 0x81, 0xbe, 0x14, 0xe8, 0x87, 0xe8, 0x7b, 0x1e, + 0x83, 0xa2, 0x0f, 0x69, 0x1f, 0xd2, 0xc2, 0xfe, 0x22, 0xc5, 0xec, 0xee, 0xf1, 0x8e, 0xb4, 0x02, + 0xdb, 0x68, 0xd1, 0x27, 0xce, 0xcd, 0xfc, 0x76, 0x76, 0x76, 0x66, 0x76, 0x66, 0x96, 0xe0, 0xf0, + 0x24, 0x3d, 0x4a, 0x04, 0x97, 0x9c, 0xd8, 0xc9, 0x68, 0xe7, 0xde, 0x24, 0x94, 0x97, 0xf3, 0xd1, + 0x91, 0xcf, 0xa3, 0xe3, 0x09, 0x9f, 0xf0, 0x63, 0x25, 0x1a, 0xcd, 0xc7, 0xea, 0x4b, 0x7d, 0x28, + 0x4a, 0x2f, 0x71, 0xff, 0x64, 0x83, 0x3d, 0x48, 0xc8, 0xfb, 0x50, 0x0d, 0xe3, 0x64, 0x2e, 0xd3, + 0x96, 0xb5, 0x5f, 0x3a, 0x68, 0x9c, 0x38, 0x47, 0xc9, 0xe8, 0xa8, 0x8f, 0x1c, 0x6a, 0x04, 0x64, + 0x1f, 0xca, 0xec, 0x8a, 0xf9, 0x2d, 0x7b, 0xdf, 0x3a, 0x68, 0x9c, 0x00, 0x02, 0x7a, 0x57, 0xcc, + 0x1f, 0x24, 0x67, 0x1b, 0x54, 0x49, 0xc8, 0x47, 0x50, 0x4d, 0xf9, 0x5c, 0xf8, 0xac, 0x55, 0x52, + 0x98, 0x4d, 0xc4, 0x0c, 0x15, 0x47, 0xa1, 0x8c, 0x14, 0x35, 0x8d, 0xc3, 0x19, 0x6b, 0x95, 0x73, + 0x4d, 0xf7, 0xc3, 0x99, 0xc6, 0x28, 0x09, 0xf9, 0x00, 0x2a, 0xa3, 0x79, 0x38, 0x0b, 0x5a, 0x15, + 0x05, 0x69, 0x20, 0xa4, 0x8d, 0x0c, 0x85, 0xd1, 0x32, 0x72, 0x00, 0xf5, 0x64, 0xe6, 0xc9, 0x31, + 0x17, 0x51, 0x0b, 0xf2, 0x0d, 0x2f, 0x0c, 0x8f, 0x2e, 0xa5, 0xe4, 0x53, 0x68, 0xf8, 0x3c, 0x4e, + 0xa5, 0xf0, 0xc2, 0x58, 0xa6, 0xad, 0x86, 0x02, 0xbf, 0x8b, 0xe0, 0xaf, 0xb9, 0x98, 0x32, 0xd1, + 0xc9, 0x85, 0xb4, 0x88, 0x6c, 0x97, 0xc1, 0xe6, 0x89, 0xfb, 0x7b, 0x0b, 0xea, 0x99, 0x56, 0xe2, + 0xc2, 0xe6, 0xa9, 0xf0, 0x2f, 0x43, 0xc9, 0x7c, 0x39, 0x17, 0xac, 0x65, 0xed, 0x5b, 0x07, 0x0e, + 0x5d, 0xe1, 0x91, 0x6d, 0xb0, 0x07, 0x43, 0xe5, 0x28, 0x87, 0xda, 0x83, 0x21, 0x69, 0x41, 0xed, + 0xb1, 0x27, 0x42, 0x2f, 0x96, 0xca, 0x33, 0x0e, 0xcd, 0x3e, 0xc9, 0x2d, 0x70, 0x06, 0xc3, 0xc7, + 0x4c, 0xa4, 0x21, 0x8f, 0x95, 0x3f, 0x1c, 0x9a, 0x33, 0xc8, 0x2e, 0xc0, 0x60, 0x78, 0x9f, 0x79, + 0xa8, 0x34, 0x6d, 0x55, 0xf6, 0x4b, 0x07, 0x0e, 0x2d, 0x70, 0xdc, 0xdf, 0x42, 0x45, 0xc5, 0x88, + 0x7c, 0x01, 0xd5, 0x20, 0x9c, 0xb0, 0x54, 0x6a, 0x73, 0xda, 0x27, 0xdf, 0xfe, 0xb0, 0xb7, 0xf1, + 0xcf, 0x1f, 0xf6, 0x0e, 0x0b, 0xc9, 0xc0, 0x13, 0x16, 0xfb, 0x3c, 0x96, 0x5e, 0x18, 0x33, 0x91, + 0x1e, 0x4f, 0xf8, 0x3d, 0xbd, 0xe4, 0xa8, 0xab, 0x7e, 0xa8, 0xd1, 0x40, 0x6e, 0x43, 0x25, 0x8c, + 0x03, 0x76, 0xa5, 0xec, 0x2f, 0xb5, 0xdf, 0x31, 0xaa, 0x1a, 0x83, 0xb9, 0x4c, 0xe6, 0xb2, 0x8f, + 0x22, 0xaa, 0x11, 0xee, 0x1f, 0x2d, 0xa8, 0xea, 0x1c, 0x20, 0xb7, 0xa0, 0x1c, 0x31, 0xe9, 0xa9, + 0xfd, 0x1b, 0x27, 0x75, 0xf4, 0xed, 0x03, 0x26, 0x3d, 0xaa, 0xb8, 0x98, 0x5e, 0x11, 0x9f, 0xa3, + 0xef, 0xed, 0x3c, 0xbd, 0x1e, 0x20, 0x87, 0x1a, 0x01, 0xf9, 0x25, 0xd4, 0x62, 0x26, 0x9f, 0x71, + 0x31, 0x55, 0x3e, 0xda, 0xd6, 0x41, 0x3f, 0x67, 0xf2, 0x01, 0x0f, 0x18, 0xcd, 0x64, 0xe4, 0x2e, + 0xd4, 0x53, 0xe6, 0xcf, 0x45, 0x28, 0x17, 0xca, 0x5f, 0xdb, 0x27, 
0x4d, 0x95, 0x65, 0x86, 0xa7, + 0xc0, 0x4b, 0x84, 0xfb, 0x17, 0x0b, 0xca, 0x68, 0x06, 0x21, 0x50, 0xf6, 0xc4, 0x44, 0x67, 0xb7, + 0x43, 0x15, 0x4d, 0x9a, 0x50, 0x62, 0xf1, 0x53, 0x65, 0x91, 0x43, 0x91, 0x44, 0x8e, 0xff, 0x2c, + 0x30, 0x31, 0x42, 0x12, 0xd7, 0xcd, 0x53, 0x26, 0x4c, 0x68, 0x14, 0x4d, 0x6e, 0x83, 0x93, 0x08, + 0x7e, 0xb5, 0x78, 0x82, 0xab, 0x2b, 0x85, 0xc4, 0x43, 0x66, 0x2f, 0x7e, 0x4a, 0xeb, 0x89, 0xa1, + 0xc8, 0x21, 0x00, 0xbb, 0x92, 0xc2, 0x3b, 0xe3, 0xa9, 0x4c, 0x5b, 0x55, 0x75, 0x76, 0x95, 0xef, + 0xc8, 0xe8, 0x5f, 0xd0, 0x82, 0xd4, 0xfd, 0x9b, 0x0d, 0x15, 0xe5, 0x12, 0x72, 0x80, 0x11, 0x48, + 0xe6, 0x3a, 0x98, 0xa5, 0x36, 0x31, 0x11, 0x00, 0x15, 0xeb, 0x65, 0x00, 0x30, 0xee, 0x3b, 0xe8, + 0x8d, 0x19, 0xf3, 0x25, 0x17, 0x26, 0xdd, 0x96, 0xdf, 0x68, 0x7a, 0x80, 0x19, 0xa1, 0x4f, 0xa3, + 0x68, 0x72, 0x07, 0xaa, 0x5c, 0x85, 0x51, 0x1d, 0xe8, 0x47, 0x82, 0x6b, 0x20, 0xa8, 0x5c, 0x30, + 0x2f, 0xe0, 0xf1, 0x6c, 0xa1, 0x8e, 0x59, 0xa7, 0xcb, 0x6f, 0x72, 0x07, 0x1c, 0x15, 0xb7, 0x87, + 0x8b, 0x84, 0xb5, 0xaa, 0x2a, 0x0e, 0x5b, 0xcb, 0x98, 0x22, 0x93, 0xe6, 0x72, 0xbc, 0xa8, 0xbe, + 0xe7, 0x5f, 0xb2, 0x41, 0x22, 0x5b, 0x37, 0x73, 0x7f, 0x75, 0x0c, 0x8f, 0x2e, 0xa5, 0xa8, 0x36, + 0x65, 0xbe, 0x60, 0x12, 0xa1, 0xef, 0x2a, 0xe8, 0x96, 0x09, 0xaf, 0x66, 0xd2, 0x5c, 0x4e, 0x5c, + 0xa8, 0x0e, 0x87, 0x67, 0x88, 0x7c, 0x2f, 0x2f, 0x24, 0x9a, 0x43, 0x8d, 0xc4, 0xed, 0x43, 0x3d, + 0xdb, 0x06, 0x6f, 0x65, 0xbf, 0x6b, 0xee, 0xab, 0xdd, 0xef, 0x92, 0x7b, 0x50, 0x4b, 0x2f, 0x3d, + 0x11, 0xc6, 0x13, 0xe5, 0xbb, 0xed, 0x93, 0x77, 0x96, 0x56, 0x0d, 0x35, 0x1f, 0x35, 0x65, 0x18, + 0x97, 0x83, 0xb3, 0x34, 0xe3, 0x15, 0x5d, 0x4d, 0x28, 0xcd, 0xc3, 0x40, 0xe9, 0xd9, 0xa2, 0x48, + 0x22, 0x67, 0x12, 0xea, 0x5c, 0xda, 0xa2, 0x48, 0x62, 0x40, 0x22, 0x1e, 0xe8, 0xb2, 0xb7, 0x45, + 0x15, 0x8d, 0x3e, 0xe6, 0x89, 0x0c, 0x79, 0xec, 0xcd, 0x32, 0x1f, 0x67, 0xdf, 0xee, 0x2c, 0x3b, + 0xdf, 0xff, 0x65, 0xb7, 0xdf, 0x59, 0x50, 0xcf, 0x6a, 0x35, 0x16, 0x9e, 0x30, 0x60, 0xb1, 0x0c, + 0xc7, 0x21, 0x13, 0x66, 0xe3, 0x02, 0x87, 0xdc, 0x83, 0x8a, 0x27, 0xa5, 0xc8, 0xae, 0xf3, 0x4f, + 0x8b, 0x85, 0xfe, 0xe8, 0x14, 0x25, 0xbd, 0x58, 0x8a, 0x05, 0xd5, 0xa8, 0x9d, 0xcf, 0x00, 0x72, + 0x26, 0xda, 0x3a, 0x65, 0x0b, 0xa3, 0x15, 0x49, 0x72, 0x13, 0x2a, 0x4f, 0xbd, 0xd9, 0x9c, 0x99, + 0x1c, 0xd6, 0x1f, 0x9f, 0xdb, 0x9f, 0x59, 0xee, 0x5f, 0x6d, 0xa8, 0x99, 0xc2, 0x4f, 0xee, 0x42, + 0x4d, 0x15, 0x7e, 0x63, 0xd1, 0xf5, 0x17, 0x23, 0x83, 0x90, 0xe3, 0x65, 0x47, 0x2b, 0xd8, 0x68, + 0x54, 0xe9, 0xce, 0x66, 0x6c, 0xcc, 0xfb, 0x5b, 0x29, 0x60, 0x63, 0xd3, 0xba, 0xb6, 0x11, 0xdd, + 0x65, 0xe3, 0x30, 0x0e, 0xd1, 0x3f, 0x14, 0x45, 0xe4, 0x6e, 0x76, 0xea, 0xb2, 0xd2, 0xf8, 0x5e, + 0x51, 0xe3, 0xab, 0x87, 0xee, 0x43, 0xa3, 0xb0, 0xcd, 0x35, 0xa7, 0xfe, 0xb0, 0x78, 0x6a, 0xb3, + 0xa5, 0x52, 0xa7, 0xfb, 0x6e, 0xee, 0x85, 0xff, 0xc2, 0x7f, 0x9f, 0x00, 0xe4, 0x2a, 0xdf, 0xbc, + 0xb0, 0xb8, 0xcf, 0x4b, 0x00, 0x83, 0x04, 0x4b, 0x67, 0xe0, 0xa9, 0xfa, 0xbd, 0x19, 0x4e, 0x62, + 0x2e, 0xd8, 0x13, 0x75, 0x55, 0xd5, 0xfa, 0x3a, 0x6d, 0x68, 0x9e, 0xba, 0x31, 0xe4, 0x14, 0x1a, + 0x01, 0x4b, 0x7d, 0x11, 0xaa, 0x84, 0x32, 0x4e, 0xdf, 0xc3, 0x33, 0xe5, 0x7a, 0x8e, 0xba, 0x39, + 0x42, 0xfb, 0xaa, 0xb8, 0x86, 0x9c, 0xc0, 0x26, 0xbb, 0x4a, 0xb8, 0x90, 0x66, 0x17, 0x3d, 0x1f, + 0xdc, 0xd0, 0x93, 0x06, 0xf2, 0xd5, 0x4e, 0xb4, 0xc1, 0xf2, 0x0f, 0xe2, 0x41, 0xd9, 0xf7, 0x12, + 0xdd, 0x1c, 0x1b, 0x27, 0xad, 0xb5, 0xfd, 0x3a, 0x5e, 0xa2, 0x9d, 0xd6, 0xfe, 0x18, 0xcf, 0xfa, + 0xfc, 0x5f, 0x7b, 0x77, 0x0a, 0x1d, 0x31, 0xe2, 0xa3, 0xc5, 0xb1, 0xca, 0x97, 0x69, 0x28, 
0x8f, + 0xe7, 0x32, 0x9c, 0x1d, 0x7b, 0x49, 0x88, 0xea, 0x70, 0x61, 0xbf, 0x4b, 0x95, 0xea, 0x9d, 0x5f, + 0x41, 0x73, 0xdd, 0xee, 0xb7, 0x89, 0xc1, 0xce, 0xa7, 0xe0, 0x2c, 0xed, 0x78, 0xdd, 0xc2, 0x7a, + 0x31, 0x78, 0x1f, 0x40, 0xa3, 0x70, 0x6e, 0x04, 0x3e, 0x56, 0x40, 0xed, 0x7d, 0xfd, 0xe1, 0x3e, + 0xc7, 0xe1, 0x24, 0xeb, 0x37, 0xbf, 0x00, 0xb8, 0x94, 0x32, 0x79, 0xa2, 0x1a, 0x90, 0xd9, 0xc4, + 0x41, 0x8e, 0x42, 0x90, 0x3d, 0x68, 0xe0, 0x47, 0x6a, 0xe4, 0xda, 0x52, 0xb5, 0x22, 0xd5, 0x80, + 0x9f, 0x83, 0x33, 0x5e, 0x2e, 0xd7, 0x8d, 0xa3, 0x3e, 0xce, 0x56, 0xff, 0x0c, 0xea, 0x31, 0x37, + 0x32, 0xdd, 0x0f, 0x6b, 0x31, 0x57, 0x22, 0xf7, 0x0e, 0xfc, 0xe4, 0x95, 0x49, 0x8a, 0xbc, 0x07, + 0xd5, 0x71, 0x38, 0x93, 0xea, 0xba, 0x62, 0x8b, 0x35, 0x5f, 0xee, 0x3f, 0x2c, 0x80, 0xfc, 0x6a, + 0xa1, 0x47, 0xf0, 0xde, 0x21, 0x66, 0x53, 0xdf, 0xb3, 0x19, 0xd4, 0x23, 0x13, 0x41, 0x93, 0x47, + 0xb7, 0x56, 0xaf, 0xe3, 0x51, 0x16, 0x60, 0x1d, 0xdb, 0x13, 0x13, 0xdb, 0xb7, 0x99, 0x76, 0x96, + 0x3b, 0xec, 0x7c, 0x09, 0x5b, 0x2b, 0xea, 0xde, 0xf0, 0xa6, 0xe6, 0x59, 0x56, 0x0c, 0xd9, 0x5d, + 0xa8, 0xea, 0xd6, 0x8e, 0xf5, 0x17, 0x29, 0xa3, 0x46, 0xd1, 0xaa, 0x8e, 0x5f, 0x64, 0x73, 0x61, + 0xff, 0xc2, 0x3d, 0x81, 0xaa, 0x1e, 0x7c, 0xc9, 0x01, 0xd4, 0x3c, 0x1f, 0x8f, 0x96, 0x95, 0xab, + 0xed, 0x6c, 0x2a, 0x3e, 0x55, 0x6c, 0x9a, 0x89, 0xdd, 0xbf, 0xdb, 0x00, 0x39, 0xff, 0x2d, 0x66, + 0x85, 0xcf, 0x61, 0x3b, 0x65, 0x3e, 0x8f, 0x03, 0x4f, 0x2c, 0x94, 0xd4, 0x0c, 0x78, 0xd7, 0x2d, + 0x59, 0x43, 0x16, 0xe6, 0x86, 0xd2, 0xeb, 0xe7, 0x86, 0x03, 0x28, 0xfb, 0x3c, 0x59, 0x98, 0xeb, + 0x4b, 0x56, 0x0f, 0xd2, 0xe1, 0xc9, 0x02, 0xc7, 0x7c, 0x44, 0x90, 0x23, 0xa8, 0x46, 0x53, 0xf5, + 0x14, 0xd0, 0x63, 0xd4, 0xcd, 0x55, 0xec, 0x83, 0x29, 0xd2, 0xf8, 0x70, 0xd0, 0x28, 0x72, 0x07, + 0x2a, 0xd1, 0x34, 0x08, 0x85, 0x9a, 0x38, 0x1a, 0xba, 0x5f, 0x17, 0xe1, 0xdd, 0x50, 0xe0, 0xf3, + 0x40, 0x61, 0x88, 0x0b, 0xb6, 0x88, 0x5a, 0x35, 0x85, 0x6c, 0xae, 0x79, 0x33, 0x3a, 0xdb, 0xa0, + 0xb6, 0x88, 0xda, 0x75, 0xa8, 0x6a, 0xbf, 0xba, 0x7f, 0x2e, 0xc1, 0xf6, 0xaa, 0x95, 0x98, 0x07, + 0xa9, 0xf0, 0xb3, 0x3c, 0x48, 0x85, 0xbf, 0x1c, 0xa9, 0xec, 0xc2, 0x48, 0xe5, 0x42, 0x85, 0x3f, + 0x8b, 0x99, 0x28, 0xbe, 0x79, 0x3a, 0x97, 0xfc, 0x59, 0x8c, 0xc3, 0x83, 0x16, 0xad, 0xf4, 0xe2, + 0x8a, 0xe9, 0xc5, 0x1f, 0xc2, 0xd6, 0x98, 0xcf, 0x66, 0xfc, 0xd9, 0x70, 0x11, 0xcd, 0xc2, 0x78, + 0x6a, 0x1a, 0xf2, 0x2a, 0x93, 0x1c, 0xc0, 0x8d, 0x20, 0x14, 0x68, 0x4e, 0x87, 0xc7, 0x92, 0xc5, + 0x6a, 0x8a, 0x44, 0xdc, 0x3a, 0x9b, 0x7c, 0x01, 0xfb, 0x9e, 0x94, 0x2c, 0x4a, 0xe4, 0xa3, 0x38, + 0xf1, 0xfc, 0x69, 0x97, 0xfb, 0xea, 0x3e, 0x46, 0x89, 0x27, 0xc3, 0x51, 0x38, 0xc3, 0x81, 0xb9, + 0xa6, 0x96, 0xbe, 0x16, 0x47, 0x3e, 0x82, 0x6d, 0x5f, 0x30, 0x4f, 0xb2, 0x2e, 0x4b, 0xe5, 0x85, + 0x27, 0x2f, 0x5b, 0x75, 0xb5, 0x72, 0x8d, 0x8b, 0x67, 0xf0, 0xd0, 0xda, 0xaf, 0xc3, 0x59, 0xe0, + 0x7b, 0x22, 0x68, 0x39, 0xfa, 0x0c, 0x2b, 0x4c, 0x72, 0x04, 0x44, 0x31, 0x7a, 0x51, 0x22, 0x17, + 0x4b, 0x28, 0x28, 0xe8, 0x35, 0x12, 0x7c, 0x13, 0xc9, 0x30, 0x62, 0xa9, 0xf4, 0xa2, 0x44, 0xbd, + 0xd5, 0x4a, 0x34, 0x67, 0xb8, 0xdf, 0x58, 0xd0, 0x5c, 0x4f, 0x11, 0x74, 0x70, 0x82, 0x66, 0x9a, + 0xcb, 0x86, 0xf4, 0xd2, 0xe9, 0x76, 0xc1, 0xe9, 0x18, 0x40, 0xac, 0x2a, 0x18, 0xab, 0x4d, 0xaa, + 0xe8, 0x3c, 0x80, 0xe5, 0x1f, 0x0f, 0xe0, 0x8a, 0x49, 0x95, 0x75, 0x93, 0xfe, 0x60, 0xc1, 0x8d, + 0xb5, 0x34, 0x7c, 0x63, 0x8b, 0xf6, 0xa1, 0x11, 0x79, 0x53, 0x76, 0xe1, 0x09, 0x15, 0xdc, 0x92, + 0x6e, 0xac, 0x05, 0xd6, 0xff, 0xc0, 0xbe, 0x18, 0x36, 0x8b, 0xb9, 0x7f, 0xad, 0x6d, 0x59, 0x28, + 0xcf, 0xb9, 0xbc, 
0xcf, 0xe7, 0x71, 0x60, 0xba, 0xd1, 0x2a, 0xf3, 0xd5, 0x80, 0x97, 0xae, 0x09, + 0xb8, 0x7b, 0x0e, 0xf5, 0xcc, 0x40, 0xb2, 0x67, 0x1e, 0x50, 0x56, 0xfe, 0x90, 0x7f, 0x94, 0x32, + 0x81, 0xb6, 0xeb, 0xd7, 0xd4, 0xfb, 0x50, 0x99, 0x08, 0x3e, 0x4f, 0x4c, 0x6d, 0x5d, 0x41, 0x68, + 0x89, 0x3b, 0x84, 0x9a, 0xe1, 0x90, 0x43, 0xa8, 0x8e, 0x16, 0xe7, 0x5e, 0xc4, 0x8c, 0x42, 0x75, + 0xb1, 0xf1, 0x3b, 0x30, 0x08, 0xac, 0x16, 0x1a, 0x41, 0x6e, 0x42, 0x79, 0xb4, 0xe8, 0x77, 0xf5, + 0x98, 0x8c, 0x35, 0x07, 0xbf, 0xda, 0x55, 0x6d, 0x90, 0xfb, 0x15, 0x6c, 0x16, 0xd7, 0xa1, 0x53, + 0xe2, 0x4c, 0xaf, 0x43, 0x15, 0x9d, 0x17, 0x57, 0xfb, 0x35, 0xc5, 0xf5, 0xf0, 0x00, 0x6a, 0xe6, + 0xa9, 0x4a, 0x1c, 0xa8, 0x3c, 0x3a, 0x1f, 0xf6, 0x1e, 0x36, 0x37, 0x48, 0x1d, 0xca, 0x67, 0x83, + 0xe1, 0xc3, 0xa6, 0x85, 0xd4, 0xf9, 0xe0, 0xbc, 0xd7, 0xb4, 0x0f, 0x6f, 0xc3, 0x66, 0xf1, 0xb1, + 0x4a, 0x1a, 0x50, 0x1b, 0x9e, 0x9e, 0x77, 0xdb, 0x83, 0xdf, 0x34, 0x37, 0xc8, 0x26, 0xd4, 0xfb, + 0xe7, 0xc3, 0x5e, 0xe7, 0x11, 0xed, 0x35, 0xad, 0xc3, 0x5f, 0x83, 0xb3, 0x7c, 0x4f, 0xa1, 0x86, + 0x76, 0xff, 0xbc, 0xdb, 0xdc, 0x20, 0x00, 0xd5, 0x61, 0xaf, 0x43, 0x7b, 0xa8, 0xb7, 0x06, 0xa5, + 0xe1, 0xf0, 0xac, 0x69, 0xe3, 0xae, 0x9d, 0xd3, 0xce, 0x59, 0xaf, 0x59, 0x42, 0xf2, 0xe1, 0x83, + 0x8b, 0xfb, 0xc3, 0x66, 0xf9, 0xf0, 0x13, 0xb8, 0xb1, 0xf6, 0x9e, 0x51, 0xab, 0xcf, 0x4e, 0x69, + 0x0f, 0x35, 0x35, 0xa0, 0x76, 0x41, 0xfb, 0x8f, 0x4f, 0x1f, 0xf6, 0x9a, 0x16, 0x0a, 0xbe, 0x1a, + 0x74, 0xbe, 0xec, 0x75, 0x9b, 0x76, 0xfb, 0xd6, 0xb7, 0x2f, 0x76, 0xad, 0xef, 0x5e, 0xec, 0x5a, + 0xdf, 0xbf, 0xd8, 0xb5, 0xfe, 0xfd, 0x62, 0xd7, 0xfa, 0xe6, 0xe5, 0xee, 0xc6, 0x77, 0x2f, 0x77, + 0x37, 0xbe, 0x7f, 0xb9, 0xbb, 0x31, 0xaa, 0xaa, 0xbf, 0x8e, 0x3e, 0xfe, 0x4f, 0x00, 0x00, 0x00, + 0xff, 0xff, 0x87, 0x95, 0x80, 0x20, 0x7a, 0x12, 0x00, 0x00, +} + +func (m *Op) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Op) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Op) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Constraints != nil { + { + size, err := m.Constraints.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintOps(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } + if m.Platform != nil { + { + size, err := m.Platform.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintOps(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + if m.Op != nil { + { + size := m.Op.Size() + i -= size + if _, err := m.Op.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + if len(m.Inputs) > 0 { + for iNdEx := len(m.Inputs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Inputs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintOps(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Op_Exec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Op_Exec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Exec != nil { + { + size, err := m.Exec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintOps(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + 
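+	// 0x12 above is the tag byte for this oneof case: (field 2 << 3) | wire type 2
+	// (length-delimited), written after the payload because encoding runs back to front.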
return len(dAtA) - i, nil +} +func (m *Op_Source) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Op_Source) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Source != nil { + { + size, err := m.Source.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintOps(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *Op_File) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Op_File) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.File != nil { + { + size, err := m.File.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintOps(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *Op_Build) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Op_Build) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Build != nil { + { + size, err := m.Build.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintOps(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *Platform) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Platform) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Platform) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.OSFeatures) > 0 { + for iNdEx := len(m.OSFeatures) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.OSFeatures[iNdEx]) + copy(dAtA[i:], m.OSFeatures[iNdEx]) + i = encodeVarintOps(dAtA, i, uint64(len(m.OSFeatures[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + if len(m.OSVersion) > 0 { + i -= len(m.OSVersion) + copy(dAtA[i:], m.OSVersion) + i = encodeVarintOps(dAtA, i, uint64(len(m.OSVersion))) + i-- + dAtA[i] = 0x22 + } + if len(m.Variant) > 0 { + i -= len(m.Variant) + copy(dAtA[i:], m.Variant) + i = encodeVarintOps(dAtA, i, uint64(len(m.Variant))) + i-- + dAtA[i] = 0x1a + } + if len(m.OS) > 0 { + i -= len(m.OS) + copy(dAtA[i:], m.OS) + i = encodeVarintOps(dAtA, i, uint64(len(m.OS))) + i-- + dAtA[i] = 0x12 + } + if len(m.Architecture) > 0 { + i -= len(m.Architecture) + copy(dAtA[i:], m.Architecture) + i = encodeVarintOps(dAtA, i, uint64(len(m.Architecture))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Input) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Input) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Input) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Index != 0 { + i = encodeVarintOps(dAtA, i, uint64(m.Index)) + i-- + dAtA[i] = 0x10 + } + if len(m.Digest) > 0 { + i -= len(m.Digest) + copy(dAtA[i:], m.Digest) + i = encodeVarintOps(dAtA, i, uint64(len(m.Digest))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ExecOp) Marshal() (dAtA []byte, err error) { 
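+	// Standard gogoproto marshal layout, used by every message in this file:
+	// allocate exactly Size() bytes, then let MarshalToSizedBuffer fill the
+	// buffer from the end toward the start.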
+ size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExecOp) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExecOp) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Security != 0 { + i = encodeVarintOps(dAtA, i, uint64(m.Security)) + i-- + dAtA[i] = 0x20 + } + if m.Network != 0 { + i = encodeVarintOps(dAtA, i, uint64(m.Network)) + i-- + dAtA[i] = 0x18 + } + if len(m.Mounts) > 0 { + for iNdEx := len(m.Mounts) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Mounts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintOps(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if m.Meta != nil { + { + size, err := m.Meta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintOps(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Meta) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Meta) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Meta) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ExtraHosts) > 0 { + for iNdEx := len(m.ExtraHosts) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ExtraHosts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintOps(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + if m.ProxyEnv != nil { + { + size, err := m.ProxyEnv.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintOps(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if len(m.User) > 0 { + i -= len(m.User) + copy(dAtA[i:], m.User) + i = encodeVarintOps(dAtA, i, uint64(len(m.User))) + i-- + dAtA[i] = 0x22 + } + if len(m.Cwd) > 0 { + i -= len(m.Cwd) + copy(dAtA[i:], m.Cwd) + i = encodeVarintOps(dAtA, i, uint64(len(m.Cwd))) + i-- + dAtA[i] = 0x1a + } + if len(m.Env) > 0 { + for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Env[iNdEx]) + copy(dAtA[i:], m.Env[iNdEx]) + i = encodeVarintOps(dAtA, i, uint64(len(m.Env[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Args) > 0 { + for iNdEx := len(m.Args) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Args[iNdEx]) + copy(dAtA[i:], m.Args[iNdEx]) + i = encodeVarintOps(dAtA, i, uint64(len(m.Args[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Mount) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Mount) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Mount) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.SSHOpt != nil { + { + size, err := m.SSHOpt.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintOps(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 + } + if m.SecretOpt != nil { + { + size, err := 
m.SecretOpt.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintOps(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa + } + if m.CacheOpt != nil { + { + size, err := m.CacheOpt.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintOps(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 + } + if m.MountType != 0 { + i = encodeVarintOps(dAtA, i, uint64(m.MountType)) + i-- + dAtA[i] = 0x30 + } + if m.Readonly { + i-- + if m.Readonly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if m.Output != 0 { + i = encodeVarintOps(dAtA, i, uint64(m.Output)) + i-- + dAtA[i] = 0x20 + } + if len(m.Dest) > 0 { + i -= len(m.Dest) + copy(dAtA[i:], m.Dest) + i = encodeVarintOps(dAtA, i, uint64(len(m.Dest))) + i-- + dAtA[i] = 0x1a + } + if len(m.Selector) > 0 { + i -= len(m.Selector) + copy(dAtA[i:], m.Selector) + i = encodeVarintOps(dAtA, i, uint64(len(m.Selector))) + i-- + dAtA[i] = 0x12 + } + if m.Input != 0 { + i = encodeVarintOps(dAtA, i, uint64(m.Input)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *CacheOpt) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CacheOpt) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CacheOpt) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Sharing != 0 { + i = encodeVarintOps(dAtA, i, uint64(m.Sharing)) + i-- + dAtA[i] = 0x10 + } + if len(m.ID) > 0 { + i -= len(m.ID) + copy(dAtA[i:], m.ID) + i = encodeVarintOps(dAtA, i, uint64(len(m.ID))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SecretOpt) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SecretOpt) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SecretOpt) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Optional { + i-- + if m.Optional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if m.Mode != 0 { + i = encodeVarintOps(dAtA, i, uint64(m.Mode)) + i-- + dAtA[i] = 0x20 + } + if m.Gid != 0 { + i = encodeVarintOps(dAtA, i, uint64(m.Gid)) + i-- + dAtA[i] = 0x18 + } + if m.Uid != 0 { + i = encodeVarintOps(dAtA, i, uint64(m.Uid)) + i-- + dAtA[i] = 0x10 + } + if len(m.ID) > 0 { + i -= len(m.ID) + copy(dAtA[i:], m.ID) + i = encodeVarintOps(dAtA, i, uint64(len(m.ID))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SSHOpt) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SSHOpt) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SSHOpt) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Optional { + i-- + if m.Optional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if m.Mode != 0 { + i = encodeVarintOps(dAtA, i, 
uint64(m.Mode)) + i-- + dAtA[i] = 0x20 + } + if m.Gid != 0 { + i = encodeVarintOps(dAtA, i, uint64(m.Gid)) + i-- + dAtA[i] = 0x18 + } + if m.Uid != 0 { + i = encodeVarintOps(dAtA, i, uint64(m.Uid)) + i-- + dAtA[i] = 0x10 + } + if len(m.ID) > 0 { + i -= len(m.ID) + copy(dAtA[i:], m.ID) + i = encodeVarintOps(dAtA, i, uint64(len(m.ID))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SourceOp) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SourceOp) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SourceOp) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Attrs) > 0 { + keysForAttrs := make([]string, 0, len(m.Attrs)) + for k := range m.Attrs { + keysForAttrs = append(keysForAttrs, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAttrs) + for iNdEx := len(keysForAttrs) - 1; iNdEx >= 0; iNdEx-- { + v := m.Attrs[string(keysForAttrs[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintOps(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForAttrs[iNdEx]) + copy(dAtA[i:], keysForAttrs[iNdEx]) + i = encodeVarintOps(dAtA, i, uint64(len(keysForAttrs[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintOps(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Identifier) > 0 { + i -= len(m.Identifier) + copy(dAtA[i:], m.Identifier) + i = encodeVarintOps(dAtA, i, uint64(len(m.Identifier))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *BuildOp) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BuildOp) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BuildOp) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Attrs) > 0 { + keysForAttrs := make([]string, 0, len(m.Attrs)) + for k := range m.Attrs { + keysForAttrs = append(keysForAttrs, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAttrs) + for iNdEx := len(keysForAttrs) - 1; iNdEx >= 0; iNdEx-- { + v := m.Attrs[string(keysForAttrs[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintOps(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForAttrs[iNdEx]) + copy(dAtA[i:], keysForAttrs[iNdEx]) + i = encodeVarintOps(dAtA, i, uint64(len(keysForAttrs[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintOps(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x22 + } + } + if m.Def != nil { + { + size, err := m.Def.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintOps(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.Inputs) > 0 { + keysForInputs := make([]string, 0, len(m.Inputs)) + for k := range m.Inputs { + keysForInputs = append(keysForInputs, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForInputs) + for iNdEx := len(keysForInputs) - 1; iNdEx >= 0; iNdEx-- { + v := m.Inputs[string(keysForInputs[iNdEx])] + baseI := i + if v != nil { + { + size, err := v.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintOps(dAtA, i, 
uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(keysForInputs[iNdEx]) + copy(dAtA[i:], keysForInputs[iNdEx]) + i = encodeVarintOps(dAtA, i, uint64(len(keysForInputs[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintOps(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if m.Builder != 0 { + i = encodeVarintOps(dAtA, i, uint64(m.Builder)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *BuildInput) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BuildInput) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BuildInput) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Input != 0 { + i = encodeVarintOps(dAtA, i, uint64(m.Input)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *OpMetadata) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OpMetadata) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *OpMetadata) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Caps) > 0 { + keysForCaps := make([]string, 0, len(m.Caps)) + for k := range m.Caps { + keysForCaps = append(keysForCaps, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCaps) + for iNdEx := len(keysForCaps) - 1; iNdEx >= 0; iNdEx-- { + v := m.Caps[github_com_moby_buildkit_util_apicaps.CapID(keysForCaps[iNdEx])] + baseI := i + i-- + if v { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + i -= len(keysForCaps[iNdEx]) + copy(dAtA[i:], keysForCaps[iNdEx]) + i = encodeVarintOps(dAtA, i, uint64(len(keysForCaps[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintOps(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a + } + } + if m.ExportCache != nil { + { + size, err := m.ExportCache.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintOps(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if len(m.Description) > 0 { + keysForDescription := make([]string, 0, len(m.Description)) + for k := range m.Description { + keysForDescription = append(keysForDescription, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForDescription) + for iNdEx := len(keysForDescription) - 1; iNdEx >= 0; iNdEx-- { + v := m.Description[string(keysForDescription[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintOps(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForDescription[iNdEx]) + copy(dAtA[i:], keysForDescription[iNdEx]) + i = encodeVarintOps(dAtA, i, uint64(len(keysForDescription[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintOps(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if m.IgnoreCache { + i-- + if m.IgnoreCache { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ExportCache) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExportCache) MarshalTo(dAtA []byte) (int, error) { + size := 
m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExportCache) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Value { + i-- + if m.Value { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ProxyEnv) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProxyEnv) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ProxyEnv) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.NoProxy) > 0 { + i -= len(m.NoProxy) + copy(dAtA[i:], m.NoProxy) + i = encodeVarintOps(dAtA, i, uint64(len(m.NoProxy))) + i-- + dAtA[i] = 0x22 + } + if len(m.FtpProxy) > 0 { + i -= len(m.FtpProxy) + copy(dAtA[i:], m.FtpProxy) + i = encodeVarintOps(dAtA, i, uint64(len(m.FtpProxy))) + i-- + dAtA[i] = 0x1a + } + if len(m.HttpsProxy) > 0 { + i -= len(m.HttpsProxy) + copy(dAtA[i:], m.HttpsProxy) + i = encodeVarintOps(dAtA, i, uint64(len(m.HttpsProxy))) + i-- + dAtA[i] = 0x12 + } + if len(m.HttpProxy) > 0 { + i -= len(m.HttpProxy) + copy(dAtA[i:], m.HttpProxy) + i = encodeVarintOps(dAtA, i, uint64(len(m.HttpProxy))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *WorkerConstraints) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkerConstraints) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WorkerConstraints) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Filter) > 0 { + for iNdEx := len(m.Filter) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Filter[iNdEx]) + copy(dAtA[i:], m.Filter[iNdEx]) + i = encodeVarintOps(dAtA, i, uint64(len(m.Filter[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Definition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Definition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Definition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Metadata) > 0 { + keysForMetadata := make([]string, 0, len(m.Metadata)) + for k := range m.Metadata { + keysForMetadata = append(keysForMetadata, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) + for iNdEx := len(keysForMetadata) - 1; iNdEx >= 0; iNdEx-- { + v := m.Metadata[github_com_opencontainers_go_digest.Digest(keysForMetadata[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintOps(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForMetadata[iNdEx]) + copy(dAtA[i:], keysForMetadata[iNdEx]) + i = encodeVarintOps(dAtA, i, uint64(len(keysForMetadata[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintOps(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Def) > 0 { + for iNdEx := len(m.Def) - 1; 
iNdEx >= 0; iNdEx-- { + i -= len(m.Def[iNdEx]) + copy(dAtA[i:], m.Def[iNdEx]) + i = encodeVarintOps(dAtA, i, uint64(len(m.Def[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *HostIP) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HostIP) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HostIP) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.IP) > 0 { + i -= len(m.IP) + copy(dAtA[i:], m.IP) + i = encodeVarintOps(dAtA, i, uint64(len(m.IP))) + i-- + dAtA[i] = 0x12 + } + if len(m.Host) > 0 { + i -= len(m.Host) + copy(dAtA[i:], m.Host) + i = encodeVarintOps(dAtA, i, uint64(len(m.Host))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *FileOp) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FileOp) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FileOp) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Actions) > 0 { + for iNdEx := len(m.Actions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Actions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintOps(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + return len(dAtA) - i, nil +} + +func (m *FileAction) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FileAction) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FileAction) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Action != nil { + { + size := m.Action.Size() + i -= size + if _, err := m.Action.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + if m.Output != 0 { + i = encodeVarintOps(dAtA, i, uint64(m.Output)) + i-- + dAtA[i] = 0x18 + } + if m.SecondaryInput != 0 { + i = encodeVarintOps(dAtA, i, uint64(m.SecondaryInput)) + i-- + dAtA[i] = 0x10 + } + if m.Input != 0 { + i = encodeVarintOps(dAtA, i, uint64(m.Input)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *FileAction_Copy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FileAction_Copy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Copy != nil { + { + size, err := m.Copy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintOps(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *FileAction_Mkfile) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FileAction_Mkfile) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Mkfile != nil { + { + size, err := m.Mkfile.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintOps(dAtA, i, 
uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *FileAction_Mkdir) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FileAction_Mkdir) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Mkdir != nil { + { + size, err := m.Mkdir.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintOps(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + return len(dAtA) - i, nil +} +func (m *FileAction_Rm) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FileAction_Rm) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Rm != nil { + { + size, err := m.Rm.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintOps(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + return len(dAtA) - i, nil +} +func (m *FileActionCopy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FileActionCopy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FileActionCopy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Timestamp != 0 { + i = encodeVarintOps(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x58 + } + if m.AllowEmptyWildcard { + i-- + if m.AllowEmptyWildcard { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x50 + } + if m.AllowWildcard { + i-- + if m.AllowWildcard { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x48 + } + if m.CreateDestPath { + i-- + if m.CreateDestPath { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x40 + } + if m.AttemptUnpackDockerCompatibility { + i-- + if m.AttemptUnpackDockerCompatibility { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + } + if m.DirCopyContents { + i-- + if m.DirCopyContents { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if m.FollowSymlink { + i-- + if m.FollowSymlink { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if m.Mode != 0 { + i = encodeVarintOps(dAtA, i, uint64(m.Mode)) + i-- + dAtA[i] = 0x20 + } + if m.Owner != nil { + { + size, err := m.Owner.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintOps(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.Dest) > 0 { + i -= len(m.Dest) + copy(dAtA[i:], m.Dest) + i = encodeVarintOps(dAtA, i, uint64(len(m.Dest))) + i-- + dAtA[i] = 0x12 + } + if len(m.Src) > 0 { + i -= len(m.Src) + copy(dAtA[i:], m.Src) + i = encodeVarintOps(dAtA, i, uint64(len(m.Src))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *FileActionMkFile) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FileActionMkFile) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FileActionMkFile) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Timestamp != 0 { + i = encodeVarintOps(dAtA, i, uint64(m.Timestamp)) + i-- + 
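+	// 0x28 below is the tag byte for Timestamp: (field 5 << 3) | wire type 0 (varint).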
dAtA[i] = 0x28 + } + if m.Owner != nil { + { + size, err := m.Owner.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintOps(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if len(m.Data) > 0 { + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintOps(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0x1a + } + if m.Mode != 0 { + i = encodeVarintOps(dAtA, i, uint64(m.Mode)) + i-- + dAtA[i] = 0x10 + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintOps(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *FileActionMkDir) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FileActionMkDir) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FileActionMkDir) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Timestamp != 0 { + i = encodeVarintOps(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x28 + } + if m.Owner != nil { + { + size, err := m.Owner.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintOps(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.MakeParents { + i-- + if m.MakeParents { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.Mode != 0 { + i = encodeVarintOps(dAtA, i, uint64(m.Mode)) + i-- + dAtA[i] = 0x10 + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintOps(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *FileActionRm) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FileActionRm) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FileActionRm) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.AllowWildcard { + i-- + if m.AllowWildcard { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.AllowNotFound { + i-- + if m.AllowNotFound { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintOps(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ChownOpt) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ChownOpt) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ChownOpt) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Group != nil { + { + size, err := m.Group.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintOps(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.User != nil { + { + size, err := m.User.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintOps(dAtA, i, 
uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *UserOpt) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UserOpt) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UserOpt) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.User != nil { + { + size := m.User.Size() + i -= size + if _, err := m.User.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + return len(dAtA) - i, nil +} + +func (m *UserOpt_ByName) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UserOpt_ByName) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ByName != nil { + { + size, err := m.ByName.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintOps(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *UserOpt_ByID) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UserOpt_ByID) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i = encodeVarintOps(dAtA, i, uint64(m.ByID)) + i-- + dAtA[i] = 0x10 + return len(dAtA) - i, nil +} +func (m *NamedUserOpt) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NamedUserOpt) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NamedUserOpt) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Input != 0 { + i = encodeVarintOps(dAtA, i, uint64(m.Input)) + i-- + dAtA[i] = 0x10 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintOps(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintOps(dAtA []byte, offset int, v uint64) int { + offset -= sovOps(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Op) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Inputs) > 0 { + for _, e := range m.Inputs { + l = e.Size() + n += 1 + l + sovOps(uint64(l)) + } + } + if m.Op != nil { + n += m.Op.Size() + } + if m.Platform != nil { + l = m.Platform.Size() + n += 1 + l + sovOps(uint64(l)) + } + if m.Constraints != nil { + l = m.Constraints.Size() + n += 1 + l + sovOps(uint64(l)) + } + return n +} + +func (m *Op_Exec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Exec != nil { + l = m.Exec.Size() + n += 1 + l + sovOps(uint64(l)) + } + return n +} +func (m *Op_Source) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Source != nil { + l = m.Source.Size() + n += 1 + l + sovOps(uint64(l)) + } + return n +} +func (m *Op_File) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.File != nil { + l = m.File.Size() + n += 1 + l + sovOps(uint64(l)) + } + return n +} +func (m *Op_Build) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Build != nil { + 
l = m.Build.Size() + n += 1 + l + sovOps(uint64(l)) + } + return n +} +func (m *Platform) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Architecture) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + l = len(m.OS) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + l = len(m.Variant) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + l = len(m.OSVersion) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + if len(m.OSFeatures) > 0 { + for _, s := range m.OSFeatures { + l = len(s) + n += 1 + l + sovOps(uint64(l)) + } + } + return n +} + +func (m *Input) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Digest) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + if m.Index != 0 { + n += 1 + sovOps(uint64(m.Index)) + } + return n +} + +func (m *ExecOp) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Meta != nil { + l = m.Meta.Size() + n += 1 + l + sovOps(uint64(l)) + } + if len(m.Mounts) > 0 { + for _, e := range m.Mounts { + l = e.Size() + n += 1 + l + sovOps(uint64(l)) + } + } + if m.Network != 0 { + n += 1 + sovOps(uint64(m.Network)) + } + if m.Security != 0 { + n += 1 + sovOps(uint64(m.Security)) + } + return n +} + +func (m *Meta) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Args) > 0 { + for _, s := range m.Args { + l = len(s) + n += 1 + l + sovOps(uint64(l)) + } + } + if len(m.Env) > 0 { + for _, s := range m.Env { + l = len(s) + n += 1 + l + sovOps(uint64(l)) + } + } + l = len(m.Cwd) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + l = len(m.User) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + if m.ProxyEnv != nil { + l = m.ProxyEnv.Size() + n += 1 + l + sovOps(uint64(l)) + } + if len(m.ExtraHosts) > 0 { + for _, e := range m.ExtraHosts { + l = e.Size() + n += 1 + l + sovOps(uint64(l)) + } + } + return n +} + +func (m *Mount) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Input != 0 { + n += 1 + sovOps(uint64(m.Input)) + } + l = len(m.Selector) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + l = len(m.Dest) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + if m.Output != 0 { + n += 1 + sovOps(uint64(m.Output)) + } + if m.Readonly { + n += 2 + } + if m.MountType != 0 { + n += 1 + sovOps(uint64(m.MountType)) + } + if m.CacheOpt != nil { + l = m.CacheOpt.Size() + n += 2 + l + sovOps(uint64(l)) + } + if m.SecretOpt != nil { + l = m.SecretOpt.Size() + n += 2 + l + sovOps(uint64(l)) + } + if m.SSHOpt != nil { + l = m.SSHOpt.Size() + n += 2 + l + sovOps(uint64(l)) + } + return n +} + +func (m *CacheOpt) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + if m.Sharing != 0 { + n += 1 + sovOps(uint64(m.Sharing)) + } + return n +} + +func (m *SecretOpt) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + if m.Uid != 0 { + n += 1 + sovOps(uint64(m.Uid)) + } + if m.Gid != 0 { + n += 1 + sovOps(uint64(m.Gid)) + } + if m.Mode != 0 { + n += 1 + sovOps(uint64(m.Mode)) + } + if m.Optional { + n += 2 + } + return n +} + +func (m *SSHOpt) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + if m.Uid != 0 { + n += 1 + sovOps(uint64(m.Uid)) + } + if m.Gid != 0 { + n += 1 + sovOps(uint64(m.Gid)) + } + if m.Mode != 0 { + n += 1 + sovOps(uint64(m.Mode)) + } + if m.Optional { + n += 2 + } + return 
n +} + +func (m *SourceOp) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Identifier) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + if len(m.Attrs) > 0 { + for k, v := range m.Attrs { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovOps(uint64(len(k))) + 1 + len(v) + sovOps(uint64(len(v))) + n += mapEntrySize + 1 + sovOps(uint64(mapEntrySize)) + } + } + return n +} + +func (m *BuildOp) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Builder != 0 { + n += 1 + sovOps(uint64(m.Builder)) + } + if len(m.Inputs) > 0 { + for k, v := range m.Inputs { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovOps(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovOps(uint64(len(k))) + l + n += mapEntrySize + 1 + sovOps(uint64(mapEntrySize)) + } + } + if m.Def != nil { + l = m.Def.Size() + n += 1 + l + sovOps(uint64(l)) + } + if len(m.Attrs) > 0 { + for k, v := range m.Attrs { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovOps(uint64(len(k))) + 1 + len(v) + sovOps(uint64(len(v))) + n += mapEntrySize + 1 + sovOps(uint64(mapEntrySize)) + } + } + return n +} + +func (m *BuildInput) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Input != 0 { + n += 1 + sovOps(uint64(m.Input)) + } + return n +} + +func (m *OpMetadata) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.IgnoreCache { + n += 2 + } + if len(m.Description) > 0 { + for k, v := range m.Description { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovOps(uint64(len(k))) + 1 + len(v) + sovOps(uint64(len(v))) + n += mapEntrySize + 1 + sovOps(uint64(mapEntrySize)) + } + } + if m.ExportCache != nil { + l = m.ExportCache.Size() + n += 1 + l + sovOps(uint64(l)) + } + if len(m.Caps) > 0 { + for k, v := range m.Caps { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovOps(uint64(len(k))) + 1 + 1 + n += mapEntrySize + 1 + sovOps(uint64(mapEntrySize)) + } + } + return n +} + +func (m *ExportCache) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value { + n += 2 + } + return n +} + +func (m *ProxyEnv) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.HttpProxy) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + l = len(m.HttpsProxy) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + l = len(m.FtpProxy) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + l = len(m.NoProxy) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + return n +} + +func (m *WorkerConstraints) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Filter) > 0 { + for _, s := range m.Filter { + l = len(s) + n += 1 + l + sovOps(uint64(l)) + } + } + return n +} + +func (m *Definition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Def) > 0 { + for _, b := range m.Def { + l = len(b) + n += 1 + l + sovOps(uint64(l)) + } + } + if len(m.Metadata) > 0 { + for k, v := range m.Metadata { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovOps(uint64(len(k))) + 1 + l + sovOps(uint64(l)) + n += mapEntrySize + 1 + sovOps(uint64(mapEntrySize)) + } + } + return n +} + +func (m *HostIP) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Host) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + l = len(m.IP) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + return n +} + +func (m *FileOp) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Actions) > 0 { + for _, e := range m.Actions { + l = 
e.Size() + n += 1 + l + sovOps(uint64(l)) + } + } + return n +} + +func (m *FileAction) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Input != 0 { + n += 1 + sovOps(uint64(m.Input)) + } + if m.SecondaryInput != 0 { + n += 1 + sovOps(uint64(m.SecondaryInput)) + } + if m.Output != 0 { + n += 1 + sovOps(uint64(m.Output)) + } + if m.Action != nil { + n += m.Action.Size() + } + return n +} + +func (m *FileAction_Copy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Copy != nil { + l = m.Copy.Size() + n += 1 + l + sovOps(uint64(l)) + } + return n +} +func (m *FileAction_Mkfile) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Mkfile != nil { + l = m.Mkfile.Size() + n += 1 + l + sovOps(uint64(l)) + } + return n +} +func (m *FileAction_Mkdir) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Mkdir != nil { + l = m.Mkdir.Size() + n += 1 + l + sovOps(uint64(l)) + } + return n +} +func (m *FileAction_Rm) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Rm != nil { + l = m.Rm.Size() + n += 1 + l + sovOps(uint64(l)) + } + return n +} +func (m *FileActionCopy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Src) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + l = len(m.Dest) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + if m.Owner != nil { + l = m.Owner.Size() + n += 1 + l + sovOps(uint64(l)) + } + if m.Mode != 0 { + n += 1 + sovOps(uint64(m.Mode)) + } + if m.FollowSymlink { + n += 2 + } + if m.DirCopyContents { + n += 2 + } + if m.AttemptUnpackDockerCompatibility { + n += 2 + } + if m.CreateDestPath { + n += 2 + } + if m.AllowWildcard { + n += 2 + } + if m.AllowEmptyWildcard { + n += 2 + } + if m.Timestamp != 0 { + n += 1 + sovOps(uint64(m.Timestamp)) + } + return n +} + +func (m *FileActionMkFile) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + if m.Mode != 0 { + n += 1 + sovOps(uint64(m.Mode)) + } + l = len(m.Data) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + if m.Owner != nil { + l = m.Owner.Size() + n += 1 + l + sovOps(uint64(l)) + } + if m.Timestamp != 0 { + n += 1 + sovOps(uint64(m.Timestamp)) + } + return n +} + +func (m *FileActionMkDir) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + if m.Mode != 0 { + n += 1 + sovOps(uint64(m.Mode)) + } + if m.MakeParents { + n += 2 + } + if m.Owner != nil { + l = m.Owner.Size() + n += 1 + l + sovOps(uint64(l)) + } + if m.Timestamp != 0 { + n += 1 + sovOps(uint64(m.Timestamp)) + } + return n +} + +func (m *FileActionRm) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + if m.AllowNotFound { + n += 2 + } + if m.AllowWildcard { + n += 2 + } + return n +} + +func (m *ChownOpt) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.User != nil { + l = m.User.Size() + n += 1 + l + sovOps(uint64(l)) + } + if m.Group != nil { + l = m.Group.Size() + n += 1 + l + sovOps(uint64(l)) + } + return n +} + +func (m *UserOpt) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.User != nil { + n += m.User.Size() + } + return n +} + +func (m *UserOpt_ByName) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ByName != nil { + l = m.ByName.Size() + n += 1 + l + 
sovOps(uint64(l)) + } + return n +} +func (m *UserOpt_ByID) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovOps(uint64(m.ByID)) + return n +} +func (m *NamedUserOpt) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + if m.Input != 0 { + n += 1 + sovOps(uint64(m.Input)) + } + return n +} + +func sovOps(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozOps(x uint64) (n int) { + return sovOps(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Op) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Op: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Op: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Inputs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Inputs = append(m.Inputs, &Input{}) + if err := m.Inputs[len(m.Inputs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ExecOp{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Op = &Op_Exec{v} + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &SourceOp{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Op = &Op_Source{v} + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field File", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { 
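+ // This loop is the varint decoder that pairs with sovOps above: each wire
+ // byte carries 7 payload bits (least-significant group first) and uses the
+ // high bit as a continuation flag, so sovOps(x) == (math_bits.Len64(x|1)+6)/7
+ // is the encoded size in bytes (the |1 makes zero cost one byte). As an
+ // illustration, 300 encodes as {0xAC, 0x02} and sovOps(300) == 2. The
+ // shift >= 64 guard above rejects varints longer than ten bytes, and this
+ // branch reports truncated input.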
+ return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &FileOp{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Op = &Op_File{v} + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Build", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &BuildOp{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Op = &Op_Build{v} + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Platform", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Platform == nil { + m.Platform = &Platform{} + } + if err := m.Platform.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Constraints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Constraints == nil { + m.Constraints = &WorkerConstraints{} + } + if err := m.Constraints.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Platform) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Platform: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: 
Platform: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Architecture", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Architecture = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OS", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OS = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Variant", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Variant = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OSVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OSVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OSFeatures", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OSFeatures = append(m.OSFeatures, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + 
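+ // Unrecognized field numbers take this default path: skipOps derives the
+ // field's byte length from its wire type, so decoders built against an
+ // older ops.proto can step over fields added later rather than erroring.
+ // The skippy < 0 and bounds checks below guard against corrupt lengths.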
if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Input) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Input: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Input: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= OutputIndex(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExecOp) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExecOp: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExecOp: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthOps + } + 
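+ // Embedded messages travel as wire type 2: a varint byte count followed by
+ // that many payload bytes. postIndex marks the payload's end; the < 0 test
+ // above catches overflow of iNdEx + msglen, the > l test below catches
+ // truncation, and only then does decoding recurse into the sub-message's
+ // own Unmarshal.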
if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Meta == nil { + m.Meta = &Meta{} + } + if err := m.Meta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Mounts", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Mounts = append(m.Mounts, &Mount{}) + if err := m.Mounts[len(m.Mounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Network", wireType) + } + m.Network = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Network |= NetMode(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Security", wireType) + } + m.Security = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Security |= SecurityMode(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Meta) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Meta: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Meta: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Args", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Args = append(m.Args, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 
{ + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Env = append(m.Env, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cwd", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Cwd = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProxyEnv", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ProxyEnv == nil { + m.ProxyEnv = &ProxyEnv{} + } + if err := m.ProxyEnv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExtraHosts", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExtraHosts = append(m.ExtraHosts, &HostIP{}) + if err := m.ExtraHosts[len(m.ExtraHosts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + 
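+ // Repeated message fields such as ExtraHosts above decode one element per
+ // wire occurrence: append a fresh zero value to the slice, then unmarshal
+ // the length-delimited payload into the element that was just appended.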
} + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Mount) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Mount: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Mount: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Input", wireType) + } + m.Input = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Input |= InputIndex(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Selector = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Dest", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Dest = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Output", wireType) + } + m.Output = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Output |= OutputIndex(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Readonly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Readonly = bool(v != 0) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MountType", wireType) + } + m.MountType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MountType |= MountType(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + case 20: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CacheOpt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CacheOpt == nil { + m.CacheOpt = &CacheOpt{} + } + if err := m.CacheOpt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 21: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretOpt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretOpt == nil { + m.SecretOpt = &SecretOpt{} + } + if err := m.SecretOpt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 22: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SSHOpt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SSHOpt == nil { + m.SSHOpt = &SSHOpt{} + } + if err := m.SSHOpt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CacheOpt) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CacheOpt: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CacheOpt: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Sharing", wireType) + } + m.Sharing = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Sharing |= CacheSharingOpt(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SecretOpt) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SecretOpt: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SecretOpt: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Uid", wireType) + } + m.Uid = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Uid |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Gid", wireType) + } + m.Gid = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Gid |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) + } + m.Mode = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Mode |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong 
wireType = %d for field Optional", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Optional = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SSHOpt) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SSHOpt: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SSHOpt: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Uid", wireType) + } + m.Uid = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Uid |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Gid", wireType) + } + m.Gid = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Gid |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) + } + m.Mode = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Mode |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Optional = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + 
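+ // Bool fields such as Optional above ride the wire as varints: the decoder
+ // consumes a full varint and treats any nonzero value as true, which is
+ // why the Size() methods earlier charge a flat 2 bytes (tag + value) per
+ // set boolean.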
return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SourceOp) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SourceOp: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SourceOp: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Identifier", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Identifier = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attrs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Attrs == nil { + m.Attrs = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthOps + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthOps + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) 
<< shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthOps + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthOps + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Attrs[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildOp) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildOp: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildOp: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Builder", wireType) + } + m.Builder = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Builder |= InputIndex(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Inputs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Inputs == nil { + m.Inputs = make(map[string]*BuildInput) + } + var mapkey string + var mapvalue *BuildInput + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthOps + } 
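+ // Map fields (BuildOp.Inputs here) are serialized as repeated map-entry
+ // messages: inside each entry, field 1 holds the key and field 2 the
+ // value. The enclosing loop reads entries until postIndex and then stores
+ // m.Inputs[mapkey] = mapvalue; the mapEntrySize arithmetic in Size()
+ // mirrors the same layout (entry tag + length + tagged key + tagged value).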
+ postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthOps + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthOps + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthOps + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &BuildInput{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Inputs[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Def", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Def == nil { + m.Def = &Definition{} + } + if err := m.Def.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attrs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Attrs == nil { + m.Attrs = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthOps + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthOps + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = 
postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthOps + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthOps + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Attrs[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildInput) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildInput: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildInput: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Input", wireType) + } + m.Input = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Input |= InputIndex(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OpMetadata) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OpMetadata: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OpMetadata: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = 
%d for field IgnoreCache", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IgnoreCache = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Description == nil { + m.Description = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthOps + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthOps + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthOps + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthOps + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Description[mapkey] = mapvalue + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExportCache", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if m.ExportCache == nil { + m.ExportCache = &ExportCache{} + } + if err := m.ExportCache.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Caps", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Caps == nil { + m.Caps = make(map[github_com_moby_buildkit_util_apicaps.CapID]bool) + } + var mapkey github_com_moby_buildkit_util_apicaps.CapID + var mapvalue bool + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthOps + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthOps + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = github_com_moby_buildkit_util_apicaps.CapID(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapvaluetemp int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapvaluetemp |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + mapvalue = bool(mapvaluetemp != 0) + } else { + iNdEx = entryPreIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Caps[github_com_moby_buildkit_util_apicaps.CapID(mapkey)] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExportCache) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExportCache: wiretype end group for non-group") + } + 
if fieldNum <= 0 { + return fmt.Errorf("proto: ExportCache: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Value = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ProxyEnv) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProxyEnv: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProxyEnv: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HttpProxy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.HttpProxy = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HttpsProxy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.HttpsProxy = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FtpProxy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FtpProxy = 
string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NoProxy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NoProxy = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WorkerConstraints) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WorkerConstraints: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WorkerConstraints: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Filter = append(m.Filter, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Definition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Definition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Definition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Def", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Def = append(m.Def, make([]byte, postIndex-iNdEx)) + copy(m.Def[len(m.Def)-1], dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Metadata == nil { + m.Metadata = make(map[github_com_opencontainers_go_digest.Digest]OpMetadata) + } + var mapkey github_com_opencontainers_go_digest.Digest + mapvalue := &OpMetadata{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthOps + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthOps + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthOps + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthOps + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &OpMetadata{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Metadata[github_com_opencontainers_go_digest.Digest(mapkey)] = *mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return 
ErrInvalidLengthOps + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HostIP) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HostIP: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HostIP: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Host = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IP = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FileOp) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FileOp: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FileOp: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Actions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ 
+ msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Actions = append(m.Actions, &FileAction{}) + if err := m.Actions[len(m.Actions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FileAction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FileAction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FileAction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Input", wireType) + } + m.Input = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Input |= InputIndex(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SecondaryInput", wireType) + } + m.SecondaryInput = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SecondaryInput |= InputIndex(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Output", wireType) + } + m.Output = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Output |= OutputIndex(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Copy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &FileActionCopy{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Action = &FileAction_Copy{v} + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Mkfile", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &FileActionMkFile{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Action = &FileAction_Mkfile{v} + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Mkdir", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &FileActionMkDir{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Action = &FileAction_Mkdir{v} + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rm", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &FileActionRm{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Action = &FileAction_Rm{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FileActionCopy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FileActionCopy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FileActionCopy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Src", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Src = 
string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Dest", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Dest = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Owner == nil { + m.Owner = &ChownOpt{} + } + if err := m.Owner.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) + } + m.Mode = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Mode |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FollowSymlink", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.FollowSymlink = bool(v != 0) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DirCopyContents", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.DirCopyContents = bool(v != 0) + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AttemptUnpackDockerCompatibility", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AttemptUnpackDockerCompatibility = bool(v != 0) + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CreateDestPath", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.CreateDestPath = bool(v != 0) + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowWildcard", wireType) + } + var v int + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AllowWildcard = bool(v != 0) + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowEmptyWildcard", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AllowEmptyWildcard = bool(v != 0) + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FileActionMkFile) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FileActionMkFile: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FileActionMkFile: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) + } + m.Mode = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Mode |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + byteLen + if 
postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Owner == nil { + m.Owner = &ChownOpt{} + } + if err := m.Owner.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FileActionMkDir) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FileActionMkDir: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FileActionMkDir: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) + } + m.Mode = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Mode |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MakeParents", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + 
if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.MakeParents = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Owner == nil { + m.Owner = &ChownOpt{} + } + if err := m.Owner.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FileActionRm) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FileActionRm: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FileActionRm: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowNotFound", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AllowNotFound = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowWildcard", wireType) + } + var v int + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AllowWildcard = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ChownOpt) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ChownOpt: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ChownOpt: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.User == nil { + m.User = &UserOpt{} + } + if err := m.User.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Group == nil { + m.Group = &UserOpt{} + } + if err := m.Group.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UserOpt) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UserOpt: wiretype 
end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UserOpt: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ByName", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &NamedUserOpt{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.User = &UserOpt_ByName{v} + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ByID", wireType) + } + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.User = &UserOpt_ByID{v} + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NamedUserOpt) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NamedUserOpt: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NamedUserOpt: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Input", wireType) + } + m.Input = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Input |= InputIndex(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthOps + } + if 
(iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipOps(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	depth := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowOps
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowOps
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+		case 1:
+			iNdEx += 8
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowOps
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if length < 0 {
+				return 0, ErrInvalidLengthOps
+			}
+			iNdEx += length
+		case 3:
+			depth++
+		case 4:
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupOps
+			}
+			depth--
+		case 5:
+			iNdEx += 4
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthOps
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
+	}
+	return 0, io.ErrUnexpectedEOF
+}
+
+var (
+	ErrInvalidLengthOps        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowOps          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupOps = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/vendor/github.com/moby/buildkit/solver/pb/ops.proto b/vendor/github.com/moby/buildkit/solver/pb/ops.proto
new file mode 100644
index 00000000..a24aad12
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/solver/pb/ops.proto
@@ -0,0 +1,305 @@
+syntax = "proto3";
+
+// Package pb provides the protobuf definition of LLB: low-level builder instruction.
+// LLB is DAG-structured; Op represents a vertex, and Definition represents a graph.
+package pb;
+
+import "github.com/gogo/protobuf/gogoproto/gogo.proto";
+
+option (gogoproto.stable_marshaler_all) = true;
+
+// Op represents a vertex of the LLB DAG.
+message Op {
+	// inputs is a set of input edges.
+	repeated Input inputs = 1;
+	oneof op {
+		ExecOp exec = 2;
+		SourceOp source = 3;
+		FileOp file = 4;
+		BuildOp build = 5;
+	}
+	Platform platform = 10;
+	WorkerConstraints constraints = 11;
+}
+
+// Platform is github.com/opencontainers/image-spec/specs-go/v1.Platform
+message Platform {
+	string Architecture = 1;
+	string OS = 2;
+	string Variant = 3;
+	string OSVersion = 4; // unused
+	repeated string OSFeatures = 5; // unused
+}
+
+// Input represents an input edge for an Op.
+message Input {
+	// digest of the marshaled input Op
+	string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
+	// output index of the input Op
+	int64 index = 2 [(gogoproto.customtype) = "OutputIndex", (gogoproto.nullable) = false];
+}
+
+// ExecOp executes a command in a container.
+message ExecOp {
+	Meta meta = 1;
+	repeated Mount mounts = 2;
+	NetMode network = 3;
+	SecurityMode security = 4;
+}
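The ExecOp message above, together with the Meta message defined next, is enough to describe a simple run step. As a minimal sketch (not part of this patch) of how the generated bindings in ops.pb.go are used, the following builds and marshals one such vertex; it assumes the vendored package path github.com/moby/buildkit/solver/pb and the Op_Exec oneof wrapper that gogo generates for "oneof op", and it omits the Mounts a real exec step would need:

package main

import (
	"fmt"

	"github.com/moby/buildkit/solver/pb"
)

func main() {
	// One LLB vertex: run `echo hello` (a real ExecOp would also set
	// Mounts so the process has a root filesystem to run against).
	op := &pb.Op{
		Op: &pb.Op_Exec{
			Exec: &pb.ExecOp{
				Meta: &pb.Meta{
					Args: []string{"/bin/sh", "-c", "echo hello"},
					Cwd:  "/",
				},
			},
		},
	}
	dt, err := op.Marshal() // generated by gogo protobuf
	if err != nil {
		panic(err)
	}
	fmt.Printf("marshaled Op: %d bytes\n", len(dt))
}

Because ops.proto sets (gogoproto.stable_marshaler_all), marshaling is deterministic, so a vertex can be content-addressed by the digest of its marshaled bytes; that digest is exactly what Input.digest uses to reference an input Op.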
+// Meta is a set of arguments for ExecOp.
+// Meta is unrelated to LLB metadata.
+// FIXME: rename (ExecContext? ExecArgs?)
+message Meta {
+	repeated string args = 1;
+	repeated string env = 2;
+	string cwd = 3;
+	string user = 4;
+	ProxyEnv proxy_env = 5;
+	repeated HostIP extraHosts = 6;
+}
+
+enum NetMode {
+	UNSET = 0; // sandbox
+	HOST = 1;
+	NONE = 2;
+}
+
+enum SecurityMode {
+	SANDBOX = 0;
+	INSECURE = 1; // privileged mode
+}
+
+// Mount specifies how to mount an input Op as a filesystem.
+message Mount {
+	int64 input = 1 [(gogoproto.customtype) = "InputIndex", (gogoproto.nullable) = false];
+	string selector = 2;
+	string dest = 3;
+	int64 output = 4 [(gogoproto.customtype) = "OutputIndex", (gogoproto.nullable) = false];
+	bool readonly = 5;
+	MountType mountType = 6;
+	CacheOpt cacheOpt = 20;
+	SecretOpt secretOpt = 21;
+	SSHOpt SSHOpt = 22;
+}
+
+// MountType defines a type of a mount from a supported set
+enum MountType {
+	BIND = 0;
+	SECRET = 1;
+	SSH = 2;
+	CACHE = 3;
+	TMPFS = 4;
+}
+
+// CacheOpt defines options specific to cache mounts
+message CacheOpt {
+	// ID is an optional namespace for the mount
+	string ID = 1;
+	// Sharing is the sharing mode for the mount
+	CacheSharingOpt sharing = 2;
+}
+
+// CacheSharingOpt defines different sharing modes for a cache mount
+enum CacheSharingOpt {
+	// SHARED cache mount can be used concurrently by multiple writers
+	SHARED = 0;
+	// PRIVATE creates a new mount if there are multiple writers
+	PRIVATE = 1;
+	// LOCKED pauses second writer until first one releases the mount
+	LOCKED = 2;
+}
+
+// SecretOpt defines options describing secret mounts
+message SecretOpt {
+	// ID of secret. Used for querying the value.
+	string ID = 1;
+	// UID of secret file
+	uint32 uid = 2;
+	// GID of secret file
+	uint32 gid = 3;
+	// Mode is the filesystem mode of secret file
+	uint32 mode = 4;
+	// Optional defines if secret value is required. Error is produced
+	// if value is not found and optional is false.
+	bool optional = 5;
+}
+
+// SSHOpt defines options describing ssh mounts
+message SSHOpt {
+	// ID of exposed ssh rule. Used for querying the value.
+	string ID = 1;
+	// UID of agent socket
+	uint32 uid = 2;
+	// GID of agent socket
+	uint32 gid = 3;
+	// Mode is the filesystem mode of agent socket
+	uint32 mode = 4;
+	// Optional defines if ssh socket is required. Error is produced
+	// if client does not expose ssh.
+	bool optional = 5;
+}
+
+// SourceOp specifies a source such as build contexts and images.
+message SourceOp {
+	// TODO: use source type or any type instead of URL protocol.
+	// identifier e.g. local://, docker-image://, git://, https://...
+	string identifier = 1;
+	// attrs are defined in attr.go
+	map<string, string> attrs = 2;
+}
+
+// BuildOp is used for nested build invocation.
+// BuildOp is experimental and can break without backwards compatibility.
+message BuildOp {
+	int64 builder = 1 [(gogoproto.customtype) = "InputIndex", (gogoproto.nullable) = false];
+	map<string, BuildInput> inputs = 2;
+	Definition def = 3;
+	map<string, string> attrs = 4;
+	// outputs
+}
+
+// BuildInput is used for BuildOp.
+message BuildInput {
+	int64 input = 1 [(gogoproto.customtype) = "InputIndex", (gogoproto.nullable) = false];
+}
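Every field decode in the generated Unmarshal methods above begins with the same inline base-128 varint loop: each byte contributes its low seven bits, and a byte below 0x80 terminates the value. A standalone sketch of that step (hypothetical helper name; the generated code inlines the loop rather than calling a function):

package main

import (
	"errors"
	"fmt"
)

// decodeVarint mirrors the generated pattern `v |= uint64(b&0x7F) << shift`,
// including the two failure modes seen above: ErrIntOverflowOps when more
// than 64 bits accumulate, and io.ErrUnexpectedEOF when input runs out.
func decodeVarint(data []byte) (v uint64, n int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("integer overflow") // ErrIntOverflowOps in ops.pb.go
		}
		if n >= len(data) {
			return 0, 0, errors.New("unexpected EOF") // io.ErrUnexpectedEOF in ops.pb.go
		}
		b := data[n]
		n++
		v |= uint64(b&0x7F) << shift
		if b < 0x80 {
			break // high bit clear: last byte of this varint
		}
	}
	return v, n, nil
}

func main() {
	// 300 encodes as 0xAC 0x02: 0x2C | 0x02<<7 = 44 + 256 = 300.
	v, n, err := decodeVarint([]byte{0xAC, 0x02})
	fmt.Println(v, n, err) // 300 2 <nil>
}

The field header is the same kind of varint: the generated code splits it as fieldNum := int32(wire >> 3) and wireType := int(wire & 0x7), which is why every case arm above starts by checking wireType.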
+
+// OpMetadata is a per-vertex metadata entry, which can be defined for an
+// arbitrary Op vertex and overridden at run time.
+message OpMetadata {
+	// ignore_cache specifies to ignore the cache for this Op.
+	bool ignore_cache = 1;
+	// Description can be used for keeping any text fields that the builder doesn't parse
+	map<string, string> description = 2;
+	// index 3 reserved for WorkerConstraint in previous versions
+	// WorkerConstraint worker_constraint = 3;
+	ExportCache export_cache = 4;
+
+	map<string, bool> caps = 5 [(gogoproto.castkey) = "github.com/moby/buildkit/util/apicaps.CapID", (gogoproto.nullable) = false];
+}
+
+message ExportCache {
+	bool Value = 1;
+}
+
+message ProxyEnv {
+	string http_proxy = 1;
+	string https_proxy = 2;
+	string ftp_proxy = 3;
+	string no_proxy = 4;
+}
+
+// WorkerConstraints defines conditions for the worker
+message WorkerConstraints {
+	repeated string filter = 1; // containerd-style filter
+}
+
+// Definition is the LLB definition structure with per-vertex metadata entries
+message Definition {
+	// def is a list of marshaled Op messages
+	repeated bytes def = 1;
+	// metadata contains metadata for each of the Op messages.
+	// A key must be an LLB op digest string. Currently, empty string is not
+	// expected as a key, but it may change in the future.
+	map<string, OpMetadata> metadata = 2 [(gogoproto.castkey) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
+}
+
+message HostIP {
+	string Host = 1;
+	string IP = 2;
+}
+
+message FileOp {
+	repeated FileAction actions = 2;
+}
+
+message FileAction {
+	int64 input = 1 [(gogoproto.customtype) = "InputIndex", (gogoproto.nullable) = false]; // could be real input or target (target index + max input index)
+	int64 secondaryInput = 2 [(gogoproto.customtype) = "InputIndex", (gogoproto.nullable) = false]; // --//--
+	int64 output = 3 [(gogoproto.customtype) = "OutputIndex", (gogoproto.nullable) = false];
+	oneof action {
+		// FileActionCopy copies files from secondaryInput on top of input
+		FileActionCopy copy = 4;
+		// FileActionMkFile creates a new file
+		FileActionMkFile mkfile = 5;
+		// FileActionMkDir creates a new directory
+		FileActionMkDir mkdir = 6;
+		// FileActionRm removes a file
+		FileActionRm rm = 7;
+	}
+}
+
+message FileActionCopy {
+	// src is the source path
+	string src = 1;
+	// dest path
+	string dest = 2;
+	// optional owner override
+	ChownOpt owner = 3;
+	// optional permission bits override
+	int32 mode = 4;
+	// followSymlink resolves symlinks in src
+	bool followSymlink = 5;
+	// dirCopyContents only copies contents if src is a directory
+	bool dirCopyContents = 6;
+	// attemptUnpackDockerCompatibility detects if src is an archive to unpack it instead
+	bool attemptUnpackDockerCompatibility = 7;
+	// createDestPath creates dest path directories if needed
+	bool createDestPath = 8;
+	// allowWildcard allows filepath.Match wildcards in src path
+	bool allowWildcard = 9;
+	// allowEmptyWildcard doesn't fail the whole copy if wildcard doesn't resolve to files
+	bool allowEmptyWildcard = 10;
+	// optional created time override
+	int64 timestamp = 11;
+}
+
+message FileActionMkFile {
+	// path for the new file
+	string path = 1;
+	// permission bits
+	int32 mode = 2;
+	// data is the new file contents
+	bytes data = 3;
+	// optional owner for the new file
+	ChownOpt owner = 4;
+	// optional created time override
+	int64 timestamp = 5;
+}
+
+message FileActionMkDir {
+	// path for the new directory
+	string path = 1;
+	// permission bits
+	int32 mode = 2;
+	// makeParents creates parent directories as well if needed
+	bool makeParents = 3;
+	// optional owner for the new directory
+	ChownOpt owner = 4;
+	// optional created time override
+	int64 timestamp = 5;
+}
+
+message FileActionRm {
+	// path to remove
+
string path = 1; + // allowNotFound doesn't fail the rm if file is not found + bool allowNotFound = 2; + // allowWildcard allows filepath.Match wildcards in path + bool allowWildcard = 3; +} + +message ChownOpt { + UserOpt user = 1; + UserOpt group = 2; +} + +message UserOpt { + oneof user { + NamedUserOpt byName = 1; + uint32 byID = 2; + } +} + +message NamedUserOpt { + string name = 1; + int64 input = 2 [(gogoproto.customtype) = "InputIndex", (gogoproto.nullable) = false]; +} \ No newline at end of file diff --git a/vendor/github.com/moby/buildkit/solver/pb/platform.go b/vendor/github.com/moby/buildkit/solver/pb/platform.go new file mode 100644 index 00000000..a434aa71 --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/pb/platform.go @@ -0,0 +1,41 @@ +package pb + +import ( + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +func (p *Platform) Spec() specs.Platform { + return specs.Platform{ + OS: p.OS, + Architecture: p.Architecture, + Variant: p.Variant, + OSVersion: p.OSVersion, + OSFeatures: p.OSFeatures, + } +} + +func PlatformFromSpec(p specs.Platform) Platform { + return Platform{ + OS: p.OS, + Architecture: p.Architecture, + Variant: p.Variant, + OSVersion: p.OSVersion, + OSFeatures: p.OSFeatures, + } +} + +func ToSpecPlatforms(p []Platform) []specs.Platform { + out := make([]specs.Platform, 0, len(p)) + for _, pp := range p { + out = append(out, pp.Spec()) + } + return out +} + +func PlatformsFromSpec(p []specs.Platform) []Platform { + out := make([]Platform, 0, len(p)) + for _, pp := range p { + out = append(out, PlatformFromSpec(pp)) + } + return out +} diff --git a/vendor/github.com/moby/buildkit/solver/progress.go b/vendor/github.com/moby/buildkit/solver/progress.go new file mode 100644 index 00000000..14f3f5e0 --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/progress.go @@ -0,0 +1,109 @@ +package solver + +import ( + "context" + "io" + "time" + + "github.com/moby/buildkit/client" + "github.com/moby/buildkit/util/progress" + digest "github.com/opencontainers/go-digest" + "github.com/sirupsen/logrus" +) + +func (j *Job) Status(ctx context.Context, ch chan *client.SolveStatus) error { + vs := &vertexStream{cache: map[digest.Digest]*client.Vertex{}} + pr := j.pr.Reader(ctx) + defer func() { + if enc := vs.encore(); len(enc) > 0 { + ch <- &client.SolveStatus{Vertexes: enc} + } + close(ch) + }() + + for { + p, err := pr.Read(ctx) + if err != nil { + if err == io.EOF { + return nil + } + return err + } + ss := &client.SolveStatus{} + for _, p := range p { + switch v := p.Sys.(type) { + case client.Vertex: + ss.Vertexes = append(ss.Vertexes, vs.append(v)...) 
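+			// The status and log cases below resolve their owning vertex
+			// from the "vertex" key in the progress record's metadata.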
+ + case progress.Status: + vtx, ok := p.Meta("vertex") + if !ok { + logrus.Warnf("progress %s status without vertex info", p.ID) + continue + } + vs := &client.VertexStatus{ + ID: p.ID, + Vertex: vtx.(digest.Digest), + Name: v.Action, + Total: int64(v.Total), + Current: int64(v.Current), + Timestamp: p.Timestamp, + Started: v.Started, + Completed: v.Completed, + } + ss.Statuses = append(ss.Statuses, vs) + case client.VertexLog: + vtx, ok := p.Meta("vertex") + if !ok { + logrus.Warnf("progress %s log without vertex info", p.ID) + continue + } + v.Vertex = vtx.(digest.Digest) + v.Timestamp = p.Timestamp + ss.Logs = append(ss.Logs, &v) + } + } + select { + case <-ctx.Done(): + return ctx.Err() + case ch <- ss: + } + } +} + +type vertexStream struct { + cache map[digest.Digest]*client.Vertex +} + +func (vs *vertexStream) append(v client.Vertex) []*client.Vertex { + var out []*client.Vertex + vs.cache[v.Digest] = &v + if v.Started != nil { + for _, inp := range v.Inputs { + if inpv, ok := vs.cache[inp]; ok { + if !inpv.Cached && inpv.Completed == nil { + inpv.Cached = true + inpv.Started = v.Started + inpv.Completed = v.Started + out = append(out, vs.append(*inpv)...) + delete(vs.cache, inp) + } + } + } + } + vcopy := v + return append(out, &vcopy) +} + +func (vs *vertexStream) encore() []*client.Vertex { + var out []*client.Vertex + for _, v := range vs.cache { + if v.Started != nil && v.Completed == nil { + now := time.Now() + v.Completed = &now + v.Error = context.Canceled.Error() + out = append(out, v) + } + } + return out +} diff --git a/vendor/github.com/moby/buildkit/solver/result.go b/vendor/github.com/moby/buildkit/solver/result.go new file mode 100644 index 00000000..c7e100b0 --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/result.go @@ -0,0 +1,105 @@ +package solver + +import ( + "context" + "sync" + "sync/atomic" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// SharedResult is a result that can be cloned +type SharedResult struct { + mu sync.Mutex + main Result +} + +func NewSharedResult(main Result) *SharedResult { + return &SharedResult{main: main} +} + +func (r *SharedResult) Clone() Result { + r.mu.Lock() + defer r.mu.Unlock() + + r1, r2 := dup(r.main) + r.main = r1 + return r2 +} + +func (r *SharedResult) Release(ctx context.Context) error { + r.mu.Lock() + defer r.mu.Unlock() + return r.main.Release(ctx) +} + +func dup(res Result) (Result, Result) { + sem := int64(0) + return &splitResult{Result: res, sem: &sem}, &splitResult{Result: res, sem: &sem} +} + +type splitResult struct { + released int64 + sem *int64 + Result +} + +func (r *splitResult) Release(ctx context.Context) error { + if atomic.AddInt64(&r.released, 1) > 1 { + err := errors.Errorf("releasing already released reference") + logrus.Error(err) + return err + } + if atomic.AddInt64(r.sem, 1) == 2 { + return r.Result.Release(ctx) + } + return nil +} + +// NewCachedResult combines a result and cache key into cached result +func NewCachedResult(res Result, k []ExportableCacheKey) CachedResult { + return &cachedResult{res, k} +} + +type cachedResult struct { + Result + k []ExportableCacheKey +} + +func (cr *cachedResult) CacheKeys() []ExportableCacheKey { + return cr.k +} + +func NewSharedCachedResult(res CachedResult) *SharedCachedResult { + return &SharedCachedResult{ + SharedResult: NewSharedResult(res), + CachedResult: res, + } +} + +func (r *SharedCachedResult) Clone() CachedResult { + return &clonedCachedResult{Result: r.SharedResult.Clone(), cr: r.CachedResult} +} + +func (r 
*SharedCachedResult) Release(ctx context.Context) error { + return r.SharedResult.Release(ctx) +} + +type clonedCachedResult struct { + Result + cr CachedResult +} + +func (r *clonedCachedResult) ID() string { + return r.Result.ID() +} + +func (cr *clonedCachedResult) CacheKeys() []ExportableCacheKey { + return cr.cr.CacheKeys() +} + +type SharedCachedResult struct { + *SharedResult + CachedResult +} diff --git a/vendor/github.com/moby/buildkit/solver/scheduler.go b/vendor/github.com/moby/buildkit/solver/scheduler.go new file mode 100644 index 00000000..7fe981ef --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/scheduler.go @@ -0,0 +1,410 @@ +package solver + +import ( + "context" + "os" + "sync" + + "github.com/moby/buildkit/solver/internal/pipe" + "github.com/moby/buildkit/util/cond" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +var debugScheduler = false // TODO: replace with logs in build trace + +func init() { + if os.Getenv("BUILDKIT_SCHEDULER_DEBUG") == "1" { + debugScheduler = true + } +} + +func newScheduler(ef edgeFactory) *scheduler { + s := &scheduler{ + waitq: map[*edge]struct{}{}, + incoming: map[*edge][]*edgePipe{}, + outgoing: map[*edge][]*edgePipe{}, + + stopped: make(chan struct{}), + closed: make(chan struct{}), + + ef: ef, + } + s.cond = cond.NewStatefulCond(&s.mu) + + go s.loop() + + return s +} + +type dispatcher struct { + next *dispatcher + e *edge +} + +type scheduler struct { + cond *cond.StatefulCond + mu sync.Mutex + muQ sync.Mutex + + ef edgeFactory + + waitq map[*edge]struct{} + next *dispatcher + last *dispatcher + stopped chan struct{} + stoppedOnce sync.Once + closed chan struct{} + + incoming map[*edge][]*edgePipe + outgoing map[*edge][]*edgePipe +} + +func (s *scheduler) Stop() { + s.stoppedOnce.Do(func() { + close(s.stopped) + }) + <-s.closed +} + +func (s *scheduler) loop() { + defer func() { + close(s.closed) + }() + + go func() { + <-s.stopped + s.mu.Lock() + s.cond.Signal() + s.mu.Unlock() + }() + + s.mu.Lock() + for { + select { + case <-s.stopped: + s.mu.Unlock() + return + default: + } + s.muQ.Lock() + l := s.next + if l != nil { + if l == s.last { + s.last = nil + } + s.next = l.next + delete(s.waitq, l.e) + } + s.muQ.Unlock() + if l == nil { + s.cond.Wait() + continue + } + s.dispatch(l.e) + } +} + +// dispatch schedules an edge to be processed +func (s *scheduler) dispatch(e *edge) { + inc := make([]pipe.Sender, len(s.incoming[e])) + for i, p := range s.incoming[e] { + inc[i] = p.Sender + } + out := make([]pipe.Receiver, len(s.outgoing[e])) + for i, p := range s.outgoing[e] { + out[i] = p.Receiver + } + + e.hasActiveOutgoing = false + updates := []pipe.Receiver{} + for _, p := range out { + if ok := p.Receive(); ok { + updates = append(updates, p) + } + if !p.Status().Completed { + e.hasActiveOutgoing = true + } + } + + pf := &pipeFactory{s: s, e: e} + + // unpark the edge + debugSchedulerPreUnpark(e, inc, updates, out) + e.unpark(inc, updates, out, pf) + debugSchedulerPostUnpark(e, inc) + +postUnpark: + // set up new requests that didn't complete/were added by this run + openIncoming := make([]*edgePipe, 0, len(inc)) + for _, r := range s.incoming[e] { + if !r.Sender.Status().Completed { + openIncoming = append(openIncoming, r) + } + } + if len(openIncoming) > 0 { + s.incoming[e] = openIncoming + } else { + delete(s.incoming, e) + } + + openOutgoing := make([]*edgePipe, 0, len(out)) + for _, r := range s.outgoing[e] { + if !r.Receiver.Status().Completed { + openOutgoing = append(openOutgoing, r) + } + } + if 
len(openOutgoing) > 0 {
+		s.outgoing[e] = openOutgoing
+	} else {
+		delete(s.outgoing, e)
+	}
+
+	// if keys changed there might be a possibility for a merge with another edge
+	if e.keysDidChange {
+		if k := e.currentIndexKey(); k != nil {
+			// skip this if not at least 1 key per dep
+			origEdge := e.index.LoadOrStore(k, e)
+			if origEdge != nil {
+				logrus.Debugf("merging edge %s to %s\n", e.edge.Vertex.Name(), origEdge.edge.Vertex.Name())
+				if s.mergeTo(origEdge, e) {
+					s.ef.setEdge(e.edge, origEdge)
+				}
+			}
+		}
+		e.keysDidChange = false
+	}
+
+	// validation to avoid deadlocks/resource leaks:
+	// TODO: if these start showing up in error reports they can be changed
+	// to error the edge instead. They can only appear from algorithm bugs in
+	// unpark(), not for any external input.
+	if len(openIncoming) > 0 && len(openOutgoing) == 0 {
+		e.markFailed(pf, errors.New("buildkit scheduler error: return leaving incoming open. Please report this with BUILDKIT_SCHEDULER_DEBUG=1"))
+		goto postUnpark
+	}
+	if len(openIncoming) == 0 && len(openOutgoing) > 0 {
+		e.markFailed(pf, errors.New("buildkit scheduler error: return leaving outgoing open. Please report this with BUILDKIT_SCHEDULER_DEBUG=1"))
+		goto postUnpark
+	}
+}
+
+// signal notifies that an edge needs to be processed again
+func (s *scheduler) signal(e *edge) {
+	s.muQ.Lock()
+	if _, ok := s.waitq[e]; !ok {
+		d := &dispatcher{e: e}
+		if s.last == nil {
+			s.next = d
+		} else {
+			s.last.next = d
+		}
+		s.last = d
+		s.waitq[e] = struct{}{}
+		s.cond.Signal()
+	}
+	s.muQ.Unlock()
+}
+
+// build evaluates edge into a result
+func (s *scheduler) build(ctx context.Context, edge Edge) (CachedResult, error) {
+	s.mu.Lock()
+	e := s.ef.getEdge(edge)
+	if e == nil {
+		s.mu.Unlock()
+		return nil, errors.Errorf("invalid request %v for build", edge)
+	}
+
+	wait := make(chan struct{})
+
+	var p *pipe.Pipe
+	p = s.newPipe(e, nil, pipe.Request{Payload: &edgeRequest{desiredState: edgeStatusComplete}})
+	p.OnSendCompletion = func() {
+		p.Receiver.Receive()
+		if p.Receiver.Status().Completed {
+			close(wait)
+		}
+	}
+	s.mu.Unlock()
+
+	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	go func() {
+		<-ctx.Done()
+		p.Receiver.Cancel()
+	}()
+
+	<-wait
+
+	if err := p.Receiver.Status().Err; err != nil {
+		return nil, err
+	}
+	return p.Receiver.Status().Value.(*edgeState).result.Clone(), nil
+}
+
+// newPipe creates a new request pipe between two edges
+func (s *scheduler) newPipe(target, from *edge, req pipe.Request) *pipe.Pipe {
+	p := &edgePipe{
+		Pipe:   pipe.New(req),
+		Target: target,
+		From:   from,
+	}
+
+	s.signal(target)
+	if from != nil {
+		p.OnSendCompletion = func() {
+			p.mu.Lock()
+			defer p.mu.Unlock()
+			s.signal(p.From)
+		}
+		s.outgoing[from] = append(s.outgoing[from], p)
+	}
+	s.incoming[target] = append(s.incoming[target], p)
+	p.OnReceiveCompletion = func() {
+		p.mu.Lock()
+		defer p.mu.Unlock()
+		s.signal(p.Target)
+	}
+	return p.Pipe
+}
+
+// newRequestWithFunc creates a new request pipe that invokes an async function
+func (s *scheduler) newRequestWithFunc(e *edge, f func(context.Context) (interface{}, error)) pipe.Receiver {
+	pp, start := pipe.NewWithFunction(f)
+	p := &edgePipe{
+		Pipe: pp,
+		From: e,
+	}
+	p.OnSendCompletion = func() {
+		p.mu.Lock()
+		defer p.mu.Unlock()
+		s.signal(p.From)
+	}
+	s.outgoing[e] = append(s.outgoing[e], p)
+	go start()
+	return p.Receiver
+}
+
+// mergeTo merges the state from one edge to another. The source edge is discarded.
+func (s *scheduler) mergeTo(target, src *edge) bool { + if !target.edge.Vertex.Options().IgnoreCache && src.edge.Vertex.Options().IgnoreCache { + return false + } + for _, inc := range s.incoming[src] { + inc.mu.Lock() + inc.Target = target + s.incoming[target] = append(s.incoming[target], inc) + inc.mu.Unlock() + } + + for _, out := range s.outgoing[src] { + out.mu.Lock() + out.From = target + s.outgoing[target] = append(s.outgoing[target], out) + out.mu.Unlock() + out.Receiver.Cancel() + } + + delete(s.incoming, src) + delete(s.outgoing, src) + s.signal(target) + + for i, d := range src.deps { + for _, k := range d.keys { + target.secondaryExporters = append(target.secondaryExporters, expDep{i, CacheKeyWithSelector{CacheKey: k, Selector: src.cacheMap.Deps[i].Selector}}) + } + if d.slowCacheKey != nil { + target.secondaryExporters = append(target.secondaryExporters, expDep{i, CacheKeyWithSelector{CacheKey: *d.slowCacheKey}}) + } + if d.result != nil { + for _, dk := range d.result.CacheKeys() { + target.secondaryExporters = append(target.secondaryExporters, expDep{i, CacheKeyWithSelector{CacheKey: dk, Selector: src.cacheMap.Deps[i].Selector}}) + } + } + } + + // TODO(tonistiigi): merge cache providers + + return true +} + +// edgeFactory allows access to the edges from a shared graph +type edgeFactory interface { + getEdge(Edge) *edge + setEdge(Edge, *edge) +} + +type pipeFactory struct { + e *edge + s *scheduler +} + +func (pf *pipeFactory) NewInputRequest(ee Edge, req *edgeRequest) pipe.Receiver { + target := pf.s.ef.getEdge(ee) + if target == nil { + panic("failed to get edge") // TODO: return errored pipe + } + p := pf.s.newPipe(target, pf.e, pipe.Request{Payload: req}) + if debugScheduler { + logrus.Debugf("> newPipe %s %p desiredState=%s", ee.Vertex.Name(), p, req.desiredState) + } + return p.Receiver +} + +func (pf *pipeFactory) NewFuncRequest(f func(context.Context) (interface{}, error)) pipe.Receiver { + p := pf.s.newRequestWithFunc(pf.e, f) + if debugScheduler { + logrus.Debugf("> newFunc %p", p) + } + return p +} + +func debugSchedulerPreUnpark(e *edge, inc []pipe.Sender, updates, allPipes []pipe.Receiver) { + if !debugScheduler { + return + } + logrus.Debugf(">> unpark %s req=%d upt=%d out=%d state=%s %s", e.edge.Vertex.Name(), len(inc), len(updates), len(allPipes), e.state, e.edge.Vertex.Digest()) + + for i, dep := range e.deps { + des := edgeStatusInitial + if dep.req != nil { + des = dep.req.Request().(*edgeRequest).desiredState + } + logrus.Debugf(":: dep%d %s state=%s des=%s keys=%d hasslowcache=%v", i, e.edge.Vertex.Inputs()[i].Vertex.Name(), dep.state, des, len(dep.keys), e.slowCacheFunc(dep) != nil) + } + + for i, in := range inc { + req := in.Request() + logrus.Debugf("> incoming-%d: %p dstate=%s canceled=%v", i, in, req.Payload.(*edgeRequest).desiredState, req.Canceled) + } + + for i, up := range updates { + if up == e.cacheMapReq { + logrus.Debugf("> update-%d: %p cacheMapReq complete=%v", i, up, up.Status().Completed) + } else if up == e.execReq { + logrus.Debugf("> update-%d: %p execReq complete=%v", i, up, up.Status().Completed) + } else { + st, ok := up.Status().Value.(*edgeState) + if ok { + index := -1 + if dep, ok := e.depRequests[up]; ok { + index = int(dep.index) + } + logrus.Debugf("> update-%d: %p input-%d keys=%d state=%s", i, up, index, len(st.keys), st.state) + } else { + logrus.Debugf("> update-%d: unknown", i) + } + } + } +} + +func debugSchedulerPostUnpark(e *edge, inc []pipe.Sender) { + if !debugScheduler { + return + } + for i, in := range inc { 
+		logrus.Debugf("< incoming-%d: %p completed=%v", i, in, in.Status().Completed)
+	}
+	logrus.Debugf("<< unpark %s\n", e.edge.Vertex.Name())
+}
diff --git a/vendor/github.com/moby/buildkit/solver/types.go b/vendor/github.com/moby/buildkit/solver/types.go
new file mode 100644
index 00000000..7fd5370d
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/solver/types.go
@@ -0,0 +1,214 @@
+package solver
+
+import (
+	"context"
+	"time"
+
+	"github.com/containerd/containerd/content"
+	"github.com/moby/buildkit/solver/pb"
+	digest "github.com/opencontainers/go-digest"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// Vertex is a node in a build graph. It defines an interface for a
+// content-addressable operation and its inputs.
+type Vertex interface {
+	// Digest returns a checksum of the definition up to the vertex including
+	// all of its inputs.
+	Digest() digest.Digest
+
+	// Sys returns an object used to resolve the executor for this vertex.
+	// In LLB solver, this value would be of type `llb.Op`.
+	Sys() interface{}
+
+	// Options returns metadata associated with the vertex that doesn't change the
+	// definition or equality check of it.
+	Options() VertexOptions
+
+	// Inputs returns an array of edges the vertex depends on. An input edge is
+	// a vertex and an index from the returned array of results from an executor
+	// returned by Sys(). A vertex may have zero inputs.
+	Inputs() []Edge
+
+	Name() string
+}
+
+// Index is an index value for the return array of an operation. Index starts
+// counting from zero.
+type Index int
+
+// Edge is a connection point between vertexes. An edge references a specific
+// output of a vertex's operation. Edges are used as inputs to other vertexes.
+type Edge struct {
+	Index  Index
+	Vertex Vertex
+}
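+
+// A minimal sketch of one way to satisfy the Vertex interface (a hypothetical
+// example for illustration only; constVertex is not part of the solver):
+//
+//	type constVertex struct {
+//		dgst   digest.Digest
+//		inputs []Edge
+//	}
+//
+//	func (v *constVertex) Digest() digest.Digest  { return v.dgst }
+//	func (v *constVertex) Sys() interface{}       { return nil }
+//	func (v *constVertex) Options() VertexOptions { return VertexOptions{} }
+//	func (v *constVertex) Inputs() []Edge         { return v.inputs }
+//	func (v *constVertex) Name() string           { return "const" }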
+
+// VertexOptions define optional metadata for a vertex that doesn't change the
+// definition or equality check of it. These options are not contained in the
+// vertex digest.
+type VertexOptions struct {
+	IgnoreCache  bool
+	CacheSources []CacheManager
+	Description  map[string]string // text values with no special meaning for solver
+	ExportCache  *bool
+	// WorkerConstraint
+}
+
+// Result is an abstract return value for a solve
+type Result interface {
+	ID() string
+	Release(context.Context) error
+	Sys() interface{}
+}
+
+// CachedResult is a result connected with its cache key
+type CachedResult interface {
+	Result
+	CacheKeys() []ExportableCacheKey
+}
+
+// ResultProxy is a lazy reference to a result that can be resolved or released.
type ResultProxy interface {
+	Result(context.Context) (CachedResult, error)
+	Release(context.Context) error
+	Definition() *pb.Definition
+}
+
+// CacheExportMode is the type for setting cache exporting modes
+type CacheExportMode int
+
+const (
+	// CacheExportModeMin exports a topmost allowed vertex and its dependencies
+	// that already have transferable layers
+	CacheExportModeMin CacheExportMode = iota
+	// CacheExportModeMax exports all possible non-root vertexes
+	CacheExportModeMax
+	// CacheExportModeRemoteOnly only exports vertexes that already have
+	// transferable layers
+	CacheExportModeRemoteOnly
+)
+
+// CacheExportOpt defines options for exporting build cache
+type CacheExportOpt struct {
+	// Convert can convert a build result to a transferable object
+	Convert func(context.Context, Result) (*Remote, error)
+	// Mode defines a cache export algorithm
+	Mode CacheExportMode
+}
+
+// CacheExporter can export the artifacts of the build chain
+type CacheExporter interface {
+	ExportTo(ctx context.Context, t CacheExporterTarget, opt CacheExportOpt) ([]CacheExporterRecord, error)
+}
+
+// CacheExporterTarget defines an object capable of receiving exports
+type CacheExporterTarget interface {
+	Add(dgst digest.Digest) CacheExporterRecord
+	Visit(interface{})
+	Visited(interface{}) bool
+}
+
+// CacheExporterRecord is a single object being exported
+type CacheExporterRecord interface {
+	AddResult(createdAt time.Time, result *Remote)
+	LinkFrom(src CacheExporterRecord, index int, selector string)
+}
+
+// Remote is a descriptor or a list of stacked descriptors that can be pulled
+// from a content provider
+// TODO: add closer to keep referenced data from getting deleted
+type Remote struct {
+	Descriptors []ocispec.Descriptor
+	Provider    content.Provider
+}
+
+// CacheLink is a link between two cache records
+type CacheLink struct {
+	Source   digest.Digest `json:",omitempty"`
+	Input    Index         `json:",omitempty"`
+	Output   Index         `json:",omitempty"`
+	Base     digest.Digest `json:",omitempty"`
+	Selector digest.Digest `json:",omitempty"`
+}
+
+// Op defines how the solver can evaluate the properties of a vertex operation.
+// An op is executed in the worker, and is retrieved from the vertex by the
+// value of `vertex.Sys()`. The solver is configured with a resolve function to
+// convert a `vertex.Sys()` into an `Op`.
+type Op interface {
+	// CacheMap returns a structure describing how the operation is cached.
+	// Currently only roots are allowed to return multiple cache maps per op.
+	CacheMap(context.Context, int) (*CacheMap, bool, error)
+
+	// Exec runs an operation given results from previous operations.
+	Exec(ctx context.Context, inputs []Result) (outputs []Result, err error)
+}
+
+type ResultBasedCacheFunc func(context.Context, Result) (digest.Digest, error)
+
+// CacheMap is a description for calculating the cache key of an operation.
+type CacheMap struct {
+	// Digest returns a checksum for the operation.
The operation result can be + // cached by a checksum that combines this digest and the cache keys of the + // operation's inputs. + // + // For example, in LLB this digest is a manifest digest for OCI images, or + // commit SHA for git sources. + Digest digest.Digest + + // Deps contain optional selectors or content-based cache functions for its + // inputs. + Deps []struct { + // Selector is a digest that is merged with the cache key of the input. + // Selectors are not merged with the result of the `ComputeDigestFunc` for + // this input. + Selector digest.Digest + + // ComputeDigestFunc should return a digest for the input based on its return + // value. + // + // For example, in LLB this is invoked to calculate the cache key based on + // the checksum of file contents from input snapshots. + ComputeDigestFunc ResultBasedCacheFunc + } +} + +// ExportableCacheKey is a cache key connected with an exporter that can export +// a chain of cacherecords pointing to that key +type ExportableCacheKey struct { + *CacheKey + Exporter CacheExporter +} + +// CacheRecord is an identifier for loading in cache +type CacheRecord struct { + ID string + Size int + CreatedAt time.Time + Priority int + + cacheManager *cacheManager + key *CacheKey +} + +// CacheManager determines if there is a result that matches the cache keys +// generated during the build that could be reused instead of fully +// reevaluating the vertex and its inputs. There can be multiple cache +// managers, and specific managers can be defined per vertex using +// `VertexOptions`. +type CacheManager interface { + // ID is used to identify cache providers that are backed by same source + // to avoid duplicate calls to the same provider. + ID() string + + // Query searches for cache paths from one cache key to the output of a + // possible match. + Query(inp []CacheKeyWithSelector, inputIndex Index, dgst digest.Digest, outputIndex Index) ([]*CacheKey, error) + Records(ck *CacheKey) ([]*CacheRecord, error) + + // Load loads a cache record into a result reference. 
+ Load(ctx context.Context, rec *CacheRecord) (Result, error) + + // Save saves a result based on a cache key + Save(key *CacheKey, s Result, createdAt time.Time) (*ExportableCacheKey, error) +} diff --git a/vendor/github.com/moby/buildkit/source/containerimage/pull.go b/vendor/github.com/moby/buildkit/source/containerimage/pull.go new file mode 100644 index 00000000..1fd08fc6 --- /dev/null +++ b/vendor/github.com/moby/buildkit/source/containerimage/pull.go @@ -0,0 +1,295 @@ +package containerimage + +import ( + "context" + "encoding/json" + "runtime" + "time" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/diff" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/leases" + "github.com/containerd/containerd/platforms" + "github.com/containerd/containerd/remotes/docker" + "github.com/docker/distribution/reference" + "github.com/moby/buildkit/cache" + "github.com/moby/buildkit/client/llb" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/snapshot" + "github.com/moby/buildkit/source" + "github.com/moby/buildkit/util/flightcontrol" + "github.com/moby/buildkit/util/imageutil" + "github.com/moby/buildkit/util/leaseutil" + "github.com/moby/buildkit/util/progress" + "github.com/moby/buildkit/util/pull" + "github.com/moby/buildkit/util/winlayers" + digest "github.com/opencontainers/go-digest" + "github.com/opencontainers/image-spec/identity" + specs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +// TODO: break apart containerd specifics like contentstore so the resolver +// code can be used with any implementation + +type SourceOpt struct { + Snapshotter snapshot.Snapshotter + ContentStore content.Store + Applier diff.Applier + CacheAccessor cache.Accessor + ImageStore images.Store // optional + RegistryHosts docker.RegistryHosts + LeaseManager leases.Manager +} + +type imageSource struct { + SourceOpt + g flightcontrol.Group +} + +func NewSource(opt SourceOpt) (source.Source, error) { + is := &imageSource{ + SourceOpt: opt, + } + + return is, nil +} + +func (is *imageSource) ID() string { + return source.DockerImageScheme +} + +func (is *imageSource) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt, sm *session.Manager) (digest.Digest, []byte, error) { + type t struct { + dgst digest.Digest + dt []byte + } + key := ref + if platform := opt.Platform; platform != nil { + key += platforms.Format(*platform) + } + + rm, err := source.ParseImageResolveMode(opt.ResolveMode) + if err != nil { + return "", nil, err + } + + res, err := is.g.Do(ctx, key, func(ctx context.Context) (interface{}, error) { + dgst, dt, err := imageutil.Config(ctx, ref, pull.NewResolver(ctx, is.RegistryHosts, sm, is.ImageStore, rm, ref), is.ContentStore, is.LeaseManager, opt.Platform) + if err != nil { + return nil, err + } + return &t{dgst: dgst, dt: dt}, nil + }) + if err != nil { + return "", nil, err + } + typed := res.(*t) + return typed.dgst, typed.dt, nil +} + +func (is *imageSource) Resolve(ctx context.Context, id source.Identifier, sm *session.Manager) (source.SourceInstance, error) { + imageIdentifier, ok := id.(*source.ImageIdentifier) + if !ok { + return nil, errors.Errorf("invalid image identifier %v", id) + } + + platform := platforms.DefaultSpec() + if imageIdentifier.Platform != nil { + platform = *imageIdentifier.Platform + } + + pullerUtil := &pull.Puller{ + Snapshotter: is.Snapshotter, + ContentStore: is.ContentStore, + Applier: is.Applier, + Src: 
imageIdentifier.Reference, + Resolver: pull.NewResolver(ctx, is.RegistryHosts, sm, is.ImageStore, imageIdentifier.ResolveMode, imageIdentifier.Reference.String()), + Platform: &platform, + } + p := &puller{ + CacheAccessor: is.CacheAccessor, + Puller: pullerUtil, + Platform: platform, + id: imageIdentifier, + LeaseManager: is.LeaseManager, + } + return p, nil +} + +type puller struct { + CacheAccessor cache.Accessor + LeaseManager leases.Manager + Platform specs.Platform + id *source.ImageIdentifier + *pull.Puller +} + +func mainManifestKey(ctx context.Context, desc specs.Descriptor, platform specs.Platform) (digest.Digest, error) { + dt, err := json.Marshal(struct { + Digest digest.Digest + OS string + Arch string + Variant string `json:",omitempty"` + }{ + Digest: desc.Digest, + OS: platform.OS, + Arch: platform.Architecture, + Variant: platform.Variant, + }) + if err != nil { + return "", err + } + return digest.FromBytes(dt), nil +} + +func (p *puller) CacheKey(ctx context.Context, index int) (string, bool, error) { + _, desc, err := p.Puller.Resolve(ctx) + if err != nil { + return "", false, err + } + if index == 0 || desc.Digest == "" { + k, err := mainManifestKey(ctx, desc, p.Platform) + if err != nil { + return "", false, err + } + return k.String(), false, nil + } + ref, err := reference.ParseNormalizedNamed(p.Src.String()) + if err != nil { + return "", false, err + } + ref, err = reference.WithDigest(ref, desc.Digest) + if err != nil { + return "", false, nil + } + _, dt, err := imageutil.Config(ctx, ref.String(), p.Resolver, p.ContentStore, p.LeaseManager, &p.Platform) + if err != nil { + return "", false, err + } + + k := cacheKeyFromConfig(dt).String() + if k == "" { + k, err := mainManifestKey(ctx, desc, p.Platform) + if err != nil { + return "", false, err + } + return k.String(), true, nil + } + return k, true, nil +} + +func (p *puller) Snapshot(ctx context.Context) (ir cache.ImmutableRef, err error) { + layerNeedsTypeWindows := false + if platform := p.Puller.Platform; platform != nil { + if platform.OS == "windows" && runtime.GOOS != "windows" { + ctx = winlayers.UseWindowsLayerMode(ctx) + layerNeedsTypeWindows = true + } + } + + // workaround for gcr, authentication not supported on blob endpoints + pull.EnsureManifestRequested(ctx, p.Puller.Resolver, p.Puller.Src.String()) + + ctx, done, err := leaseutil.WithLease(ctx, p.LeaseManager, leaseutil.MakeTemporary) + if err != nil { + return nil, err + } + defer done(ctx) + + pulled, err := p.Puller.Pull(ctx) + if err != nil { + return nil, err + } + if len(pulled.Layers) == 0 { + return nil, nil + } + + extractDone := oneOffProgress(ctx, "unpacking "+pulled.Ref) + var current cache.ImmutableRef + defer func() { + if err != nil && current != nil { + current.Release(context.TODO()) + } + extractDone(err) + }() + for _, l := range pulled.Layers { + ref, err := p.CacheAccessor.GetByBlob(ctx, l, current, cache.WithDescription("pulled from "+pulled.Ref)) + if err != nil { + return nil, err + } + if err := ref.Extract(ctx); err != nil { + ref.Release(context.TODO()) + return nil, err + } + if current != nil { + current.Release(context.TODO()) + } + current = ref + } + + for _, desc := range pulled.MetadataBlobs { + if err := p.LeaseManager.AddResource(ctx, leases.Lease{ID: current.ID()}, leases.Resource{ + ID: desc.Digest.String(), + Type: "content", + }); err != nil { + return nil, err + } + } + + if layerNeedsTypeWindows && current != nil { + if err := markRefLayerTypeWindows(current); err != nil { + return nil, err + } + } + + 
if p.id.RecordType != "" && cache.GetRecordType(current) == "" { + if err := cache.SetRecordType(current, p.id.RecordType); err != nil { + return nil, err + } + } + + return current, nil +} + +func markRefLayerTypeWindows(ref cache.ImmutableRef) error { + if parent := ref.Parent(); parent != nil { + defer parent.Release(context.TODO()) + if err := markRefLayerTypeWindows(parent); err != nil { + return err + } + } + return cache.SetLayerType(ref, "windows") +} + +// cacheKeyFromConfig returns a stable digest from image config. If image config +// is a known oci image we will use chainID of layers. +func cacheKeyFromConfig(dt []byte) digest.Digest { + var img specs.Image + err := json.Unmarshal(dt, &img) + if err != nil { + return digest.FromBytes(dt) + } + if img.RootFS.Type != "layers" || len(img.RootFS.DiffIDs) == 0 { + return "" + } + return identity.ChainID(img.RootFS.DiffIDs) +} + +func oneOffProgress(ctx context.Context, id string) func(err error) error { + pw, _, _ := progress.FromContext(ctx) + now := time.Now() + st := progress.Status{ + Started: &now, + } + pw.Write(id, st) + return func(err error) error { + // TODO: set error on status + now := time.Now() + st.Completed = &now + pw.Write(id, st) + pw.Close() + return err + } +} diff --git a/vendor/github.com/moby/buildkit/source/git/gitsource.go b/vendor/github.com/moby/buildkit/source/git/gitsource.go new file mode 100644 index 00000000..419bf3b6 --- /dev/null +++ b/vendor/github.com/moby/buildkit/source/git/gitsource.go @@ -0,0 +1,441 @@ +package git + +import ( + "bytes" + "context" + "fmt" + "io" + "os" + "os/exec" + "path/filepath" + "regexp" + "strings" + + "github.com/docker/docker/pkg/locker" + "github.com/moby/buildkit/cache" + "github.com/moby/buildkit/cache/metadata" + "github.com/moby/buildkit/client" + "github.com/moby/buildkit/identity" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/snapshot" + "github.com/moby/buildkit/source" + "github.com/moby/buildkit/util/progress/logs" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + bolt "go.etcd.io/bbolt" +) + +var validHex = regexp.MustCompile(`^[a-f0-9]{40}$`) + +type Opt struct { + CacheAccessor cache.Accessor + MetadataStore *metadata.Store +} + +type gitSource struct { + md *metadata.Store + cache cache.Accessor + locker *locker.Locker +} + +// Supported returns nil if the system supports Git source +func Supported() error { + if err := exec.Command("git", "version").Run(); err != nil { + return errors.Wrap(err, "failed to find git binary") + } + return nil +} + +func NewSource(opt Opt) (source.Source, error) { + gs := &gitSource{ + md: opt.MetadataStore, + cache: opt.CacheAccessor, + locker: locker.New(), + } + return gs, nil +} + +func (gs *gitSource) ID() string { + return source.GitScheme +} + +// needs to be called with repo lock +func (gs *gitSource) mountRemote(ctx context.Context, remote string) (target string, release func(), retErr error) { + remoteKey := "git-remote::" + remote + + sis, err := gs.md.Search(remoteKey) + if err != nil { + return "", nil, errors.Wrapf(err, "failed to search metadata for %s", remote) + } + + var remoteRef cache.MutableRef + for _, si := range sis { + remoteRef, err = gs.cache.GetMutable(ctx, si.ID()) + if err != nil { + if cache.IsLocked(err) { + // should never really happen as no other function should access this metadata, but lets be graceful + logrus.Warnf("mutable ref for %s %s was locked: %v", remote, si.ID(), err) + continue + } + return "", nil, errors.Wrapf(err, "failed to get mutable ref for 
%s", remote)
+		}
+		break
+	}
+
+	initializeRepo := false
+	if remoteRef == nil {
+		remoteRef, err = gs.cache.New(ctx, nil, cache.CachePolicyRetain, cache.WithDescription(fmt.Sprintf("shared git repo for %s", remote)))
+		if err != nil {
+			return "", nil, errors.Wrapf(err, "failed to create new mutable for %s", remote)
+		}
+		initializeRepo = true
+	}
+
+	releaseRemoteRef := func() {
+		remoteRef.Release(context.TODO())
+	}
+
+	defer func() {
+		if retErr != nil && remoteRef != nil {
+			releaseRemoteRef()
+		}
+	}()
+
+	mount, err := remoteRef.Mount(ctx, false)
+	if err != nil {
+		return "", nil, err
+	}
+
+	lm := snapshot.LocalMounter(mount)
+	dir, err := lm.Mount()
+	if err != nil {
+		return "", nil, err
+	}
+
+	defer func() {
+		if retErr != nil {
+			lm.Unmount()
+		}
+	}()
+
+	if initializeRepo {
+		if _, err := gitWithinDir(ctx, dir, "", "init", "--bare"); err != nil {
+			return "", nil, errors.Wrapf(err, "failed to init repo at %s", dir)
+		}
+
+		if _, err := gitWithinDir(ctx, dir, "", "remote", "add", "origin", remote); err != nil {
+			return "", nil, errors.Wrapf(err, "failed to add origin repo at %s", dir)
+		}
+
+		// save new remote metadata
+		si, _ := gs.md.Get(remoteRef.ID())
+		v, err := metadata.NewValue(remoteKey)
+		if err != nil {
+			return "", nil, err
+		}
+		v.Index = remoteKey
+
+		if err := si.Update(func(b *bolt.Bucket) error {
+			return si.SetValue(b, "git-remote", v)
+		}); err != nil {
+			return "", nil, err
+		}
+	}
+	return dir, func() {
+		lm.Unmount()
+		releaseRemoteRef()
+	}, nil
+}
+
+type gitSourceHandler struct {
+	*gitSource
+	src      source.GitIdentifier
+	cacheKey string
+}
+
+func (gs *gitSourceHandler) shaToCacheKey(sha string) string {
+	key := sha
+	if gs.src.KeepGitDir {
+		key += ".git"
+	}
+	return key
+}
+
+func (gs *gitSource) Resolve(ctx context.Context, id source.Identifier, _ *session.Manager) (source.SourceInstance, error) {
+	gitIdentifier, ok := id.(*source.GitIdentifier)
+	if !ok {
+		return nil, errors.Errorf("invalid git identifier %v", id)
+	}
+
+	return &gitSourceHandler{
+		src:       *gitIdentifier,
+		gitSource: gs,
+	}, nil
+}
+
+func (gs *gitSourceHandler) CacheKey(ctx context.Context, index int) (string, bool, error) {
+	remote := gs.src.Remote
+	ref := gs.src.Ref
+	if ref == "" {
+		ref = "master"
+	}
+	gs.locker.Lock(remote)
+	defer gs.locker.Unlock(remote)
+
+	if isCommitSHA(ref) {
+		ref = gs.shaToCacheKey(ref)
+		gs.cacheKey = ref
+		return ref, true, nil
+	}
+
+	gitDir, unmountGitDir, err := gs.mountRemote(ctx, remote)
+	if err != nil {
+		return "", false, err
+	}
+	defer unmountGitDir()
+
+	// TODO: should we assume that remote tag is immutable? add a timer?
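+
+	// Note: `git ls-remote origin <ref>` prints matches as
+	//
+	//	<commit-sha><TAB><ref-name>
+	//
+	// so the code below takes everything before the first tab as the SHA.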
+ + buf, err := gitWithinDir(ctx, gitDir, "", "ls-remote", "origin", ref) + if err != nil { + return "", false, errors.Wrapf(err, "failed to fetch remote %s", remote) + } + out := buf.String() + idx := strings.Index(out, "\t") + if idx == -1 { + return "", false, errors.Errorf("failed to find commit SHA from output: %s", string(out)) + } + + sha := string(out[:idx]) + if !isCommitSHA(sha) { + return "", false, errors.Errorf("invalid commit sha %q", sha) + } + sha = gs.shaToCacheKey(sha) + gs.cacheKey = sha + return sha, true, nil +} + +func (gs *gitSourceHandler) Snapshot(ctx context.Context) (out cache.ImmutableRef, retErr error) { + ref := gs.src.Ref + if ref == "" { + ref = "master" + } + + cacheKey := gs.cacheKey + if cacheKey == "" { + var err error + cacheKey, _, err = gs.CacheKey(ctx, 0) + if err != nil { + return nil, err + } + } + + snapshotKey := "git-snapshot::" + cacheKey + ":" + gs.src.Subdir + gs.locker.Lock(snapshotKey) + defer gs.locker.Unlock(snapshotKey) + + sis, err := gs.md.Search(snapshotKey) + if err != nil { + return nil, errors.Wrapf(err, "failed to search metadata for %s", snapshotKey) + } + if len(sis) > 0 { + return gs.cache.Get(ctx, sis[0].ID()) + } + + gs.locker.Lock(gs.src.Remote) + defer gs.locker.Unlock(gs.src.Remote) + gitDir, unmountGitDir, err := gs.mountRemote(ctx, gs.src.Remote) + if err != nil { + return nil, err + } + defer unmountGitDir() + + doFetch := true + if isCommitSHA(ref) { + // skip fetch if commit already exists + if _, err := gitWithinDir(ctx, gitDir, "", "cat-file", "-e", ref+"^{commit}"); err == nil { + doFetch = false + } + } + + if doFetch { + // make sure no old lock files have leaked + os.RemoveAll(filepath.Join(gitDir, "shallow.lock")) + + args := []string{"fetch"} + if !isCommitSHA(ref) { // TODO: find a branch from ls-remote? + args = append(args, "--depth=1", "--no-tags") + } else { + if _, err := os.Lstat(filepath.Join(gitDir, "shallow")); err == nil { + args = append(args, "--unshallow") + } + } + args = append(args, "origin") + if !isCommitSHA(ref) { + args = append(args, "--force", ref+":tags/"+ref) + // local refs are needed so they would be advertised on next fetches. Force is used + // in case the ref is a branch and it now points to a different commit sha + // TODO: is there a better way to do this? 
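+			// e.g. for a branch ref "mybranch", args assemble to:
+			//
+			//	fetch --depth=1 --no-tags origin --force mybranch:tags/mybranch
+			//
+			// (gitWithinDir prepends --git-dir before running git)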
+ } + if _, err := gitWithinDir(ctx, gitDir, "", args...); err != nil { + return nil, errors.Wrapf(err, "failed to fetch remote %s", gs.src.Remote) + } + } + + checkoutRef, err := gs.cache.New(ctx, nil, cache.WithRecordType(client.UsageRecordTypeGitCheckout), cache.WithDescription(fmt.Sprintf("git snapshot for %s#%s", gs.src.Remote, ref))) + if err != nil { + return nil, errors.Wrapf(err, "failed to create new mutable for %s", gs.src.Remote) + } + + defer func() { + if retErr != nil && checkoutRef != nil { + checkoutRef.Release(context.TODO()) + } + }() + + mount, err := checkoutRef.Mount(ctx, false) + if err != nil { + return nil, err + } + lm := snapshot.LocalMounter(mount) + checkoutDir, err := lm.Mount() + if err != nil { + return nil, err + } + defer func() { + if retErr != nil && lm != nil { + lm.Unmount() + } + }() + + if gs.src.KeepGitDir { + checkoutDirGit := filepath.Join(checkoutDir, ".git") + if err := os.MkdirAll(checkoutDir, 0711); err != nil { + return nil, err + } + _, err = gitWithinDir(ctx, checkoutDirGit, "", "init") + if err != nil { + return nil, err + } + _, err = gitWithinDir(ctx, checkoutDirGit, "", "remote", "add", "origin", gitDir) + if err != nil { + return nil, err + } + pullref := ref + if isCommitSHA(ref) { + pullref = "refs/buildkit/" + identity.NewID() + _, err = gitWithinDir(ctx, gitDir, "", "update-ref", pullref, ref) + if err != nil { + return nil, err + } + } else { + pullref += ":" + pullref + } + _, err = gitWithinDir(ctx, checkoutDirGit, "", "fetch", "-u", "--depth=1", "origin", pullref) + if err != nil { + return nil, err + } + _, err = gitWithinDir(ctx, checkoutDirGit, checkoutDir, "checkout", "FETCH_HEAD") + if err != nil { + return nil, errors.Wrapf(err, "failed to checkout remote %s", gs.src.Remote) + } + gitDir = checkoutDirGit + } else { + _, err = gitWithinDir(ctx, gitDir, checkoutDir, "checkout", ref, "--", ".") + if err != nil { + return nil, errors.Wrapf(err, "failed to checkout remote %s", gs.src.Remote) + } + } + + _, err = gitWithinDir(ctx, gitDir, checkoutDir, "submodule", "update", "--init", "--recursive", "--depth=1") + if err != nil { + return nil, errors.Wrapf(err, "failed to update submodules for %s", gs.src.Remote) + } + + if idmap := mount.IdentityMapping(); idmap != nil { + u := idmap.RootPair() + err := filepath.Walk(gitDir, func(p string, f os.FileInfo, err error) error { + return os.Lchown(p, u.UID, u.GID) + }) + if err != nil { + return nil, errors.Wrap(err, "failed to remap git checkout") + } + } + + lm.Unmount() + lm = nil + + snap, err := checkoutRef.Commit(ctx) + if err != nil { + return nil, err + } + checkoutRef = nil + + defer func() { + if retErr != nil { + snap.Release(context.TODO()) + } + }() + + si, _ := gs.md.Get(snap.ID()) + v, err := metadata.NewValue(snapshotKey) + v.Index = snapshotKey + if err != nil { + return nil, err + } + if err := si.Update(func(b *bolt.Bucket) error { + return si.SetValue(b, "git-snapshot", v) + }); err != nil { + return nil, err + } + + return snap, nil +} + +func isCommitSHA(str string) bool { + return validHex.MatchString(str) +} + +func gitWithinDir(ctx context.Context, gitDir, workDir string, args ...string) (*bytes.Buffer, error) { + a := []string{"--git-dir", gitDir} + if workDir != "" { + a = append(a, "--work-tree", workDir) + } + return git(ctx, workDir, append(a, args...)...) 
+} + +func git(ctx context.Context, dir string, args ...string) (*bytes.Buffer, error) { + for { + stdout, stderr := logs.NewLogStreams(ctx, false) + defer stdout.Close() + defer stderr.Close() + cmd := exec.Command("git", args...) + cmd.Dir = dir // some commands like submodule require this + buf := bytes.NewBuffer(nil) + errbuf := bytes.NewBuffer(nil) + cmd.Stdout = io.MultiWriter(stdout, buf) + cmd.Stderr = io.MultiWriter(stderr, errbuf) + // remote git commands spawn helper processes that inherit FDs and don't + // handle parent death signal so exec.CommandContext can't be used + err := runProcessGroup(ctx, cmd) + if err != nil { + if strings.Contains(errbuf.String(), "--depth") || strings.Contains(errbuf.String(), "shallow") { + if newArgs := argsNoDepth(args); len(args) > len(newArgs) { + args = newArgs + continue + } + } + } + return buf, err + } +} + +func argsNoDepth(args []string) []string { + out := make([]string, 0, len(args)) + for _, a := range args { + if a != "--depth=1" { + out = append(out, a) + } + } + return out +} diff --git a/vendor/github.com/moby/buildkit/source/git/gitsource_unix.go b/vendor/github.com/moby/buildkit/source/git/gitsource_unix.go new file mode 100644 index 00000000..5d35dd9c --- /dev/null +++ b/vendor/github.com/moby/buildkit/source/git/gitsource_unix.go @@ -0,0 +1,35 @@ +// +build !windows + +package git + +import ( + "context" + "os/exec" + "syscall" + "time" +) + +func runProcessGroup(ctx context.Context, cmd *exec.Cmd) error { + cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true} + if err := cmd.Start(); err != nil { + return err + } + waitDone := make(chan struct{}) + go func() { + select { + case <-ctx.Done(): + syscall.Kill(-cmd.Process.Pid, syscall.SIGTERM) + go func() { + select { + case <-waitDone: + case <-time.After(10 * time.Second): + syscall.Kill(-cmd.Process.Pid, syscall.SIGKILL) + } + }() + case <-waitDone: + } + }() + err := cmd.Wait() + close(waitDone) + return err +} diff --git a/vendor/github.com/moby/buildkit/source/git/gitsource_windows.go b/vendor/github.com/moby/buildkit/source/git/gitsource_windows.go new file mode 100644 index 00000000..3435c8f9 --- /dev/null +++ b/vendor/github.com/moby/buildkit/source/git/gitsource_windows.go @@ -0,0 +1,23 @@ +// +build windows + +package git + +import ( + "context" + "os/exec" +) + +func runProcessGroup(ctx context.Context, cmd *exec.Cmd) error { + if err := cmd.Start(); err != nil { + return err + } + waitDone := make(chan struct{}) + go func() { + select { + case <-ctx.Done(): + cmd.Process.Kill() + case <-waitDone: + } + }() + return cmd.Wait() +} diff --git a/vendor/github.com/moby/buildkit/source/gitidentifier.go b/vendor/github.com/moby/buildkit/source/gitidentifier.go new file mode 100644 index 00000000..9f338343 --- /dev/null +++ b/vendor/github.com/moby/buildkit/source/gitidentifier.go @@ -0,0 +1,70 @@ +package source + +import ( + "net/url" + "strings" + + "github.com/pkg/errors" +) + +type GitIdentifier struct { + Remote string + Ref string + Subdir string + KeepGitDir bool +} + +func NewGitIdentifier(remoteURL string) (*GitIdentifier, error) { + repo := GitIdentifier{} + + if !isGitTransport(remoteURL) { + remoteURL = "https://" + remoteURL + } + + var fragment string + if strings.HasPrefix(remoteURL, "git@") { + // git@.. 
is not a URL, so it cannot be parsed as one
+		parts := strings.SplitN(remoteURL, "#", 2)
+
+		repo.Remote = parts[0]
+		if len(parts) == 2 {
+			fragment = parts[1]
+		}
+		repo.Ref, repo.Subdir = getRefAndSubdir(fragment)
+	} else {
+		u, err := url.Parse(remoteURL)
+		if err != nil {
+			return nil, err
+		}
+
+		repo.Ref, repo.Subdir = getRefAndSubdir(u.Fragment)
+		u.Fragment = ""
+		repo.Remote = u.String()
+	}
+	if repo.Subdir != "" {
+		return nil, errors.Errorf("subdir not supported yet")
+	}
+	return &repo, nil
+}
+
+func (i *GitIdentifier) ID() string {
+	return "git"
+}
+
+// isGitTransport returns true if the provided str is a git transport by inspecting
+// the prefix of the string for known protocols used in git.
+func isGitTransport(str string) bool {
+	return strings.HasPrefix(str, "http://") || strings.HasPrefix(str, "https://") || strings.HasPrefix(str, "git://") || strings.HasPrefix(str, "git@")
+}
+
+func getRefAndSubdir(fragment string) (ref string, subdir string) {
+	refAndDir := strings.SplitN(fragment, ":", 2)
+	ref = "master"
+	if len(refAndDir[0]) != 0 {
+		ref = refAndDir[0]
+	}
+	if len(refAndDir) > 1 && len(refAndDir[1]) != 0 {
+		subdir = refAndDir[1]
+	}
+	return
+}
diff --git a/vendor/github.com/moby/buildkit/source/http/httpsource.go b/vendor/github.com/moby/buildkit/source/http/httpsource.go
new file mode 100644
index 00000000..9bde6195
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/source/http/httpsource.go
@@ -0,0 +1,500 @@
+package http
+
+import (
+	"context"
+	"crypto/sha256"
+	"encoding/json"
+	"fmt"
+	"io"
+	"mime"
+	"net/http"
+	"net/url"
+	"os"
+	"path"
+	"path/filepath"
+	"strings"
+	"time"
+
+	"github.com/docker/docker/pkg/idtools"
+	"github.com/docker/docker/pkg/locker"
+	"github.com/moby/buildkit/cache"
+	"github.com/moby/buildkit/cache/metadata"
+	"github.com/moby/buildkit/session"
+	"github.com/moby/buildkit/snapshot"
+	"github.com/moby/buildkit/source"
+	"github.com/moby/buildkit/util/tracing"
+	digest "github.com/opencontainers/go-digest"
+	"github.com/pkg/errors"
+	bolt "go.etcd.io/bbolt"
+)
+
+type Opt struct {
+	CacheAccessor cache.Accessor
+	MetadataStore *metadata.Store
+	Transport     http.RoundTripper
+}
+
+type httpSource struct {
+	md        *metadata.Store
+	cache     cache.Accessor
+	locker    *locker.Locker
+	transport http.RoundTripper
+}
+
+func NewSource(opt Opt) (source.Source, error) {
+	transport := opt.Transport
+	if transport == nil {
+		transport = tracing.DefaultTransport
+	}
+	hs := &httpSource{
+		md:        opt.MetadataStore,
+		cache:     opt.CacheAccessor,
+		locker:    locker.New(),
+		transport: transport,
+	}
+	return hs, nil
+}
+
+func (hs *httpSource) ID() string {
+	return source.HttpsScheme
+}
+
+type httpSourceHandler struct {
+	*httpSource
+	src      source.HttpIdentifier
+	refID    string
+	cacheKey digest.Digest
+	client   *http.Client
+}
+
+func (hs *httpSource) Resolve(ctx context.Context, id source.Identifier, sm *session.Manager) (source.SourceInstance, error) {
+	httpIdentifier, ok := id.(*source.HttpIdentifier)
+	if !ok {
+		return nil, errors.Errorf("invalid http identifier %v", id)
+	}
+
+	sessionID := session.FromContext(ctx)
+
+	return &httpSourceHandler{
+		src:        *httpIdentifier,
+		httpSource: hs,
+		client:     &http.Client{Transport: newTransport(hs.transport, sm, sessionID)},
+	}, nil
+}
+
+// urlHash is an internal hash that the etag is stored by; it doesn't leak
+// outside this package.
+func (hs *httpSourceHandler) urlHash() (digest.Digest, error) { + dt, err := json.Marshal(struct { + Filename string + Perm, UID, GID int + }{ + Filename: getFileName(hs.src.URL, hs.src.Filename, nil), + Perm: hs.src.Perm, + UID: hs.src.UID, + GID: hs.src.GID, + }) + if err != nil { + return "", err + } + return digest.FromBytes(dt), nil +} + +func (hs *httpSourceHandler) formatCacheKey(filename string, dgst digest.Digest, lastModTime string) digest.Digest { + dt, err := json.Marshal(struct { + Filename string + Perm, UID, GID int + Checksum digest.Digest + LastModTime string `json:",omitempty"` + }{ + Filename: filename, + Perm: hs.src.Perm, + UID: hs.src.UID, + GID: hs.src.GID, + Checksum: dgst, + LastModTime: lastModTime, + }) + if err != nil { + return dgst + } + return digest.FromBytes(dt) +} + +func (hs *httpSourceHandler) CacheKey(ctx context.Context, index int) (string, bool, error) { + if hs.src.Checksum != "" { + hs.cacheKey = hs.src.Checksum + return hs.formatCacheKey(getFileName(hs.src.URL, hs.src.Filename, nil), hs.src.Checksum, "").String(), true, nil + } + + uh, err := hs.urlHash() + if err != nil { + return "", false, nil + } + + // look up metadata(previously stored headers) for that URL + sis, err := hs.md.Search(uh.String()) + if err != nil { + return "", false, errors.Wrapf(err, "failed to search metadata for %s", uh) + } + + req, err := http.NewRequest("GET", hs.src.URL, nil) + if err != nil { + return "", false, err + } + req = req.WithContext(ctx) + m := map[string]*metadata.StorageItem{} + + // If we request a single ETag in 'If-None-Match', some servers omit the + // unambiguous ETag in their response. + // See: https://github.com/moby/buildkit/issues/905 + var onlyETag string + + if len(sis) > 0 { + for _, si := range sis { + // if metaDigest := getMetaDigest(si); metaDigest == hs.formatCacheKey("") { + if etag := getETag(si); etag != "" { + if dgst := getChecksum(si); dgst != "" { + m[etag] = si + } + } + // } + } + if len(m) > 0 { + etags := make([]string, 0, len(m)) + for t := range m { + etags = append(etags, t) + } + req.Header.Add("If-None-Match", strings.Join(etags, ", ")) + + if len(etags) == 1 { + onlyETag = etags[0] + } + } + } + + // Some servers seem to have trouble supporting If-None-Match properly even + // though they return ETag-s. So first, optionally try a HEAD request with + // manual ETag value comparison. + if len(m) > 0 { + req.Method = "HEAD" + resp, err := hs.client.Do(req) + if err == nil { + if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusNotModified { + respETag := resp.Header.Get("ETag") + + // If a 304 is returned without an ETag and we had only sent one ETag, + // the response refers to the ETag we asked about. 
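+			// For example, a request sent with
+			//
+			//	If-None-Match: "abc"
+			//
+			// may be answered by such a server with a bare `304 Not Modified`
+			// carrying no ETag header; the single ETag that was sent is then
+			// the one the response refers to.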
+ if respETag == "" && onlyETag != "" && resp.StatusCode == http.StatusNotModified { + respETag = onlyETag + } + si, ok := m[respETag] + if ok { + hs.refID = si.ID() + dgst := getChecksum(si) + if dgst != "" { + modTime := getModTime(si) + resp.Body.Close() + return hs.formatCacheKey(getFileName(hs.src.URL, hs.src.Filename, resp), dgst, modTime).String(), true, nil + } + } + } + resp.Body.Close() + } + req.Method = "GET" + } + + resp, err := hs.client.Do(req) + if err != nil { + return "", false, err + } + if resp.StatusCode < 200 || resp.StatusCode >= 400 { + return "", false, errors.Errorf("invalid response status %d", resp.StatusCode) + } + if resp.StatusCode == http.StatusNotModified { + respETag := resp.Header.Get("ETag") + if respETag == "" && onlyETag != "" { + respETag = onlyETag + + // Set the missing ETag header on the response so that it's available + // to .save() + resp.Header.Set("ETag", onlyETag) + } + si, ok := m[respETag] + if !ok { + return "", false, errors.Errorf("invalid not-modified ETag: %v", respETag) + } + hs.refID = si.ID() + dgst := getChecksum(si) + if dgst == "" { + return "", false, errors.Errorf("invalid metadata change") + } + modTime := getModTime(si) + resp.Body.Close() + return hs.formatCacheKey(getFileName(hs.src.URL, hs.src.Filename, resp), dgst, modTime).String(), true, nil + } + + ref, dgst, err := hs.save(ctx, resp) + if err != nil { + return "", false, err + } + ref.Release(context.TODO()) + + hs.cacheKey = dgst + + return hs.formatCacheKey(getFileName(hs.src.URL, hs.src.Filename, resp), dgst, resp.Header.Get("Last-Modified")).String(), true, nil +} + +func (hs *httpSourceHandler) save(ctx context.Context, resp *http.Response) (ref cache.ImmutableRef, dgst digest.Digest, retErr error) { + newRef, err := hs.cache.New(ctx, nil, cache.CachePolicyRetain, cache.WithDescription(fmt.Sprintf("http url %s", hs.src.URL))) + if err != nil { + return nil, "", err + } + + releaseRef := func() { + newRef.Release(context.TODO()) + } + + defer func() { + if retErr != nil && newRef != nil { + releaseRef() + } + }() + + mount, err := newRef.Mount(ctx, false) + if err != nil { + return nil, "", err + } + + lm := snapshot.LocalMounter(mount) + dir, err := lm.Mount() + if err != nil { + return nil, "", err + } + + defer func() { + if retErr != nil && lm != nil { + lm.Unmount() + } + }() + perm := 0600 + if hs.src.Perm != 0 { + perm = hs.src.Perm + } + fp := filepath.Join(dir, getFileName(hs.src.URL, hs.src.Filename, resp)) + + f, err := os.OpenFile(fp, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, os.FileMode(perm)) + if err != nil { + return nil, "", err + } + defer func() { + if f != nil { + f.Close() + } + }() + + h := sha256.New() + + if _, err := io.Copy(io.MultiWriter(f, h), resp.Body); err != nil { + return nil, "", err + } + + if err := f.Close(); err != nil { + return nil, "", err + } + f = nil + + uid := hs.src.UID + gid := hs.src.GID + if idmap := mount.IdentityMapping(); idmap != nil { + identity, err := idmap.ToHost(idtools.Identity{ + UID: int(uid), + GID: int(gid), + }) + if err != nil { + return nil, "", err + } + uid = identity.UID + gid = identity.GID + } + + if gid != 0 || uid != 0 { + if err := os.Chown(fp, uid, gid); err != nil { + return nil, "", err + } + } + + mTime := time.Unix(0, 0) + lastMod := resp.Header.Get("Last-Modified") + if lastMod != "" { + if parsedMTime, err := http.ParseTime(lastMod); err == nil { + mTime = parsedMTime + } + } + + if err := os.Chtimes(fp, mTime, mTime); err != nil { + return nil, "", err + } + + lm.Unmount() + lm = nil + + 
ref, err = newRef.Commit(ctx) + if err != nil { + return nil, "", err + } + newRef = nil + + hs.refID = ref.ID() + dgst = digest.NewDigest(digest.SHA256, h) + + if respETag := resp.Header.Get("ETag"); respETag != "" { + setETag(ref.Metadata(), respETag) + uh, err := hs.urlHash() + if err != nil { + return nil, "", err + } + setChecksum(ref.Metadata(), uh.String(), dgst) + if err := ref.Metadata().Commit(); err != nil { + return nil, "", err + } + } + + if modTime := resp.Header.Get("Last-Modified"); modTime != "" { + setModTime(ref.Metadata(), modTime) + } + + return ref, dgst, nil +} + +func (hs *httpSourceHandler) Snapshot(ctx context.Context) (cache.ImmutableRef, error) { + if hs.refID != "" { + ref, err := hs.cache.Get(ctx, hs.refID) + if err == nil { + return ref, nil + } + } + + req, err := http.NewRequest("GET", hs.src.URL, nil) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + + resp, err := hs.client.Do(req) + if err != nil { + return nil, err + } + + ref, dgst, err := hs.save(ctx, resp) + if err != nil { + return nil, err + } + if dgst != hs.cacheKey { + ref.Release(context.TODO()) + return nil, errors.Errorf("digest mismatch %s: %s", dgst, hs.cacheKey) + } + + return ref, nil +} + +const keyETag = "etag" +const keyChecksum = "http.checksum" +const keyModTime = "http.modtime" + +func setETag(si *metadata.StorageItem, s string) error { + v, err := metadata.NewValue(s) + if err != nil { + return errors.Wrap(err, "failed to create etag value") + } + si.Queue(func(b *bolt.Bucket) error { + return si.SetValue(b, keyETag, v) + }) + return nil +} + +func getETag(si *metadata.StorageItem) string { + v := si.Get(keyETag) + if v == nil { + return "" + } + var etag string + if err := v.Unmarshal(&etag); err != nil { + return "" + } + return etag +} + +func setModTime(si *metadata.StorageItem, s string) error { + v, err := metadata.NewValue(s) + if err != nil { + return errors.Wrap(err, "failed to create modtime value") + } + si.Queue(func(b *bolt.Bucket) error { + return si.SetValue(b, keyModTime, v) + }) + return nil +} + +func getModTime(si *metadata.StorageItem) string { + v := si.Get(keyModTime) + if v == nil { + return "" + } + var modTime string + if err := v.Unmarshal(&modTime); err != nil { + return "" + } + return modTime +} + +func setChecksum(si *metadata.StorageItem, url string, d digest.Digest) error { + v, err := metadata.NewValue(d) + if err != nil { + return errors.Wrap(err, "failed to create checksum value") + } + v.Index = url + si.Queue(func(b *bolt.Bucket) error { + return si.SetValue(b, keyChecksum, v) + }) + return nil +} + +func getChecksum(si *metadata.StorageItem) digest.Digest { + v := si.Get(keyChecksum) + if v == nil { + return "" + } + var dgstStr string + if err := v.Unmarshal(&dgstStr); err != nil { + return "" + } + dgst, err := digest.Parse(dgstStr) + if err != nil { + return "" + } + return dgst +} + +func getFileName(urlStr, manualFilename string, resp *http.Response) string { + if manualFilename != "" { + return manualFilename + } + if resp != nil { + if contentDisposition := resp.Header.Get("Content-Disposition"); contentDisposition != "" { + if _, params, err := mime.ParseMediaType(contentDisposition); err == nil { + if params["filename"] != "" && !strings.HasSuffix(params["filename"], "/") { + if filename := filepath.Base(filepath.FromSlash(params["filename"])); filename != "" { + return filename + } + } + } + } + } + u, err := url.Parse(urlStr) + if err == nil { + if base := path.Base(u.Path); base != "." 
&& base != "/" { + return base + } + } + return "download" +} diff --git a/vendor/github.com/moby/buildkit/source/http/transport.go b/vendor/github.com/moby/buildkit/source/http/transport.go new file mode 100644 index 00000000..0ce89b78 --- /dev/null +++ b/vendor/github.com/moby/buildkit/source/http/transport.go @@ -0,0 +1,60 @@ +package http + +import ( + "context" + "io" + "net/http" + "time" + + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/session/upload" + "github.com/pkg/errors" +) + +func newTransport(rt http.RoundTripper, sm *session.Manager, id string) http.RoundTripper { + return &sessionHandler{rt: rt, sm: sm, id: id} +} + +type sessionHandler struct { + sm *session.Manager + rt http.RoundTripper + id string +} + +func (h *sessionHandler) RoundTrip(req *http.Request) (*http.Response, error) { + if req.URL.Host != "buildkit-session" { + return h.rt.RoundTrip(req) + } + + if req.Method != "GET" { + return nil, errors.Errorf("invalid request") + } + + timeoutCtx, cancel := context.WithTimeout(context.TODO(), 5*time.Second) + defer cancel() + + caller, err := h.sm.Get(timeoutCtx, h.id) + if err != nil { + return nil, err + } + + up, err := upload.New(context.TODO(), caller, req.URL) + if err != nil { + return nil, err + } + + pr, pw := io.Pipe() + go func() { + _, err := up.WriteTo(pw) + pw.CloseWithError(err) + }() + + resp := &http.Response{ + Status: "200 OK", + StatusCode: 200, + Body: pr, + ContentLength: -1, + } + + return resp, nil +} diff --git a/vendor/github.com/moby/buildkit/source/identifier.go b/vendor/github.com/moby/buildkit/source/identifier.go new file mode 100644 index 00000000..2a861109 --- /dev/null +++ b/vendor/github.com/moby/buildkit/source/identifier.go @@ -0,0 +1,275 @@ +package source + +import ( + "encoding/json" + "strconv" + "strings" + + "github.com/containerd/containerd/reference" + "github.com/moby/buildkit/client" + "github.com/moby/buildkit/solver/pb" + digest "github.com/opencontainers/go-digest" + specs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +var ( + errInvalid = errors.New("invalid") + errNotFound = errors.New("not found") +) + +type ResolveMode int + +const ( + ResolveModeDefault ResolveMode = iota + ResolveModeForcePull + ResolveModePreferLocal +) + +const ( + DockerImageScheme = "docker-image" + GitScheme = "git" + LocalScheme = "local" + HttpScheme = "http" + HttpsScheme = "https" +) + +type Identifier interface { + ID() string // until sources are in process this string comparison could be avoided +} + +func FromString(s string) (Identifier, error) { + // TODO: improve this + parts := strings.SplitN(s, "://", 2) + if len(parts) != 2 { + return nil, errors.Wrapf(errInvalid, "failed to parse %s", s) + } + + switch parts[0] { + case DockerImageScheme: + return NewImageIdentifier(parts[1]) + case GitScheme: + return NewGitIdentifier(parts[1]) + case LocalScheme: + return NewLocalIdentifier(parts[1]) + case HttpsScheme: + return NewHttpIdentifier(parts[1], true) + case HttpScheme: + return NewHttpIdentifier(parts[1], false) + default: + return nil, errors.Wrapf(errNotFound, "unknown schema %s", parts[0]) + } +} + +func FromLLB(op *pb.Op_Source, platform *pb.Platform) (Identifier, error) { + id, err := FromString(op.Source.Identifier) + if err != nil { + return nil, err + } + + if id, ok := id.(*ImageIdentifier); ok { + if platform != nil { + id.Platform = &specs.Platform{ + OS: platform.OS, + Architecture: platform.Architecture, + Variant: platform.Variant, + OSVersion: 
platform.OSVersion, + OSFeatures: platform.OSFeatures, + } + } + for k, v := range op.Source.Attrs { + switch k { + case pb.AttrImageResolveMode: + rm, err := ParseImageResolveMode(v) + if err != nil { + return nil, err + } + id.ResolveMode = rm + case pb.AttrImageRecordType: + rt, err := parseImageRecordType(v) + if err != nil { + return nil, err + } + id.RecordType = rt + } + } + } + if id, ok := id.(*GitIdentifier); ok { + for k, v := range op.Source.Attrs { + switch k { + case pb.AttrKeepGitDir: + if v == "true" { + id.KeepGitDir = true + } + case pb.AttrFullRemoteURL: + id.Remote = v + } + } + } + if id, ok := id.(*LocalIdentifier); ok { + for k, v := range op.Source.Attrs { + switch k { + case pb.AttrLocalSessionID: + id.SessionID = v + if p := strings.SplitN(v, ":", 2); len(p) == 2 { + id.Name = p[0] + "-" + id.Name + id.SessionID = p[1] + } + case pb.AttrIncludePatterns: + var patterns []string + if err := json.Unmarshal([]byte(v), &patterns); err != nil { + return nil, err + } + id.IncludePatterns = patterns + case pb.AttrExcludePatterns: + var patterns []string + if err := json.Unmarshal([]byte(v), &patterns); err != nil { + return nil, err + } + id.ExcludePatterns = patterns + case pb.AttrFollowPaths: + var paths []string + if err := json.Unmarshal([]byte(v), &paths); err != nil { + return nil, err + } + id.FollowPaths = paths + case pb.AttrSharedKeyHint: + id.SharedKeyHint = v + } + } + } + if id, ok := id.(*HttpIdentifier); ok { + for k, v := range op.Source.Attrs { + switch k { + case pb.AttrHTTPChecksum: + dgst, err := digest.Parse(v) + if err != nil { + return nil, err + } + id.Checksum = dgst + case pb.AttrHTTPFilename: + id.Filename = v + case pb.AttrHTTPPerm: + i, err := strconv.ParseInt(v, 0, 64) + if err != nil { + return nil, err + } + id.Perm = int(i) + case pb.AttrHTTPUID: + i, err := strconv.ParseInt(v, 0, 64) + if err != nil { + return nil, err + } + id.UID = int(i) + case pb.AttrHTTPGID: + i, err := strconv.ParseInt(v, 0, 64) + if err != nil { + return nil, err + } + id.GID = int(i) + } + } + } + return id, nil +} + +type ImageIdentifier struct { + Reference reference.Spec + Platform *specs.Platform + ResolveMode ResolveMode + RecordType client.UsageRecordType +} + +func NewImageIdentifier(str string) (*ImageIdentifier, error) { + ref, err := reference.Parse(str) + if err != nil { + return nil, errors.WithStack(err) + } + + if ref.Object == "" { + return nil, errors.WithStack(reference.ErrObjectRequired) + } + return &ImageIdentifier{Reference: ref}, nil +} + +func (_ *ImageIdentifier) ID() string { + return DockerImageScheme +} + +type LocalIdentifier struct { + Name string + SessionID string + IncludePatterns []string + ExcludePatterns []string + FollowPaths []string + SharedKeyHint string +} + +func NewLocalIdentifier(str string) (*LocalIdentifier, error) { + return &LocalIdentifier{Name: str}, nil +} + +func (*LocalIdentifier) ID() string { + return LocalScheme +} + +func NewHttpIdentifier(str string, tls bool) (*HttpIdentifier, error) { + proto := "https://" + if !tls { + proto = "http://" + } + return &HttpIdentifier{TLS: tls, URL: proto + str}, nil +} + +type HttpIdentifier struct { + TLS bool + URL string + Checksum digest.Digest + Filename string + Perm int + UID int + GID int +} + +func (_ *HttpIdentifier) ID() string { + return HttpsScheme +} + +func (r ResolveMode) String() string { + switch r { + case ResolveModeDefault: + return pb.AttrImageResolveModeDefault + case ResolveModeForcePull: + return pb.AttrImageResolveModeForcePull + case 
ResolveModePreferLocal: + return pb.AttrImageResolveModePreferLocal + default: + return "" + } +} + +func ParseImageResolveMode(v string) (ResolveMode, error) { + switch v { + case pb.AttrImageResolveModeDefault, "": + return ResolveModeDefault, nil + case pb.AttrImageResolveModeForcePull: + return ResolveModeForcePull, nil + case pb.AttrImageResolveModePreferLocal: + return ResolveModePreferLocal, nil + default: + return 0, errors.Errorf("invalid resolvemode: %s", v) + } +} + +func parseImageRecordType(v string) (client.UsageRecordType, error) { + switch client.UsageRecordType(v) { + case "", client.UsageRecordTypeRegular: + return client.UsageRecordTypeRegular, nil + case client.UsageRecordTypeInternal: + return client.UsageRecordTypeInternal, nil + case client.UsageRecordTypeFrontend: + return client.UsageRecordTypeFrontend, nil + default: + return "", errors.Errorf("invalid record type %s", v) + } +} diff --git a/vendor/github.com/moby/buildkit/source/local/local.go b/vendor/github.com/moby/buildkit/source/local/local.go new file mode 100644 index 00000000..6fcbcdb4 --- /dev/null +++ b/vendor/github.com/moby/buildkit/source/local/local.go @@ -0,0 +1,279 @@ +package local + +import ( + "context" + "encoding/json" + "fmt" + "time" + + "github.com/docker/docker/pkg/idtools" + "github.com/moby/buildkit/cache" + "github.com/moby/buildkit/cache/contenthash" + "github.com/moby/buildkit/cache/metadata" + "github.com/moby/buildkit/client" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/session/filesync" + "github.com/moby/buildkit/snapshot" + "github.com/moby/buildkit/source" + "github.com/moby/buildkit/util/progress" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/tonistiigi/fsutil" + fstypes "github.com/tonistiigi/fsutil/types" + bolt "go.etcd.io/bbolt" + "golang.org/x/time/rate" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +const keySharedKey = "local.sharedKey" + +type Opt struct { + CacheAccessor cache.Accessor + MetadataStore *metadata.Store +} + +func NewSource(opt Opt) (source.Source, error) { + ls := &localSource{ + cm: opt.CacheAccessor, + md: opt.MetadataStore, + } + return ls, nil +} + +type localSource struct { + cm cache.Accessor + md *metadata.Store +} + +func (ls *localSource) ID() string { + return source.LocalScheme +} + +func (ls *localSource) Resolve(ctx context.Context, id source.Identifier, sm *session.Manager) (source.SourceInstance, error) { + localIdentifier, ok := id.(*source.LocalIdentifier) + if !ok { + return nil, errors.Errorf("invalid local identifier %v", id) + } + + return &localSourceHandler{ + src: *localIdentifier, + sm: sm, + localSource: ls, + }, nil +} + +type localSourceHandler struct { + src source.LocalIdentifier + sm *session.Manager + *localSource +} + +func (ls *localSourceHandler) CacheKey(ctx context.Context, index int) (string, bool, error) { + sessionID := ls.src.SessionID + + if sessionID == "" { + id := session.FromContext(ctx) + if id == "" { + return "", false, errors.New("could not access local files without session") + } + sessionID = id + } + dt, err := json.Marshal(struct { + SessionID string + IncludePatterns []string + ExcludePatterns []string + FollowPaths []string + }{SessionID: sessionID, IncludePatterns: ls.src.IncludePatterns, ExcludePatterns: ls.src.ExcludePatterns, FollowPaths: ls.src.FollowPaths}) + if err != nil { + return "", false, err + } + return "session:" + ls.src.Name + ":" + digest.FromBytes(dt).String(), 
true, nil +} + +func (ls *localSourceHandler) Snapshot(ctx context.Context) (out cache.ImmutableRef, retErr error) { + + id := session.FromContext(ctx) + if id == "" { + return nil, errors.New("could not access local files without session") + } + + timeoutCtx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + + caller, err := ls.sm.Get(timeoutCtx, id) + if err != nil { + return nil, err + } + + sharedKey := keySharedKey + ":" + ls.src.Name + ":" + ls.src.SharedKeyHint + ":" + caller.SharedKey() // TODO: replace caller.SharedKey() with source based hint from client(absolute-path+nodeid) + + var mutable cache.MutableRef + sis, err := ls.md.Search(sharedKey) + if err != nil { + return nil, err + } + for _, si := range sis { + if m, err := ls.cm.GetMutable(ctx, si.ID()); err == nil { + logrus.Debugf("reusing ref for local: %s", m.ID()) + mutable = m + break + } + } + + if mutable == nil { + m, err := ls.cm.New(ctx, nil, cache.CachePolicyRetain, cache.WithRecordType(client.UsageRecordTypeLocalSource), cache.WithDescription(fmt.Sprintf("local source for %s", ls.src.Name))) + if err != nil { + return nil, err + } + mutable = m + logrus.Debugf("new ref for local: %s", mutable.ID()) + } + + defer func() { + if retErr != nil && mutable != nil { + // on error remove the record as checksum update is in undefined state + cache.CachePolicyDefault(mutable) + if err := mutable.Metadata().Commit(); err != nil { + logrus.Errorf("failed to reset mutable cachepolicy: %v", err) + } + contenthash.ClearCacheContext(mutable.Metadata()) + go mutable.Release(context.TODO()) + } + }() + + mount, err := mutable.Mount(ctx, false) + if err != nil { + return nil, err + } + + lm := snapshot.LocalMounter(mount) + + dest, err := lm.Mount() + if err != nil { + return nil, err + } + + defer func() { + if retErr != nil && lm != nil { + lm.Unmount() + } + }() + + cc, err := contenthash.GetCacheContext(ctx, mutable.Metadata(), mount.IdentityMapping()) + if err != nil { + return nil, err + } + + opt := filesync.FSSendRequestOpt{ + Name: ls.src.Name, + IncludePatterns: ls.src.IncludePatterns, + ExcludePatterns: ls.src.ExcludePatterns, + FollowPaths: ls.src.FollowPaths, + OverrideExcludes: false, + DestDir: dest, + CacheUpdater: &cacheUpdater{cc, mount.IdentityMapping()}, + ProgressCb: newProgressHandler(ctx, "transferring "+ls.src.Name+":"), + } + + if idmap := mount.IdentityMapping(); idmap != nil { + opt.Filter = func(p string, stat *fstypes.Stat) bool { + identity, err := idmap.ToHost(idtools.Identity{ + UID: int(stat.Uid), + GID: int(stat.Gid), + }) + if err != nil { + return false + } + stat.Uid = uint32(identity.UID) + stat.Gid = uint32(identity.GID) + return true + } + } + + if err := filesync.FSSync(ctx, caller, opt); err != nil { + if status.Code(err) == codes.NotFound { + return nil, errors.Errorf("local source %s not enabled from the client", ls.src.Name) + } + return nil, err + } + + if err := lm.Unmount(); err != nil { + return nil, err + } + lm = nil + + if err := contenthash.SetCacheContext(ctx, mutable.Metadata(), cc); err != nil { + return nil, err + } + + // skip storing snapshot by the shared key if it already exists + skipStoreSharedKey := false + si, _ := ls.md.Get(mutable.ID()) + if v := si.Get(keySharedKey); v != nil { + var str string + if err := v.Unmarshal(&str); err != nil { + return nil, err + } + skipStoreSharedKey = str == sharedKey + } + if !skipStoreSharedKey { + v, err := metadata.NewValue(sharedKey) + if err != nil { + return nil, err + } + v.Index = sharedKey + if err := 
si.Update(func(b *bolt.Bucket) error { + return si.SetValue(b, sharedKey, v) + }); err != nil { + return nil, err + } + logrus.Debugf("saved %s as %s", mutable.ID(), sharedKey) + } + + snap, err := mutable.Commit(ctx) + if err != nil { + return nil, err + } + + mutable = nil // avoid deferred cleanup + + return snap, nil +} + +func newProgressHandler(ctx context.Context, id string) func(int, bool) { + limiter := rate.NewLimiter(rate.Every(100*time.Millisecond), 1) + pw, _, _ := progress.FromContext(ctx) + now := time.Now() + st := progress.Status{ + Started: &now, + Action: "transferring", + } + pw.Write(id, st) + return func(s int, last bool) { + if last || limiter.Allow() { + st.Current = s + if last { + now := time.Now() + st.Completed = &now + } + pw.Write(id, st) + if last { + pw.Close() + } + } + } +} + +type cacheUpdater struct { + contenthash.CacheContext + idmap *idtools.IdentityMapping +} + +func (cu *cacheUpdater) MarkSupported(bool) { +} + +func (cu *cacheUpdater) ContentHasher() fsutil.ContentHasher { + return contenthash.NewFromStat +} diff --git a/vendor/github.com/moby/buildkit/source/manager.go b/vendor/github.com/moby/buildkit/source/manager.go new file mode 100644 index 00000000..542b57ff --- /dev/null +++ b/vendor/github.com/moby/buildkit/source/manager.go @@ -0,0 +1,49 @@ +package source + +import ( + "context" + "sync" + + "github.com/moby/buildkit/cache" + "github.com/moby/buildkit/session" + "github.com/pkg/errors" +) + +type Source interface { + ID() string + Resolve(ctx context.Context, id Identifier, sm *session.Manager) (SourceInstance, error) +} + +type SourceInstance interface { + CacheKey(ctx context.Context, index int) (string, bool, error) + Snapshot(ctx context.Context) (cache.ImmutableRef, error) +} + +type Manager struct { + mu sync.Mutex + sources map[string]Source +} + +func NewManager() (*Manager, error) { + return &Manager{ + sources: make(map[string]Source), + }, nil +} + +func (sm *Manager) Register(src Source) { + sm.mu.Lock() + sm.sources[src.ID()] = src + sm.mu.Unlock() +} + +func (sm *Manager) Resolve(ctx context.Context, id Identifier, sessM *session.Manager) (SourceInstance, error) { + sm.mu.Lock() + src, ok := sm.sources[id.ID()] + sm.mu.Unlock() + + if !ok { + return nil, errors.Errorf("no handler for %s", id.ID()) + } + + return src.Resolve(ctx, id, sessM) +} diff --git a/vendor/github.com/moby/buildkit/util/apicaps/caps.go b/vendor/github.com/moby/buildkit/util/apicaps/caps.go new file mode 100644 index 00000000..9a661b50 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/apicaps/caps.go @@ -0,0 +1,162 @@ +package apicaps + +import ( + "fmt" + "sort" + "strings" + + pb "github.com/moby/buildkit/util/apicaps/pb" + "github.com/pkg/errors" +) + +type PBCap = pb.APICap + +// ExportedProduct is the name of the product using this package. +// Users vendoring this library may override it to provide better versioning hints +// for their users (or set it with a flag to buildkitd). +var ExportedProduct string + +// CapStatus defines the stability properties of a capability +type CapStatus int + +const ( + // CapStatusStable refers to a capability that should never be changed in + // backwards incompatible manner unless there is a serious security issue. + CapStatusStable CapStatus = iota + // CapStatusExperimental refers to a capability that may be removed in the future. + // If incompatible changes are made the previous ID is disabled and new is added. 
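+	// (In other words, an incompatible revision ships under a brand-new
+	// capability ID, and the old ID is then reported as disabled rather than
+	// silently changing behavior.)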
+ CapStatusExperimental + // CapStatusPrerelease is same as CapStatusExperimental that can be used for new + // features before they move to stable. + CapStatusPrerelease +) + +// CapID is type for capability identifier +type CapID string + +// Cap describes an API feature +type Cap struct { + ID CapID + Name string // readable name, may contain spaces but keep in one sentence + Status CapStatus + Enabled bool + Deprecated bool + SupportedHint map[string]string + DisabledReason string + DisabledReasonMsg string + DisabledAlternative string +} + +// CapList is a collection of capability definitions +type CapList struct { + m map[CapID]Cap +} + +// Init initializes definition for a new capability. +// Not safe to be called concurrently with other methods. +func (l *CapList) Init(cc ...Cap) { + if l.m == nil { + l.m = make(map[CapID]Cap, len(cc)) + } + for _, c := range cc { + l.m[c.ID] = c + } +} + +// All reports the configuration of all known capabilities +func (l *CapList) All() []pb.APICap { + out := make([]pb.APICap, 0, len(l.m)) + for _, c := range l.m { + out = append(out, pb.APICap{ + ID: string(c.ID), + Enabled: c.Enabled, + Deprecated: c.Deprecated, + DisabledReason: c.DisabledReason, + DisabledReasonMsg: c.DisabledReasonMsg, + DisabledAlternative: c.DisabledAlternative, + }) + } + sort.Slice(out, func(i, j int) bool { + return out[i].ID < out[j].ID + }) + return out +} + +// CapSet returns a CapSet for an capability configuration +func (l *CapList) CapSet(caps []pb.APICap) CapSet { + m := make(map[string]*pb.APICap, len(caps)) + for _, c := range caps { + if c.ID != "" { + c := c // capture loop iterator + m[c.ID] = &c + } + } + return CapSet{ + list: l, + set: m, + } +} + +// CapSet is a configuration for detecting supported capabilities +type CapSet struct { + list *CapList + set map[string]*pb.APICap +} + +// Supports returns an error if capability is not supported +func (s *CapSet) Supports(id CapID) error { + err := &CapError{ID: id} + c, ok := s.list.m[id] + if !ok { + return errors.WithStack(err) + } + err.Definition = &c + state, ok := s.set[string(id)] + if !ok { + return errors.WithStack(err) + } + err.State = state + if !state.Enabled { + return errors.WithStack(err) + } + return nil +} + +// CapError is an error for unsupported capability +type CapError struct { + ID CapID + Definition *Cap + State *pb.APICap +} + +func (e CapError) Error() string { + if e.Definition == nil { + return fmt.Sprintf("unknown API capability %s", e.ID) + } + typ := "" + if e.Definition.Status == CapStatusExperimental { + typ = "experimental " + } + if e.Definition.Status == CapStatusPrerelease { + typ = "prerelease " + } + name := "" + if e.Definition.Name != "" { + name = "(" + e.Definition.Name + ")" + } + b := &strings.Builder{} + fmt.Fprintf(b, "requested %sfeature %s %s", typ, e.ID, name) + if e.State == nil { + fmt.Fprint(b, " is not supported by build server") + if hint, ok := e.Definition.SupportedHint[ExportedProduct]; ok { + fmt.Fprintf(b, " (added in %s)", hint) + } + fmt.Fprintf(b, ", please update %s", ExportedProduct) + } else { + fmt.Fprint(b, " has been disabled on the build server") + if e.State.DisabledReasonMsg != "" { + fmt.Fprintf(b, ": %s", e.State.DisabledReasonMsg) + } + } + return b.String() +} diff --git a/vendor/github.com/moby/buildkit/util/apicaps/pb/caps.pb.go b/vendor/github.com/moby/buildkit/util/apicaps/pb/caps.pb.go new file mode 100644 index 00000000..3977e4d6 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/apicaps/pb/caps.pb.go @@ -0,0 +1,570 @@ 
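As an illustrative aside before the generated protobuf code below: a minimal sketch of how a consumer might wire the apicaps package together. The server declares its capabilities in a CapList, ships them to clients as the generated pb.APICap messages, and the client checks a capability before depending on it. The capability ID here is hypothetical, and this snippet is not part of the vendored code.

package main

import (
	"fmt"

	"github.com/moby/buildkit/util/apicaps"
)

func main() {
	// Server side: declare the capabilities this build server implements.
	var caps apicaps.CapList
	caps.Init(apicaps.Cap{
		ID:      apicaps.CapID("example.fancyop"), // hypothetical capability ID
		Name:    "fancy operation",
		Status:  apicaps.CapStatusExperimental,
		Enabled: true,
	})

	// All() yields the []pb.APICap slice that is shipped to clients.
	wire := caps.All()

	// Client side: build a CapSet from the advertised capabilities and check
	// a capability before emitting anything that depends on it.
	set := caps.CapSet(wire)
	if err := set.Supports(apicaps.CapID("example.fancyop")); err != nil {
		fmt.Println("capability unavailable:", err)
		return
	}
	fmt.Println("capability supported")
}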
+// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: caps.proto + +package moby_buildkit_v1_apicaps + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// APICap defines a capability supported by the service +type APICap struct { + ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` + Enabled bool `protobuf:"varint,2,opt,name=Enabled,proto3" json:"Enabled,omitempty"` + Deprecated bool `protobuf:"varint,3,opt,name=Deprecated,proto3" json:"Deprecated,omitempty"` + DisabledReason string `protobuf:"bytes,4,opt,name=DisabledReason,proto3" json:"DisabledReason,omitempty"` + DisabledReasonMsg string `protobuf:"bytes,5,opt,name=DisabledReasonMsg,proto3" json:"DisabledReasonMsg,omitempty"` + DisabledAlternative string `protobuf:"bytes,6,opt,name=DisabledAlternative,proto3" json:"DisabledAlternative,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *APICap) Reset() { *m = APICap{} } +func (m *APICap) String() string { return proto.CompactTextString(m) } +func (*APICap) ProtoMessage() {} +func (*APICap) Descriptor() ([]byte, []int) { + return fileDescriptor_e19c39d9fcb89b83, []int{0} +} +func (m *APICap) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *APICap) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_APICap.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *APICap) XXX_Merge(src proto.Message) { + xxx_messageInfo_APICap.Merge(m, src) +} +func (m *APICap) XXX_Size() int { + return m.Size() +} +func (m *APICap) XXX_DiscardUnknown() { + xxx_messageInfo_APICap.DiscardUnknown(m) +} + +var xxx_messageInfo_APICap proto.InternalMessageInfo + +func (m *APICap) GetID() string { + if m != nil { + return m.ID + } + return "" +} + +func (m *APICap) GetEnabled() bool { + if m != nil { + return m.Enabled + } + return false +} + +func (m *APICap) GetDeprecated() bool { + if m != nil { + return m.Deprecated + } + return false +} + +func (m *APICap) GetDisabledReason() string { + if m != nil { + return m.DisabledReason + } + return "" +} + +func (m *APICap) GetDisabledReasonMsg() string { + if m != nil { + return m.DisabledReasonMsg + } + return "" +} + +func (m *APICap) GetDisabledAlternative() string { + if m != nil { + return m.DisabledAlternative + } + return "" +} + +func init() { + proto.RegisterType((*APICap)(nil), "moby.buildkit.v1.apicaps.APICap") +} + +func init() { proto.RegisterFile("caps.proto", fileDescriptor_e19c39d9fcb89b83) } + +var fileDescriptor_e19c39d9fcb89b83 = []byte{ + // 236 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x4a, 0x4e, 0x2c, 0x28, + 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x92, 0xc8, 0xcd, 0x4f, 0xaa, 0xd4, 0x4b, 0x2a, 0xcd, + 
0xcc, 0x49, 0xc9, 0xce, 0x2c, 0xd1, 0x2b, 0x33, 0xd4, 0x4b, 0x2c, 0xc8, 0x04, 0xc9, 0x4b, 0xe9, + 0xa6, 0x67, 0x96, 0x64, 0x94, 0x26, 0xe9, 0x25, 0xe7, 0xe7, 0xea, 0xa7, 0xe7, 0xa7, 0xe7, 0xeb, + 0x83, 0x35, 0x24, 0x95, 0xa6, 0x81, 0x79, 0x60, 0x0e, 0x98, 0x05, 0x31, 0x48, 0xe9, 0x16, 0x23, + 0x17, 0x9b, 0x63, 0x80, 0xa7, 0x73, 0x62, 0x81, 0x10, 0x1f, 0x17, 0x93, 0xa7, 0x8b, 0x04, 0xa3, + 0x02, 0xa3, 0x06, 0x67, 0x10, 0x93, 0xa7, 0x8b, 0x90, 0x04, 0x17, 0xbb, 0x6b, 0x5e, 0x62, 0x52, + 0x4e, 0x6a, 0x8a, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x47, 0x10, 0x8c, 0x2b, 0x24, 0xc7, 0xc5, 0xe5, + 0x92, 0x5a, 0x50, 0x94, 0x9a, 0x9c, 0x58, 0x92, 0x9a, 0x22, 0xc1, 0x0c, 0x96, 0x44, 0x12, 0x11, + 0x52, 0xe3, 0xe2, 0x73, 0xc9, 0x2c, 0x06, 0xab, 0x0d, 0x4a, 0x4d, 0x2c, 0xce, 0xcf, 0x93, 0x60, + 0x01, 0x9b, 0x8a, 0x26, 0x2a, 0xa4, 0xc3, 0x25, 0x88, 0x2a, 0xe2, 0x5b, 0x9c, 0x2e, 0xc1, 0x0a, + 0x56, 0x8a, 0x29, 0x21, 0x64, 0xc0, 0x25, 0x0c, 0x13, 0x74, 0xcc, 0x29, 0x49, 0x2d, 0xca, 0x4b, + 0x2c, 0xc9, 0x2c, 0x4b, 0x95, 0x60, 0x03, 0xab, 0xc7, 0x26, 0xe5, 0xc4, 0x73, 0xe2, 0x91, 0x1c, + 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0x26, 0xb1, 0x81, 0x7d, 0x6c, 0x0c, 0x08, + 0x00, 0x00, 0xff, 0xff, 0x02, 0x2d, 0x9e, 0x91, 0x48, 0x01, 0x00, 0x00, +} + +func (m *APICap) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *APICap) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *APICap) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.DisabledAlternative) > 0 { + i -= len(m.DisabledAlternative) + copy(dAtA[i:], m.DisabledAlternative) + i = encodeVarintCaps(dAtA, i, uint64(len(m.DisabledAlternative))) + i-- + dAtA[i] = 0x32 + } + if len(m.DisabledReasonMsg) > 0 { + i -= len(m.DisabledReasonMsg) + copy(dAtA[i:], m.DisabledReasonMsg) + i = encodeVarintCaps(dAtA, i, uint64(len(m.DisabledReasonMsg))) + i-- + dAtA[i] = 0x2a + } + if len(m.DisabledReason) > 0 { + i -= len(m.DisabledReason) + copy(dAtA[i:], m.DisabledReason) + i = encodeVarintCaps(dAtA, i, uint64(len(m.DisabledReason))) + i-- + dAtA[i] = 0x22 + } + if m.Deprecated { + i-- + if m.Deprecated { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.Enabled { + i-- + if m.Enabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if len(m.ID) > 0 { + i -= len(m.ID) + copy(dAtA[i:], m.ID) + i = encodeVarintCaps(dAtA, i, uint64(len(m.ID))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintCaps(dAtA []byte, offset int, v uint64) int { + offset -= sovCaps(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *APICap) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovCaps(uint64(l)) + } + if m.Enabled { + n += 2 + } + if m.Deprecated { + n += 2 + } + l = len(m.DisabledReason) + if l > 0 { + n += 1 + l + sovCaps(uint64(l)) + } + l = len(m.DisabledReasonMsg) + if l > 0 { + n += 1 + l + sovCaps(uint64(l)) + } + l = len(m.DisabledAlternative) + if l > 0 { + n += 1 + l + sovCaps(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += 
len(m.XXX_unrecognized) + } + return n +} + +func sovCaps(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozCaps(x uint64) (n int) { + return sovCaps(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *APICap) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCaps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: APICap: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: APICap: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCaps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCaps + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCaps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Enabled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCaps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Enabled = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Deprecated", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCaps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Deprecated = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DisabledReason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCaps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCaps + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCaps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DisabledReason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DisabledReasonMsg", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCaps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCaps + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLengthCaps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DisabledReasonMsg = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DisabledAlternative", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCaps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCaps + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCaps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DisabledAlternative = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCaps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCaps + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthCaps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipCaps(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCaps + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCaps + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCaps + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthCaps + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupCaps + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthCaps + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthCaps = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowCaps = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupCaps = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/moby/buildkit/util/apicaps/pb/caps.proto b/vendor/github.com/moby/buildkit/util/apicaps/pb/caps.proto new file mode 100644 index 00000000..1e8c0651 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/apicaps/pb/caps.proto @@ -0,0 +1,19 @@ +syntax = "proto3"; + +package moby.buildkit.v1.apicaps; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + +option (gogoproto.sizer_all) = true; +option (gogoproto.marshaler_all) = true; +option (gogoproto.unmarshaler_all) = true; + +// APICap defines a capability supported by the service +message APICap { + string ID = 1; + bool Enabled = 2; + bool Deprecated = 3; // Unused. 
May be used for warnings in the future + string DisabledReason = 4; // Reason key for detection code + string DisabledReasonMsg = 5; // Message to the user + string DisabledAlternative = 6; // Identifier that updated client could catch. +} \ No newline at end of file diff --git a/vendor/github.com/moby/buildkit/util/apicaps/pb/generate.go b/vendor/github.com/moby/buildkit/util/apicaps/pb/generate.go new file mode 100644 index 00000000..281dfabd --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/apicaps/pb/generate.go @@ -0,0 +1,3 @@ +package moby_buildkit_v1_apicaps + +//go:generate protoc -I=. -I=../../../vendor/ -I=../../../../../../ --gogo_out=plugins=grpc:. caps.proto diff --git a/vendor/github.com/moby/buildkit/util/appcontext/appcontext.go b/vendor/github.com/moby/buildkit/util/appcontext/appcontext.go new file mode 100644 index 00000000..e74da2cd --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/appcontext/appcontext.go @@ -0,0 +1,41 @@ +package appcontext + +import ( + "context" + "os" + "os/signal" + "sync" + + "github.com/sirupsen/logrus" +) + +var appContextCache context.Context +var appContextOnce sync.Once + +// Context returns a static context that reacts to termination signals of the +// running process. Useful in CLI tools. +func Context() context.Context { + appContextOnce.Do(func() { + signals := make(chan os.Signal, 2048) + signal.Notify(signals, terminationSignals...) + + const exitLimit = 3 + retries := 0 + + ctx, cancel := context.WithCancel(context.Background()) + appContextCache = ctx + + go func() { + for { + <-signals + cancel() + retries++ + if retries >= exitLimit { + logrus.Errorf("got %d SIGTERM/SIGINTs, forcing shutdown", retries) + os.Exit(1) + } + } + }() + }) + return appContextCache +} diff --git a/vendor/github.com/moby/buildkit/util/appcontext/appcontext_unix.go b/vendor/github.com/moby/buildkit/util/appcontext/appcontext_unix.go new file mode 100644 index 00000000..b586e2f6 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/appcontext/appcontext_unix.go @@ -0,0 +1,11 @@ +// +build !windows + +package appcontext + +import ( + "os" + + "golang.org/x/sys/unix" +) + +var terminationSignals = []os.Signal{unix.SIGTERM, unix.SIGINT} diff --git a/vendor/github.com/moby/buildkit/util/appcontext/appcontext_windows.go b/vendor/github.com/moby/buildkit/util/appcontext/appcontext_windows.go new file mode 100644 index 00000000..0a8bcbe7 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/appcontext/appcontext_windows.go @@ -0,0 +1,7 @@ +package appcontext + +import ( + "os" +) + +var terminationSignals = []os.Signal{os.Interrupt} diff --git a/vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_unix.go b/vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_unix.go new file mode 100644 index 00000000..6252147e --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_unix.go @@ -0,0 +1,69 @@ +// +build !windows + +package appdefaults + +import ( + "os" + "path/filepath" + "strings" +) + +const ( + Address = "unix:///run/buildkit/buildkitd.sock" + Root = "/var/lib/buildkit" + ConfigDir = "/etc/buildkit" +) + +// UserAddress typically returns /run/user/$UID/buildkit/buildkitd.sock +func UserAddress() string { + // pam_systemd sets XDG_RUNTIME_DIR but not other dirs. 
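+	// XDG_RUNTIME_DIR may hold a colon-separated list of directories; only the
+	// first entry is used, matching EnsureUserAddressDir below.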
+ xdgRuntimeDir := os.Getenv("XDG_RUNTIME_DIR") + if xdgRuntimeDir != "" { + dirs := strings.Split(xdgRuntimeDir, ":") + return "unix://" + filepath.Join(dirs[0], "buildkit", "buildkitd.sock") + } + return Address +} + +// EnsureUserAddressDir sets sticky bit on XDG_RUNTIME_DIR if XDG_RUNTIME_DIR is set. +// See https://github.com/opencontainers/runc/issues/1694 +func EnsureUserAddressDir() error { + xdgRuntimeDir := os.Getenv("XDG_RUNTIME_DIR") + if xdgRuntimeDir != "" { + dirs := strings.Split(xdgRuntimeDir, ":") + dir := filepath.Join(dirs[0], "buildkit") + if err := os.MkdirAll(dir, 0700); err != nil { + return err + } + return os.Chmod(dir, 0700|os.ModeSticky) + } + return nil +} + +// UserRoot typically returns /home/$USER/.local/share/buildkit +func UserRoot() string { + // pam_systemd sets XDG_RUNTIME_DIR but not other dirs. + xdgDataHome := os.Getenv("XDG_DATA_HOME") + if xdgDataHome != "" { + dirs := strings.Split(xdgDataHome, ":") + return filepath.Join(dirs[0], "buildkit") + } + home := os.Getenv("HOME") + if home != "" { + return filepath.Join(home, ".local", "share", "buildkit") + } + return Root +} + +// UserConfigDir returns dir for storing config. /home/$USER/.config/buildkit/ +func UserConfigDir() string { + xdgConfigHome := os.Getenv("XDG_CONFIG_HOME") + if xdgConfigHome != "" { + return filepath.Join(xdgConfigHome, "buildkit") + } + home := os.Getenv("HOME") + if home != "" { + return filepath.Join(home, ".config", "buildkit") + } + return ConfigDir +} diff --git a/vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_windows.go b/vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_windows.go new file mode 100644 index 00000000..d5d0ca1f --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_windows.go @@ -0,0 +1,31 @@ +package appdefaults + +import ( + "os" + "path/filepath" +) + +const ( + Address = "npipe:////./pipe/buildkitd" +) + +var ( + Root = filepath.Join(os.Getenv("ProgramData"), "buildkitd", ".buildstate") + ConfigDir = filepath.Join(os.Getenv("ProgramData"), "buildkitd") +) + +func UserAddress() string { + return Address +} + +func EnsureUserAddressDir() error { + return nil +} + +func UserRoot() string { + return Root +} + +func UserConfigDir() string { + return ConfigDir +} diff --git a/vendor/github.com/moby/buildkit/util/binfmt_misc/386_binary.go b/vendor/github.com/moby/buildkit/util/binfmt_misc/386_binary.go new file mode 100644 index 00000000..580f152f --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/binfmt_misc/386_binary.go @@ -0,0 +1,8 @@ +// +build !386 + +package binfmt_misc + +// This file is generated by running make inside the binfmt_misc package. +// Do not edit manually. 
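+//
+// Binary386 below is a gzip-compressed, statically linked ELF that simply
+// calls exit(0). check() in check.go unpacks it into a temporary directory
+// and executes it to probe whether the kernel can run i386 binaries, either
+// natively or via a binfmt_misc-registered emulator such as QEMU.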
+ +const Binary386 = "\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xec\xd8\x31\x6e\xc2\x30\x14\x06\xe0\xdf\x8d\xdb\x26\x6a\x07\x1f\x20\xaa\x3a\x74\xe8\x64\xb5\x52\xae\x00\x2c\x88\x8d\x03\x80\x14\xc1\x94\x44\x89\x91\x60\x22\x47\x60\xe0\x20\x8c\x8c\x5c\x80\x13\x70\x19\xf4\xe2\x67\x91\x81\x25\xfb\xfb\xa4\x5f\x16\xcf\xe6\x29\xeb\x7b\xfb\xd1\x74\xac\x94\x42\xf0\x82\x08\xdd\xaf\x83\x8e\x33\x00\x7f\xc6\xd7\x33\x7c\x23\xc2\x2f\x74\xb8\x27\xad\x8e\x29\x27\x00\x14\x4d\x35\x03\x7f\x6f\x7c\x0f\x4a\x02\x80\xf2\xca\x75\x7a\x77\xa4\xb4\x3a\xa6\xa4\x00\x52\xfe\x7f\xc8\x27\xbf\x9f\xcc\xe6\xd4\xef\x42\xb5\xc7\x57\x0a\x21\x84\x10\x42\x08\x21\x84\x10\x62\x88\x33\x0d\xd5\xff\xb7\x6b\x0b\xdb\xac\x1b\x57\xbb\xc5\x12\xb6\x28\x5d\x6e\x57\xc5\xc6\x56\x75\x59\xe5\xb5\xdb\xc1\xba\x7c\xeb\x86\xf4\xfd\x00\xf0\xde\xed\x13\x78\xce\xe7\x19\x3f\xd0\x7c\x7e\xf1\x5c\xff\xc6\x3b\x07\x18\xbf\x2b\x08\x54\xef\x8c\x7a\xf5\xc4\x00\x3f\x4f\xde\xdd\x03\x00\x00\xff\xff\x8d\xf7\xd2\x72\xd0\x10\x00\x00" diff --git a/vendor/github.com/moby/buildkit/util/binfmt_misc/386_check.go b/vendor/github.com/moby/buildkit/util/binfmt_misc/386_check.go new file mode 100644 index 00000000..8137d350 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/binfmt_misc/386_check.go @@ -0,0 +1,7 @@ +// +build !386 + +package binfmt_misc + +func i386Supported() error { + return check(Binary386) +} diff --git a/vendor/github.com/moby/buildkit/util/binfmt_misc/386_check_386.go b/vendor/github.com/moby/buildkit/util/binfmt_misc/386_check_386.go new file mode 100644 index 00000000..2b2ab45b --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/binfmt_misc/386_check_386.go @@ -0,0 +1,7 @@ +// +build 386 + +package binfmt_misc + +func i386Supported() error { + return nil +} diff --git a/vendor/github.com/moby/buildkit/util/binfmt_misc/Dockerfile b/vendor/github.com/moby/buildkit/util/binfmt_misc/Dockerfile new file mode 100644 index 00000000..210355f6 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/binfmt_misc/Dockerfile @@ -0,0 +1,56 @@ +FROM debian:buster-slim AS base +RUN apt-get update && apt-get --no-install-recommends install -y \ + binutils-arm-linux-gnueabihf \ + binutils-aarch64-linux-gnu \ + binutils-x86-64-linux-gnu \ + binutils-i686-linux-gnu \ + binutils-riscv64-linux-gnu \ + binutils-s390x-linux-gnu \ + binutils-powerpc64le-linux-gnu +WORKDIR /src + + +FROM base AS exit-amd64 +COPY fixtures/exit.amd64.s . +RUN x86_64-linux-gnu-as -o exit.o exit.amd64.s && x86_64-linux-gnu-ld -o exit -s exit.o + +FROM base AS exit-386 +COPY fixtures/exit.386.s . +RUN i686-linux-gnu-as -o exit.o exit.386.s && i686-linux-gnu-ld -o exit -s exit.o + +FROM base AS exit-arm64 +COPY fixtures/exit.arm64.s . +RUN aarch64-linux-gnu-as -o exit.o exit.arm64.s && aarch64-linux-gnu-ld -o exit -s exit.o + +FROM base AS exit-arm +COPY fixtures/exit.arm.s . +RUN arm-linux-gnueabihf-as -o exit.o exit.arm.s && arm-linux-gnueabihf-ld -o exit -s exit.o + +FROM base AS exit-riscv64 +COPY fixtures/exit.riscv64.s . +RUN riscv64-linux-gnu-as -o exit.o exit.riscv64.s && riscv64-linux-gnu-ld -o exit -s exit.o + +FROM base AS exit-s390x +COPY fixtures/exit.s390x.s . +RUN s390x-linux-gnu-as -o exit.o exit.s390x.s && s390x-linux-gnu-ld -o exit -s exit.o + +FROM base AS exit-ppc64le +COPY fixtures/exit.ppc64le.s . 
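+# Every exit-<arch> stage follows the same pattern: assemble and link a
+# minimal _exit(0) program using the cross-binutils for that architecture.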
+RUN powerpc64le-linux-gnu-as -o exit.o exit.ppc64le.s && powerpc64le-linux-gnu-ld -o exit -s exit.o + +FROM golang:1.13-alpine AS generate +WORKDIR /src +COPY --from=exit-amd64 /src/exit amd64 +COPY --from=exit-386 /src/exit 386 +COPY --from=exit-arm64 /src/exit arm64 +COPY --from=exit-arm /src/exit arm +COPY --from=exit-riscv64 /src/exit riscv64 +COPY --from=exit-s390x /src/exit s390x +COPY --from=exit-ppc64le /src/exit ppc64le +COPY generate.go . + +RUN go run generate.go amd64 386 arm64 arm riscv64 s390x ppc64le && ls -l + + +FROM scratch +COPY --from=generate /src/*_binary.go / diff --git a/vendor/github.com/moby/buildkit/util/binfmt_misc/Makefile b/vendor/github.com/moby/buildkit/util/binfmt_misc/Makefile new file mode 100644 index 00000000..74a38531 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/binfmt_misc/Makefile @@ -0,0 +1,4 @@ +generate: + buildctl build --frontend dockerfile.v0 --local context=. --local dockerfile=. --output type=local,dest=. + +.PHONY: generate diff --git a/vendor/github.com/moby/buildkit/util/binfmt_misc/amd64_binary.go b/vendor/github.com/moby/buildkit/util/binfmt_misc/amd64_binary.go new file mode 100644 index 00000000..3cafea92 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/binfmt_misc/amd64_binary.go @@ -0,0 +1,8 @@ +// +build !amd64 + +package binfmt_misc + +// This file is generated by running make inside the binfmt_misc package. +// Do not edit manually. + +const Binaryamd64 = "\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xaa\x77\xf5\x71\x63\x62\x64\x64\x80\x01\x26\x06\x3b\x06\x30\x4f\xc0\x01\xcc\x77\x80\x8a\x1b\x08\xc0\x95\x30\x38\x30\x58\x30\x30\x33\x38\x30\xb0\x30\x30\x83\xd5\xb2\x30\x20\x03\x07\x14\x9a\x03\x6a\x34\x8c\x66\x80\x9a\x03\xe2\xb2\x22\xf1\x61\xf6\xc1\x68\x1e\xa8\x30\x8c\x86\xa9\x63\x81\xe2\x17\x50\xe1\x17\x50\x7b\x60\xb4\x02\x54\x1c\x46\x73\x30\x20\xf4\x09\x40\xed\x74\xf7\x0b\x05\xd9\x7f\x80\x05\xea\x8e\x51\x30\x0a\x46\xc1\x28\x18\x05\xa3\x60\x14\x8c\x82\x51\x30\x0a\x46\xc1\x28\x18\x05\xa3\x60\xb8\x03\x8f\xe3\x07\x6c\x40\x94\xe1\x7f\x7e\x56\x06\xbd\xe2\x8c\xe2\x92\xa2\x92\xc4\x24\x06\xbd\xbc\xfc\x92\x54\xbd\xf4\xbc\x52\xbd\x82\xa2\xfc\x82\xd4\xa2\x92\x4a\x06\xbd\x92\xd4\x8a\x12\x8a\xed\xe3\x66\x60\x60\x60\x07\x8f\x33\xa0\xf7\xdf\x51\xfb\xed\x0c\x68\xfd\x77\x18\x90\x83\xf6\xd9\xd9\x18\xd0\xc7\x0d\xd0\xc6\x0b\x18\x10\xe3\x0c\xe8\x7c\x66\x2c\xee\xe2\x81\xea\x57\x21\xa0\x1f\x10\x00\x00\xff\xff\x8a\x1b\xd7\x73\x30\x11\x00\x00" diff --git a/vendor/github.com/moby/buildkit/util/binfmt_misc/amd64_check.go b/vendor/github.com/moby/buildkit/util/binfmt_misc/amd64_check.go new file mode 100644 index 00000000..61913389 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/binfmt_misc/amd64_check.go @@ -0,0 +1,7 @@ +// +build !amd64 + +package binfmt_misc + +func amd64Supported() error { + return check(Binaryamd64) +} diff --git a/vendor/github.com/moby/buildkit/util/binfmt_misc/amd64_check_amd64.go b/vendor/github.com/moby/buildkit/util/binfmt_misc/amd64_check_amd64.go new file mode 100644 index 00000000..e437dfef --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/binfmt_misc/amd64_check_amd64.go @@ -0,0 +1,7 @@ +// +build amd64 + +package binfmt_misc + +func amd64Supported() error { + return nil +} diff --git a/vendor/github.com/moby/buildkit/util/binfmt_misc/arm64_binary.go b/vendor/github.com/moby/buildkit/util/binfmt_misc/arm64_binary.go new file mode 100644 index 00000000..01cdf9a2 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/binfmt_misc/arm64_binary.go @@ -0,0 +1,8 @@ +// +build !arm64 + +package 
binfmt_misc + +// This file is generated by running make inside the binfmt_misc package. +// Do not edit manually. + +const Binaryarm64 = "\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xaa\x77\xf5\x71\x63\x62\x64\x64\x80\x01\x26\x86\xed\x0c\x20\x5e\x05\x83\x03\x98\xef\x00\x15\x9f\xc1\x80\x00\x0e\x0c\x16\x0c\x8c\x0c\x0e\x0c\xcc\x0c\x4c\x60\xb5\xac\x0c\x0c\x28\xb2\xc8\x74\x0b\x94\xd7\x02\x97\x87\xd9\xd5\x70\x69\x05\x77\xc3\x25\x46\x06\x86\x2b\x0c\x7a\xc5\x19\xc5\x25\x45\x25\x89\x49\x0c\x7a\x25\xa9\x15\x25\x0c\x54\x00\xdc\x50\x9b\xd8\xa0\x7c\x98\x7f\x2a\xa0\x7c\x1e\x34\xf5\x2c\x68\x7c\x90\x5e\x66\x2c\xe6\xc2\xfc\x21\x88\x45\x3d\x32\x00\x04\x00\x00\xff\xff\xe7\x30\x54\x02\x58\x01\x00\x00" diff --git a/vendor/github.com/moby/buildkit/util/binfmt_misc/arm64_check.go b/vendor/github.com/moby/buildkit/util/binfmt_misc/arm64_check.go new file mode 100644 index 00000000..334d3142 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/binfmt_misc/arm64_check.go @@ -0,0 +1,7 @@ +// +build !arm64 + +package binfmt_misc + +func arm64Supported() error { + return check(Binaryarm64) +} diff --git a/vendor/github.com/moby/buildkit/util/binfmt_misc/arm64_check_arm64.go b/vendor/github.com/moby/buildkit/util/binfmt_misc/arm64_check_arm64.go new file mode 100644 index 00000000..a5fc6dc6 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/binfmt_misc/arm64_check_arm64.go @@ -0,0 +1,7 @@ +// +build arm64 + +package binfmt_misc + +func arm64Supported() error { + return nil +} diff --git a/vendor/github.com/moby/buildkit/util/binfmt_misc/arm_binary.go b/vendor/github.com/moby/buildkit/util/binfmt_misc/arm_binary.go new file mode 100644 index 00000000..c780578b --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/binfmt_misc/arm_binary.go @@ -0,0 +1,8 @@ +// +build !arm + +package binfmt_misc + +// This file is generated by running make inside the binfmt_misc package. +// Do not edit manually. 
+ +const Binaryarm = "\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\x8c\x8e\x31\x0e\x82\x40\x14\x44\xdf\x17\x50\x13\x6d\xf4\x04\xda\x51\x6d\xc5\x05\x28\xb4\xd2\xc6\x70\x00\x97\x84\x44\x3a\x02\xdf\xc4\xce\x4b\x78\x00\xee\xc6\x01\xbc\x82\x01\x17\xdd\xc2\xc2\x49\x26\x93\x7d\x3b\xc9\x9f\xfb\xee\xb0\x17\x11\x46\x4d\x88\xe9\x5f\x19\x42\x02\x3c\xde\x30\x4a\xd8\x20\xc4\x84\x04\x7c\xdb\x32\xf8\x0c\x83\xa3\x0f\x6b\x3b\xa9\xda\x0e\x78\xa6\x2b\xc0\x16\x36\x2f\x91\x19\x30\x17\x4c\x73\x69\xb4\x56\x9b\x63\xb4\xb8\x29\x26\x3d\x1d\x8d\x55\xad\xcb\xfc\xaa\x45\xc3\xdf\x5a\xb8\x6b\x53\xb7\x37\x03\x96\xde\x7f\xe8\xb2\x9f\x10\x40\x35\xf2\x7e\xeb\xda\xeb\x89\x97\x81\xc7\x6b\x60\xfb\xa3\xf7\x0a\x00\x00\xff\xff\x73\x8f\xca\xf1\x34\x01\x00\x00" diff --git a/vendor/github.com/moby/buildkit/util/binfmt_misc/arm_check.go b/vendor/github.com/moby/buildkit/util/binfmt_misc/arm_check.go new file mode 100644 index 00000000..3a10daaf --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/binfmt_misc/arm_check.go @@ -0,0 +1,7 @@ +// +build !arm + +package binfmt_misc + +func armSupported() error { + return check(Binaryarm) +} diff --git a/vendor/github.com/moby/buildkit/util/binfmt_misc/arm_check_arm.go b/vendor/github.com/moby/buildkit/util/binfmt_misc/arm_check_arm.go new file mode 100644 index 00000000..11cfe616 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/binfmt_misc/arm_check_arm.go @@ -0,0 +1,7 @@ +// +build arm + +package binfmt_misc + +func armSupported() error { + return nil +} diff --git a/vendor/github.com/moby/buildkit/util/binfmt_misc/check.go b/vendor/github.com/moby/buildkit/util/binfmt_misc/check.go new file mode 100644 index 00000000..4caf5944 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/binfmt_misc/check.go @@ -0,0 +1,42 @@ +package binfmt_misc + +import ( + "bytes" + "compress/gzip" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" +) + +func check(bin string) error { + tmpdir, err := ioutil.TempDir("", "qemu-check") + if err != nil { + return err + } + defer os.RemoveAll(tmpdir) + pp := filepath.Join(tmpdir, "check") + + r, err := gzip.NewReader(bytes.NewReader([]byte(bin))) + if err != nil { + return err + } + defer r.Close() + + f, err := os.OpenFile(pp, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0700) + if err != nil { + return err + } + + if _, err := io.Copy(f, r); err != nil { + f.Close() + return err + } + f.Close() + + cmd := exec.Command("/check") + withChroot(cmd, tmpdir) + err = cmd.Run() + return err +} diff --git a/vendor/github.com/moby/buildkit/util/binfmt_misc/check_unix.go b/vendor/github.com/moby/buildkit/util/binfmt_misc/check_unix.go new file mode 100644 index 00000000..22c7ce7c --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/binfmt_misc/check_unix.go @@ -0,0 +1,14 @@ +// +build !windows + +package binfmt_misc + +import ( + "os/exec" + "syscall" +) + +func withChroot(cmd *exec.Cmd, dir string) { + cmd.SysProcAttr = &syscall.SysProcAttr{ + Chroot: dir, + } +} diff --git a/vendor/github.com/moby/buildkit/util/binfmt_misc/check_windows.go b/vendor/github.com/moby/buildkit/util/binfmt_misc/check_windows.go new file mode 100644 index 00000000..3c28c8bd --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/binfmt_misc/check_windows.go @@ -0,0 +1,10 @@ +// +build windows + +package binfmt_misc + +import ( + "os/exec" +) + +func withChroot(cmd *exec.Cmd, dir string) { +} diff --git a/vendor/github.com/moby/buildkit/util/binfmt_misc/detect.go b/vendor/github.com/moby/buildkit/util/binfmt_misc/detect.go new file mode 100644 index 00000000..73eb405c --- /dev/null 
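detect.go, reproduced next, ties the per-architecture probes together. As an illustrative aside (not part of the vendored code), a daemon wanting to advertise emulated platforms might call these helpers roughly as follows; the import paths are the package paths shown in this patch.

package main

import (
	"fmt"

	"github.com/containerd/containerd/platforms"
	"github.com/moby/buildkit/util/binfmt_misc"
)

func main() {
	// List every platform this host can execute, natively or via emulation.
	// Passing noCache=false lets a previously computed result be reused.
	for _, p := range binfmt_misc.SupportedPlatforms(false) {
		fmt.Println("supported:", p)
	}

	// Probe a single platform instead of listing everything.
	if binfmt_misc.Check(platforms.Normalize(platforms.DefaultSpec())) {
		fmt.Println("default platform is runnable")
	}
}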
+++ b/vendor/github.com/moby/buildkit/util/binfmt_misc/detect.go
@@ -0,0 +1,134 @@
+package binfmt_misc
+
+import (
+	"strings"
+	"sync"
+
+	"github.com/containerd/containerd/platforms"
+	specs "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/sirupsen/logrus"
+)
+
+var mu sync.Mutex
+var arr []string
+
+func SupportedPlatforms(noCache bool) []string {
+	mu.Lock()
+	defer mu.Unlock()
+	if !noCache && arr != nil {
+		return arr
+	}
+	def := defaultPlatform()
+	arr = append([]string{}, def)
+	if p := "linux/amd64"; def != p && amd64Supported() == nil {
+		arr = append(arr, p)
+	}
+	if p := "linux/arm64"; def != p && arm64Supported() == nil {
+		arr = append(arr, p)
+	}
+	if p := "linux/riscv64"; def != p && riscv64Supported() == nil {
+		arr = append(arr, p)
+	}
+	if p := "linux/ppc64le"; def != p && ppc64leSupported() == nil {
+		arr = append(arr, p)
+	}
+	if p := "linux/s390x"; def != p && s390xSupported() == nil {
+		arr = append(arr, p)
+	}
+	if p := "linux/386"; def != p && i386Supported() == nil {
+		arr = append(arr, p)
+	}
+	if !strings.HasPrefix(def, "linux/arm/") && armSupported() == nil {
+		arr = append(arr, "linux/arm/v7", "linux/arm/v6")
+	} else if def == "linux/arm/v7" {
+		arr = append(arr, "linux/arm/v6")
+	}
+	return arr
+}
+
+func Check(pp specs.Platform) bool {
+	p := platforms.Format(pp)
+	if p == "linux/amd64" && amd64Supported() == nil {
+		return true
+	}
+	if p == "linux/arm64" && arm64Supported() == nil {
+		return true
+	}
+	if p == "linux/riscv64" && riscv64Supported() == nil {
+		return true
+	}
+	if p == "linux/ppc64le" && ppc64leSupported() == nil {
+		return true
+	}
+	if p == "linux/s390x" && s390xSupported() == nil {
+		return true
+	}
+	if p == "linux/386" && i386Supported() == nil {
+		return true
+	}
+	if !strings.HasPrefix(p, "linux/arm/") && armSupported() == nil {
+		return true
+	}
+
+	return false
+}
+
+// WarnIfUnsupported validates the platforms and prints a warning for each one
+// that fails, so that the end user can fix the underlying issue instead of the
+// platform being dropped from the candidates.
+func WarnIfUnsupported(pfs []string) {
+	def := defaultPlatform()
+	for _, p := range pfs {
+		if p != def {
+			if p == "linux/amd64" {
+				if err := amd64Supported(); err != nil {
+					printPlatformWarning(p, err)
+				}
+			}
+			if p == "linux/arm64" {
+				if err := arm64Supported(); err != nil {
+					printPlatformWarning(p, err)
+				}
+			}
+			if p == "linux/riscv64" {
+				if err := riscv64Supported(); err != nil {
+					printPlatformWarning(p, err)
+				}
+			}
+			if p == "linux/ppc64le" {
+				if err := ppc64leSupported(); err != nil {
+					printPlatformWarning(p, err)
+				}
+			}
+			if p == "linux/s390x" {
+				if err := s390xSupported(); err != nil {
+					printPlatformWarning(p, err)
+				}
+			}
+			if p == "linux/386" {
+				if err := i386Supported(); err != nil {
+					printPlatformWarning(p, err)
+				}
+			}
+			if strings.HasPrefix(p, "linux/arm/v6") || strings.HasPrefix(p, "linux/arm/v7") {
+				if err := armSupported(); err != nil {
+					printPlatformWarning(p, err)
+				}
+			}
+		}
+	}
+}
+
+func defaultPlatform() string {
+	return platforms.Format(platforms.Normalize(platforms.DefaultSpec()))
+}
+
+func printPlatformWarning(p string, err error) {
+	if strings.Contains(err.Error(), "exec format error") {
+		logrus.Warnf("platform %s cannot pass the validation, kernel support for miscellaneous binaries may not be enabled.", p)
+	} else if strings.Contains(err.Error(), "no such file or directory") {
+		logrus.Warnf("platform %s cannot pass the validation, the '-F' flag might not have been set for 'binfmt_misc'.", p)
+	} else {
+		logrus.Warnf("platform %s cannot pass the validation: %s", p, err.Error())
+	}
+}
diff --git a/vendor/github.com/moby/buildkit/util/binfmt_misc/ppc64le_binary.go b/vendor/github.com/moby/buildkit/util/binfmt_misc/ppc64le_binary.go
new file mode 100644
index 00000000..511db714
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/util/binfmt_misc/ppc64le_binary.go
@@ -0,0 +1,8 @@
+// +build !ppc64le
+
+package binfmt_misc
+
+// This file is generated by running make inside the binfmt_misc package.
+// Do not edit manually.
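+//
+// Each Binary<arch> constant below is a gzip-compressed, statically linked
+// test binary for its architecture; check() in check.go unpacks one into a
+// throwaway chroot and runs it, so an "exec format error" signals a missing
+// binfmt_misc handler. A minimal sketch of the detection API defined in
+// detect.go (illustrative only, not part of this package):
+//
+//	supported := binfmt_misc.SupportedPlatforms(false) // e.g. [linux/amd64 linux/arm64 ...]
+//
+//	if p, err := platforms.Parse("linux/arm64"); err == nil && !binfmt_misc.Check(p) {
+//		logrus.Warnf("no binfmt_misc emulator registered for %s", platforms.Format(p))
+//	}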
+ +const Binaryppc64le = "\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xaa\x77\xf5\x71\x63\x62\x64\x64\x80\x01\x26\x06\x51\x06\x10\xaf\x82\x81\x41\x00\xc4\x77\x80\x8a\x2f\x80\xcb\x83\xc4\x2c\x18\x18\x19\x1c\x18\x58\x18\x98\xc1\x6a\x59\x19\x50\x80\x00\x32\xdd\x02\xe5\xb4\xc0\xa5\x19\x61\xa4\x05\x03\x43\x82\x05\x13\x03\x83\x0b\x83\x5e\x71\x46\x71\x49\x51\x49\x62\x12\x83\x5e\x49\x6a\x45\x09\x83\x5e\x6a\x46\x7c\x5a\x51\x62\x6e\x2a\x03\xc5\x80\x1b\x6a\x23\x1b\x94\x0f\xf3\x57\x05\x94\xcf\x83\xa6\x9e\x03\x8d\x2f\x08\xd5\xcf\x84\xf0\x87\x00\xaa\x7f\x50\x01\x0b\x1a\x1f\xa4\x97\x19\x8b\x3a\x98\x7e\x69\x2c\xea\x91\x01\x20\x00\x00\xff\xff\xce\xf7\x15\x75\xa0\x01\x00\x00" diff --git a/vendor/github.com/moby/buildkit/util/binfmt_misc/ppc64le_check.go b/vendor/github.com/moby/buildkit/util/binfmt_misc/ppc64le_check.go new file mode 100644 index 00000000..4d5b3bf8 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/binfmt_misc/ppc64le_check.go @@ -0,0 +1,7 @@ +// +build !ppc64le + +package binfmt_misc + +func ppc64leSupported() error { + return check(Binaryppc64le) +} diff --git a/vendor/github.com/moby/buildkit/util/binfmt_misc/ppc64le_check_ppc64le.go b/vendor/github.com/moby/buildkit/util/binfmt_misc/ppc64le_check_ppc64le.go new file mode 100644 index 00000000..27e4ab8f --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/binfmt_misc/ppc64le_check_ppc64le.go @@ -0,0 +1,7 @@ +// +build ppc64le + +package binfmt_misc + +func ppc64leSupported() error { + return nil +} diff --git a/vendor/github.com/moby/buildkit/util/binfmt_misc/riscv64_binary.go b/vendor/github.com/moby/buildkit/util/binfmt_misc/riscv64_binary.go new file mode 100644 index 00000000..146618e3 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/binfmt_misc/riscv64_binary.go @@ -0,0 +1,8 @@ +// +build !riscv64 + +package binfmt_misc + +// This file is generated by running make inside the binfmt_misc package. +// Do not edit manually. 
+ +const Binaryriscv64 = "\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xaa\x77\xf5\x71\x63\x62\x64\x64\x80\x01\x26\x86\xcf\x0c\x20\x5e\x05\x03\x44\xcc\x01\x2a\x3e\x03\x4a\xb3\x80\xc5\x2c\x18\x18\x19\x1c\x18\x98\x19\x98\xc0\xaa\x58\x19\x90\x01\x23\x0a\xdd\x02\xe5\xc1\x68\x06\x01\x08\x25\xcc\xca\xc0\x30\x99\xe3\x02\x6b\x31\x88\xa3\x57\x9c\x51\x5c\x52\x54\x92\x98\xc4\xa0\x57\x92\x5a\x51\xc2\x40\x05\xc0\x0d\x75\x01\x1b\x94\x0f\xf3\x4f\x05\x94\xcf\x83\xa6\x9e\x05\x8d\x0f\x52\xcd\x8c\xc5\x5c\x98\x3f\x04\xb1\xa8\x47\x06\x80\x00\x00\x00\xff\xff\x39\x41\xdf\xa1\x58\x01\x00\x00" diff --git a/vendor/github.com/moby/buildkit/util/binfmt_misc/riscv64_check.go b/vendor/github.com/moby/buildkit/util/binfmt_misc/riscv64_check.go new file mode 100644 index 00000000..c2f31f7e --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/binfmt_misc/riscv64_check.go @@ -0,0 +1,7 @@ +// +build !riscv64 + +package binfmt_misc + +func riscv64Supported() error { + return check(Binaryriscv64) +} diff --git a/vendor/github.com/moby/buildkit/util/binfmt_misc/riscv64_check_riscv64.go b/vendor/github.com/moby/buildkit/util/binfmt_misc/riscv64_check_riscv64.go new file mode 100644 index 00000000..7ae57360 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/binfmt_misc/riscv64_check_riscv64.go @@ -0,0 +1,7 @@ +// +build riscv64 + +package binfmt_misc + +func riscv64Supported() error { + return nil +} diff --git a/vendor/github.com/moby/buildkit/util/binfmt_misc/s390x_binary.go b/vendor/github.com/moby/buildkit/util/binfmt_misc/s390x_binary.go new file mode 100644 index 00000000..3d34c2e5 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/binfmt_misc/s390x_binary.go @@ -0,0 +1,8 @@ +// +build !s390x + +package binfmt_misc + +// This file is generated by running make inside the binfmt_misc package. +// Do not edit manually. + +const Binarys390x = "\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xaa\x77\xf5\x71\x63\x62\x62\x64\x80\x03\x26\x06\x31\x06\x06\x06\xb0\x00\x23\x03\x43\x05\x54\xd4\x01\x4a\xcf\x80\xf2\x2c\x18\x18\x19\x1c\x18\x98\x19\x98\xa0\x6a\x59\x19\x90\x00\x23\x1a\xcd\xc0\xc0\xd0\x80\x4a\x0b\x30\x2c\xd7\x64\x60\xe0\x62\x64\x67\x67\xd0\x2b\xce\x28\x2e\x29\x2a\x49\x4c\x62\xd0\x2b\x49\xad\x28\x61\xa0\x1e\xe0\x46\x72\x02\x1b\x9a\x7f\x60\x34\x07\x9a\x1e\x16\x34\x6f\x30\xe3\x30\x1b\xe6\x1f\x41\x34\x71\xb8\x97\x01\x01\x00\x00\xff\xff\x0c\x76\x9a\xe1\x58\x01\x00\x00" diff --git a/vendor/github.com/moby/buildkit/util/binfmt_misc/s390x_check.go b/vendor/github.com/moby/buildkit/util/binfmt_misc/s390x_check.go new file mode 100644 index 00000000..1d5b4a08 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/binfmt_misc/s390x_check.go @@ -0,0 +1,7 @@ +// +build !s390x + +package binfmt_misc + +func s390xSupported() error { + return check(Binarys390x) +} diff --git a/vendor/github.com/moby/buildkit/util/binfmt_misc/s390x_check_s390x.go b/vendor/github.com/moby/buildkit/util/binfmt_misc/s390x_check_s390x.go new file mode 100644 index 00000000..92554221 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/binfmt_misc/s390x_check_s390x.go @@ -0,0 +1,7 @@ +// +build s390x + +package binfmt_misc + +func s390xSupported() error { + return nil +} diff --git a/vendor/github.com/moby/buildkit/util/cond/cond.go b/vendor/github.com/moby/buildkit/util/cond/cond.go new file mode 100644 index 00000000..c5e07aec --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/cond/cond.go @@ -0,0 +1,40 @@ +package cond + +import ( + "sync" +) + +// NewStatefulCond returns a stateful version of sync.Cond . 
This cond will +// never block on `Wait()` if `Signal()` has been called after the `Wait()` last +// returned. This is useful for avoiding to take a lock on `cond.Locker` for +// signalling. +func NewStatefulCond(l sync.Locker) *StatefulCond { + sc := &StatefulCond{main: l} + sc.c = sync.NewCond(&sc.mu) + return sc +} + +type StatefulCond struct { + main sync.Locker + mu sync.Mutex + c *sync.Cond + signalled bool +} + +func (s *StatefulCond) Wait() { + s.main.Unlock() + s.mu.Lock() + if !s.signalled { + s.c.Wait() + } + s.signalled = false + s.mu.Unlock() + s.main.Lock() +} + +func (s *StatefulCond) Signal() { + s.mu.Lock() + s.signalled = true + s.c.Signal() + s.mu.Unlock() +} diff --git a/vendor/github.com/moby/buildkit/util/contentutil/buffer.go b/vendor/github.com/moby/buildkit/util/contentutil/buffer.go new file mode 100644 index 00000000..ac8c8baf --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/contentutil/buffer.go @@ -0,0 +1,156 @@ +package contentutil + +import ( + "bytes" + "context" + "io/ioutil" + "sync" + "time" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +// Buffer is a content provider and ingester that keeps data in memory +type Buffer interface { + content.Provider + content.Ingester +} + +// NewBuffer returns a new buffer +func NewBuffer() Buffer { + return &buffer{ + buffers: map[digest.Digest][]byte{}, + refs: map[string]struct{}{}, + } +} + +type buffer struct { + mu sync.Mutex + buffers map[digest.Digest][]byte + refs map[string]struct{} +} + +func (b *buffer) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) { + var wOpts content.WriterOpts + for _, opt := range opts { + if err := opt(&wOpts); err != nil { + return nil, err + } + } + b.mu.Lock() + if _, ok := b.refs[wOpts.Ref]; ok { + return nil, errors.Wrapf(errdefs.ErrUnavailable, "ref %s locked", wOpts.Ref) + } + b.mu.Unlock() + return &bufferedWriter{ + main: b, + digester: digest.Canonical.Digester(), + buffer: bytes.NewBuffer(nil), + expected: wOpts.Desc.Digest, + releaseRef: func() { + b.mu.Lock() + delete(b.refs, wOpts.Ref) + b.mu.Unlock() + }, + }, nil +} + +func (b *buffer) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) { + r, err := b.getBytesReader(ctx, desc.Digest) + if err != nil { + return nil, err + } + return &readerAt{Reader: r, Closer: ioutil.NopCloser(r), size: int64(r.Len())}, nil +} + +func (b *buffer) getBytesReader(ctx context.Context, dgst digest.Digest) (*bytes.Reader, error) { + b.mu.Lock() + defer b.mu.Unlock() + + if dt, ok := b.buffers[dgst]; ok { + return bytes.NewReader(dt), nil + } + + return nil, errors.Wrapf(errdefs.ErrNotFound, "content %v", dgst) +} + +func (b *buffer) addValue(k digest.Digest, dt []byte) { + b.mu.Lock() + defer b.mu.Unlock() + b.buffers[k] = dt +} + +type bufferedWriter struct { + main *buffer + ref string + offset int64 + total int64 + startedAt time.Time + updatedAt time.Time + buffer *bytes.Buffer + expected digest.Digest + digester digest.Digester + releaseRef func() +} + +func (w *bufferedWriter) Write(p []byte) (n int, err error) { + n, err = w.buffer.Write(p) + w.digester.Hash().Write(p[:n]) + w.offset += int64(len(p)) + w.updatedAt = time.Now() + return n, err +} + +func (w *bufferedWriter) Close() error { + if w.buffer != nil { + w.releaseRef() + w.buffer = nil + } + return nil +} + +func (w 
*bufferedWriter) Status() (content.Status, error) { + return content.Status{ + Ref: w.ref, + Offset: w.offset, + Total: w.total, + StartedAt: w.startedAt, + UpdatedAt: w.updatedAt, + }, nil +} + +func (w *bufferedWriter) Digest() digest.Digest { + return w.digester.Digest() +} + +func (w *bufferedWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opt ...content.Opt) error { + if w.buffer == nil { + return errors.Errorf("can't commit already committed or closed") + } + if s := int64(w.buffer.Len()); size > 0 && size != s { + return errors.Errorf("unexpected commit size %d, expected %d", s, size) + } + dgst := w.digester.Digest() + if expected != "" && expected != dgst { + return errors.Errorf("unexpected digest: %v != %v", dgst, expected) + } + if w.expected != "" && w.expected != dgst { + return errors.Errorf("unexpected digest: %v != %v", dgst, w.expected) + } + w.main.addValue(dgst, w.buffer.Bytes()) + return w.Close() +} + +func (w *bufferedWriter) Truncate(size int64) error { + if size != 0 { + return errors.New("Truncate: unsupported size") + } + w.offset = 0 + w.digester.Hash().Reset() + w.buffer.Reset() + return nil +} diff --git a/vendor/github.com/moby/buildkit/util/contentutil/copy.go b/vendor/github.com/moby/buildkit/util/contentutil/copy.go new file mode 100644 index 00000000..060d7a94 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/contentutil/copy.go @@ -0,0 +1,81 @@ +package contentutil + +import ( + "context" + "io" + "sync" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/remotes" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +func Copy(ctx context.Context, ingester content.Ingester, provider content.Provider, desc ocispec.Descriptor) error { + if _, err := remotes.FetchHandler(ingester, &localFetcher{provider})(ctx, desc); err != nil { + return err + } + return nil +} + +type localFetcher struct { + content.Provider +} + +func (f *localFetcher) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) { + r, err := f.Provider.ReaderAt(ctx, desc) + if err != nil { + return nil, err + } + return &rc{ReaderAt: r}, nil +} + +type rc struct { + content.ReaderAt + offset int +} + +func (r *rc) Read(b []byte) (int, error) { + n, err := r.ReadAt(b, int64(r.offset)) + r.offset += n + if n > 0 && err == io.EOF { + err = nil + } + return n, err +} + +func CopyChain(ctx context.Context, ingester content.Ingester, provider content.Provider, desc ocispec.Descriptor) error { + var m sync.Mutex + manifestStack := []ocispec.Descriptor{} + + filterHandler := images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + switch desc.MediaType { + case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest, + images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex: + m.Lock() + manifestStack = append(manifestStack, desc) + m.Unlock() + return nil, images.ErrStopHandler + default: + return nil, nil + } + }) + handlers := []images.Handler{ + images.ChildrenHandler(provider), + filterHandler, + remotes.FetchHandler(ingester, &localFetcher{provider}), + } + + if err := images.Dispatch(ctx, images.Handlers(handlers...), nil, desc); err != nil { + return errors.WithStack(err) + } + + for i := len(manifestStack) - 1; i >= 0; i-- { + if err := Copy(ctx, ingester, provider, manifestStack[i]); err != nil { + return errors.WithStack(err) + } + } + + return nil +} 
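+
+// A sketch of how the in-memory Buffer (buffer.go) and Copy combine; the
+// payload, the ref name, and the error handling are illustrative, and the
+// obvious imports are assumed:
+//
+//	ctx := context.TODO()
+//	src := contentutil.NewBuffer()
+//
+//	blob := []byte(`{"example":true}`)
+//	desc := ocispec.Descriptor{
+//		MediaType: ocispec.MediaTypeImageConfig,
+//		Digest:    digest.FromBytes(blob),
+//		Size:      int64(len(blob)),
+//	}
+//
+//	w, err := src.Writer(ctx, content.WithRef("example-ref"))
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	if _, err := w.Write(blob); err != nil {
+//		log.Fatal(err)
+//	}
+//	if err := w.Commit(ctx, desc.Size, desc.Digest); err != nil {
+//		log.Fatal(err)
+//	}
+//
+//	// Any content.Ingester can now receive the blob, e.g. a second buffer.
+//	if err := contentutil.Copy(ctx, contentutil.NewBuffer(), src, desc); err != nil {
+//		log.Fatal(err)
+//	}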
diff --git a/vendor/github.com/moby/buildkit/util/contentutil/fetcher.go b/vendor/github.com/moby/buildkit/util/contentutil/fetcher.go new file mode 100644 index 00000000..d55c1012 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/contentutil/fetcher.go @@ -0,0 +1,73 @@ +package contentutil + +import ( + "context" + "io" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/remotes" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +func FromFetcher(f remotes.Fetcher) content.Provider { + return &fetchedProvider{ + f: f, + } +} + +type fetchedProvider struct { + f remotes.Fetcher +} + +func (p *fetchedProvider) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) { + rc, err := p.f.Fetch(ctx, desc) + if err != nil { + return nil, err + } + + return &readerAt{Reader: rc, Closer: rc, size: desc.Size}, nil +} + +type readerAt struct { + io.Reader + io.Closer + size int64 + offset int64 +} + +func (r *readerAt) ReadAt(b []byte, off int64) (int, error) { + if ra, ok := r.Reader.(io.ReaderAt); ok { + return ra.ReadAt(b, off) + } + + if r.offset != off { + if seeker, ok := r.Reader.(io.Seeker); ok { + if _, err := seeker.Seek(off, io.SeekStart); err != nil { + return 0, err + } + r.offset = off + } else { + return 0, errors.Errorf("unsupported offset") + } + } + + var totalN int + for len(b) > 0 { + n, err := r.Reader.Read(b) + if err == io.EOF && n == len(b) { + err = nil + } + r.offset += int64(n) + totalN += n + b = b[n:] + if err != nil { + return totalN, err + } + } + return totalN, nil +} + +func (r *readerAt) Size() int64 { + return r.size +} diff --git a/vendor/github.com/moby/buildkit/util/contentutil/multiprovider.go b/vendor/github.com/moby/buildkit/util/contentutil/multiprovider.go new file mode 100644 index 00000000..3dafed7d --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/contentutil/multiprovider.go @@ -0,0 +1,48 @@ +package contentutil + +import ( + "context" + "sync" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +// NewMultiProvider creates a new mutable provider with a base provider +func NewMultiProvider(base content.Provider) *MultiProvider { + return &MultiProvider{ + base: base, + sub: map[digest.Digest]content.Provider{}, + } +} + +// MultiProvider is a provider backed by a mutable map of providers +type MultiProvider struct { + mu sync.RWMutex + base content.Provider + sub map[digest.Digest]content.Provider +} + +// ReaderAt returns a content.ReaderAt +func (mp *MultiProvider) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) { + mp.mu.RLock() + if p, ok := mp.sub[desc.Digest]; ok { + mp.mu.RUnlock() + return p.ReaderAt(ctx, desc) + } + mp.mu.RUnlock() + if mp.base == nil { + return nil, errors.Wrapf(errdefs.ErrNotFound, "content %v", desc.Digest) + } + return mp.base.ReaderAt(ctx, desc) +} + +// Add adds a new child provider for a specific digest +func (mp *MultiProvider) Add(dgst digest.Digest, p content.Provider) { + mp.mu.Lock() + defer mp.mu.Unlock() + mp.sub[dgst] = p +} diff --git a/vendor/github.com/moby/buildkit/util/contentutil/pusher.go b/vendor/github.com/moby/buildkit/util/contentutil/pusher.go new file mode 100644 index 00000000..693dcfea --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/contentutil/pusher.go @@ -0,0 +1,122 @@ 
+package contentutil + +import ( + "context" + "runtime" + "sync" + "time" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/remotes" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +func FromPusher(p remotes.Pusher) content.Ingester { + var mu sync.Mutex + c := sync.NewCond(&mu) + return &pushingIngester{ + mu: &mu, + c: c, + p: p, + active: map[digest.Digest]struct{}{}, + } +} + +type pushingIngester struct { + p remotes.Pusher + + mu *sync.Mutex + c *sync.Cond + active map[digest.Digest]struct{} +} + +// Writer implements content.Ingester. desc.MediaType must be set for manifest blobs. +func (i *pushingIngester) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) { + var wOpts content.WriterOpts + for _, opt := range opts { + if err := opt(&wOpts); err != nil { + return nil, err + } + } + if wOpts.Ref == "" { + return nil, errors.Wrap(errdefs.ErrInvalidArgument, "ref must not be empty") + } + + st := time.Now() + + i.mu.Lock() + for { + if time.Since(st) > time.Hour { + i.mu.Unlock() + return nil, errors.Wrapf(errdefs.ErrUnavailable, "ref %v locked", wOpts.Desc.Digest) + } + if _, ok := i.active[wOpts.Desc.Digest]; ok { + i.c.Wait() + } else { + break + } + } + + i.active[wOpts.Desc.Digest] = struct{}{} + i.mu.Unlock() + + var once sync.Once + release := func() { + once.Do(func() { + i.mu.Lock() + delete(i.active, wOpts.Desc.Digest) + i.c.Broadcast() + i.mu.Unlock() + }) + } + + // pusher requires desc.MediaType to determine the PUT URL, especially for manifest blobs. + contentWriter, err := i.p.Push(ctx, wOpts.Desc) + if err != nil { + release() + return nil, err + } + runtime.SetFinalizer(contentWriter, func(_ content.Writer) { + release() + }) + return &writer{ + Writer: contentWriter, + contentWriterRef: wOpts.Ref, + release: release, + }, nil +} + +type writer struct { + content.Writer // returned from pusher.Push + contentWriterRef string // ref passed for Writer() + release func() +} + +func (w *writer) Status() (content.Status, error) { + st, err := w.Writer.Status() + if err != nil { + return st, err + } + if w.contentWriterRef != "" { + st.Ref = w.contentWriterRef + } + return st, nil +} + +func (w *writer) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error { + err := w.Writer.Commit(ctx, size, expected, opts...) 
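+	// Release the per-digest lock whether or not the registry accepted the
+	// blob; release() is wrapped in sync.Once above, so the finalizer set in
+	// Writer() cannot double-release.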
+ if w.release != nil { + w.release() + } + return err +} + +func (w *writer) Close() error { + err := w.Writer.Close() + if w.release != nil { + w.release() + } + return err +} diff --git a/vendor/github.com/moby/buildkit/util/contentutil/refs.go b/vendor/github.com/moby/buildkit/util/contentutil/refs.go new file mode 100644 index 00000000..e62d7987 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/contentutil/refs.go @@ -0,0 +1,98 @@ +package contentutil + +import ( + "context" + "net/http" + "sync" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/remotes" + "github.com/containerd/containerd/remotes/docker" + "github.com/docker/docker/pkg/locker" + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +func ProviderFromRef(ref string) (ocispec.Descriptor, content.Provider, error) { + remote := docker.NewResolver(docker.ResolverOptions{ + Client: http.DefaultClient, + }) + + name, desc, err := remote.Resolve(context.TODO(), ref) + if err != nil { + return ocispec.Descriptor{}, nil, err + } + + fetcher, err := remote.Fetcher(context.TODO(), name) + if err != nil { + return ocispec.Descriptor{}, nil, err + } + return desc, FromFetcher(fetcher), nil +} + +func IngesterFromRef(ref string) (content.Ingester, error) { + remote := docker.NewResolver(docker.ResolverOptions{ + Client: http.DefaultClient, + }) + + pusher, err := remote.Pusher(context.TODO(), ref) + if err != nil { + return nil, err + } + + return &ingester{ + locker: locker.New(), + pusher: pusher, + }, nil +} + +type ingester struct { + locker *locker.Locker + pusher remotes.Pusher +} + +func (w *ingester) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) { + var wo content.WriterOpts + for _, o := range opts { + if err := o(&wo); err != nil { + return nil, err + } + } + if wo.Ref == "" { + return nil, errors.Wrap(errdefs.ErrInvalidArgument, "ref must not be empty") + } + w.locker.Lock(wo.Ref) + var once sync.Once + unlock := func() { + once.Do(func() { + w.locker.Unlock(wo.Ref) + }) + } + writer, err := w.pusher.Push(ctx, wo.Desc) + if err != nil { + unlock() + return nil, err + } + return &lockedWriter{unlock: unlock, Writer: writer}, nil +} + +type lockedWriter struct { + unlock func() + content.Writer +} + +func (w *lockedWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error { + err := w.Writer.Commit(ctx, size, expected, opts...) 
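+	// Unlock the ref only on success; a failed commit leaves the ref locked
+	// until Close(), and the sync.Once inside unlock() keeps the two paths
+	// from double-unlocking.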
+ if err == nil { + w.unlock() + } + return err +} + +func (w *lockedWriter) Close() error { + err := w.Writer.Close() + w.unlock() + return err +} diff --git a/vendor/github.com/moby/buildkit/util/entitlements/entitlements.go b/vendor/github.com/moby/buildkit/util/entitlements/entitlements.go new file mode 100644 index 00000000..f65b426b --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/entitlements/entitlements.go @@ -0,0 +1,60 @@ +package entitlements + +import ( + "github.com/pkg/errors" +) + +type Entitlement string + +const ( + EntitlementSecurityInsecure Entitlement = "security.insecure" + EntitlementNetworkHost Entitlement = "network.host" +) + +var all = map[Entitlement]struct{}{ + EntitlementSecurityInsecure: {}, + EntitlementNetworkHost: {}, +} + +func Parse(s string) (Entitlement, error) { + _, ok := all[Entitlement(s)] + if !ok { + return "", errors.Errorf("unknown entitlement %s", s) + } + return Entitlement(s), nil +} + +func WhiteList(allowed, supported []Entitlement) (Set, error) { + m := map[Entitlement]struct{}{} + + var supm Set + if supported != nil { + var err error + supm, err = WhiteList(supported, nil) + if err != nil { // should not happen + return nil, err + } + } + + for _, e := range allowed { + e, err := Parse(string(e)) + if err != nil { + return nil, err + } + if supported != nil { + if !supm.Allowed(e) { + return nil, errors.Errorf("granting entitlement %s is not allowed by build daemon configuration", e) + } + } + m[e] = struct{}{} + } + + return Set(m), nil +} + +type Set map[Entitlement]struct{} + +func (s Set) Allowed(e Entitlement) bool { + _, ok := s[e] + return ok +} diff --git a/vendor/github.com/moby/buildkit/util/entitlements/security/security_linux.go b/vendor/github.com/moby/buildkit/util/entitlements/security/security_linux.go new file mode 100644 index 00000000..2b9b1512 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/entitlements/security/security_linux.go @@ -0,0 +1,163 @@ +package security + +import ( + "context" + "fmt" + "os" + + "github.com/containerd/containerd/containers" + "github.com/containerd/containerd/oci" + "github.com/opencontainers/runc/libcontainer/system" + specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +// WithInsecureSpec sets spec with All capability. 
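+// It backs the security.insecure entitlement (util/entitlements above): the
+// resulting spec keeps the full capability set, no masked or read-only paths,
+// no AppArmor profile, and permissive device-cgroup rules. A usage sketch,
+// with illustrative names:
+//
+//	opts := []oci.SpecOpts{oci.WithDefaultSpec()}
+//	if insecureAllowed {
+//		opts = append(opts, security.WithInsecureSpec())
+//	}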
+func WithInsecureSpec() oci.SpecOpts { + return func(_ context.Context, _ oci.Client, _ *containers.Container, s *specs.Spec) error { + addCaps := []string{ + "CAP_FSETID", + "CAP_KILL", + "CAP_FOWNER", + "CAP_MKNOD", + "CAP_CHOWN", + "CAP_DAC_OVERRIDE", + "CAP_NET_RAW", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETPCAP", + "CAP_SETFCAP", + "CAP_NET_BIND_SERVICE", + "CAP_SYS_CHROOT", + "CAP_AUDIT_WRITE", + "CAP_MAC_ADMIN", + "CAP_MAC_OVERRIDE", + "CAP_DAC_READ_SEARCH", + "CAP_SYS_PTRACE", + "CAP_SYS_MODULE", + "CAP_SYSLOG", + "CAP_SYS_RAWIO", + "CAP_SYS_ADMIN", + "CAP_LINUX_IMMUTABLE", + "CAP_SYS_BOOT", + "CAP_SYS_NICE", + "CAP_SYS_PACCT", + "CAP_SYS_TTY_CONFIG", + "CAP_SYS_TIME", + "CAP_WAKE_ALARM", + "CAP_AUDIT_READ", + "CAP_AUDIT_CONTROL", + "CAP_SYS_RESOURCE", + "CAP_BLOCK_SUSPEND", + "CAP_IPC_LOCK", + "CAP_IPC_OWNER", + "CAP_LEASE", + "CAP_NET_ADMIN", + "CAP_NET_BROADCAST", + } + for _, cap := range addCaps { + s.Process.Capabilities.Bounding = append(s.Process.Capabilities.Bounding, cap) + s.Process.Capabilities.Ambient = append(s.Process.Capabilities.Ambient, cap) + s.Process.Capabilities.Effective = append(s.Process.Capabilities.Effective, cap) + s.Process.Capabilities.Inheritable = append(s.Process.Capabilities.Inheritable, cap) + s.Process.Capabilities.Permitted = append(s.Process.Capabilities.Permitted, cap) + } + s.Linux.ReadonlyPaths = []string{} + s.Linux.MaskedPaths = []string{} + s.Process.ApparmorProfile = "" + + s.Linux.Resources.Devices = []specs.LinuxDeviceCgroup{ + { + Allow: true, + Type: "c", + Access: "rwm", + }, + { + Allow: true, + Type: "b", + Access: "rwm", + }, + } + + if !system.RunningInUserNS() { + // Devices automatically mounted on insecure mode + s.Linux.Devices = append(s.Linux.Devices, []specs.LinuxDevice{ + // Writes to this come out as printk's, reads export the buffered printk records. (dmesg) + { + Path: "/dev/kmsg", + Type: "c", + Major: 1, + Minor: 11, + }, + // Cuse (character device in user-space) + { + Path: "/dev/cuse", + Type: "c", + Major: 10, + Minor: 203, + }, + // Fuse (virtual filesystem in user-space) + { + Path: "/dev/fuse", + Type: "c", + Major: 10, + Minor: 229, + }, + // Kernel-based virtual machine (hardware virtualization extensions) + { + Path: "/dev/kvm", + Type: "c", + Major: 10, + Minor: 232, + }, + // TAP/TUN network device + { + Path: "/dev/net/tun", + Type: "c", + Major: 10, + Minor: 200, + }, + // Loopback control device + { + Path: "/dev/loop-control", + Type: "c", + Major: 10, + Minor: 237, + }, + }...) 
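+
+			// Beyond the fixed device nodes above, pre-create loop devices:
+			// getFreeLoopID (below) asks /dev/loop-control for the next free
+			// index, and nodes up to /dev/loop<free+7> are added so the build
+			// has spare loop devices available for mounting.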
+			loopID, err := getFreeLoopID()
+			if err != nil {
+				logrus.Debugf("failed to get next free loop device: %v", err)
+			}
+
+			for i := 0; i <= loopID+7; i++ {
+				s.Linux.Devices = append(s.Linux.Devices, specs.LinuxDevice{
+					Path:  fmt.Sprintf("/dev/loop%d", i),
+					Type:  "b",
+					Major: 7,
+					Minor: int64(i),
+				})
+			}
+		}
+
+		return nil
+	}
+}
+
+func getFreeLoopID() (int, error) {
+	fd, err := os.OpenFile("/dev/loop-control", os.O_RDWR, 0644)
+	if err != nil {
+		return 0, err
+	}
+	defer fd.Close()
+
+	const _LOOP_CTL_GET_FREE = 0x4C82
+	r1, _, uerr := unix.Syscall(unix.SYS_IOCTL, fd.Fd(), _LOOP_CTL_GET_FREE, 0)
+	if uerr == 0 {
+		return int(r1), nil
+	}
+	return 0, errors.Errorf("error getting free loop device: %v", uerr)
+}
diff --git a/vendor/github.com/moby/buildkit/util/flightcontrol/flightcontrol.go b/vendor/github.com/moby/buildkit/util/flightcontrol/flightcontrol.go
new file mode 100644
index 00000000..f06d4e89
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/util/flightcontrol/flightcontrol.go
@@ -0,0 +1,341 @@
+package flightcontrol
+
+import (
+	"context"
+	"io"
+	"runtime"
+	"sort"
+	"sync"
+	"time"
+
+	"github.com/moby/buildkit/util/progress"
+	"github.com/pkg/errors"
+)
+
+// flightcontrol is like singleflight but with support for cancellation and
+// nested progress reporting
+
+var (
+	errRetry        = errors.Errorf("retry")
+	errRetryTimeout = errors.Errorf("exceeded retry timeout")
+)
+
+type contextKeyT string
+
+var contextKey = contextKeyT("buildkit/util/flightcontrol.progress")
+
+// Group is a flightcontrol synchronization group
+type Group struct {
+	mu sync.Mutex       // protects m
+	m  map[string]*call // lazily initialized
+}
+
+// Do executes a context function synchronized by the key
+func (g *Group) Do(ctx context.Context, key string, fn func(ctx context.Context) (interface{}, error)) (v interface{}, err error) {
+	var backoff time.Duration
+	for {
+		v, err = g.do(ctx, key, fn)
+		if err == nil || errors.Cause(err) != errRetry {
+			return v, err
+		}
+		// backoff logic
+		if backoff >= 3*time.Second {
+			err = errors.Wrapf(errRetryTimeout, "flightcontrol")
+			return v, err
+		}
+		runtime.Gosched()
+		if backoff > 0 {
+			time.Sleep(backoff)
+			backoff *= 2
+		} else {
+			backoff = time.Millisecond
+		}
+	}
+}
+
+func (g *Group) do(ctx context.Context, key string, fn func(ctx context.Context) (interface{}, error)) (interface{}, error) {
+	g.mu.Lock()
+	if g.m == nil {
+		g.m = make(map[string]*call)
+	}
+
+	if c, ok := g.m[key]; ok { // register 2nd waiter
+		g.mu.Unlock()
+		return c.wait(ctx)
+	}
+
+	c := newCall(fn)
+	g.m[key] = c
+	go func() {
+		// cleanup after a caller has returned
+		<-c.ready
+		g.mu.Lock()
+		delete(g.m, key)
+		g.mu.Unlock()
+		close(c.cleaned)
+	}()
+	g.mu.Unlock()
+	return c.wait(ctx)
+}
+
+type call struct {
+	mu      sync.Mutex
+	result  interface{}
+	err     error
+	ready   chan struct{}
+	cleaned chan struct{}
+
+	ctx  *sharedContext
+	ctxs []context.Context
+	fn   func(ctx context.Context) (interface{}, error)
+	once sync.Once
+
+	closeProgressWriter func()
+	progressState       *progressState
+	progressCtx         context.Context
+}
+
+func newCall(fn func(ctx context.Context) (interface{}, error)) *call {
+	c := &call{
+		fn:            fn,
+		ready:         make(chan struct{}),
+		cleaned:       make(chan struct{}),
+		progressState: newProgressState(),
+	}
+	ctx := newContext(c) // newSharedContext
+	pr, pctx, closeProgressWriter := progress.NewContext(context.Background())
+
+	c.progressCtx = pctx
+	c.ctx = ctx
+	c.closeProgressWriter = closeProgressWriter
+
+	go c.progressState.run(pr) // TODO: remove this, wrap
writer instead + + return c +} + +func (c *call) run() { + defer c.closeProgressWriter() + ctx, cancel := context.WithCancel(c.ctx) + defer cancel() + v, err := c.fn(ctx) + c.mu.Lock() + c.result = v + c.err = err + c.mu.Unlock() + close(c.ready) +} + +func (c *call) wait(ctx context.Context) (v interface{}, err error) { + c.mu.Lock() + // detect case where caller has just returned, let it clean up before + select { + case <-c.ready: // could return if no error + c.mu.Unlock() + <-c.cleaned + return nil, errRetry + default: + } + + pw, ok, ctx := progress.FromContext(ctx) + if ok { + c.progressState.add(pw) + } + c.ctxs = append(c.ctxs, ctx) + + c.mu.Unlock() + + go c.once.Do(c.run) + + select { + case <-ctx.Done(): + select { + case <-c.ctx.Done(): + // if this cancelled the last context, then wait for function to shut down + // and don't accept any more callers + <-c.ready + return c.result, c.err + default: + if ok { + c.progressState.close(pw) + } + return nil, ctx.Err() + } + case <-c.ready: + return c.result, c.err // shared not implemented yet + } +} + +func (c *call) Deadline() (deadline time.Time, ok bool) { + c.mu.Lock() + defer c.mu.Unlock() + for _, ctx := range c.ctxs { + select { + case <-ctx.Done(): + default: + dl, ok := ctx.Deadline() + if ok { + return dl, ok + } + } + } + return time.Time{}, false +} + +func (c *call) Done() <-chan struct{} { + c.mu.Lock() + c.ctx.signal() + c.mu.Unlock() + return c.ctx.done +} + +func (c *call) Err() error { + select { + case <-c.ctx.Done(): + return c.ctx.err + default: + return nil + } +} + +func (c *call) Value(key interface{}) interface{} { + if key == contextKey { + return c.progressState + } + c.mu.Lock() + defer c.mu.Unlock() + + ctx := c.progressCtx + select { + case <-ctx.Done(): + default: + if v := ctx.Value(key); v != nil { + return v + } + } + + if len(c.ctxs) > 0 { + ctx = c.ctxs[0] + select { + case <-ctx.Done(): + default: + if v := ctx.Value(key); v != nil { + return v + } + } + } + + return nil +} + +type sharedContext struct { + *call + done chan struct{} + err error +} + +func newContext(c *call) *sharedContext { + return &sharedContext{call: c, done: make(chan struct{})} +} + +// call with lock +func (c *sharedContext) signal() { + select { + case <-c.done: + default: + var err error + for _, ctx := range c.ctxs { + select { + case <-ctx.Done(): + err = ctx.Err() + default: + return + } + } + c.err = err + close(c.done) + } +} + +type rawProgressWriter interface { + WriteRawProgress(*progress.Progress) error + Close() error +} + +type progressState struct { + mu sync.Mutex + items map[string]*progress.Progress + writers []rawProgressWriter + done bool +} + +func newProgressState() *progressState { + return &progressState{ + items: make(map[string]*progress.Progress), + } +} + +func (ps *progressState) run(pr progress.Reader) { + for { + p, err := pr.Read(context.TODO()) + if err != nil { + if err == io.EOF { + ps.mu.Lock() + ps.done = true + ps.mu.Unlock() + for _, w := range ps.writers { + w.Close() + } + } + return + } + ps.mu.Lock() + for _, p := range p { + for _, w := range ps.writers { + w.WriteRawProgress(p) + } + ps.items[p.ID] = p + } + ps.mu.Unlock() + } +} + +func (ps *progressState) add(pw progress.Writer) { + rw, ok := pw.(rawProgressWriter) + if !ok { + return + } + ps.mu.Lock() + plist := make([]*progress.Progress, 0, len(ps.items)) + for _, p := range ps.items { + plist = append(plist, p) + } + sort.Slice(plist, func(i, j int) bool { + return plist[i].Timestamp.Before(plist[j].Timestamp) + }) + for 
_, p := range plist { + rw.WriteRawProgress(p) + } + if ps.done { + rw.Close() + } else { + ps.writers = append(ps.writers, rw) + } + ps.mu.Unlock() +} + +func (ps *progressState) close(pw progress.Writer) { + rw, ok := pw.(rawProgressWriter) + if !ok { + return + } + ps.mu.Lock() + for i, w := range ps.writers { + if w == rw { + w.Close() + ps.writers = append(ps.writers[:i], ps.writers[i+1:]...) + break + } + } + ps.mu.Unlock() +} diff --git a/vendor/github.com/moby/buildkit/util/imageutil/config.go b/vendor/github.com/moby/buildkit/util/imageutil/config.go new file mode 100644 index 00000000..e0d9f174 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/imageutil/config.go @@ -0,0 +1,196 @@ +package imageutil + +import ( + "context" + "encoding/json" + "sync" + "time" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/leases" + "github.com/containerd/containerd/platforms" + "github.com/containerd/containerd/reference" + "github.com/containerd/containerd/remotes" + "github.com/containerd/containerd/remotes/docker" + "github.com/moby/buildkit/util/leaseutil" + digest "github.com/opencontainers/go-digest" + specs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +type ContentCache interface { + content.Ingester + content.Provider +} + +var leasesMu sync.Mutex +var leasesF []func(context.Context) error + +func CancelCacheLeases() { + leasesMu.Lock() + for _, f := range leasesF { + f(context.TODO()) + } + leasesF = nil + leasesMu.Unlock() +} + +func Config(ctx context.Context, str string, resolver remotes.Resolver, cache ContentCache, leaseManager leases.Manager, p *specs.Platform) (digest.Digest, []byte, error) { + // TODO: fix buildkit to take interface instead of struct + var platform platforms.MatchComparer + if p != nil { + platform = platforms.Only(*p) + } else { + platform = platforms.Default() + } + ref, err := reference.Parse(str) + if err != nil { + return "", nil, errors.WithStack(err) + } + + if leaseManager != nil { + ctx2, done, err := leaseutil.WithLease(ctx, leaseManager, leases.WithExpiration(5*time.Minute), leaseutil.MakeTemporary) + if err != nil { + return "", nil, errors.WithStack(err) + } + ctx = ctx2 + defer func() { + // this lease is not deleted to allow other components to access manifest/config from cache. 
It will be deleted after 5 min deadline or on pruning inactive builder + leasesMu.Lock() + leasesF = append(leasesF, done) + leasesMu.Unlock() + }() + } + + desc := specs.Descriptor{ + Digest: ref.Digest(), + } + if desc.Digest != "" { + ra, err := cache.ReaderAt(ctx, desc) + if err == nil { + desc.Size = ra.Size() + mt, err := DetectManifestMediaType(ra) + if err == nil { + desc.MediaType = mt + } + } + } + // use resolver if desc is incomplete + if desc.MediaType == "" { + _, desc, err = resolver.Resolve(ctx, ref.String()) + if err != nil { + return "", nil, err + } + } + + fetcher, err := resolver.Fetcher(ctx, ref.String()) + if err != nil { + return "", nil, err + } + + if desc.MediaType == images.MediaTypeDockerSchema1Manifest { + return readSchema1Config(ctx, ref.String(), desc, fetcher, cache) + } + + children := childrenConfigHandler(cache, platform) + + handlers := []images.Handler{ + remotes.FetchHandler(cache, fetcher), + children, + } + if err := images.Dispatch(ctx, images.Handlers(handlers...), nil, desc); err != nil { + return "", nil, err + } + config, err := images.Config(ctx, cache, desc, platform) + if err != nil { + return "", nil, err + } + + dt, err := content.ReadBlob(ctx, cache, config) + if err != nil { + return "", nil, err + } + + return desc.Digest, dt, nil +} + +func childrenConfigHandler(provider content.Provider, platform platforms.MatchComparer) images.HandlerFunc { + return func(ctx context.Context, desc specs.Descriptor) ([]specs.Descriptor, error) { + var descs []specs.Descriptor + switch desc.MediaType { + case images.MediaTypeDockerSchema2Manifest, specs.MediaTypeImageManifest: + p, err := content.ReadBlob(ctx, provider, desc) + if err != nil { + return nil, err + } + + // TODO(stevvooe): We just assume oci manifest, for now. There may be + // subtle differences from the docker version. + var manifest specs.Manifest + if err := json.Unmarshal(p, &manifest); err != nil { + return nil, err + } + + descs = append(descs, manifest.Config) + case images.MediaTypeDockerSchema2ManifestList, specs.MediaTypeImageIndex: + p, err := content.ReadBlob(ctx, provider, desc) + if err != nil { + return nil, err + } + + var index specs.Index + if err := json.Unmarshal(p, &index); err != nil { + return nil, err + } + + if platform != nil { + for _, d := range index.Manifests { + if d.Platform == nil || platform.Match(*d.Platform) { + descs = append(descs, d) + } + } + } else { + descs = append(descs, index.Manifests...) + } + case images.MediaTypeDockerSchema2Config, specs.MediaTypeImageConfig, docker.LegacyConfigMediaType: + // childless data types. 
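+			// A config blob is a leaf of the image graph, so the walk stops here.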
+ return nil, nil + default: + return nil, errors.Errorf("encountered unknown type %v; children may not be fetched", desc.MediaType) + } + + return descs, nil + } +} + +// specs.MediaTypeImageManifest, // TODO: detect schema1/manifest-list +func DetectManifestMediaType(ra content.ReaderAt) (string, error) { + // TODO: schema1 + + dt := make([]byte, ra.Size()) + if _, err := ra.ReadAt(dt, 0); err != nil { + return "", err + } + + return DetectManifestBlobMediaType(dt) +} + +func DetectManifestBlobMediaType(dt []byte) (string, error) { + var mfst struct { + MediaType string `json:"mediaType"` + Config json.RawMessage `json:"config"` + } + + if err := json.Unmarshal(dt, &mfst); err != nil { + return "", err + } + + if mfst.MediaType != "" { + return mfst.MediaType, nil + } + if mfst.Config != nil { + return images.MediaTypeDockerSchema2Manifest, nil + } + return images.MediaTypeDockerSchema2ManifestList, nil +} diff --git a/vendor/github.com/moby/buildkit/util/imageutil/schema1.go b/vendor/github.com/moby/buildkit/util/imageutil/schema1.go new file mode 100644 index 00000000..591676ff --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/imageutil/schema1.go @@ -0,0 +1,87 @@ +package imageutil + +import ( + "context" + "encoding/json" + "io/ioutil" + "strings" + "time" + + "github.com/containerd/containerd/remotes" + digest "github.com/opencontainers/go-digest" + specs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +func readSchema1Config(ctx context.Context, ref string, desc specs.Descriptor, fetcher remotes.Fetcher, cache ContentCache) (digest.Digest, []byte, error) { + rc, err := fetcher.Fetch(ctx, desc) + if err != nil { + return "", nil, err + } + defer rc.Close() + dt, err := ioutil.ReadAll(rc) + if err != nil { + return "", nil, errors.Wrap(err, "failed to fetch schema1 manifest") + } + dt, err = convertSchema1ConfigMeta(dt) + if err != nil { + return "", nil, err + } + return desc.Digest, dt, nil +} + +func convertSchema1ConfigMeta(in []byte) ([]byte, error) { + type history struct { + V1Compatibility string `json:"v1Compatibility"` + } + var m struct { + History []history `json:"history"` + } + if err := json.Unmarshal(in, &m); err != nil { + return nil, errors.Wrap(err, "failed to unmarshal schema1 manifest") + } + if len(m.History) == 0 { + return nil, errors.Errorf("invalid schema1 manifest") + } + + var img specs.Image + if err := json.Unmarshal([]byte(m.History[0].V1Compatibility), &img); err != nil { + return nil, errors.Wrap(err, "failed to unmarshal image from schema 1 history") + } + + img.RootFS = specs.RootFS{ + Type: "layers", // filled in by exporter + } + img.History = make([]specs.History, len(m.History)) + + for i := range m.History { + var h v1History + if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), &h); err != nil { + return nil, errors.Wrap(err, "failed to unmarshal history") + } + img.History[len(m.History)-i-1] = specs.History{ + Author: h.Author, + Comment: h.Comment, + Created: &h.Created, + CreatedBy: strings.Join(h.ContainerConfig.Cmd, " "), + EmptyLayer: (h.ThrowAway != nil && *h.ThrowAway) || (h.Size != nil && *h.Size == 0), + } + } + + dt, err := json.MarshalIndent(img, "", " ") + if err != nil { + return nil, errors.Wrap(err, "failed to marshal schema1 config") + } + return dt, nil +} + +type v1History struct { + Author string `json:"author,omitempty"` + Created time.Time `json:"created"` + Comment string `json:"comment,omitempty"` + ThrowAway *bool `json:"throwaway,omitempty"` + Size *int 
`json:"Size,omitempty"` // used before ThrowAway field + ContainerConfig struct { + Cmd []string `json:"Cmd,omitempty"` + } `json:"container_config,omitempty"` +} diff --git a/vendor/github.com/moby/buildkit/util/leaseutil/manager.go b/vendor/github.com/moby/buildkit/util/leaseutil/manager.go new file mode 100644 index 00000000..45a35273 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/leaseutil/manager.go @@ -0,0 +1,75 @@ +package leaseutil + +import ( + "context" + "time" + + "github.com/containerd/containerd/leases" + "github.com/containerd/containerd/namespaces" +) + +func WithLease(ctx context.Context, ls leases.Manager, opts ...leases.Opt) (context.Context, func(context.Context) error, error) { + _, ok := leases.FromContext(ctx) + if ok { + return ctx, func(context.Context) error { + return nil + }, nil + } + + l, err := ls.Create(ctx, append([]leases.Opt{leases.WithRandomID(), leases.WithExpiration(time.Hour)}, opts...)...) + if err != nil { + return nil, nil, err + } + + ctx = leases.WithLease(ctx, l.ID) + return ctx, func(ctx context.Context) error { + return ls.Delete(ctx, l) + }, nil +} + +func MakeTemporary(l *leases.Lease) error { + if l.Labels == nil { + l.Labels = map[string]string{} + } + l.Labels["buildkit/lease.temporary"] = time.Now().UTC().Format(time.RFC3339Nano) + return nil +} + +func WithNamespace(lm leases.Manager, ns string) leases.Manager { + return &nsLM{manager: lm, ns: ns} +} + +type nsLM struct { + manager leases.Manager + ns string +} + +func (l *nsLM) Create(ctx context.Context, opts ...leases.Opt) (leases.Lease, error) { + ctx = namespaces.WithNamespace(ctx, l.ns) + return l.manager.Create(ctx, opts...) +} + +func (l *nsLM) Delete(ctx context.Context, lease leases.Lease, opts ...leases.DeleteOpt) error { + ctx = namespaces.WithNamespace(ctx, l.ns) + return l.manager.Delete(ctx, lease, opts...) +} + +func (l *nsLM) List(ctx context.Context, filters ...string) ([]leases.Lease, error) { + ctx = namespaces.WithNamespace(ctx, l.ns) + return l.manager.List(ctx, filters...) 
+} + +func (l *nsLM) AddResource(ctx context.Context, lease leases.Lease, resource leases.Resource) error { + ctx = namespaces.WithNamespace(ctx, l.ns) + return l.manager.AddResource(ctx, lease, resource) +} + +func (l *nsLM) DeleteResource(ctx context.Context, lease leases.Lease, resource leases.Resource) error { + ctx = namespaces.WithNamespace(ctx, l.ns) + return l.manager.DeleteResource(ctx, lease, resource) +} + +func (l *nsLM) ListResources(ctx context.Context, lease leases.Lease) ([]leases.Resource, error) { + ctx = namespaces.WithNamespace(ctx, l.ns) + return l.manager.ListResources(ctx, lease) +} diff --git a/vendor/github.com/moby/buildkit/util/network/cniprovider/allowempty.s b/vendor/github.com/moby/buildkit/util/network/cniprovider/allowempty.s new file mode 100644 index 00000000..e69de29b diff --git a/vendor/github.com/moby/buildkit/util/network/cniprovider/cni.go b/vendor/github.com/moby/buildkit/util/network/cniprovider/cni.go new file mode 100644 index 00000000..5346654e --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/network/cniprovider/cni.go @@ -0,0 +1,120 @@ +package cniprovider + +import ( + "context" + "os" + "path/filepath" + "syscall" + + "github.com/containerd/containerd/oci" + "github.com/containerd/go-cni" + "github.com/gofrs/flock" + "github.com/moby/buildkit/identity" + "github.com/moby/buildkit/util/network" + specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" + "golang.org/x/sys/unix" +) + +type Opt struct { + Root string + ConfigPath string + BinaryDir string +} + +func New(opt Opt) (network.Provider, error) { + if _, err := os.Stat(opt.ConfigPath); err != nil { + return nil, errors.Wrapf(err, "failed to read cni config %q", opt.ConfigPath) + } + if _, err := os.Stat(opt.BinaryDir); err != nil { + return nil, errors.Wrapf(err, "failed to read cni binary dir %q", opt.BinaryDir) + } + + cniHandle, err := cni.New( + cni.WithMinNetworkCount(2), + cni.WithConfFile(opt.ConfigPath), + cni.WithPluginDir([]string{opt.BinaryDir}), + cni.WithLoNetwork, + cni.WithInterfacePrefix(("eth"))) + if err != nil { + return nil, err + } + + if err != nil { + return nil, err + } + + cp := &cniProvider{CNI: cniHandle, root: opt.Root} + if err := cp.initNetwork(); err != nil { + return nil, err + } + return cp, nil +} + +type cniProvider struct { + cni.CNI + root string +} + +func (c *cniProvider) initNetwork() error { + if v := os.Getenv("BUILDKIT_CNI_INIT_LOCK_PATH"); v != "" { + l := flock.New(v) + if err := l.Lock(); err != nil { + return err + } + defer l.Unlock() + } + ns, err := c.New() + if err != nil { + return err + } + return ns.Close() +} + +func (c *cniProvider) New() (network.Namespace, error) { + id := identity.NewID() + nsPath := filepath.Join(c.root, "net/cni", id) + if err := os.MkdirAll(filepath.Dir(nsPath), 0700); err != nil { + return nil, err + } + + if err := createNetNS(nsPath); err != nil { + os.RemoveAll(filepath.Dir(nsPath)) + return nil, err + } + + if _, err := c.CNI.Setup(context.TODO(), id, nsPath); err != nil { + os.RemoveAll(filepath.Dir(nsPath)) + return nil, errors.Wrap(err, "CNI setup error") + } + + return &cniNS{path: nsPath, id: id, handle: c.CNI}, nil +} + +type cniNS struct { + handle cni.CNI + id string + path string +} + +func (ns *cniNS) Set(s *specs.Spec) { + oci.WithLinuxNamespace(specs.LinuxNamespace{ + Type: specs.NetworkNamespace, + Path: ns.path, + })(nil, nil, nil, s) +} + +func (ns *cniNS) Close() error { + err := ns.handle.Remove(context.TODO(), ns.id, ns.path) + + if err1 := 
unix.Unmount(ns.path, unix.MNT_DETACH); err1 != nil { + if err1 != syscall.EINVAL && err1 != syscall.ENOENT && err == nil { + err = errors.Wrap(err1, "error unmounting network namespace") + } + } + if err1 := os.RemoveAll(filepath.Dir(ns.path)); err1 != nil && !os.IsNotExist(err1) && err == nil { + err = errors.Wrap(err, "error removing network namespace") + } + + return err +} diff --git a/vendor/github.com/moby/buildkit/util/network/cniprovider/cni_unsafe.go b/vendor/github.com/moby/buildkit/util/network/cniprovider/cni_unsafe.go new file mode 100644 index 00000000..99490813 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/network/cniprovider/cni_unsafe.go @@ -0,0 +1,16 @@ +// +build linux + +package cniprovider + +import ( + _ "unsafe" // required for go:linkname. +) + +//go:linkname beforeFork syscall.runtime_BeforeFork +func beforeFork() + +//go:linkname afterFork syscall.runtime_AfterFork +func afterFork() + +//go:linkname afterForkInChild syscall.runtime_AfterForkInChild +func afterForkInChild() diff --git a/vendor/github.com/moby/buildkit/util/network/cniprovider/createns_linux.go b/vendor/github.com/moby/buildkit/util/network/cniprovider/createns_linux.go new file mode 100644 index 00000000..87012cf8 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/network/cniprovider/createns_linux.go @@ -0,0 +1,59 @@ +// +build linux + +package cniprovider + +import ( + "os" + "syscall" + "unsafe" + + "github.com/pkg/errors" + "golang.org/x/sys/unix" +) + +func createNetNS(p string) error { + f, err := os.Create(p) + if err != nil { + return err + } + if err := f.Close(); err != nil { + return err + } + procNetNSBytes, err := syscall.BytePtrFromString("/proc/self/ns/net") + if err != nil { + return err + } + pBytes, err := syscall.BytePtrFromString(p) + if err != nil { + return err + } + beforeFork() + + pid, _, errno := syscall.RawSyscall6(syscall.SYS_CLONE, uintptr(syscall.SIGCHLD)|unix.CLONE_NEWNET, 0, 0, 0, 0, 0) + if errno != 0 { + afterFork() + return errno + } + + if pid != 0 { + afterFork() + var ws unix.WaitStatus + _, err = unix.Wait4(int(pid), &ws, 0, nil) + for err == syscall.EINTR { + _, err = unix.Wait4(int(pid), &ws, 0, nil) + } + + if err != nil { + return errors.Wrapf(err, "failed to find pid=%d process", pid) + } + errno = syscall.Errno(ws.ExitStatus()) + if errno != 0 { + return errors.Wrap(errno, "failed to mount") + } + return nil + } + afterForkInChild() + _, _, errno = syscall.RawSyscall6(syscall.SYS_MOUNT, uintptr(unsafe.Pointer(procNetNSBytes)), uintptr(unsafe.Pointer(pBytes)), 0, uintptr(unix.MS_BIND), 0, 0) + syscall.RawSyscall(syscall.SYS_EXIT, uintptr(errno), 0, 0) + panic("unreachable") +} diff --git a/vendor/github.com/moby/buildkit/util/network/cniprovider/createns_nolinux.go b/vendor/github.com/moby/buildkit/util/network/cniprovider/createns_nolinux.go new file mode 100644 index 00000000..360ffb27 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/network/cniprovider/createns_nolinux.go @@ -0,0 +1,9 @@ +// +build !linux + +package cniprovider + +import "github.com/pkg/errors" + +func createNetNS(p string) error { + return errors.Errorf("creating netns for cni not supported") +} diff --git a/vendor/github.com/moby/buildkit/util/network/host.go b/vendor/github.com/moby/buildkit/util/network/host.go new file mode 100644 index 00000000..dc58b1ce --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/network/host.go @@ -0,0 +1,28 @@ +package network + +import ( + "github.com/containerd/containerd/oci" + specs 
"github.com/opencontainers/runtime-spec/specs-go" +) + +func NewHostProvider() Provider { + return &host{} +} + +type host struct { +} + +func (h *host) New() (Namespace, error) { + return &hostNS{}, nil +} + +type hostNS struct { +} + +func (h *hostNS) Set(s *specs.Spec) { + oci.WithHostNamespace(specs.NetworkNamespace)(nil, nil, nil, s) +} + +func (h *hostNS) Close() error { + return nil +} diff --git a/vendor/github.com/moby/buildkit/util/network/netproviders/network.go b/vendor/github.com/moby/buildkit/util/network/netproviders/network.go new file mode 100644 index 00000000..7b0b765b --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/network/netproviders/network.go @@ -0,0 +1,50 @@ +package netproviders + +import ( + "os" + + "github.com/moby/buildkit/solver/pb" + "github.com/moby/buildkit/util/network" + "github.com/moby/buildkit/util/network/cniprovider" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +type Opt struct { + CNI cniprovider.Opt + Mode string +} + +// Providers returns the network provider set +func Providers(opt Opt) (map[pb.NetMode]network.Provider, error) { + var defaultProvider network.Provider + switch opt.Mode { + case "cni": + cniProvider, err := cniprovider.New(opt.CNI) + if err != nil { + return nil, err + } + defaultProvider = cniProvider + case "host": + defaultProvider = network.NewHostProvider() + case "auto", "": + if _, err := os.Stat(opt.CNI.ConfigPath); err == nil { + cniProvider, err := cniprovider.New(opt.CNI) + if err != nil { + return nil, err + } + defaultProvider = cniProvider + } else { + logrus.Warnf("using host network as the default") + defaultProvider = network.NewHostProvider() + } + default: + return nil, errors.Errorf("invalid network mode: %q", opt.Mode) + } + + return map[pb.NetMode]network.Provider{ + pb.NetMode_UNSET: defaultProvider, + pb.NetMode_HOST: network.NewHostProvider(), + pb.NetMode_NONE: network.NewNoneProvider(), + }, nil +} diff --git a/vendor/github.com/moby/buildkit/util/network/network.go b/vendor/github.com/moby/buildkit/util/network/network.go new file mode 100644 index 00000000..70b0ccca --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/network/network.go @@ -0,0 +1,19 @@ +package network + +import ( + "io" + + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +// Provider interface for Network +type Provider interface { + New() (Namespace, error) +} + +// Namespace of network for workers +type Namespace interface { + io.Closer + // Set the namespace on the spec + Set(*specs.Spec) +} diff --git a/vendor/github.com/moby/buildkit/util/network/none.go b/vendor/github.com/moby/buildkit/util/network/none.go new file mode 100644 index 00000000..ebf1ebda --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/network/none.go @@ -0,0 +1,26 @@ +package network + +import ( + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +func NewNoneProvider() Provider { + return &none{} +} + +type none struct { +} + +func (h *none) New() (Namespace, error) { + return &noneNS{}, nil +} + +type noneNS struct { +} + +func (h *noneNS) Set(s *specs.Spec) { +} + +func (h *noneNS) Close() error { + return nil +} diff --git a/vendor/github.com/moby/buildkit/util/progress/logs/logs.go b/vendor/github.com/moby/buildkit/util/progress/logs/logs.go new file mode 100644 index 00000000..54f6ff89 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/progress/logs/logs.go @@ -0,0 +1,53 @@ +package logs + +import ( + "context" + "io" + "os" + + "github.com/moby/buildkit/client" + 
"github.com/moby/buildkit/identity" + "github.com/moby/buildkit/util/progress" + "github.com/pkg/errors" +) + +func NewLogStreams(ctx context.Context, printOutput bool) (io.WriteCloser, io.WriteCloser) { + return newStreamWriter(ctx, 1, printOutput), newStreamWriter(ctx, 2, printOutput) +} + +func newStreamWriter(ctx context.Context, stream int, printOutput bool) io.WriteCloser { + pw, _, _ := progress.FromContext(ctx) + return &streamWriter{ + pw: pw, + stream: stream, + printOutput: printOutput, + } +} + +type streamWriter struct { + pw progress.Writer + stream int + printOutput bool +} + +func (sw *streamWriter) Write(dt []byte) (int, error) { + sw.pw.Write(identity.NewID(), client.VertexLog{ + Stream: sw.stream, + Data: append([]byte{}, dt...), + }) + if sw.printOutput { + switch sw.stream { + case 1: + return os.Stdout.Write(dt) + case 2: + return os.Stderr.Write(dt) + default: + return 0, errors.Errorf("invalid stream %d", sw.stream) + } + } + return len(dt), nil +} + +func (sw *streamWriter) Close() error { + return sw.pw.Close() +} diff --git a/vendor/github.com/moby/buildkit/util/progress/multireader.go b/vendor/github.com/moby/buildkit/util/progress/multireader.go new file mode 100644 index 00000000..2bd3f2ca --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/progress/multireader.go @@ -0,0 +1,77 @@ +package progress + +import ( + "context" + "io" + "sync" +) + +type MultiReader struct { + mu sync.Mutex + main Reader + initialized bool + done chan struct{} + writers map[*progressWriter]func() +} + +func NewMultiReader(pr Reader) *MultiReader { + mr := &MultiReader{ + main: pr, + writers: make(map[*progressWriter]func()), + done: make(chan struct{}), + } + return mr +} + +func (mr *MultiReader) Reader(ctx context.Context) Reader { + mr.mu.Lock() + defer mr.mu.Unlock() + + pr, ctx, closeWriter := NewContext(ctx) + pw, _, ctx := FromContext(ctx) + + w := pw.(*progressWriter) + mr.writers[w] = closeWriter + + go func() { + select { + case <-ctx.Done(): + case <-mr.done: + } + mr.mu.Lock() + defer mr.mu.Unlock() + delete(mr.writers, w) + }() + + if !mr.initialized { + go mr.handle() + mr.initialized = true + } + + return pr +} + +func (mr *MultiReader) handle() error { + for { + p, err := mr.main.Read(context.TODO()) + if err != nil { + if err == io.EOF { + mr.mu.Lock() + for w, c := range mr.writers { + w.Close() + c() + } + mr.mu.Unlock() + return nil + } + return err + } + mr.mu.Lock() + for _, p := range p { + for w := range mr.writers { + w.writeRawProgress(p) + } + } + mr.mu.Unlock() + } +} diff --git a/vendor/github.com/moby/buildkit/util/progress/multiwriter.go b/vendor/github.com/moby/buildkit/util/progress/multiwriter.go new file mode 100644 index 00000000..51989368 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/progress/multiwriter.go @@ -0,0 +1,105 @@ +package progress + +import ( + "sort" + "sync" + "time" +) + +type rawProgressWriter interface { + WriteRawProgress(*Progress) error + Close() error +} + +type MultiWriter struct { + mu sync.Mutex + items []*Progress + writers map[rawProgressWriter]struct{} + done bool + meta map[string]interface{} +} + +func NewMultiWriter(opts ...WriterOption) *MultiWriter { + mw := &MultiWriter{ + writers: map[rawProgressWriter]struct{}{}, + meta: map[string]interface{}{}, + } + for _, o := range opts { + o(mw) + } + return mw +} + +func (ps *MultiWriter) Add(pw Writer) { + rw, ok := pw.(rawProgressWriter) + if !ok { + return + } + ps.mu.Lock() + plist := make([]*Progress, 0, len(ps.items)) + for _, p := range ps.items { + 
plist = append(plist, p) + } + sort.Slice(plist, func(i, j int) bool { + return plist[i].Timestamp.Before(plist[j].Timestamp) + }) + for _, p := range plist { + rw.WriteRawProgress(p) + } + ps.writers[rw] = struct{}{} + ps.mu.Unlock() +} + +func (ps *MultiWriter) Delete(pw Writer) { + rw, ok := pw.(rawProgressWriter) + if !ok { + return + } + + ps.mu.Lock() + delete(ps.writers, rw) + ps.mu.Unlock() +} + +func (ps *MultiWriter) Write(id string, v interface{}) error { + p := &Progress{ + ID: id, + Timestamp: time.Now(), + Sys: v, + meta: ps.meta, + } + return ps.WriteRawProgress(p) +} + +func (ps *MultiWriter) WriteRawProgress(p *Progress) error { + meta := p.meta + if len(ps.meta) > 0 { + meta = map[string]interface{}{} + for k, v := range p.meta { + meta[k] = v + } + for k, v := range ps.meta { + if _, ok := meta[k]; !ok { + meta[k] = v + } + } + } + p.meta = meta + return ps.writeRawProgress(p) +} + +func (ps *MultiWriter) writeRawProgress(p *Progress) error { + ps.mu.Lock() + defer ps.mu.Unlock() + ps.items = append(ps.items, p) + for w := range ps.writers { + if err := w.WriteRawProgress(p); err != nil { + return err + } + } + return nil +} + +func (ps *MultiWriter) Close() error { + return nil +} diff --git a/vendor/github.com/moby/buildkit/util/progress/progress.go b/vendor/github.com/moby/buildkit/util/progress/progress.go new file mode 100644 index 00000000..ffe3d88b --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/progress/progress.go @@ -0,0 +1,256 @@ +package progress + +import ( + "context" + "io" + "sort" + "sync" + "time" + + "github.com/pkg/errors" +) + +// Progress package provides utility functions for using the context to capture +// progress of a running function. All progress items written contain an ID +// that is used to collapse unread messages. + +type contextKeyT string + +var contextKey = contextKeyT("buildkit/util/progress") + +// FromContext returns a progress writer from a context. +func FromContext(ctx context.Context, opts ...WriterOption) (Writer, bool, context.Context) { + v := ctx.Value(contextKey) + pw, ok := v.(*progressWriter) + if !ok { + if pw, ok := v.(*MultiWriter); ok { + return pw, true, ctx + } + return &noOpWriter{}, false, ctx + } + pw = newWriter(pw) + for _, o := range opts { + o(pw) + } + ctx = context.WithValue(ctx, contextKey, pw) + return pw, true, ctx +} + +type WriterOption func(Writer) + +// NewContext returns a new context and a progress reader that captures all +// progress items written to this context. The last returned parameter is a closer +// function to signal that no new writes will happen to this context.
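To make the reader/writer pairing concrete before the definition that follows, here is a minimal usage sketch of this progress API. The caller code is hypothetical and not part of the vendored source; it assumes only the exported API of this package:

	package main

	import (
		"context"
		"fmt"
		"io"
		"time"

		"github.com/moby/buildkit/util/progress"
	)

	func main() {
		// NewContext returns a reader plus a context carrying the paired writer.
		pr, ctx, closeWriter := progress.NewContext(context.Background())

		go func() {
			pw, _, _ := progress.FromContext(ctx)
			now := time.Now()
			pw.Write("step1", progress.Status{Action: "running", Started: &now})
			pw.Close()    // detach this writer
			closeWriter() // signal that no further writers will attach
		}()

		for {
			items, err := pr.Read(context.TODO())
			if err == io.EOF {
				return // all writers closed and the buffer drained
			} else if err != nil {
				panic(err)
			}
			for _, p := range items {
				fmt.Println(p.ID, p.Sys)
			}
		}
	}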
+func NewContext(ctx context.Context) (Reader, context.Context, func()) { + pr, pw, cancel := pipe() + ctx = WithProgress(ctx, pw) + return pr, ctx, cancel +} + +func WithProgress(ctx context.Context, pw Writer) context.Context { + return context.WithValue(ctx, contextKey, pw) +} + +func WithMetadata(key string, val interface{}) WriterOption { + return func(w Writer) { + if pw, ok := w.(*progressWriter); ok { + pw.meta[key] = val + } + if pw, ok := w.(*MultiWriter); ok { + pw.meta[key] = val + } + } +} + +type Writer interface { + Write(id string, value interface{}) error + Close() error +} + +type Reader interface { + Read(context.Context) ([]*Progress, error) +} + +type Progress struct { + ID string + Timestamp time.Time + Sys interface{} + meta map[string]interface{} +} + +type Status struct { + Action string + Current int + Total int + Started *time.Time + Completed *time.Time +} + +type progressReader struct { + ctx context.Context + cond *sync.Cond + mu sync.Mutex + writers map[*progressWriter]struct{} + dirty map[string]*Progress +} + +func (pr *progressReader) Read(ctx context.Context) ([]*Progress, error) { + done := make(chan struct{}) + defer close(done) + go func() { + select { + case <-done: + case <-ctx.Done(): + pr.mu.Lock() + pr.cond.Broadcast() + pr.mu.Unlock() + } + }() + pr.mu.Lock() + for { + select { + case <-ctx.Done(): + pr.mu.Unlock() + return nil, ctx.Err() + default: + } + dmap := pr.dirty + if len(dmap) == 0 { + select { + case <-pr.ctx.Done(): + if len(pr.writers) == 0 { + pr.mu.Unlock() + return nil, io.EOF + } + default: + } + pr.cond.Wait() + continue + } + pr.dirty = make(map[string]*Progress) + pr.mu.Unlock() + + out := make([]*Progress, 0, len(dmap)) + for _, p := range dmap { + out = append(out, p) + } + + sort.Slice(out, func(i, j int) bool { + return out[i].Timestamp.Before(out[j].Timestamp) + }) + + return out, nil + } +} + +func (pr *progressReader) append(pw *progressWriter) { + pr.mu.Lock() + defer pr.mu.Unlock() + + select { + case <-pr.ctx.Done(): + return + default: + pr.writers[pw] = struct{}{} + } +} + +func pipe() (*progressReader, *progressWriter, func()) { + ctx, cancel := context.WithCancel(context.Background()) + pr := &progressReader{ + ctx: ctx, + writers: make(map[*progressWriter]struct{}), + dirty: make(map[string]*Progress), + } + pr.cond = sync.NewCond(&pr.mu) + go func() { + <-ctx.Done() + pr.mu.Lock() + pr.cond.Broadcast() + pr.mu.Unlock() + }() + pw := &progressWriter{ + reader: pr, + } + return pr, pw, cancel +} + +func newWriter(pw *progressWriter) *progressWriter { + meta := make(map[string]interface{}) + for k, v := range pw.meta { + meta[k] = v + } + pw = &progressWriter{ + reader: pw.reader, + meta: meta, + } + pw.reader.append(pw) + return pw +} + +type progressWriter struct { + done bool + reader *progressReader + meta map[string]interface{} +} + +func (pw *progressWriter) Write(id string, v interface{}) error { + if pw.done { + return errors.Errorf("writing %s to closed progress writer", id) + } + return pw.writeRawProgress(&Progress{ + ID: id, + Timestamp: time.Now(), + Sys: v, + meta: pw.meta, + }) +} + +func (pw *progressWriter) WriteRawProgress(p *Progress) error { + meta := p.meta + if len(pw.meta) > 0 { + meta = map[string]interface{}{} + for k, v := range p.meta { + meta[k] = v + } + for k, v := range pw.meta { + if _, ok := meta[k]; !ok { + meta[k] = v + } + } + } + p.meta = meta + return pw.writeRawProgress(p) +} + +func (pw *progressWriter) writeRawProgress(p *Progress) error { + pw.reader.mu.Lock() + 
pw.reader.dirty[p.ID] = p + pw.reader.cond.Broadcast() + pw.reader.mu.Unlock() + return nil +} + +func (pw *progressWriter) Close() error { + pw.reader.mu.Lock() + delete(pw.reader.writers, pw) + pw.reader.mu.Unlock() + pw.reader.cond.Broadcast() + pw.done = true + return nil +} + +func (p *Progress) Meta(key string) (interface{}, bool) { + v, ok := p.meta[key] + return v, ok +} + +type noOpWriter struct{} + +func (pw *noOpWriter) Write(_ string, _ interface{}) error { + return nil +} + +func (pw *noOpWriter) Close() error { + return nil +} diff --git a/vendor/github.com/moby/buildkit/util/pull/pull.go b/vendor/github.com/moby/buildkit/util/pull/pull.go new file mode 100644 index 00000000..e22e7f3e --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/pull/pull.go @@ -0,0 +1,417 @@ +package pull + +import ( + "context" + "sync" + "time" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/diff" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/platforms" + "github.com/containerd/containerd/reference" + "github.com/containerd/containerd/remotes" + "github.com/containerd/containerd/remotes/docker" + "github.com/containerd/containerd/remotes/docker/schema1" + "github.com/moby/buildkit/snapshot" + "github.com/moby/buildkit/util/imageutil" + "github.com/moby/buildkit/util/progress" + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +type Puller struct { + Snapshotter snapshot.Snapshotter + ContentStore content.Store + Applier diff.Applier + Src reference.Spec + Platform *ocispec.Platform + // See NewResolver() + Resolver remotes.Resolver + resolveOnce sync.Once + desc ocispec.Descriptor + ref string + resolveErr error +} + +type Pulled struct { + Ref string + Descriptor ocispec.Descriptor + Layers []ocispec.Descriptor + MetadataBlobs []ocispec.Descriptor +} + +func (p *Puller) Resolve(ctx context.Context) (string, ocispec.Descriptor, error) { + p.resolveOnce.Do(func() { + resolveProgressDone := oneOffProgress(ctx, "resolve "+p.Src.String()) + + desc := ocispec.Descriptor{ + Digest: p.Src.Digest(), + } + if desc.Digest != "" { + info, err := p.ContentStore.Info(ctx, desc.Digest) + if err == nil { + desc.Size = info.Size + p.ref = p.Src.String() + ra, err := p.ContentStore.ReaderAt(ctx, desc) + if err == nil { + mt, err := imageutil.DetectManifestMediaType(ra) + if err == nil { + desc.MediaType = mt + p.desc = desc + resolveProgressDone(nil) + return + } + } + } + } + + ref, desc, err := p.Resolver.Resolve(ctx, p.Src.String()) + if err != nil { + p.resolveErr = err + resolveProgressDone(err) + return + } + p.desc = desc + p.ref = ref + resolveProgressDone(nil) + }) + return p.ref, p.desc, p.resolveErr +} + +func (p *Puller) Pull(ctx context.Context) (*Pulled, error) { + if _, _, err := p.Resolve(ctx); err != nil { + return nil, err + } + + var platform platforms.MatchComparer + if p.Platform != nil { + platform = platforms.Only(*p.Platform) + } else { + platform = platforms.Default() + } + + ongoing := newJobs(p.ref) + + pctx, stopProgress := context.WithCancel(ctx) + + go showProgress(pctx, ongoing, p.ContentStore) + + fetcher, err := p.Resolver.Fetcher(ctx, p.ref) + if err != nil { + stopProgress() + return nil, err + } + + // TODO: need a wrapper snapshot interface that combines content + // and snapshots as 1) buildkit shouldn't have a dependency on contentstore + // or 2) cachemanager should manage 
the contentstore + handlers := []images.Handler{ + images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + ongoing.add(desc) + return nil, nil + }), + } + var schema1Converter *schema1.Converter + if p.desc.MediaType == images.MediaTypeDockerSchema1Manifest { + schema1Converter = schema1.NewConverter(p.ContentStore, fetcher) + handlers = append(handlers, schema1Converter) + } else { + // Get all the children for a descriptor + childrenHandler := images.ChildrenHandler(p.ContentStore) + // Filter the children by the platform + childrenHandler = images.FilterPlatforms(childrenHandler, platform) + // Limit manifests pulled to the best match in an index + childrenHandler = images.LimitManifests(childrenHandler, platform, 1) + + dslHandler, err := docker.AppendDistributionSourceLabel(p.ContentStore, p.ref) + if err != nil { + stopProgress() + return nil, err + } + handlers = append(handlers, + remotes.FetchHandler(p.ContentStore, fetcher), + childrenHandler, + dslHandler, + ) + } + + if err := images.Dispatch(ctx, images.Handlers(handlers...), nil, p.desc); err != nil { + stopProgress() + return nil, err + } + stopProgress() + + var usedBlobs, unusedBlobs []ocispec.Descriptor + + if schema1Converter != nil { + ongoing.remove(p.desc) // Not left in the content store so this is sufficient. + p.desc, err = schema1Converter.Convert(ctx) + if err != nil { + return nil, err + } + ongoing.add(p.desc) + + var mu sync.Mutex // images.Dispatch calls handlers in parallel + allBlobs := make(map[digest.Digest]ocispec.Descriptor) + for _, j := range ongoing.added { + allBlobs[j.Digest] = j.Descriptor + } + + handlers := []images.Handler{ + images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + mu.Lock() + defer mu.Unlock() + usedBlobs = append(usedBlobs, desc) + delete(allBlobs, desc.Digest) + return nil, nil + }), + images.FilterPlatforms(images.ChildrenHandler(p.ContentStore), platform), + } + + if err := images.Dispatch(ctx, images.Handlers(handlers...), nil, p.desc); err != nil { + return nil, err + } + + for _, j := range allBlobs { + unusedBlobs = append(unusedBlobs, j) + } + } else { + for _, j := range ongoing.added { + usedBlobs = append(usedBlobs, j.Descriptor) + } + } + + // split all pulled data to layers and rest. layers remain roots and are deleted with snapshots. rest will be linked to layers. 
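As a standalone reference for the split below: whether a pulled blob counts as a layer is decided purely by its media type. A predicate equivalent to the switch that follows might look like this (illustrative helper using the images and ocispec imports already present in this file; not part of the patch):

	// isLayerType reports whether a media type denotes layer data
	// (OCI or Docker schema2, plain or gzipped, foreign or not).
	func isLayerType(mt string) bool {
		switch mt {
		case ocispec.MediaTypeImageLayer, ocispec.MediaTypeImageLayerGzip,
			images.MediaTypeDockerSchema2Layer, images.MediaTypeDockerSchema2LayerGzip,
			images.MediaTypeDockerSchema2LayerForeign, images.MediaTypeDockerSchema2LayerForeignGzip:
			return true
		}
		return false
	}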
+ var notLayerBlobs []ocispec.Descriptor + var layerBlobs []ocispec.Descriptor + for _, j := range usedBlobs { + switch j.MediaType { + case ocispec.MediaTypeImageLayer, images.MediaTypeDockerSchema2Layer, ocispec.MediaTypeImageLayerGzip, images.MediaTypeDockerSchema2LayerGzip, images.MediaTypeDockerSchema2LayerForeign, images.MediaTypeDockerSchema2LayerForeignGzip: + layerBlobs = append(layerBlobs, j) + default: + notLayerBlobs = append(notLayerBlobs, j) + } + } + + layers, err := getLayers(ctx, p.ContentStore, p.desc, platform) + if err != nil { + return nil, err + } + + return &Pulled{ + Ref: p.ref, + Descriptor: p.desc, + Layers: layers, + MetadataBlobs: notLayerBlobs, + }, nil +} + +func getLayers(ctx context.Context, provider content.Provider, desc ocispec.Descriptor, platform platforms.MatchComparer) ([]ocispec.Descriptor, error) { + manifest, err := images.Manifest(ctx, provider, desc, platform) + if err != nil { + return nil, errors.WithStack(err) + } + image := images.Image{Target: desc} + diffIDs, err := image.RootFS(ctx, provider, platform) + if err != nil { + return nil, errors.Wrap(err, "failed to resolve rootfs") + } + if len(diffIDs) != len(manifest.Layers) { + return nil, errors.Errorf("mismatched image rootfs and manifest layers %+v %+v", diffIDs, manifest.Layers) + } + layers := make([]ocispec.Descriptor, len(diffIDs)) + for i := range diffIDs { + desc := manifest.Layers[i] + if desc.Annotations == nil { + desc.Annotations = map[string]string{} + } + desc.Annotations["containerd.io/uncompressed"] = diffIDs[i].String() + layers[i] = desc + } + return layers, nil +} + +func showProgress(ctx context.Context, ongoing *jobs, cs content.Store) { + var ( + ticker = time.NewTicker(150 * time.Millisecond) + statuses = map[string]statusInfo{} + done bool + ) + defer ticker.Stop() + + pw, _, ctx := progress.FromContext(ctx) + defer pw.Close() + + for { + select { + case <-ticker.C: + case <-ctx.Done(): + done = true + } + + resolved := "resolved" + if !ongoing.isResolved() { + resolved = "resolving" + } + statuses[ongoing.name] = statusInfo{ + Ref: ongoing.name, + Status: resolved, + } + + actives := make(map[string]statusInfo) + + if !done { + active, err := cs.ListStatuses(ctx, "") + if err != nil { + // log.G(ctx).WithError(err).Error("active check failed") + continue + } + // update status of active entries! 
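The two loops below correlate containerd's in-flight content writes with the jobs being tracked. The link is remotes.MakeRefKey, which derives a stable per-blob key from a descriptor's media type and digest; the fetch handler opens its content-store write under the same key, so statuses reported by cs.ListStatuses can be matched back to jobs. A sketch of the assumed key shape (illustrative only):

	// For a gzipped schema2 layer the key has the form "layer-sha256:<hex>".
	key := remotes.MakeRefKey(ctx, ocispec.Descriptor{
		MediaType: images.MediaTypeDockerSchema2LayerGzip,
		Digest:    dgst,
	})
	// cs.ListStatuses reports in-flight writes under the same ref, so
	// actives[key] yields the live offset/total for that blob.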
+ for _, active := range active { + actives[active.Ref] = statusInfo{ + Ref: active.Ref, + Status: "downloading", + Offset: active.Offset, + Total: active.Total, + StartedAt: active.StartedAt, + UpdatedAt: active.UpdatedAt, + } + } + } + + // now, update the items in jobs that are not in active + for _, j := range ongoing.jobs() { + refKey := remotes.MakeRefKey(ctx, j.Descriptor) + if a, ok := actives[refKey]; ok { + started := j.started + pw.Write(j.Digest.String(), progress.Status{ + Action: a.Status, + Total: int(a.Total), + Current: int(a.Offset), + Started: &started, + }) + continue + } + + if !j.done { + info, err := cs.Info(context.TODO(), j.Digest) + if err != nil { + if errdefs.IsNotFound(err) { + pw.Write(j.Digest.String(), progress.Status{ + Action: "waiting", + }) + continue + } + } else { + j.done = true + } + + if done || j.done { + started := j.started + createdAt := info.CreatedAt + pw.Write(j.Digest.String(), progress.Status{ + Action: "done", + Current: int(info.Size), + Total: int(info.Size), + Completed: &createdAt, + Started: &started, + }) + } + } + } + if done { + return + } + } +} + +// jobs provides a way of identifying the download keys for a particular task +// encountered during the pull walk. +// +// This is very minimal and will probably be replaced with something more +// featured. +type jobs struct { + name string + added map[digest.Digest]*job + mu sync.Mutex + resolved bool +} + +type job struct { + ocispec.Descriptor + done bool + started time.Time +} + +func newJobs(name string) *jobs { + return &jobs{ + name: name, + added: make(map[digest.Digest]*job), + } +} + +func (j *jobs) add(desc ocispec.Descriptor) { + j.mu.Lock() + defer j.mu.Unlock() + + if _, ok := j.added[desc.Digest]; ok { + return + } + j.added[desc.Digest] = &job{ + Descriptor: desc, + started: time.Now(), + } +} + +func (j *jobs) remove(desc ocispec.Descriptor) { + j.mu.Lock() + defer j.mu.Unlock() + + delete(j.added, desc.Digest) +} + +func (j *jobs) jobs() []*job { + j.mu.Lock() + defer j.mu.Unlock() + + descs := make([]*job, 0, len(j.added)) + for _, j := range j.added { + descs = append(descs, j) + } + return descs +} + +func (j *jobs) isResolved() bool { + j.mu.Lock() + defer j.mu.Unlock() + return j.resolved +} + +type statusInfo struct { + Ref string + Status string + Offset int64 + Total int64 + StartedAt time.Time + UpdatedAt time.Time +} + +func oneOffProgress(ctx context.Context, id string) func(err error) error { + pw, _, _ := progress.FromContext(ctx) + now := time.Now() + st := progress.Status{ + Started: &now, + } + pw.Write(id, st) + return func(err error) error { + // TODO: set error on status + now := time.Now() + st.Completed = &now + pw.Write(id, st) + pw.Close() + return err + } +} diff --git a/vendor/github.com/moby/buildkit/util/pull/resolver.go b/vendor/github.com/moby/buildkit/util/pull/resolver.go new file mode 100644 index 00000000..ca425c24 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/pull/resolver.go @@ -0,0 +1,178 @@ +package pull + +import ( + "context" + "sync" + "sync/atomic" + "time" + + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/remotes" + "github.com/containerd/containerd/remotes/docker" + distreference "github.com/docker/distribution/reference" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/source" + "github.com/moby/buildkit/util/resolver" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +var cache *resolverCache + +func init() { + cache = newResolverCache() +} + +func
NewResolver(ctx context.Context, hosts docker.RegistryHosts, sm *session.Manager, imageStore images.Store, mode source.ResolveMode, ref string) remotes.Resolver { + if res := cache.Get(ctx, ref); res != nil { + return withLocal(res, imageStore, mode) + } + + r := resolver.New(ctx, hosts, sm) + r = cache.Add(ctx, ref, r) + + return withLocal(r, imageStore, mode) +} + +func EnsureManifestRequested(ctx context.Context, res remotes.Resolver, ref string) { + rr := res + lr, ok := res.(withLocalResolver) + if ok { + if atomic.LoadInt64(&lr.counter) > 0 { + return + } + rr = lr.Resolver + } + cr, ok := rr.(*cachedResolver) + if !ok { + return + } + if atomic.LoadInt64(&cr.counter) == 0 { + res.Resolve(ctx, ref) + } +} + +func withLocal(r remotes.Resolver, imageStore images.Store, mode source.ResolveMode) remotes.Resolver { + if imageStore == nil || mode == source.ResolveModeForcePull { + return r + } + + return withLocalResolver{Resolver: r, is: imageStore, mode: mode} +} + +// A remotes.Resolver which checks the local image store if the real +// resolver cannot find the image, essentially falling back to a local +// image if one is present. +// +// We do not override the Fetcher or Pusher methods: +// +// - Fetcher is called by github.com/containerd/containerd/remotes/:fetch() +// only after it has checked for the content locally, so avoid the +// hassle of interposing a local-fetch proxy and simply pass on the +// request. +// - Pusher wouldn't make sense to push locally, so just forward. + +type withLocalResolver struct { + counter int64 // needs to be 64bit aligned for 32bit systems + remotes.Resolver + is images.Store + mode source.ResolveMode +} + +func (r withLocalResolver) Resolve(ctx context.Context, ref string) (string, ocispec.Descriptor, error) { + if r.mode == source.ResolveModePreferLocal { + if img, err := r.is.Get(ctx, ref); err == nil { + atomic.AddInt64(&r.counter, 1) + return ref, img.Target, nil + } + } + + n, desc, err := r.Resolver.Resolve(ctx, ref) + if err == nil { + return n, desc, err + } + + if r.mode == source.ResolveModeDefault { + if img, err := r.is.Get(ctx, ref); err == nil { + return ref, img.Target, nil + } + } + + return "", ocispec.Descriptor{}, err +} + +type resolverCache struct { + mu sync.Mutex + m map[string]cachedResolver +} + +type cachedResolver struct { + counter int64 + timeout time.Time + remotes.Resolver +} + +func (cr *cachedResolver) Resolve(ctx context.Context, ref string) (name string, desc ocispec.Descriptor, err error) { + atomic.AddInt64(&cr.counter, 1) + return cr.Resolver.Resolve(ctx, ref) +} + +func (r *resolverCache) Add(ctx context.Context, ref string, resolver remotes.Resolver) remotes.Resolver { + r.mu.Lock() + defer r.mu.Unlock() + + ref = r.repo(ref) + "-" + session.FromContext(ctx) + + cr, ok := r.m[ref] + cr.timeout = time.Now().Add(time.Minute) + if ok { + return &cr + } + + cr.Resolver = resolver + r.m[ref] = cr + return &cr +} + +func (r *resolverCache) repo(refStr string) string { + ref, err := distreference.ParseNormalizedNamed(refStr) + if err != nil { + return refStr + } + return ref.Name() +} + +func (r *resolverCache) Get(ctx context.Context, ref string) remotes.Resolver { + r.mu.Lock() + defer r.mu.Unlock() + + ref = r.repo(ref) + "-" + session.FromContext(ctx) + + cr, ok := r.m[ref] + if !ok { + return nil + } + return &cr +} + +func (r *resolverCache) clean(now time.Time) { + r.mu.Lock() + for k, cr := range r.m { + if now.After(cr.timeout) { + delete(r.m, k) + } + } + r.mu.Unlock() +} + +func newResolverCache() 
*resolverCache { + rc := &resolverCache{ + m: map[string]cachedResolver{}, + } + t := time.NewTicker(time.Minute) + go func() { + for { + rc.clean(<-t.C) + } + }() + return rc +} diff --git a/vendor/github.com/moby/buildkit/util/push/push.go b/vendor/github.com/moby/buildkit/util/push/push.go new file mode 100644 index 00000000..b8d372d7 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/push/push.go @@ -0,0 +1,285 @@ +package push + +import ( + "context" + "encoding/json" + "fmt" + "strings" + "sync" + "time" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/remotes" + "github.com/containerd/containerd/remotes/docker" + "github.com/docker/distribution/reference" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/util/flightcontrol" + "github.com/moby/buildkit/util/imageutil" + "github.com/moby/buildkit/util/progress" + "github.com/moby/buildkit/util/resolver" + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +func Push(ctx context.Context, sm *session.Manager, cs content.Store, dgst digest.Digest, ref string, insecure bool, hosts docker.RegistryHosts, byDigest bool) error { + desc := ocispec.Descriptor{ + Digest: dgst, + } + parsed, err := reference.ParseNormalizedNamed(ref) + if err != nil { + return err + } + if byDigest && !reference.IsNameOnly(parsed) { + return errors.Errorf("can't push tagged ref %s by digest", parsed.String()) + } + + if byDigest { + ref = parsed.Name() + } else { + ref = reference.TagNameOnly(parsed).String() + } + + resolver := resolver.New(ctx, hosts, sm) + + pusher, err := resolver.Pusher(ctx, ref) + if err != nil { + return err + } + + var m sync.Mutex + manifestStack := []ocispec.Descriptor{} + + filterHandler := images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + switch desc.MediaType { + case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest, + images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex: + m.Lock() + manifestStack = append(manifestStack, desc) + m.Unlock() + return nil, images.ErrStopHandler + default: + return nil, nil + } + }) + + pushHandler := remotes.PushHandler(pusher, cs) + pushUpdateSourceHandler, err := updateDistributionSourceHandler(cs, pushHandler, ref) + if err != nil { + return err + } + + handlers := append([]images.Handler{}, + images.HandlerFunc(annotateDistributionSourceHandler(cs, childrenHandler(cs))), + filterHandler, + dedupeHandler(pushUpdateSourceHandler), + ) + + ra, err := cs.ReaderAt(ctx, desc) + if err != nil { + return err + } + + mtype, err := imageutil.DetectManifestMediaType(ra) + if err != nil { + return err + } + + layersDone := oneOffProgress(ctx, "pushing layers") + err = images.Dispatch(ctx, images.Handlers(handlers...), nil, ocispec.Descriptor{ + Digest: dgst, + Size: ra.Size(), + MediaType: mtype, + }) + layersDone(err) + if err != nil { + return err + } + + mfstDone := oneOffProgress(ctx, fmt.Sprintf("pushing manifest for %s", ref)) + for i := len(manifestStack) - 1; i >= 0; i-- { + _, err := pushHandler(ctx, manifestStack[i]) + if err != nil { + mfstDone(err) + return err + } + } + mfstDone(nil) + return nil +} + +func annotateDistributionSourceHandler(cs content.Store, f images.HandlerFunc) func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + return func(ctx 
context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + children, err := f(ctx, desc) + if err != nil { + return nil, err + } + + // only add distribution source for the config or blob data descriptor + switch desc.MediaType { + case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest, + images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex: + default: + return children, nil + } + + for i := range children { + child := children[i] + + info, err := cs.Info(ctx, child.Digest) + if err != nil { + return nil, err + } + + for k, v := range info.Labels { + if !strings.HasPrefix(k, "containerd.io/distribution.source.") { + continue + } + + if child.Annotations == nil { + child.Annotations = map[string]string{} + } + child.Annotations[k] = v + } + + children[i] = child + } + return children, nil + } +} + +func oneOffProgress(ctx context.Context, id string) func(err error) error { + pw, _, _ := progress.FromContext(ctx) + now := time.Now() + st := progress.Status{ + Started: &now, + } + pw.Write(id, st) + return func(err error) error { + // TODO: set error on status + now := time.Now() + st.Completed = &now + pw.Write(id, st) + pw.Close() + return err + } +} + +func childrenHandler(provider content.Provider) images.HandlerFunc { + return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + var descs []ocispec.Descriptor + switch desc.MediaType { + case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest: + p, err := content.ReadBlob(ctx, provider, desc) + if err != nil { + return nil, err + } + + // TODO(stevvooe): We just assume oci manifest, for now. There may be + // subtle differences from the docker version. + var manifest ocispec.Manifest + if err := json.Unmarshal(p, &manifest); err != nil { + return nil, err + } + + descs = append(descs, manifest.Config) + descs = append(descs, manifest.Layers...) + case images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex: + p, err := content.ReadBlob(ctx, provider, desc) + if err != nil { + return nil, err + } + + var index ocispec.Index + if err := json.Unmarshal(p, &index); err != nil { + return nil, err + } + + for _, m := range index.Manifests { + if m.Digest != "" { + descs = append(descs, m) + } + } + case images.MediaTypeDockerSchema2Layer, images.MediaTypeDockerSchema2LayerGzip, + images.MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig, + ocispec.MediaTypeImageLayer, ocispec.MediaTypeImageLayerGzip: + // childless data types. + return nil, nil + default: + logrus.Warnf("encountered unknown type %v; children may not be fetched", desc.MediaType) + } + + return descs, nil + } +} + +// updateDistributionSourceHandler will update distribution source label after +// pushing layer successfully. +// +// FIXME(fuweid): There is race condition for current design of distribution +// source label if there are pull/push jobs consuming same layer. 
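For context, the distribution-source labels involved here have the form containerd.io/distribution.source.<registry> = <repository>, for example containerd.io/distribution.source.docker.io=library/alpine. A small helper to inspect them on a stored blob could look like this (hypothetical code using the content, strings, and fmt imports already in this file; it assumes a live content.Store):

	// printDistributionSources lists the registries a blob is known to
	// originate from, based on containerd's distribution-source labels.
	func printDistributionSources(ctx context.Context, cs content.Store, dgst digest.Digest) error {
		info, err := cs.Info(ctx, dgst)
		if err != nil {
			return err
		}
		for k, v := range info.Labels {
			if strings.HasPrefix(k, "containerd.io/distribution.source.") {
				registry := strings.TrimPrefix(k, "containerd.io/distribution.source.")
				fmt.Printf("%s -> %s\n", registry, v) // v may be a comma-separated repo list
			}
		}
		return nil
	}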
+func updateDistributionSourceHandler(cs content.Store, pushF images.HandlerFunc, ref string) (images.HandlerFunc, error) { + updateF, err := docker.AppendDistributionSourceLabel(cs, ref) + if err != nil { + return nil, err + } + + return images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + var islayer bool + + switch desc.MediaType { + case images.MediaTypeDockerSchema2Layer, images.MediaTypeDockerSchema2LayerGzip, + ocispec.MediaTypeImageLayer, ocispec.MediaTypeImageLayerGzip: + islayer = true + } + + children, err := pushF(ctx, desc) + if err != nil { + return nil, err + } + + // update distribution source to layer + if islayer { + if _, err := updateF(ctx, desc); err != nil { + logrus.Warnf("failed to update distribution source for layer %v: %v", desc.Digest, err) + } + } + return children, nil + }), nil +} + +func dedupeHandler(h images.HandlerFunc) images.HandlerFunc { + var g flightcontrol.Group + res := map[digest.Digest][]ocispec.Descriptor{} + var mu sync.Mutex + + return images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + res, err := g.Do(ctx, desc.Digest.String(), func(ctx context.Context) (interface{}, error) { + mu.Lock() + if r, ok := res[desc.Digest]; ok { + mu.Unlock() + return r, nil + } + mu.Unlock() + + children, err := h(ctx, desc) + if err != nil { + return nil, err + } + + mu.Lock() + res[desc.Digest] = children + mu.Unlock() + return children, nil + }) + if err != nil { + return nil, err + } + if res == nil { + return nil, nil + } + return res.([]ocispec.Descriptor), nil + }) +} diff --git a/vendor/github.com/moby/buildkit/util/resolver/resolver.go b/vendor/github.com/moby/buildkit/util/resolver/resolver.go new file mode 100644 index 00000000..8d8b9f7f --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/resolver/resolver.go @@ -0,0 +1,219 @@ +package resolver + +import ( + "context" + "crypto/tls" + "crypto/x509" + "io/ioutil" + "net" + "net/http" + "os" + "path/filepath" + "runtime" + "strings" + "time" + + "github.com/containerd/containerd/remotes" + "github.com/containerd/containerd/remotes/docker" + "github.com/moby/buildkit/cmd/buildkitd/config" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/session/auth" + "github.com/moby/buildkit/util/tracing" + "github.com/pkg/errors" +) + +func fillInsecureOpts(host string, c config.RegistryConfig, h *docker.RegistryHost) error { + tc, err := loadTLSConfig(c) + if err != nil { + return err + } + + if c.PlainHTTP != nil && *c.PlainHTTP { + h.Scheme = "http" + } else if c.Insecure != nil && *c.Insecure { + tc.InsecureSkipVerify = true + } else if c.PlainHTTP == nil { + if ok, _ := docker.MatchLocalhost(host); ok { + h.Scheme = "http" + } + } + + transport := newDefaultTransport() + transport.TLSClientConfig = tc + + h.Client = &http.Client{ + Transport: tracing.NewTransport(transport), + } + return nil +} + +func loadTLSConfig(c config.RegistryConfig) (*tls.Config, error) { + for _, d := range c.TLSConfigDir { + fs, err := ioutil.ReadDir(d) + if err != nil && !os.IsNotExist(err) && !os.IsPermission(err) { + return nil, errors.WithStack(err) + } + for _, f := range fs { + if strings.HasSuffix(f.Name(), ".crt") { + c.RootCAs = append(c.RootCAs, filepath.Join(d, f.Name())) + } + if strings.HasSuffix(f.Name(), ".cert") { + c.KeyPairs = append(c.KeyPairs, config.TLSKeyPair{ + Certificate: filepath.Join(d, f.Name()), + Key: filepath.Join(d, strings.TrimSuffix(f.Name(), ".cert")+".key"), + }) + } + } + } + + 
tc := &tls.Config{} + if len(c.RootCAs) > 0 { + systemPool, err := x509.SystemCertPool() + if err != nil { + if runtime.GOOS == "windows" { + systemPool = x509.NewCertPool() + } else { + return nil, errors.Wrapf(err, "unable to get system cert pool") + } + } + tc.RootCAs = systemPool + } + + for _, p := range c.RootCAs { + dt, err := ioutil.ReadFile(p) + if err != nil { + return nil, errors.Wrapf(err, "failed to read %s", p) + } + tc.RootCAs.AppendCertsFromPEM(dt) + } + + for _, kp := range c.KeyPairs { + cert, err := tls.LoadX509KeyPair(kp.Certificate, kp.Key) + if err != nil { + return nil, errors.Wrapf(err, "failed to load keypair for %s", kp.Certificate) + } + tc.Certificates = append(tc.Certificates, cert) + } + return tc, nil +} + +func NewRegistryConfig(m map[string]config.RegistryConfig) docker.RegistryHosts { + return docker.Registries( + func(host string) ([]docker.RegistryHost, error) { + c, ok := m[host] + if !ok { + return nil, nil + } + + var out []docker.RegistryHost + + for _, mirror := range c.Mirrors { + h := docker.RegistryHost{ + Scheme: "https", + Client: newDefaultClient(), + Host: mirror, + Path: "/v2", + Capabilities: docker.HostCapabilityPull | docker.HostCapabilityResolve, + } + + if err := fillInsecureOpts(mirror, m[mirror], &h); err != nil { + return nil, err + } + + out = append(out, h) + } + + if host == "docker.io" { + host = "registry-1.docker.io" + } + + h := docker.RegistryHost{ + Scheme: "https", + Client: newDefaultClient(), + Host: host, + Path: "/v2", + Capabilities: docker.HostCapabilityPush | docker.HostCapabilityPull | docker.HostCapabilityResolve, + } + + if err := fillInsecureOpts(host, c, &h); err != nil { + return nil, err + } + + out = append(out, h) + return out, nil + }, + docker.ConfigureDefaultRegistries( + docker.WithClient(newDefaultClient()), + docker.WithPlainHTTP(docker.MatchLocalhost), + ), + ) +} + +func New(ctx context.Context, hosts docker.RegistryHosts, sm *session.Manager) remotes.Resolver { + return docker.NewResolver(docker.ResolverOptions{ + Hosts: hostsWithCredentials(ctx, hosts, sm), + }) +} + +func hostsWithCredentials(ctx context.Context, hosts docker.RegistryHosts, sm *session.Manager) docker.RegistryHosts { + id := session.FromContext(ctx) + if id == "" { + return hosts + } + return func(domain string) ([]docker.RegistryHost, error) { + res, err := hosts(domain) + if err != nil { + return nil, err + } + if len(res) == 0 { + return nil, nil + } + + timeoutCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + caller, err := sm.Get(timeoutCtx, id) + if err != nil { + return nil, err + } + + a := docker.NewDockerAuthorizer( + docker.WithAuthClient(res[0].Client), + docker.WithAuthCreds(auth.CredentialsFunc(context.TODO(), caller)), + ) + for i := range res { + res[i].Authorizer = a + } + return res, nil + } +} + +func newDefaultClient() *http.Client { + return &http.Client{ + Transport: newDefaultTransport(), + } +} + +// newDefaultTransport is for the pull or push client. +// +// NOTE: For push, http2 must be disabled for https because its flow control +// will limit data transfer. The net/http package doesn't provide tunable http2 +// settings, which limits push performance.
+// +// REF: https://github.com/golang/go/issues/14077 +func newDefaultTransport() *http.Transport { + return &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).DialContext, + MaxIdleConns: 10, + IdleConnTimeout: 30 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 5 * time.Second, + DisableKeepAlives: true, + TLSNextProto: make(map[string]func(authority string, c *tls.Conn) http.RoundTripper), + } +} diff --git a/vendor/github.com/moby/buildkit/util/rootless/specconv/specconv_linux.go b/vendor/github.com/moby/buildkit/util/rootless/specconv/specconv_linux.go new file mode 100644 index 00000000..12646e43 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/rootless/specconv/specconv_linux.go @@ -0,0 +1,40 @@ +package specconv + +import ( + "strings" + + "github.com/opencontainers/runtime-spec/specs-go" +) + +// ToRootless converts spec to be compatible with "rootless" runc. +// * Remove /sys mount +// * Remove cgroups +// +// See docs/rootless.md for the supported runc revision. +func ToRootless(spec *specs.Spec) error { + // Remove /sys mount because we can't mount /sys when the daemon netns + // is not unshared from the host. + // + // Instead, we could bind-mount /sys from the host, however, `rbind, ro` + // does not make /sys/fs/cgroup read-only (and we can't bind-mount /sys + // without rbind) + // + // PR for making /sys/fs/cgroup read-only is proposed, but it is very + // complicated: https://github.com/opencontainers/runc/pull/1869 + // + // For buildkit usecase, we suppose we don't need to provide /sys to + // containers and remove /sys mount as a workaround. + var mounts []specs.Mount + for _, mount := range spec.Mounts { + if strings.HasPrefix(mount.Destination, "/sys") { + continue + } + mounts = append(mounts, mount) + } + spec.Mounts = mounts + + // Remove cgroups so as to avoid `container_linux.go:337: starting container process caused "process_linux.go:280: applying cgroup configuration for process caused \"mkdir /sys/fs/cgroup/cpuset/buildkit: permission denied\""` + spec.Linux.Resources = nil + spec.Linux.CgroupsPath = "" + return nil +} diff --git a/vendor/github.com/moby/buildkit/util/system/path_unix.go b/vendor/github.com/moby/buildkit/util/system/path_unix.go new file mode 100644 index 00000000..c607c4db --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/system/path_unix.go @@ -0,0 +1,14 @@ +// +build !windows + +package system + +// DefaultPathEnv is unix style list of directories to search for +// executables. Each directory is separated from the next by a colon +// ':' character . +const DefaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + +// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter, +// is the system drive. This is a no-op on Linux. +func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) { + return path, nil +} diff --git a/vendor/github.com/moby/buildkit/util/system/path_windows.go b/vendor/github.com/moby/buildkit/util/system/path_windows.go new file mode 100644 index 00000000..cbfe2c15 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/system/path_windows.go @@ -0,0 +1,37 @@ +// +build windows + +package system + +import ( + "fmt" + "path/filepath" + "strings" +) + +// DefaultPathEnv is deliberately empty on Windows as the default path will be set by +// the container. 
Docker has no context of what the default path should be. +const DefaultPathEnv = "" + +// CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path. +// This is used, for example, when validating a user-provided path in docker cp. +// If a drive letter is supplied, it must be the system drive. The drive letter +// is always removed. Also, it translates it to OS semantics (IOW / to \). We +// need the path in this syntax so that it can ultimately be concatenated with +// a Windows long-path which doesn't support drive-letters. Examples: +// C: --> Fail +// C:\ --> \ +// a --> a +// /a --> \a +// d:\ --> Fail +func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) { + if len(path) == 2 && string(path[1]) == ":" { + return "", fmt.Errorf("No relative path specified in %q", path) + } + if !filepath.IsAbs(path) || len(path) < 2 { + return filepath.FromSlash(path), nil + } + if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") { + return "", fmt.Errorf("The specified path is not on the system drive (C:)") + } + return filepath.FromSlash(path[2:]), nil +} diff --git a/vendor/github.com/moby/buildkit/util/system/seccomp_linux.go b/vendor/github.com/moby/buildkit/util/system/seccomp_linux.go new file mode 100644 index 00000000..62afa03f --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/system/seccomp_linux.go @@ -0,0 +1,29 @@ +// +build linux,seccomp + +package system + +import ( + "sync" + + "golang.org/x/sys/unix" +) + +var seccompSupported bool +var seccompOnce sync.Once + +func SeccompSupported() bool { + seccompOnce.Do(func() { + seccompSupported = getSeccompSupported() + }) + return seccompSupported +} + +func getSeccompSupported() bool { + if err := unix.Prctl(unix.PR_GET_SECCOMP, 0, 0, 0, 0); err != unix.EINVAL { + // Make sure the kernel has CONFIG_SECCOMP_FILTER. + if err := unix.Prctl(unix.PR_SET_SECCOMP, unix.SECCOMP_MODE_FILTER, 0, 0, 0); err != unix.EINVAL { + return true + } + } + return false +} diff --git a/vendor/github.com/moby/buildkit/util/system/seccomp_nolinux.go b/vendor/github.com/moby/buildkit/util/system/seccomp_nolinux.go new file mode 100644 index 00000000..e348c379 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/system/seccomp_nolinux.go @@ -0,0 +1,7 @@ +// +build !linux,seccomp + +package system + +func SeccompSupported() bool { + return false +} diff --git a/vendor/github.com/moby/buildkit/util/system/seccomp_noseccomp.go b/vendor/github.com/moby/buildkit/util/system/seccomp_noseccomp.go new file mode 100644 index 00000000..84cfb7fa --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/system/seccomp_noseccomp.go @@ -0,0 +1,7 @@ +// +build !seccomp + +package system + +func SeccompSupported() bool { + return false +} diff --git a/vendor/github.com/moby/buildkit/util/throttle/throttle.go b/vendor/github.com/moby/buildkit/util/throttle/throttle.go new file mode 100644 index 00000000..490ccd9c --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/throttle/throttle.go @@ -0,0 +1,58 @@ +package throttle + +import ( + "sync" + "time" +) + +// Throttle wraps a function so that the internal function does not get called +// more frequently than the specified duration. +func Throttle(d time.Duration, f func()) func() { + return throttle(d, f, true) +} + +// ThrottleAfter wraps a function so that the internal function does not get called +// more frequently than the specified duration. The delay is added after the function +// has been called.
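In practice the two wrappers differ only in where the delay sits relative to the call; a short sketch (hypothetical caller, assuming a flush func() and the time import):

	// Throttle: each burst of calls is coalesced, and the wrapped
	// function runs only after the delay has elapsed.
	flushSoon := throttle.Throttle(100*time.Millisecond, flush)
	// ThrottleAfter: the wrapped function runs promptly, and the delay
	// is enforced before the next run.
	flushNow := throttle.ThrottleAfter(100*time.Millisecond, flush)

	for i := 0; i < 1000; i++ {
		flushSoon() // a rapid burst results in at most ~10 flushes per second
	}
	flushNow()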
+func ThrottleAfter(d time.Duration, f func()) func() { + return throttle(d, f, false) +} + +func throttle(d time.Duration, f func(), wait bool) func() { + var next, running bool + var mu sync.Mutex + return func() { + mu.Lock() + defer mu.Unlock() + + next = true + if !running { + running = true + go func() { + for { + mu.Lock() + if !next { + running = false + mu.Unlock() + return + } + if !wait { + next = false + } + mu.Unlock() + + if wait { + time.Sleep(d) + mu.Lock() + next = false + mu.Unlock() + f() + } else { + f() + time.Sleep(d) + } + } + }() + } + } +} diff --git a/vendor/github.com/moby/buildkit/util/tracing/multispan.go b/vendor/github.com/moby/buildkit/util/tracing/multispan.go new file mode 100644 index 00000000..2b157a1d --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/tracing/multispan.go @@ -0,0 +1,22 @@ +package tracing + +import ( + opentracing "github.com/opentracing/opentracing-go" +) + +// MultiSpan allows shared tracing to multiple spans. +// TODO: This is a temporary solution and doesn't really support shared tracing yet. Instead, the first always wins. + +type MultiSpan struct { + opentracing.Span +} + +func NewMultiSpan() *MultiSpan { + return &MultiSpan{} +} + +func (ms *MultiSpan) Add(s opentracing.Span) { + if ms.Span == nil { + ms.Span = s + } +} diff --git a/vendor/github.com/moby/buildkit/util/tracing/tracing.go b/vendor/github.com/moby/buildkit/util/tracing/tracing.go new file mode 100644 index 00000000..8f2d4dd9 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/tracing/tracing.go @@ -0,0 +1,115 @@ +package tracing + +import ( + "context" + "fmt" + "io" + "net/http" + + "github.com/opentracing-contrib/go-stdlib/nethttp" + opentracing "github.com/opentracing/opentracing-go" + "github.com/opentracing/opentracing-go/ext" + "github.com/opentracing/opentracing-go/log" +) + +// StartSpan starts a new span as a child of the span in context. +// If there is no span in context then this is a no-op. +// The difference from opentracing.StartSpanFromContext is that this method +// does not depend on the global tracer. +func StartSpan(ctx context.Context, operationName string, opts ...opentracing.StartSpanOption) (opentracing.Span, context.Context) { + parent := opentracing.SpanFromContext(ctx) + tracer := opentracing.Tracer(&opentracing.NoopTracer{}) + if parent != nil { + tracer = parent.Tracer() + opts = append(opts, opentracing.ChildOf(parent.Context())) + } + span := tracer.StartSpan(operationName, opts...) + if parent != nil { + return span, opentracing.ContextWithSpan(ctx, span) + } + return span, ctx +} + +// FinishWithError finalizes the span and sets the error if one is passed +func FinishWithError(span opentracing.Span, err error) { + if err != nil { + fields := []log.Field{ + log.String("event", "error"), + log.String("message", err.Error()), + } + if _, ok := err.(interface { + Cause() error + }); ok { + fields = append(fields, log.String("stack", fmt.Sprintf("%+v", err))) + } + span.LogFields(fields...) + ext.Error.Set(span, true) + } + span.Finish() +} + +// ContextWithSpanFromContext sets the tracing span of a context from other +// context if one is not already set.
Alternative would be +// context.WithoutCancel() that would copy the context but reset ctx.Done +func ContextWithSpanFromContext(ctx, ctx2 context.Context) context.Context { + // if already is a span then noop + if span := opentracing.SpanFromContext(ctx); span != nil { + return ctx + } + if span := opentracing.SpanFromContext(ctx2); span != nil { + return opentracing.ContextWithSpan(ctx, span) + } + return ctx +} + +var DefaultTransport http.RoundTripper = &Transport{ + RoundTripper: &nethttp.Transport{RoundTripper: http.DefaultTransport}, +} + +var DefaultClient = &http.Client{ + Transport: DefaultTransport, +} + +type Transport struct { + http.RoundTripper +} + +func NewTransport(rt http.RoundTripper) http.RoundTripper { + return &Transport{ + RoundTripper: &nethttp.Transport{RoundTripper: rt}, + } +} + +func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { + span := opentracing.SpanFromContext(req.Context()) + if span == nil { // no tracer connected with either request or transport + return t.RoundTripper.RoundTrip(req) + } + + req, tracer := nethttp.TraceRequest(span.Tracer(), req) + + resp, err := t.RoundTripper.RoundTrip(req) + if err != nil { + tracer.Finish() + return resp, err + } + + if req.Method == "HEAD" { + tracer.Finish() + } else { + resp.Body = closeTracker{resp.Body, tracer.Finish} + } + + return resp, err +} + +type closeTracker struct { + io.ReadCloser + finish func() +} + +func (c closeTracker) Close() error { + err := c.ReadCloser.Close() + c.finish() + return err +} diff --git a/vendor/github.com/moby/buildkit/util/winlayers/applier.go b/vendor/github.com/moby/buildkit/util/winlayers/applier.go new file mode 100644 index 00000000..91e41523 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/winlayers/applier.go @@ -0,0 +1,187 @@ +package winlayers + +import ( + "archive/tar" + "context" + "io" + "io/ioutil" + "runtime" + "strings" + "sync" + + "github.com/containerd/containerd/archive" + "github.com/containerd/containerd/archive/compression" + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/diff" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/mount" + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +func NewFileSystemApplierWithWindows(cs content.Provider, a diff.Applier) diff.Applier { + if runtime.GOOS == "windows" { + return a + } + + return &winApplier{ + cs: cs, + a: a, + } +} + +type winApplier struct { + cs content.Provider + a diff.Applier +} + +func (s *winApplier) Apply(ctx context.Context, desc ocispec.Descriptor, mounts []mount.Mount, opts ...diff.ApplyOpt) (d ocispec.Descriptor, err error) { + if !hasWindowsLayerMode(ctx) { + return s.a.Apply(ctx, desc, mounts, opts...) 
+ } + + compressed, err := images.DiffCompression(ctx, desc.MediaType) + if err != nil { + return ocispec.Descriptor{}, errors.Wrapf(errdefs.ErrNotImplemented, "unsupported diff media type: %v", desc.MediaType) + } + + var ocidesc ocispec.Descriptor + if err := mount.WithTempMount(ctx, mounts, func(root string) error { + ra, err := s.cs.ReaderAt(ctx, desc) + if err != nil { + return errors.Wrap(err, "failed to get reader from content store") + } + defer ra.Close() + + r := content.NewReader(ra) + if compressed != "" { + ds, err := compression.DecompressStream(r) + if err != nil { + return err + } + defer ds.Close() + r = ds + } + + digester := digest.Canonical.Digester() + rc := &readCounter{ + r: io.TeeReader(r, digester.Hash()), + } + + rc2, discard := filter(rc, func(hdr *tar.Header) bool { + if strings.HasPrefix(hdr.Name, "Files/") { + hdr.Name = strings.TrimPrefix(hdr.Name, "Files/") + hdr.Linkname = strings.TrimPrefix(hdr.Linkname, "Files/") + // TODO: could convert the windows PAX headers to xattr here to reuse + // the original ones in diff for parent directories and file modifications + return true + } + return false + }) + + if _, err := archive.Apply(ctx, root, rc2); err != nil { + discard(err) + return err + } + + // Read any trailing data + if _, err := io.Copy(ioutil.Discard, rc); err != nil { + discard(err) + return err + } + + ocidesc = ocispec.Descriptor{ + MediaType: ocispec.MediaTypeImageLayer, + Size: rc.c, + Digest: digester.Digest(), + } + return nil + + }); err != nil { + return ocispec.Descriptor{}, err + } + return ocidesc, nil +} + +type readCounter struct { + r io.Reader + c int64 +} + +func (rc *readCounter) Read(p []byte) (n int, err error) { + n, err = rc.r.Read(p) + rc.c += int64(n) + return +} + +func filter(in io.Reader, f func(*tar.Header) bool) (io.Reader, func(error)) { + pr, pw := io.Pipe() + + rc := &readCanceler{Reader: in} + + go func() { + tarReader := tar.NewReader(rc) + tarWriter := tar.NewWriter(pw) + + pw.CloseWithError(func() error { + for { + h, err := tarReader.Next() + if err == io.EOF { + break + } + if err != nil { + return err + } + if f(h) { + if err := tarWriter.WriteHeader(h); err != nil { + return err + } + if h.Size > 0 { + if _, err := io.Copy(tarWriter, tarReader); err != nil { + return err + } + } + } else { + if h.Size > 0 { + if _, err := io.Copy(ioutil.Discard, tarReader); err != nil { + return err + } + } + } + } + return tarWriter.Close() + }()) + }() + + discard := func(err error) { + rc.cancel(err) + pw.CloseWithError(err) + } + + return pr, discard +} + +type readCanceler struct { + mu sync.Mutex + io.Reader + err error +} + +func (r *readCanceler) Read(b []byte) (int, error) { + r.mu.Lock() + if r.err != nil { + r.mu.Unlock() + return 0, r.err + } + n, err := r.Reader.Read(b) + r.mu.Unlock() + return n, err +} + +func (r *readCanceler) cancel(err error) { + r.mu.Lock() + r.err = err + r.mu.Unlock() +} diff --git a/vendor/github.com/moby/buildkit/util/winlayers/context.go b/vendor/github.com/moby/buildkit/util/winlayers/context.go new file mode 100644 index 00000000..c0bd3f8a --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/winlayers/context.go @@ -0,0 +1,19 @@ +package winlayers + +import "context" + +type contextKeyT string + +var contextKey = contextKeyT("buildkit/winlayers-on") + +func UseWindowsLayerMode(ctx context.Context) context.Context { + return context.WithValue(ctx, contextKey, true) +} + +func hasWindowsLayerMode(ctx context.Context) bool { + v := ctx.Value(contextKey) + if v == nil { + return false + 
} + return true +} diff --git a/vendor/github.com/moby/buildkit/util/winlayers/differ.go b/vendor/github.com/moby/buildkit/util/winlayers/differ.go new file mode 100644 index 00000000..cdbc335d --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/winlayers/differ.go @@ -0,0 +1,274 @@ +package winlayers + +import ( + "archive/tar" + "context" + "crypto/rand" + "encoding/base64" + "fmt" + "io" + "time" + + "github.com/containerd/containerd/archive" + "github.com/containerd/containerd/archive/compression" + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/diff" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/mount" + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +const ( + keyFileAttr = "MSWINDOWS.fileattr" + keySDRaw = "MSWINDOWS.rawsd" + keyCreationTime = "LIBARCHIVE.creationtime" +) + +func NewWalkingDiffWithWindows(store content.Store, d diff.Comparer) diff.Comparer { + return &winDiffer{ + store: store, + d: d, + } +} + +var emptyDesc = ocispec.Descriptor{} + +type winDiffer struct { + store content.Store + d diff.Comparer +} + +// Compare creates a diff between the given mounts and uploads the result +// to the content store. +func (s *winDiffer) Compare(ctx context.Context, lower, upper []mount.Mount, opts ...diff.Opt) (d ocispec.Descriptor, err error) { + if !hasWindowsLayerMode(ctx) { + return s.d.Compare(ctx, lower, upper, opts...) + } + + var config diff.Config + for _, opt := range opts { + if err := opt(&config); err != nil { + return emptyDesc, err + } + } + + if config.MediaType == "" { + config.MediaType = ocispec.MediaTypeImageLayerGzip + } + + var isCompressed bool + switch config.MediaType { + case ocispec.MediaTypeImageLayer: + case ocispec.MediaTypeImageLayerGzip: + isCompressed = true + default: + return emptyDesc, errors.Wrapf(errdefs.ErrNotImplemented, "unsupported diff media type: %v", config.MediaType) + } + + var ocidesc ocispec.Descriptor + if err := mount.WithTempMount(ctx, lower, func(lowerRoot string) error { + return mount.WithTempMount(ctx, upper, func(upperRoot string) error { + var newReference bool + if config.Reference == "" { + newReference = true + config.Reference = uniqueRef() + } + + cw, err := s.store.Writer(ctx, + content.WithRef(config.Reference), + content.WithDescriptor(ocispec.Descriptor{ + MediaType: config.MediaType, // most contentstore implementations just ignore this + })) + if err != nil { + return errors.Wrap(err, "failed to open writer") + } + defer func() { + if err != nil { + cw.Close() + if newReference { + if err := s.store.Abort(ctx, config.Reference); err != nil { + log.G(ctx).WithField("ref", config.Reference).Warnf("failed to delete diff upload") + } + } + } + }() + if !newReference { + if err := cw.Truncate(0); err != nil { + return err + } + } + + if isCompressed { + dgstr := digest.SHA256.Digester() + compressed, err := compression.CompressStream(cw, compression.Gzip) + if err != nil { + return errors.Wrap(err, "failed to get compressed stream") + } + var w io.Writer = io.MultiWriter(compressed, dgstr.Hash()) + w, discard, done := makeWindowsLayer(w) + err = archive.WriteDiff(ctx, w, lowerRoot, upperRoot) + if err != nil { + discard(err) + } + <-done + compressed.Close() + if err != nil { + return errors.Wrap(err, "failed to write compressed diff") + } + + if config.Labels == nil { + config.Labels = 
map[string]string{} + } + config.Labels["containerd.io/uncompressed"] = dgstr.Digest().String() + } else { + w, discard, done := makeWindowsLayer(cw) + if err = archive.WriteDiff(ctx, w, lowerRoot, upperRoot); err != nil { + discard(err) + return errors.Wrap(err, "failed to write diff") + } + <-done + } + + var commitopts []content.Opt + if config.Labels != nil { + commitopts = append(commitopts, content.WithLabels(config.Labels)) + } + + dgst := cw.Digest() + if err := cw.Commit(ctx, 0, dgst, commitopts...); err != nil { + return errors.Wrap(err, "failed to commit") + } + + info, err := s.store.Info(ctx, dgst) + if err != nil { + return errors.Wrap(err, "failed to get info from content store") + } + + ocidesc = ocispec.Descriptor{ + MediaType: config.MediaType, + Size: info.Size, + Digest: info.Digest, + } + return nil + }) + }); err != nil { + return emptyDesc, err + } + + return ocidesc, nil +} + +func uniqueRef() string { + t := time.Now() + var b [3]byte + // Ignore read failures, just decreases uniqueness + rand.Read(b[:]) + return fmt.Sprintf("%d-%s", t.UnixNano(), base64.URLEncoding.EncodeToString(b[:])) +} + +func prepareWinHeader(h *tar.Header) { + if h.PAXRecords == nil { + h.PAXRecords = map[string]string{} + } + if h.Typeflag == tar.TypeDir { + h.Mode |= 1 << 14 + h.PAXRecords[keyFileAttr] = "16" + } + + if h.Typeflag == tar.TypeReg { + h.Mode |= 1 << 15 + h.PAXRecords[keyFileAttr] = "32" + } + + if !h.ModTime.IsZero() { + h.PAXRecords[keyCreationTime] = fmt.Sprintf("%d.%d", h.ModTime.Unix(), h.ModTime.Nanosecond()) + } + + h.Format = tar.FormatPAX +} + +func addSecurityDescriptor(h *tar.Header) { + if h.Typeflag == tar.TypeDir { + // O:BAG:SYD:(A;OICI;FA;;;BA)(A;OICI;FA;;;SY)(A;;FA;;;BA)(A;OICIIO;GA;;;CO)(A;OICI;0x1200a9;;;BU)(A;CI;LC;;;BU)(A;CI;DC;;;BU) + h.PAXRecords[keySDRaw] = "AQAEgBQAAAAkAAAAAAAAADAAAAABAgAAAAAABSAAAAAgAgAAAQEAAAAAAAUSAAAAAgCoAAcAAAAAAxgA/wEfAAECAAAAAAAFIAAAACACAAAAAxQA/wEfAAEBAAAAAAAFEgAAAAAAGAD/AR8AAQIAAAAAAAUgAAAAIAIAAAALFAAAAAAQAQEAAAAAAAMAAAAAAAMYAKkAEgABAgAAAAAABSAAAAAhAgAAAAIYAAQAAAABAgAAAAAABSAAAAAhAgAAAAIYAAIAAAABAgAAAAAABSAAAAAhAgAA" + } + + if h.Typeflag == tar.TypeReg { + // O:BAG:SYD:(A;;FA;;;BA)(A;;FA;;;SY)(A;;0x1200a9;;;BU) + h.PAXRecords[keySDRaw] = "AQAEgBQAAAAkAAAAAAAAADAAAAABAgAAAAAABSAAAAAgAgAAAQEAAAAAAAUSAAAAAgBMAAMAAAAAABgA/wEfAAECAAAAAAAFIAAAACACAAAAABQA/wEfAAEBAAAAAAAFEgAAAAAAGACpABIAAQIAAAAAAAUgAAAAIQIAAA==" + } +} + +func makeWindowsLayer(w io.Writer) (io.Writer, func(error), chan error) { + pr, pw := io.Pipe() + done := make(chan error) + + go func() { + tarReader := tar.NewReader(pr) + tarWriter := tar.NewWriter(w) + + err := func() error { + + h := &tar.Header{ + Name: "Hives", + Typeflag: tar.TypeDir, + ModTime: time.Now(), + } + prepareWinHeader(h) + if err := tarWriter.WriteHeader(h); err != nil { + return err + } + + h = &tar.Header{ + Name: "Files", + Typeflag: tar.TypeDir, + ModTime: time.Now(), + } + prepareWinHeader(h) + if err := tarWriter.WriteHeader(h); err != nil { + return err + } + + for { + h, err := tarReader.Next() + if err == io.EOF { + break + } + if err != nil { + return err + } + h.Name = "Files/" + h.Name + if h.Linkname != "" { + h.Linkname = "Files/" + h.Linkname + } + prepareWinHeader(h) + addSecurityDescriptor(h) + if err := tarWriter.WriteHeader(h); err != nil { + return err + } + if h.Size > 0 { + if _, err := io.Copy(tarWriter, tarReader); err != nil { + return err + } + } + } + return tarWriter.Close() + }() + if err != nil { + logrus.Errorf("makeWindowsLayer %+v", err) + } + 
pw.CloseWithError(err) + done <- err + return + }() + + discard := func(err error) { + pw.CloseWithError(err) + } + + return pw, discard, done +} diff --git a/vendor/github.com/moby/buildkit/worker/base/worker.go b/vendor/github.com/moby/buildkit/worker/base/worker.go new file mode 100644 index 00000000..96926bbe --- /dev/null +++ b/vendor/github.com/moby/buildkit/worker/base/worker.go @@ -0,0 +1,550 @@ +package base + +import ( + "context" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "time" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/diff" + "github.com/containerd/containerd/gc" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/leases" + "github.com/containerd/containerd/platforms" + "github.com/containerd/containerd/remotes/docker" + "github.com/docker/docker/pkg/idtools" + "github.com/moby/buildkit/cache" + "github.com/moby/buildkit/cache/blobs" + "github.com/moby/buildkit/cache/metadata" + "github.com/moby/buildkit/client" + "github.com/moby/buildkit/client/llb" + "github.com/moby/buildkit/executor" + "github.com/moby/buildkit/exporter" + imageexporter "github.com/moby/buildkit/exporter/containerimage" + localexporter "github.com/moby/buildkit/exporter/local" + ociexporter "github.com/moby/buildkit/exporter/oci" + tarexporter "github.com/moby/buildkit/exporter/tar" + "github.com/moby/buildkit/frontend" + "github.com/moby/buildkit/identity" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/snapshot" + "github.com/moby/buildkit/snapshot/imagerefchecker" + "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/solver/llbsolver/ops" + "github.com/moby/buildkit/solver/pb" + "github.com/moby/buildkit/source" + "github.com/moby/buildkit/source/containerimage" + "github.com/moby/buildkit/source/git" + "github.com/moby/buildkit/source/http" + "github.com/moby/buildkit/source/local" + "github.com/moby/buildkit/util/binfmt_misc" + "github.com/moby/buildkit/util/contentutil" + "github.com/moby/buildkit/util/leaseutil" + "github.com/moby/buildkit/util/progress" + "github.com/moby/buildkit/worker" + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + specs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + bolt "go.etcd.io/bbolt" + "golang.org/x/sync/errgroup" +) + +const labelCreatedAt = "buildkit/createdat" + +// TODO: this file should be removed. containerd defines ContainerdWorker, oci defines OCIWorker. There is no base worker. + +// WorkerOpt is specific to a worker. +// See also CommonOpt. +type WorkerOpt struct { + ID string + Labels map[string]string + Platforms []specs.Platform + GCPolicy []client.PruneInfo + MetadataStore *metadata.Store + Executor executor.Executor + Snapshotter snapshot.Snapshotter + ContentStore content.Store + Applier diff.Applier + Differ diff.Comparer + ImageStore images.Store // optional + RegistryHosts docker.RegistryHosts + IdentityMapping *idtools.IdentityMapping + LeaseManager leases.Manager + GarbageCollect func(context.Context) (gc.Stats, error) +} + +// Worker is a local worker instance with dedicated snapshotter, cache, and so on. +// TODO: s/Worker/OpWorker/g ? 
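+//
+// A construction sketch (editorial illustration, not vendored source; `opt`
+// stands for a WorkerOpt populated by a concrete backend such as the OCI or
+// containerd worker):
+//
+//	w, err := base.NewWorker(opt)
+//	if err != nil {
+//		return err
+//	}
+//	wc := &worker.Controller{}
+//	_ = wc.Add(w)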
+type Worker struct { + WorkerOpt + CacheManager cache.Manager + SourceManager *source.Manager + imageWriter *imageexporter.ImageWriter + ImageSource source.Source +} + +// NewWorker instantiates a local worker +func NewWorker(opt WorkerOpt) (*Worker, error) { + imageRefChecker := imagerefchecker.New(imagerefchecker.Opt{ + ImageStore: opt.ImageStore, + ContentStore: opt.ContentStore, + }) + + cm, err := cache.NewManager(cache.ManagerOpt{ + Snapshotter: opt.Snapshotter, + MetadataStore: opt.MetadataStore, + PruneRefChecker: imageRefChecker, + Applier: opt.Applier, + GarbageCollect: opt.GarbageCollect, + LeaseManager: opt.LeaseManager, + ContentStore: opt.ContentStore, + }) + if err != nil { + return nil, err + } + + sm, err := source.NewManager() + if err != nil { + return nil, err + } + + is, err := containerimage.NewSource(containerimage.SourceOpt{ + Snapshotter: opt.Snapshotter, + ContentStore: opt.ContentStore, + Applier: opt.Applier, + ImageStore: opt.ImageStore, + CacheAccessor: cm, + RegistryHosts: opt.RegistryHosts, + LeaseManager: opt.LeaseManager, + }) + if err != nil { + return nil, err + } + + sm.Register(is) + + if err := git.Supported(); err == nil { + gs, err := git.NewSource(git.Opt{ + CacheAccessor: cm, + MetadataStore: opt.MetadataStore, + }) + if err != nil { + return nil, err + } + sm.Register(gs) + } else { + logrus.Warnf("git source cannot be enabled: %v", err) + } + + hs, err := http.NewSource(http.Opt{ + CacheAccessor: cm, + MetadataStore: opt.MetadataStore, + }) + if err != nil { + return nil, err + } + + sm.Register(hs) + + ss, err := local.NewSource(local.Opt{ + CacheAccessor: cm, + MetadataStore: opt.MetadataStore, + }) + if err != nil { + return nil, err + } + sm.Register(ss) + + iw, err := imageexporter.NewImageWriter(imageexporter.WriterOpt{ + Snapshotter: opt.Snapshotter, + ContentStore: opt.ContentStore, + Applier: opt.Applier, + Differ: opt.Differ, + }) + if err != nil { + return nil, err + } + + leases, err := opt.LeaseManager.List(context.TODO(), "labels.\"buildkit/lease.temporary\"") + if err != nil { + return nil, err + } + for _, l := range leases { + opt.LeaseManager.Delete(context.TODO(), l) + } + + return &Worker{ + WorkerOpt: opt, + CacheManager: cm, + SourceManager: sm, + imageWriter: iw, + ImageSource: is, + }, nil +} + +func (w *Worker) ContentStore() content.Store { + return w.WorkerOpt.ContentStore +} + +func (w *Worker) ID() string { + return w.WorkerOpt.ID +} + +func (w *Worker) Labels() map[string]string { + return w.WorkerOpt.Labels +} + +func (w *Worker) Platforms(noCache bool) []specs.Platform { + if noCache { + pm := make(map[string]struct{}, len(w.WorkerOpt.Platforms)) + for _, p := range w.WorkerOpt.Platforms { + pm[platforms.Format(p)] = struct{}{} + } + for _, p := range binfmt_misc.SupportedPlatforms(noCache) { + if _, ok := pm[p]; !ok { + pp, _ := platforms.Parse(p) + w.WorkerOpt.Platforms = append(w.WorkerOpt.Platforms, pp) + } + } + } + return w.WorkerOpt.Platforms +} + +func (w *Worker) GCPolicy() []client.PruneInfo { + return w.WorkerOpt.GCPolicy +} + +func (w *Worker) LoadRef(id string, hidden bool) (cache.ImmutableRef, error) { + var opts []cache.RefOption + if hidden { + opts = append(opts, cache.NoUpdateLastUsed) + } + return w.CacheManager.Get(context.TODO(), id, opts...) 
+} + +func (w *Worker) ResolveOp(v solver.Vertex, s frontend.FrontendLLBBridge, sm *session.Manager) (solver.Op, error) { + if baseOp, ok := v.Sys().(*pb.Op); ok { + switch op := baseOp.Op.(type) { + case *pb.Op_Source: + return ops.NewSourceOp(v, op, baseOp.Platform, w.SourceManager, sm, w) + case *pb.Op_Exec: + return ops.NewExecOp(v, op, baseOp.Platform, w.CacheManager, sm, w.MetadataStore, w.Executor, w) + case *pb.Op_File: + return ops.NewFileOp(v, op, w.CacheManager, w.MetadataStore, w) + case *pb.Op_Build: + return ops.NewBuildOp(v, op, s, w) + default: + return nil, errors.Errorf("no support for %T", op) + } + } + return nil, errors.Errorf("could not resolve %v", v) +} + +func (w *Worker) PruneCacheMounts(ctx context.Context, ids []string) error { + mu := ops.CacheMountsLocker() + mu.Lock() + defer mu.Unlock() + + for _, id := range ids { + id = "cache-dir:" + id + sis, err := w.MetadataStore.Search(id) + if err != nil { + return err + } + for _, si := range sis { + for _, k := range si.Indexes() { + if k == id || strings.HasPrefix(k, id+":") { + if siCached := w.CacheManager.Metadata(si.ID()); siCached != nil { + si = siCached + } + if err := cache.CachePolicyDefault(si); err != nil { + return err + } + si.Queue(func(b *bolt.Bucket) error { + return si.SetValue(b, k, nil) + }) + if err := si.Commit(); err != nil { + return err + } + // if ref is unused try to clean it up right away by releasing it + if mref, err := w.CacheManager.GetMutable(ctx, si.ID()); err == nil { + go mref.Release(context.TODO()) + } + break + } + } + } + } + + ops.ClearActiveCacheMounts() + return nil +} + +func (w *Worker) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt, sm *session.Manager) (digest.Digest, []byte, error) { + // ImageSource is typically source/containerimage + resolveImageConfig, ok := w.ImageSource.(resolveImageConfig) + if !ok { + return "", nil, errors.Errorf("worker %q does not implement ResolveImageConfig", w.ID()) + } + return resolveImageConfig.ResolveImageConfig(ctx, ref, opt, sm) +} + +type resolveImageConfig interface { + ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt, sm *session.Manager) (digest.Digest, []byte, error) +} + +func (w *Worker) Exec(ctx context.Context, meta executor.Meta, rootFS cache.ImmutableRef, stdin io.ReadCloser, stdout, stderr io.WriteCloser) error { + active, err := w.CacheManager.New(ctx, rootFS) + if err != nil { + return err + } + defer active.Release(context.TODO()) + return w.Executor.Exec(ctx, meta, active, nil, stdin, stdout, stderr) +} + +func (w *Worker) DiskUsage(ctx context.Context, opt client.DiskUsageInfo) ([]*client.UsageInfo, error) { + return w.CacheManager.DiskUsage(ctx, opt) +} + +func (w *Worker) Prune(ctx context.Context, ch chan client.UsageInfo, opt ...client.PruneInfo) error { + return w.CacheManager.Prune(ctx, ch, opt...) 
+} + +func (w *Worker) Exporter(name string, sm *session.Manager) (exporter.Exporter, error) { + switch name { + case client.ExporterImage: + return imageexporter.New(imageexporter.Opt{ + Images: w.ImageStore, + SessionManager: sm, + ImageWriter: w.imageWriter, + RegistryHosts: w.RegistryHosts, + LeaseManager: w.LeaseManager, + }) + case client.ExporterLocal: + return localexporter.New(localexporter.Opt{ + SessionManager: sm, + }) + case client.ExporterTar: + return tarexporter.New(tarexporter.Opt{ + SessionManager: sm, + }) + case client.ExporterOCI: + return ociexporter.New(ociexporter.Opt{ + SessionManager: sm, + ImageWriter: w.imageWriter, + Variant: ociexporter.VariantOCI, + LeaseManager: w.LeaseManager, + }) + case client.ExporterDocker: + return ociexporter.New(ociexporter.Opt{ + SessionManager: sm, + ImageWriter: w.imageWriter, + Variant: ociexporter.VariantDocker, + LeaseManager: w.LeaseManager, + }) + default: + return nil, errors.Errorf("exporter %q could not be found", name) + } +} + +func (w *Worker) GetRemote(ctx context.Context, ref cache.ImmutableRef, createIfNeeded bool) (*solver.Remote, error) { + ctx, done, err := leaseutil.WithLease(ctx, w.LeaseManager, leaseutil.MakeTemporary) + if err != nil { + return nil, err + } + defer done(ctx) + + // TODO(fuweid): add compression option or config for cache exporter. + diffPairs, err := blobs.GetDiffPairs(ctx, w.ContentStore(), w.Differ, ref, createIfNeeded, blobs.DefaultCompression) + if err != nil { + return nil, errors.Wrap(err, "failed calculating diff pairs for exported snapshot") + } + if len(diffPairs) == 0 { + return nil, nil + } + + createdTimes := getCreatedTimes(ref) + if len(createdTimes) != len(diffPairs) { + return nil, errors.Errorf("invalid createdtimes/diffpairs") + } + + descs := make([]ocispec.Descriptor, len(diffPairs)) + + cs := w.ContentStore() + layerMediaTypes := blobs.GetMediaTypeForLayers(diffPairs, ref) + for i, dp := range diffPairs { + info, err := cs.Info(ctx, dp.Blobsum) + if err != nil { + return nil, err + } + + tm, err := createdTimes[i].MarshalText() + if err != nil { + return nil, err + } + + var mediaType string + if len(layerMediaTypes) > i { + mediaType = layerMediaTypes[i] + } + + // NOTE: The media type might be missing for some migrated ones + // from before lease based storage. If so, we should detect + // the media type from blob data. 
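+		// (Editorial sketch, not vendored source: such detection can sniff
+		// the blob's first two bytes for the gzip magic 0x1f 0x8b and pick
+		// the gzip layer media type on a match, the uncompressed layer
+		// media type otherwise.)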
+ // + // Discussion: https://github.com/moby/buildkit/pull/1277#discussion_r352795429 + if mediaType == "" { + mediaType, err = blobs.DetectLayerMediaType(ctx, cs, dp.Blobsum, false) + if err != nil { + return nil, err + } + } + + descs[i] = ocispec.Descriptor{ + Digest: dp.Blobsum, + Size: info.Size, + MediaType: mediaType, + Annotations: map[string]string{ + "containerd.io/uncompressed": dp.DiffID.String(), + labelCreatedAt: string(tm), + }, + } + } + + return &solver.Remote{ + Descriptors: descs, + Provider: cs, + }, nil +} + +func getCreatedTimes(ref cache.ImmutableRef) (out []time.Time) { + parent := ref.Parent() + if parent != nil { + defer parent.Release(context.TODO()) + out = getCreatedTimes(parent) + } + return append(out, cache.GetCreatedAt(ref.Metadata())) +} + +func (w *Worker) FromRemote(ctx context.Context, remote *solver.Remote) (ref cache.ImmutableRef, err error) { + ctx, done, err := leaseutil.WithLease(ctx, w.LeaseManager, leaseutil.MakeTemporary) + if err != nil { + return nil, err + } + defer done(ctx) + + eg, gctx := errgroup.WithContext(ctx) + for _, desc := range remote.Descriptors { + func(desc ocispec.Descriptor) { + eg.Go(func() error { + done := oneOffProgress(ctx, fmt.Sprintf("pulling %s", desc.Digest)) + if err := contentutil.Copy(gctx, w.ContentStore(), remote.Provider, desc); err != nil { + return done(err) + } + if ref, ok := desc.Annotations["containerd.io/distribution.source.ref"]; ok { + hf, err := docker.AppendDistributionSourceLabel(w.ContentStore(), ref) + if err != nil { + return done(err) + } + _, err = hf(ctx, desc) + return done(err) + } + return done(nil) + }) + }(desc) + } + + if err := eg.Wait(); err != nil { + return nil, err + } + + unpackProgressDone := oneOffProgress(ctx, "unpacking") + defer func() { + err = unpackProgressDone(err) + }() + var current cache.ImmutableRef + for i, desc := range remote.Descriptors { + tm := time.Now() + if tmstr, ok := desc.Annotations[labelCreatedAt]; ok { + if err := (&tm).UnmarshalText([]byte(tmstr)); err != nil { + if current != nil { + current.Release(context.TODO()) + } + return nil, err + } + } + descr := fmt.Sprintf("imported %s", remote.Descriptors[i].Digest) + if v, ok := desc.Annotations["buildkit/description"]; ok { + descr = v + } + ref, err := w.CacheManager.GetByBlob(ctx, desc, current, cache.WithDescription(descr), cache.WithCreationTime(tm)) + if current != nil { + current.Release(context.TODO()) + } + if err != nil { + return nil, err + } + if err := ref.Extract(ctx); err != nil { + ref.Release(context.TODO()) + return nil, err + } + current = ref + } + return current, nil +} + +// Labels returns default labels +// utility function. could be moved to the constructor logic? +func Labels(executor, snapshotter string) map[string]string { + hostname, err := os.Hostname() + if err != nil { + hostname = "unknown" + } + labels := map[string]string{ + worker.LabelExecutor: executor, + worker.LabelSnapshotter: snapshotter, + worker.LabelHostname: hostname, + } + return labels +} + +// ID reads the worker id from the `workerid` file. 
+// If the file does not exist, it creates a random one.
+func ID(root string) (string, error) {
+	f := filepath.Join(root, "workerid")
+	b, err := ioutil.ReadFile(f)
+	if err != nil {
+		if os.IsNotExist(err) {
+			id := identity.NewID()
+			err := ioutil.WriteFile(f, []byte(id), 0400)
+			return id, err
+		} else {
+			return "", err
+		}
+	}
+	return string(b), nil
+}
+
+func oneOffProgress(ctx context.Context, id string) func(err error) error {
+	pw, _, _ := progress.FromContext(ctx)
+	now := time.Now()
+	st := progress.Status{
+		Started: &now,
+	}
+	pw.Write(id, st)
+	return func(err error) error {
+		// TODO: set error on status
+		now := time.Now()
+		st.Completed = &now
+		pw.Write(id, st)
+		pw.Close()
+		return err
+	}
+}
diff --git a/vendor/github.com/moby/buildkit/worker/cacheresult.go b/vendor/github.com/moby/buildkit/worker/cacheresult.go
new file mode 100644
index 00000000..d0ee1a2a
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/worker/cacheresult.go
@@ -0,0 +1,100 @@
+package worker
+
+import (
+	"context"
+	"strings"
+	"time"
+
+	"github.com/moby/buildkit/cache"
+	"github.com/moby/buildkit/solver"
+	"github.com/pkg/errors"
+)
+
+func NewCacheResultStorage(wc *Controller) solver.CacheResultStorage {
+	return &cacheResultStorage{
+		wc: wc,
+	}
+}
+
+type cacheResultStorage struct {
+	wc *Controller
+}
+
+func (s *cacheResultStorage) Save(res solver.Result, createdAt time.Time) (solver.CacheResult, error) {
+	ref, ok := res.Sys().(*WorkerRef)
+	if !ok {
+		return solver.CacheResult{}, errors.Errorf("invalid result: %T", res.Sys())
+	}
+	if ref.ImmutableRef != nil {
+		if !cache.HasCachePolicyRetain(ref.ImmutableRef) {
+			if err := cache.CachePolicyRetain(ref.ImmutableRef); err != nil {
+				return solver.CacheResult{}, err
+			}
+			ref.ImmutableRef.Metadata().Commit()
+		}
+	}
+	return solver.CacheResult{ID: ref.ID(), CreatedAt: createdAt}, nil
+}
+func (s *cacheResultStorage) Load(ctx context.Context, res solver.CacheResult) (solver.Result, error) {
+	return s.load(res.ID, false)
+}
+
+func (s *cacheResultStorage) getWorkerRef(id string) (Worker, string, error) {
+	workerID, refID, err := parseWorkerRef(id)
+	if err != nil {
+		return nil, "", err
+	}
+	w, err := s.wc.Get(workerID)
+	if err != nil {
+		return nil, "", err
+	}
+	return w, refID, nil
+}
+
+func (s *cacheResultStorage) load(id string, hidden bool) (solver.Result, error) {
+	w, refID, err := s.getWorkerRef(id)
+	if err != nil {
+		return nil, err
+	}
+	if refID == "" {
+		return NewWorkerRefResult(nil, w), nil
+	}
+	ref, err := w.LoadRef(refID, hidden)
+	if err != nil {
+		return nil, err
+	}
+	return NewWorkerRefResult(ref, w), nil
+}
+
+func (s *cacheResultStorage) LoadRemote(ctx context.Context, res solver.CacheResult) (*solver.Remote, error) {
+	w, refID, err := s.getWorkerRef(res.ID)
+	if err != nil {
+		return nil, err
+	}
+	ref, err := w.LoadRef(refID, true)
+	if err != nil {
+		return nil, err
+	}
+	defer ref.Release(context.TODO())
+	remote, err := w.GetRemote(ctx, ref, false)
+	if err != nil {
+		return nil, nil // ignore error.
loadRemote is best effort + } + return remote, nil +} +func (s *cacheResultStorage) Exists(id string) bool { + ref, err := s.load(id, true) + if err != nil { + return false + } + ref.Release(context.TODO()) + return true +} + +func parseWorkerRef(id string) (string, string, error) { + parts := strings.Split(id, "::") + if len(parts) != 2 { + return "", "", errors.Errorf("invalid workerref id: %s", id) + } + return parts[0], parts[1], nil +} diff --git a/vendor/github.com/moby/buildkit/worker/filter.go b/vendor/github.com/moby/buildkit/worker/filter.go new file mode 100644 index 00000000..c94a6265 --- /dev/null +++ b/vendor/github.com/moby/buildkit/worker/filter.go @@ -0,0 +1,33 @@ +package worker + +import ( + "strings" + + "github.com/containerd/containerd/filters" +) + +func adaptWorker(w Worker) filters.Adaptor { + return filters.AdapterFunc(func(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + + switch fieldpath[0] { + case "id": + return w.ID(), len(w.ID()) > 0 + case "labels": + return checkMap(fieldpath[1:], w.Labels()) + } + + return "", false + }) +} + +func checkMap(fieldpath []string, m map[string]string) (string, bool) { + if len(m) == 0 { + return "", false + } + + value, ok := m[strings.Join(fieldpath, ".")] + return value, ok +} diff --git a/vendor/github.com/moby/buildkit/worker/result.go b/vendor/github.com/moby/buildkit/worker/result.go new file mode 100644 index 00000000..9aa6af41 --- /dev/null +++ b/vendor/github.com/moby/buildkit/worker/result.go @@ -0,0 +1,40 @@ +package worker + +import ( + "context" + + "github.com/moby/buildkit/cache" + "github.com/moby/buildkit/solver" +) + +func NewWorkerRefResult(ref cache.ImmutableRef, worker Worker) solver.Result { + return &workerRefResult{&WorkerRef{ImmutableRef: ref, Worker: worker}} +} + +type WorkerRef struct { + ImmutableRef cache.ImmutableRef + Worker Worker +} + +func (wr *WorkerRef) ID() string { + refID := "" + if wr.ImmutableRef != nil { + refID = wr.ImmutableRef.ID() + } + return wr.Worker.ID() + "::" + refID +} + +type workerRefResult struct { + *WorkerRef +} + +func (r *workerRefResult) Release(ctx context.Context) error { + if r.ImmutableRef == nil { + return nil + } + return r.ImmutableRef.Release(ctx) +} + +func (r *workerRefResult) Sys() interface{} { + return r.WorkerRef +} diff --git a/vendor/github.com/moby/buildkit/worker/worker.go b/vendor/github.com/moby/buildkit/worker/worker.go new file mode 100644 index 00000000..6505e307 --- /dev/null +++ b/vendor/github.com/moby/buildkit/worker/worker.go @@ -0,0 +1,48 @@ +package worker + +import ( + "context" + "io" + + "github.com/containerd/containerd/content" + "github.com/moby/buildkit/cache" + "github.com/moby/buildkit/client" + "github.com/moby/buildkit/client/llb" + "github.com/moby/buildkit/executor" + "github.com/moby/buildkit/exporter" + "github.com/moby/buildkit/frontend" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/solver" + digest "github.com/opencontainers/go-digest" + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +type Worker interface { + // ID needs to be unique in the cluster + ID() string + Labels() map[string]string + Platforms(noCache bool) []specs.Platform + + GCPolicy() []client.PruneInfo + LoadRef(id string, hidden bool) (cache.ImmutableRef, error) + // ResolveOp resolves Vertex.Sys() to Op implementation. 
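+	// A dispatch sketch (editorial illustration; the variable names are
+	// hypothetical):
+	//
+	//	op, err := w.ResolveOp(vertex, llbBridge, sessionManager)
+	//
+	// The concrete op (source, exec, file, build) is chosen from the
+	// vertex's pb.Op payload, as in the base worker implementation.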
+ ResolveOp(v solver.Vertex, s frontend.FrontendLLBBridge, sm *session.Manager) (solver.Op, error) + ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt, sm *session.Manager) (digest.Digest, []byte, error) + // Exec is similar to executor.Exec but without []mount.Mount + Exec(ctx context.Context, meta executor.Meta, rootFS cache.ImmutableRef, stdin io.ReadCloser, stdout, stderr io.WriteCloser) error + DiskUsage(ctx context.Context, opt client.DiskUsageInfo) ([]*client.UsageInfo, error) + Exporter(name string, sm *session.Manager) (exporter.Exporter, error) + Prune(ctx context.Context, ch chan client.UsageInfo, opt ...client.PruneInfo) error + GetRemote(ctx context.Context, ref cache.ImmutableRef, createIfNeeded bool) (*solver.Remote, error) + FromRemote(ctx context.Context, remote *solver.Remote) (cache.ImmutableRef, error) + PruneCacheMounts(ctx context.Context, ids []string) error + ContentStore() content.Store +} + +// Pre-defined label keys +const ( + labelPrefix = "org.mobyproject.buildkit.worker." + LabelExecutor = labelPrefix + "executor" // "oci" or "containerd" + LabelSnapshotter = labelPrefix + "snapshotter" // containerd snapshotter name ("overlay", "native", ...) + LabelHostname = labelPrefix + "hostname" +) diff --git a/vendor/github.com/moby/buildkit/worker/workercontroller.go b/vendor/github.com/moby/buildkit/worker/workercontroller.go new file mode 100644 index 00000000..5389f58d --- /dev/null +++ b/vendor/github.com/moby/buildkit/worker/workercontroller.go @@ -0,0 +1,77 @@ +package worker + +import ( + "sync" + + "github.com/containerd/containerd/filters" + "github.com/moby/buildkit/client" + "github.com/pkg/errors" +) + +// Controller holds worker instances. +// Currently, only local workers are supported. +type Controller struct { + // TODO: define worker interface and support remote ones + workers sync.Map + defaultID string +} + +// Add adds a local worker +func (c *Controller) Add(w Worker) error { + c.workers.Store(w.ID(), w) + if c.defaultID == "" { + c.defaultID = w.ID() + } + return nil +} + +// List lists workers +func (c *Controller) List(filterStrings ...string) ([]Worker, error) { + filter, err := filters.ParseAll(filterStrings...) + if err != nil { + return nil, err + } + var workers []Worker + c.workers.Range(func(k, v interface{}) bool { + w := v.(Worker) + if filter.Match(adaptWorker(w)) { + workers = append(workers, w) + } + return true + }) + return workers, nil +} + +// GetDefault returns the default local worker +func (c *Controller) GetDefault() (Worker, error) { + if c.defaultID == "" { + return nil, errors.Errorf("no default worker") + } + return c.Get(c.defaultID) +} + +func (c *Controller) Get(id string) (Worker, error) { + v, ok := c.workers.Load(id) + if !ok { + return nil, errors.Errorf("worker %s not found", id) + } + return v.(Worker), nil +} + +// TODO: add Get(Constraint) (*Worker, error) + +func (c *Controller) WorkerInfos() []client.WorkerInfo { + workers, err := c.List() + if err != nil { + return nil + } + out := make([]client.WorkerInfo, 0, len(workers)) + for _, w := range workers { + out = append(out, client.WorkerInfo{ + ID: w.ID(), + Labels: w.Labels(), + Platforms: w.Platforms(true), + }) + } + return out +} diff --git a/vendor/github.com/moby/sys/mount/doc.go b/vendor/github.com/moby/sys/mount/doc.go new file mode 100644 index 00000000..86c2e01b --- /dev/null +++ b/vendor/github.com/moby/sys/mount/doc.go @@ -0,0 +1,4 @@ +// Package mount provides a set of functions to mount and unmount mounts. 
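+//
+// A typical call sequence (editorial sketch; device, target, and options are
+// hypothetical):
+//
+//	if err := mount.Mount("tmpfs", "/mnt/scratch", "tmpfs", "size=64m"); err != nil {
+//		return err
+//	}
+//	defer mount.RecursiveUnmount("/mnt/scratch")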
+// +// Currently it supports Linux. For historical reasons, there is also some support for FreeBSD. +package mount diff --git a/vendor/github.com/moby/sys/mount/flags_freebsd.go b/vendor/github.com/moby/sys/mount/flags_bsd.go similarity index 79% rename from vendor/github.com/moby/sys/mount/flags_freebsd.go rename to vendor/github.com/moby/sys/mount/flags_bsd.go index 69c50680..27d8440a 100644 --- a/vendor/github.com/moby/sys/mount/flags_freebsd.go +++ b/vendor/github.com/moby/sys/mount/flags_bsd.go @@ -1,28 +1,25 @@ -// +build freebsd,cgo +// +build freebsd openbsd package mount -/* -#include -*/ -import "C" +import "golang.org/x/sys/unix" const ( // RDONLY will mount the filesystem as read-only. - RDONLY = C.MNT_RDONLY + RDONLY = unix.MNT_RDONLY // NOSUID will not allow set-user-identifier or set-group-identifier bits to // take effect. - NOSUID = C.MNT_NOSUID + NOSUID = unix.MNT_NOSUID // NOEXEC will not allow execution of any binaries on the mounted file system. - NOEXEC = C.MNT_NOEXEC + NOEXEC = unix.MNT_NOEXEC // SYNCHRONOUS will allow any I/O to the file system to be done synchronously. - SYNCHRONOUS = C.MNT_SYNCHRONOUS + SYNCHRONOUS = unix.MNT_SYNCHRONOUS // NOATIME will not update the file access time when reading from a file. - NOATIME = C.MNT_NOATIME + NOATIME = unix.MNT_NOATIME ) // These flags are unsupported. diff --git a/vendor/github.com/moby/sys/mount/flags.go b/vendor/github.com/moby/sys/mount/flags_unix.go similarity index 99% rename from vendor/github.com/moby/sys/mount/flags.go rename to vendor/github.com/moby/sys/mount/flags_unix.go index d514a9d8..995d7280 100644 --- a/vendor/github.com/moby/sys/mount/flags.go +++ b/vendor/github.com/moby/sys/mount/flags_unix.go @@ -1,3 +1,5 @@ +// +build !darwin,!windows + package mount import ( diff --git a/vendor/github.com/moby/sys/mount/flags_unsupported.go b/vendor/github.com/moby/sys/mount/flags_unsupported.go deleted file mode 100644 index e1d64f6b..00000000 --- a/vendor/github.com/moby/sys/mount/flags_unsupported.go +++ /dev/null @@ -1,30 +0,0 @@ -// +build !linux,!freebsd freebsd,!cgo - -package mount - -// These flags are unsupported. 
-const ( - BIND = 0 - DIRSYNC = 0 - MANDLOCK = 0 - NOATIME = 0 - NODEV = 0 - NODIRATIME = 0 - NOEXEC = 0 - NOSUID = 0 - UNBINDABLE = 0 - RUNBINDABLE = 0 - PRIVATE = 0 - RPRIVATE = 0 - SHARED = 0 - RSHARED = 0 - SLAVE = 0 - RSLAVE = 0 - RBIND = 0 - RELATIME = 0 - REMOUNT = 0 - STRICTATIME = 0 - SYNCHRONOUS = 0 - RDONLY = 0 - mntDetach = 0 -) diff --git a/vendor/github.com/moby/sys/mount/go.mod b/vendor/github.com/moby/sys/mount/go.mod index 21cef0a9..e1e03a37 100644 --- a/vendor/github.com/moby/sys/mount/go.mod +++ b/vendor/github.com/moby/sys/mount/go.mod @@ -3,6 +3,6 @@ module github.com/moby/sys/mount go 1.14 require ( - github.com/moby/sys/mountinfo v0.1.0 - golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae + github.com/moby/sys/mountinfo v0.4.0 + golang.org/x/sys v0.0.0-20200922070232-aee5d888a860 ) diff --git a/vendor/github.com/moby/sys/mount/go.sum b/vendor/github.com/moby/sys/mount/go.sum index ae99f859..7c39d597 100644 --- a/vendor/github.com/moby/sys/mount/go.sum +++ b/vendor/github.com/moby/sys/mount/go.sum @@ -1,4 +1,5 @@ -github.com/moby/sys/mountinfo v0.1.0 h1:r8vMRbMAFEAfiNptYVokP+nfxPJzvRuia5e2vzXtENo= -github.com/moby/sys/mountinfo v0.1.0/go.mod h1:w2t2Avltqx8vE7gX5l+QiBKxODu2TX0+Syr3h52Tw4o= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae h1:/WDfKMnPU+m5M4xB+6x4kaepxRw6jWvR5iDRdvjHgy8= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +github.com/moby/sys/mountinfo v0.4.0 h1:1KInV3Huv18akCu58V7lzNlt+jFmqlu1EaErnEHE/VM= +github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= +golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200922070232-aee5d888a860 h1:YEu4SMq7D0cmT7CBbXfcH0NZeuChAXwsHe/9XueUO6o= +golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/moby/sys/mount/mount.go b/vendor/github.com/moby/sys/mount/mount.go deleted file mode 100644 index 883f8df3..00000000 --- a/vendor/github.com/moby/sys/mount/mount.go +++ /dev/null @@ -1,56 +0,0 @@ -// +build go1.13 - -package mount - -import ( - "fmt" - "sort" - - "github.com/moby/sys/mountinfo" -) - -// Mount will mount filesystem according to the specified configuration. -// Options must be specified like the mount or fstab unix commands: -// "opt1=val1,opt2=val2". See flags.go for supported option flags. -func Mount(device, target, mType, options string) error { - flag, data := parseOptions(options) - return mount(device, target, mType, uintptr(flag), data) -} - -// Unmount lazily unmounts a filesystem on supported platforms, otherwise -// does a normal unmount. -func Unmount(target string) error { - return unmount(target, mntDetach) -} - -// RecursiveUnmount unmounts the target and all mounts underneath, starting with -// the deepsest mount first. 
-func RecursiveUnmount(target string) error { - mounts, err := mountinfo.GetMounts(mountinfo.PrefixFilter(target)) - if err != nil { - return err - } - - // Make the deepest mount be first - sort.Slice(mounts, func(i, j int) bool { - return len(mounts[i].Mountpoint) > len(mounts[j].Mountpoint) - }) - - var suberr error - for i, m := range mounts { - err = unmount(m.Mountpoint, mntDetach) - if err != nil { - if i == len(mounts)-1 { // last mount - return fmt.Errorf("%w (possible cause: %s)", err, suberr) - } - // This is a submount, we can ignore the error for now, - // the final unmount will fail if this is a real problem. - // With that in mind, the _first_ failed unmount error - // might be the real error cause, so let's keep it. - if suberr == nil { - suberr = err - } - } - } - return nil -} diff --git a/vendor/github.com/moby/sys/mount/mount_unix.go b/vendor/github.com/moby/sys/mount/mount_unix.go new file mode 100644 index 00000000..a250bfc8 --- /dev/null +++ b/vendor/github.com/moby/sys/mount/mount_unix.go @@ -0,0 +1,87 @@ +// +build !darwin,!windows + +package mount + +import ( + "fmt" + "sort" + + "github.com/moby/sys/mountinfo" + "golang.org/x/sys/unix" +) + +// Mount will mount filesystem according to the specified configuration. +// Options must be specified like the mount or fstab unix commands: +// "opt1=val1,opt2=val2". See flags.go for supported option flags. +func Mount(device, target, mType, options string) error { + flag, data := parseOptions(options) + return mount(device, target, mType, uintptr(flag), data) +} + +// Unmount lazily unmounts a filesystem on supported platforms, otherwise does +// a normal unmount. If target is not a mount point, no error is returned. +func Unmount(target string) error { + err := unix.Unmount(target, mntDetach) + if err == nil || err == unix.EINVAL { + // Ignore "not mounted" error here. Note the same error + // can be returned if flags are invalid, so this code + // assumes that the flags value is always correct. + return nil + } + + return &mountError{ + op: "umount", + target: target, + flags: uintptr(mntDetach), + err: err, + } +} + +// RecursiveUnmount unmounts the target and all mounts underneath, starting +// with the deepest mount first. The argument does not have to be a mount +// point itself. +func RecursiveUnmount(target string) error { + // Fast path, works if target is a mount point that can be unmounted. + // On Linux, mntDetach flag ensures a recursive unmount. For other + // platforms, if there are submounts, we'll get EBUSY (and fall back + // to the slow path). NOTE we do not ignore EINVAL here as target might + // not be a mount point itself (but there can be mounts underneath). + if err := unix.Unmount(target, mntDetach); err == nil { + return nil + } + + // Slow path: get all submounts, sort, unmount one by one. + mounts, err := mountinfo.GetMounts(mountinfo.PrefixFilter(target)) + if err != nil { + return err + } + + // Make the deepest mount be first + sort.Slice(mounts, func(i, j int) bool { + return len(mounts[i].Mountpoint) > len(mounts[j].Mountpoint) + }) + + var ( + suberr error + lastMount = len(mounts) - 1 + ) + for i, m := range mounts { + err = Unmount(m.Mountpoint) + if err != nil { + if i == lastMount { + if suberr != nil { + return fmt.Errorf("%w (possible cause: %s)", err, suberr) + } + return err + } + // This is a submount, we can ignore the error for now, + // the final unmount will fail if this is a real problem. 
+			// With that in mind, the _first_ failed unmount error
+			// might be the real error cause, so let's keep it.
+			if suberr == nil {
+				suberr = err
+			}
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/moby/sys/mount/mounter_freebsd.go b/vendor/github.com/moby/sys/mount/mounter_bsd.go
similarity index 97%
rename from vendor/github.com/moby/sys/mount/mounter_freebsd.go
rename to vendor/github.com/moby/sys/mount/mounter_bsd.go
index 3964af4f..656b762f 100644
--- a/vendor/github.com/moby/sys/mount/mounter_freebsd.go
+++ b/vendor/github.com/moby/sys/mount/mounter_bsd.go
@@ -1,3 +1,5 @@
+// +build freebsd,cgo openbsd,cgo
+
 package mount
 
 /*
diff --git a/vendor/github.com/moby/sys/mount/mounter_unsupported.go b/vendor/github.com/moby/sys/mount/mounter_unsupported.go
index 15380671..e7ff5bd9 100644
--- a/vendor/github.com/moby/sys/mount/mounter_unsupported.go
+++ b/vendor/github.com/moby/sys/mount/mounter_unsupported.go
@@ -1,7 +1,7 @@
-// +build !linux,!freebsd freebsd,!cgo
+// +build !linux,!freebsd,!openbsd,!windows freebsd,!cgo openbsd,!cgo
 
 package mount
 
 func mount(device, target, mType string, flag uintptr, data string) error {
-	panic("Not implemented")
+	panic("cgo required on freebsd and openbsd")
 }
diff --git a/vendor/github.com/moby/sys/mount/unmount_unix.go b/vendor/github.com/moby/sys/mount/unmount_unix.go
deleted file mode 100644
index 1d1afeee..00000000
--- a/vendor/github.com/moby/sys/mount/unmount_unix.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// +build !windows
-
-package mount
-
-import "golang.org/x/sys/unix"
-
-func unmount(target string, flags int) error {
-	err := unix.Unmount(target, flags)
-	if err == nil || err == unix.EINVAL {
-		// Ignore "not mounted" error here. Note the same error
-		// can be returned if flags are invalid, so this code
-		// assumes that the flags value is always correct.
-		return nil
-	}
-
-	return &mountError{
-		op:     "umount",
-		target: target,
-		flags:  uintptr(flags),
-		err:    err,
-	}
-}
diff --git a/vendor/github.com/moby/sys/mount/unmount_unsupported.go b/vendor/github.com/moby/sys/mount/unmount_unsupported.go
deleted file mode 100644
index eebc4ab8..00000000
--- a/vendor/github.com/moby/sys/mount/unmount_unsupported.go
+++ /dev/null
@@ -1,7 +0,0 @@
-// +build windows
-
-package mount
-
-func unmount(target string, flag int) error {
-	panic("Not implemented")
-}
diff --git a/vendor/github.com/moby/sys/mountinfo/doc.go b/vendor/github.com/moby/sys/mountinfo/doc.go
new file mode 100644
index 00000000..b80e05ef
--- /dev/null
+++ b/vendor/github.com/moby/sys/mountinfo/doc.go
@@ -0,0 +1,44 @@
+// Package mountinfo provides a set of functions to retrieve information about OS mounts.
+//
+// Currently it supports Linux. For historical reasons, there is also some support for FreeBSD and OpenBSD,
+// and a shallow implementation for Windows, but in general this is a Linux-only package, so
+// the rest of the document only applies to Linux, unless explicitly specified otherwise.
+//
+// In Linux, information about mounts seen by the current process is available from
+// /proc/self/mountinfo. Note that due to mount namespaces, different processes can
+// see different mounts. A per-process mountinfo table is available from /proc/<pid>/mountinfo,
+// where <pid> is a numerical process identifier.
+//
+// In general, /proc is not a very efficient interface, and mountinfo is not an exception.
+// For example, there is no way to get information about a specific mount point (i.e. it
+// is all-or-nothing).
This package tries to hide the /proc ineffectiveness by using +// parse filters while reading mountinfo. A filter can skip some entries, or stop +// processing the rest of the file once the needed information is found. +// +// For mountinfo filters that accept path as an argument, the path must be absolute, +// having all symlinks resolved, and being cleaned (i.e. no extra slashes or dots). +// One way to achieve all of the above is to employ filepath.Abs followed by +// filepath.EvalSymlinks (the latter calls filepath.Clean on the result so +// there is no need to explicitly call filepath.Clean). +// +// NOTE that in many cases there is no need to consult mountinfo at all. Here are some +// of the cases where mountinfo should not be parsed: +// +// 1. Before performing a mount. Usually, this is not needed, but if required (say to +// prevent over-mounts), to check whether a directory is mounted, call os.Lstat +// on it and its parent directory, and compare their st.Sys().(*syscall.Stat_t).Dev +// fields -- if they differ, then the directory is the mount point. NOTE this does +// not work for bind mounts. Optionally, the filesystem type can also be checked +// by calling unix.Statfs and checking the Type field (i.e. filesystem type). +// +// 2. After performing a mount. If there is no error returned, the mount succeeded; +// checking the mount table for a new mount is redundant and expensive. +// +// 3. Before performing an unmount. It is more efficient to do an unmount and ignore +// a specific error (EINVAL) which tells the directory is not mounted. +// +// 4. After performing an unmount. If there is no error returned, the unmount succeeded. +// +// 5. To find the mount point root of a specific directory. You can perform os.Stat() +// on the directory and traverse up until the Dev field of a parent directory differs. +package mountinfo diff --git a/vendor/github.com/moby/sys/mountinfo/go.mod b/vendor/github.com/moby/sys/mountinfo/go.mod index 10d9a15a..9749ea96 100644 --- a/vendor/github.com/moby/sys/mountinfo/go.mod +++ b/vendor/github.com/moby/sys/mountinfo/go.mod @@ -1,3 +1,5 @@ module github.com/moby/sys/mountinfo go 1.14 + +require golang.org/x/sys v0.0.0-20200909081042-eff7692f9009 diff --git a/vendor/github.com/moby/sys/mountinfo/go.sum b/vendor/github.com/moby/sys/mountinfo/go.sum new file mode 100644 index 00000000..2a5be7ea --- /dev/null +++ b/vendor/github.com/moby/sys/mountinfo/go.sum @@ -0,0 +1,2 @@ +golang.org/x/sys v0.0.0-20200909081042-eff7692f9009 h1:W0lCpv29Hv0UaM1LXb9QlBHLNP8UFfcKjblhVCWftOM= +golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/moby/sys/mountinfo/mounted_linux.go b/vendor/github.com/moby/sys/mountinfo/mounted_linux.go new file mode 100644 index 00000000..bc9f6b2a --- /dev/null +++ b/vendor/github.com/moby/sys/mountinfo/mounted_linux.go @@ -0,0 +1,58 @@ +package mountinfo + +import ( + "os" + "path/filepath" + + "golang.org/x/sys/unix" +) + +// mountedByOpenat2 is a method of detecting a mount that works for all kinds +// of mounts (incl. bind mounts), but requires a recent (v5.6+) linux kernel. 
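+//
+// The idea (editorial note): open the parent directory, then openat2() the
+// last path component with RESOLVE_NO_XDEV, which forbids crossing into
+// another mount; EXDEV from the kernel then means "path is a mount point":
+//
+//	fd, err := unix.Openat2(dirfd, last, &unix.OpenHow{
+//		Flags:   unix.O_PATH | unix.O_CLOEXEC | unix.O_NOFOLLOW,
+//		Resolve: unix.RESOLVE_NO_XDEV,
+//	})
+//	// err == unix.EXDEV reports a mount; nil means no mount here.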
+func mountedByOpenat2(path string) (bool, error) { + dir, last := filepath.Split(path) + + dirfd, err := unix.Openat2(unix.AT_FDCWD, dir, &unix.OpenHow{ + Flags: unix.O_PATH | unix.O_CLOEXEC, + }) + if err != nil { + if err == unix.ENOENT { // not a mount + return false, nil + } + return false, &os.PathError{Op: "openat2", Path: dir, Err: err} + } + fd, err := unix.Openat2(dirfd, last, &unix.OpenHow{ + Flags: unix.O_PATH | unix.O_CLOEXEC | unix.O_NOFOLLOW, + Resolve: unix.RESOLVE_NO_XDEV, + }) + _ = unix.Close(dirfd) + switch err { + case nil: // definitely not a mount + _ = unix.Close(fd) + return false, nil + case unix.EXDEV: // definitely a mount + return true, nil + case unix.ENOENT: // not a mount + return false, nil + } + // not sure + return false, &os.PathError{Op: "openat2", Path: path, Err: err} +} + +func mounted(path string) (bool, error) { + // Try a fast path, using openat2() with RESOLVE_NO_XDEV. + mounted, err := mountedByOpenat2(path) + if err == nil { + return mounted, nil + } + // Another fast path: compare st.st_dev fields. + mounted, err = mountedByStat(path) + // This does not work for bind mounts, so false negative + // is possible, therefore only trust if return is true. + if mounted && err == nil { + return mounted, nil + } + + // Fallback to parsing mountinfo + return mountedByMountinfo(path) +} diff --git a/vendor/github.com/moby/sys/mountinfo/mounted_unix.go b/vendor/github.com/moby/sys/mountinfo/mounted_unix.go new file mode 100644 index 00000000..efb03978 --- /dev/null +++ b/vendor/github.com/moby/sys/mountinfo/mounted_unix.go @@ -0,0 +1,66 @@ +// +build linux freebsd,cgo openbsd,cgo + +package mountinfo + +import ( + "errors" + "fmt" + "os" + "path/filepath" + + "golang.org/x/sys/unix" +) + +func mountedByStat(path string) (bool, error) { + var st unix.Stat_t + + if err := unix.Lstat(path, &st); err != nil { + if err == unix.ENOENT { + // Treat ENOENT as "not mounted". + return false, nil + } + return false, &os.PathError{Op: "stat", Path: path, Err: err} + } + dev := st.Dev + parent := filepath.Dir(path) + if err := unix.Lstat(parent, &st); err != nil { + return false, &os.PathError{Op: "stat", Path: parent, Err: err} + } + if dev != st.Dev { + // Device differs from that of parent, + // so definitely a mount point. + return true, nil + } + // NB: this does not detect bind mounts on Linux. 
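+	// (Editorial note: a bind mount from the same filesystem keeps the same
+	// st_dev as its parent, so the device comparison above cannot detect it;
+	// Linux callers therefore fall back to openat2 or to parsing mountinfo.)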
+ return false, nil +} + +func normalizePath(path string) (realPath string, err error) { + if realPath, err = filepath.Abs(path); err != nil { + return "", fmt.Errorf("unable to get absolute path for %q: %w", path, err) + } + if realPath, err = filepath.EvalSymlinks(realPath); err != nil { + return "", fmt.Errorf("failed to canonicalise path for %q: %w", path, err) + } + if _, err := os.Stat(realPath); err != nil { + return "", fmt.Errorf("failed to stat target of %q: %w", path, err) + } + return realPath, nil +} + +func mountedByMountinfo(path string) (bool, error) { + path, err := normalizePath(path) + if err != nil { + if errors.Is(err, unix.ENOENT) { + // treat ENOENT as "not mounted" + return false, nil + } + return false, err + } + entries, err := GetMounts(SingleEntryFilter(path)) + if err != nil { + return false, err + } + + return len(entries) > 0, nil +} diff --git a/vendor/github.com/moby/sys/mountinfo/mountinfo.go b/vendor/github.com/moby/sys/mountinfo/mountinfo.go index ec3b5934..fe828c8f 100644 --- a/vendor/github.com/moby/sys/mountinfo/mountinfo.go +++ b/vendor/github.com/moby/sys/mountinfo/mountinfo.go @@ -1,20 +1,26 @@ package mountinfo +import ( + "os" +) + // GetMounts retrieves a list of mounts for the current running process, // with an optional filter applied (use nil for no filter). func GetMounts(f FilterFunc) ([]*Info, error) { return parseMountTable(f) } -// Mounted determines if a specified mountpoint has been mounted. -// On Linux it looks at /proc/self/mountinfo. -func Mounted(mountpoint string) (bool, error) { - entries, err := GetMounts(SingleEntryFilter(mountpoint)) - if err != nil { - return false, err +// Mounted determines if a specified path is a mount point. +// +// The argument must be an absolute path, with all symlinks resolved, and clean. +// One way to ensure it is to process the path using filepath.Abs followed by +// filepath.EvalSymlinks before calling this function. +func Mounted(path string) (bool, error) { + // root is always mounted + if path == string(os.PathSeparator) { + return true, nil } - - return len(entries) > 0, nil + return mounted(path) } // Info reveals information about a particular mounted filesystem. This @@ -40,18 +46,18 @@ type Info struct { // Mountpoint indicates the mount point relative to the process's root. Mountpoint string - // Opts represents mount-specific options. - Opts string + // Options represents mount-specific options. + Options string // Optional represents optional fields. Optional string - // Fstype indicates the type of filesystem, such as EXT3. - Fstype string + // FSType indicates the type of filesystem, such as EXT3. + FSType string // Source indicates filesystem specific information or "none". Source string - // VfsOpts represents per super block options. - VfsOpts string + // VFSOptions represents per super block options. 
+ VFSOptions string } diff --git a/vendor/github.com/moby/sys/mountinfo/mountinfo_freebsd.go b/vendor/github.com/moby/sys/mountinfo/mountinfo_bsd.go similarity index 73% rename from vendor/github.com/moby/sys/mountinfo/mountinfo_freebsd.go rename to vendor/github.com/moby/sys/mountinfo/mountinfo_bsd.go index ea02568b..b1c12d02 100644 --- a/vendor/github.com/moby/sys/mountinfo/mountinfo_freebsd.go +++ b/vendor/github.com/moby/sys/mountinfo/mountinfo_bsd.go @@ -1,3 +1,5 @@ +// +build freebsd,cgo openbsd,cgo + package mountinfo /* @@ -33,7 +35,8 @@ func parseMountTable(filter FilterFunc) ([]*Info, error) { var mountinfo Info var skip, stop bool mountinfo.Mountpoint = C.GoString(&entry.f_mntonname[0]) - mountinfo.Fstype = C.GoString(&entry.f_fstypename[0]) + mountinfo.FSType = C.GoString(&entry.f_fstypename[0]) + mountinfo.Source = C.GoString(&entry.f_mntfromname[0]) if filter != nil { // filter out entries we're not interested in @@ -43,8 +46,6 @@ func parseMountTable(filter FilterFunc) ([]*Info, error) { } } - mountinfo.Source = C.GoString(&entry.f_mntfromname[0]) - out = append(out, &mountinfo) if stop { break @@ -52,3 +53,15 @@ func parseMountTable(filter FilterFunc) ([]*Info, error) { } return out, nil } + +func mounted(path string) (bool, error) { + // Fast path: compare st.st_dev fields. + // This should always work for FreeBSD and OpenBSD. + mounted, err := mountedByStat(path) + if err == nil { + return mounted, nil + } + + // Fallback to parsing mountinfo + return mountedByMountinfo(path) +} diff --git a/vendor/github.com/moby/sys/mountinfo/mountinfo_filters.go b/vendor/github.com/moby/sys/mountinfo/mountinfo_filters.go index 96d49414..5869b2ce 100644 --- a/vendor/github.com/moby/sys/mountinfo/mountinfo_filters.go +++ b/vendor/github.com/moby/sys/mountinfo/mountinfo_filters.go @@ -3,17 +3,19 @@ package mountinfo import "strings" // FilterFunc is a type defining a callback function for GetMount(), -// used to filter out mountinfo entries we're not interested in. +// used to filter out mountinfo entries we're not interested in, +// and/or stop further processing if we found what we wanted. // -// It takes a pointer to the Info struct (not fully populated, -// currently only Mountpoint is filled in), and returns two booleans: +// It takes a pointer to the Info struct (fully populated with all available +// fields on the GOOS platform), and returns two booleans: // -// - skip: true if the entry should be skipped -// - stop: true if parsing should be stopped after the entry +// skip: true if the entry should be skipped; +// +// stop: true if parsing should be stopped after the entry. type FilterFunc func(*Info) (skip, stop bool) // PrefixFilter discards all entries whose mount points -// do not start with a specific prefix +// do not start with a specific prefix. func PrefixFilter(prefix string) FilterFunc { return func(m *Info) (bool, bool) { skip := !strings.HasPrefix(m.Mountpoint, prefix) @@ -21,7 +23,7 @@ func PrefixFilter(prefix string) FilterFunc { } } -// SingleEntryFilter looks for a specific entry +// SingleEntryFilter looks for a specific entry. func SingleEntryFilter(mp string) FilterFunc { return func(m *Info) (bool, bool) { if m.Mountpoint == mp { @@ -34,8 +36,8 @@ func SingleEntryFilter(mp string) FilterFunc { // ParentsFilter returns all entries whose mount points // can be parents of a path specified, discarding others. // -// For example, given `/var/lib/docker/something`, entries -// like `/var/lib/docker`, `/var` and `/` are returned. 
+// For example, given /var/lib/docker/something, entries +// like /var/lib/docker, /var and / are returned. func ParentsFilter(path string) FilterFunc { return func(m *Info) (bool, bool) { skip := !strings.HasPrefix(path, m.Mountpoint) @@ -43,12 +45,12 @@ func ParentsFilter(path string) FilterFunc { } } -// FstypeFilter returns all entries that match provided fstype(s). -func FstypeFilter(fstype ...string) FilterFunc { +// FSTypeFilter returns all entries that match provided fstype(s). +func FSTypeFilter(fstype ...string) FilterFunc { return func(m *Info) (bool, bool) { for _, t := range fstype { - if m.Fstype == t { - return false, false // don't skeep, keep going + if m.FSType == t { + return false, false // don't skip, keep going } } return true, false // skip, keep going diff --git a/vendor/github.com/moby/sys/mountinfo/mountinfo_linux.go b/vendor/github.com/moby/sys/mountinfo/mountinfo_linux.go index a2c60e91..e591c836 100644 --- a/vendor/github.com/moby/sys/mountinfo/mountinfo_linux.go +++ b/vendor/github.com/moby/sys/mountinfo/mountinfo_linux.go @@ -1,5 +1,3 @@ -// +build go1.13 - package mountinfo import ( @@ -11,14 +9,18 @@ import ( "strings" ) -func parseInfoFile(r io.Reader, filter FilterFunc) ([]*Info, error) { +// GetMountsFromReader retrieves a list of mounts from the +// reader provided, with an optional filter applied (use nil +// for no filter). This can be useful in tests or benchmarks +// that provide a fake mountinfo data. +// +// This function is Linux-specific. +func GetMountsFromReader(r io.Reader, filter FilterFunc) ([]*Info, error) { s := bufio.NewScanner(r) out := []*Info{} - var err error for s.Scan() { - if err = s.Err(); err != nil { - return nil, err - } + var err error + /* See http://man7.org/linux/man-pages/man5/proc.5.html @@ -70,25 +72,19 @@ func parseInfoFile(r io.Reader, filter FilterFunc) ([]*Info, error) { p := &Info{} - // Fill in the fields that a filter might check - // (currently only Mountpoint and Fstype) - p.Mountpoint, err = strconv.Unquote(`"` + fields[4] + `"`) + p.Mountpoint, err = unescape(fields[4]) if err != nil { - return nil, fmt.Errorf("Parsing '%s' failed: unable to unquote mount point field: %w", fields[4], err) + return nil, fmt.Errorf("Parsing '%s' failed: mount point: %w", fields[4], err) } - p.Fstype = fields[sepIdx+1] - - // Run a filter soon so we can skip parsing/adding entries - // the caller is not interested in - var skip, stop bool - if filter != nil { - skip, stop = filter(p) - if skip { - continue - } + p.FSType, err = unescape(fields[sepIdx+1]) + if err != nil { + return nil, fmt.Errorf("Parsing '%s' failed: fstype: %w", fields[sepIdx+1], err) } - - // Fill in the rest of the fields + p.Source, err = unescape(fields[sepIdx+2]) + if err != nil { + return nil, fmt.Errorf("Parsing '%s' failed: source: %w", fields[sepIdx+2], err) + } + p.VFSOptions = fields[sepIdx+3] // ignore any numbers parsing errors, as there should not be any p.ID, _ = strconv.Atoi(fields[0]) @@ -100,12 +96,12 @@ func parseInfoFile(r io.Reader, filter FilterFunc) ([]*Info, error) { p.Major, _ = strconv.Atoi(mm[0]) p.Minor, _ = strconv.Atoi(mm[1]) - p.Root, err = strconv.Unquote(`"` + fields[3] + `"`) + p.Root, err = unescape(fields[3]) if err != nil { - return nil, fmt.Errorf("Parsing '%s' failed: unable to unquote root field: %w", fields[3], err) + return nil, fmt.Errorf("Parsing '%s' failed: root: %w", fields[3], err) } - p.Opts = fields[5] + p.Options = fields[5] // zero or more optional fields switch { @@ -117,14 +113,23 @@ func parseInfoFile(r 
io.Reader, filter FilterFunc) ([]*Info, error) {
 			p.Optional = strings.Join(fields[6:sepIdx-1], " ")
 		}
 
-		p.Source = fields[sepIdx+2]
-		p.VfsOpts = fields[sepIdx+3]
+		// Run the filter after parsing all of the fields.
+		var skip, stop bool
+		if filter != nil {
+			skip, stop = filter(p)
+			if skip {
+				continue
+			}
+		}
 
 		out = append(out, p)
 		if stop {
 			break
 		}
 	}
+	if err := s.Err(); err != nil {
+		return nil, err
+	}
 	return out, nil
 }
@@ -137,12 +142,17 @@ func parseMountTable(filter FilterFunc) ([]*Info, error) {
 	}
 	defer f.Close()
 
-	return parseInfoFile(f, filter)
+	return GetMountsFromReader(f, filter)
 }
 
-// PidMountInfo collects the mounts for a specific process ID. If the process
-// ID is unknown, it is better to use `GetMounts` which will inspect
-// "/proc/self/mountinfo" instead.
+// PidMountInfo retrieves the list of mounts from a given process' mount
+// namespace. Unless there is a need to get mounts from a mount namespace
+// different from that of a calling process, use GetMounts.
+//
+// This function is Linux-specific.
+//
+// Deprecated: this will be removed before v1; use GetMountsFromReader with
+// opened /proc/<pid>/mountinfo as an argument instead.
 func PidMountInfo(pid int) ([]*Info, error) {
 	f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid))
 	if err != nil {
@@ -150,5 +160,63 @@ func PidMountInfo(pid int) ([]*Info, error) {
 	}
 	defer f.Close()
 
-	return parseInfoFile(f, nil)
+	return GetMountsFromReader(f, nil)
+}
+
+// A few specific characters in mountinfo path entries (root and mountpoint)
+// are escaped using a backslash followed by a character's ascii code in octal.
+//
+//	space              -- as \040
+//	tab (aka \t)       -- as \011
+//	newline (aka \n)   -- as \012
+//	backslash (aka \\) -- as \134
+//
+// This function converts path from mountinfo back, i.e. it unescapes the above sequences.
+func unescape(path string) (string, error) {
+	// try to avoid copying
+	if strings.IndexByte(path, '\\') == -1 {
+		return path, nil
+	}
+
+	// The following code is UTF-8 transparent as it only looks for some
+	// specific characters (backslash and 0..7) with values < utf8.RuneSelf,
+	// and everything else is passed through as is.
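+	// Worked example (editorial): the entry "/mnt/a\040b" decodes to
+	// "/mnt/a b" -- '\' opens the sequence, and the octal digits 0, 4, 0 are
+	// assembled below as v = ((0<<3)|4)<<3 | 0 = 32, an ASCII space.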
+	buf := make([]byte, len(path))
+	bufLen := 0
+	for i := 0; i < len(path); i++ {
+		if path[i] != '\\' {
+			buf[bufLen] = path[i]
+			bufLen++
+			continue
+		}
+		s := path[i:]
+		if len(s) < 4 {
+			// too short
+			return "", fmt.Errorf("bad escape sequence %q: too short", s)
+		}
+		c := s[1]
+		switch c {
+		case '0', '1', '2', '3', '4', '5', '6', '7':
+			// Decode into an int so that the range check below can catch
+			// three-digit sequences above \377 (255) instead of silently
+			// wrapping around in a byte.
+			v := int(c - '0')
+			for j := 2; j < 4; j++ { // one digit already; two more
+				if s[j] < '0' || s[j] > '7' {
+					return "", fmt.Errorf("bad escape sequence %q: not a digit", s[:3])
+				}
+				x := int(s[j] - '0')
+				v = (v << 3) | x
+			}
+			if v > 255 {
+				return "", fmt.Errorf("bad escape sequence %q: out of range", s[:3])
+			}
+			buf[bufLen] = byte(v)
+			bufLen++
+			i += 3
+			continue
+		default:
+			return "", fmt.Errorf("bad escape sequence %q: not a digit", s[:3])
+		}
+	}
+
+	return string(buf[:bufLen]), nil
+}
diff --git a/vendor/github.com/moby/sys/mountinfo/mountinfo_unsupported.go b/vendor/github.com/moby/sys/mountinfo/mountinfo_unsupported.go
index 8ff917e0..d33ebca0 100644
--- a/vendor/github.com/moby/sys/mountinfo/mountinfo_unsupported.go
+++ b/vendor/github.com/moby/sys/mountinfo/mountinfo_unsupported.go
@@ -1,4 +1,4 @@
-// +build !windows,!linux,!freebsd freebsd,!cgo
+// +build !windows,!linux,!freebsd,!openbsd freebsd,!cgo openbsd,!cgo
 
 package mountinfo
 
@@ -7,6 +7,12 @@ import (
 	"runtime"
 )
 
-func parseMountTable(f FilterFunc) ([]*Info, error) {
-	return nil, fmt.Errorf("mount.parseMountTable is not implemented on %s/%s", runtime.GOOS, runtime.GOARCH)
+var errNotImplemented = fmt.Errorf("not implemented on %s/%s", runtime.GOOS, runtime.GOARCH)
+
+func parseMountTable(_ FilterFunc) ([]*Info, error) {
+	return nil, errNotImplemented
+}
+
+func mounted(path string) (bool, error) {
+	return false, errNotImplemented
 }
diff --git a/vendor/github.com/moby/sys/mountinfo/mountinfo_windows.go b/vendor/github.com/moby/sys/mountinfo/mountinfo_windows.go
index efd0b271..13fad165 100644
--- a/vendor/github.com/moby/sys/mountinfo/mountinfo_windows.go
+++ b/vendor/github.com/moby/sys/mountinfo/mountinfo_windows.go
@@ -1,6 +1,10 @@
 package mountinfo
 
-func parseMountTable(f FilterFunc) ([]*Info, error) {
+func parseMountTable(_ FilterFunc) ([]*Info, error) {
 	// Do NOT return an error!
 	return nil, nil
 }
+
+func mounted(_ string) (bool, error) {
+	return false, nil
+}
diff --git a/vendor/github.com/opencontainers/image-spec/identity/chainid.go b/vendor/github.com/opencontainers/image-spec/identity/chainid.go
new file mode 100644
index 00000000..0bb28535
--- /dev/null
+++ b/vendor/github.com/opencontainers/image-spec/identity/chainid.go
@@ -0,0 +1,67 @@
+// Copyright 2016 The Linux Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package identity provides implementations of subtle calculations pertaining
+// to image and layer identity. The primary item present here is the ChainID
+// calculation used in identifying the result of subsequent layer applications.
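+//
+// For illustration, with layer DiffIDs D1..Dn the recurrence implemented by
+// ChainIDs below is (digest strings joined with a single space):
+//
+//	ChainID(D1)     = D1
+//	ChainID(D1..Dk) = Digest(ChainID(D1..Dk-1) + " " + Dk)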
+//
+// Helpers are also provided here to ease transition to the
+// github.com/opencontainers/go-digest package, but that package may be used
+// directly.
+package identity
+
+import "github.com/opencontainers/go-digest"
+
+// ChainID takes a slice of digests and returns the ChainID corresponding to
+// the last entry. Typically, these are a list of layer DiffIDs, with the
+// result providing the ChainID identifying the result of sequential
+// application of the preceding layers.
+func ChainID(dgsts []digest.Digest) digest.Digest {
+	chainIDs := make([]digest.Digest, len(dgsts))
+	copy(chainIDs, dgsts)
+	ChainIDs(chainIDs)
+
+	if len(chainIDs) == 0 {
+		return ""
+	}
+	return chainIDs[len(chainIDs)-1]
+}
+
+// ChainIDs calculates the recursively applied chain id for each identifier in
+// the slice. The result is written directly back into the slice such that the
+// ChainID for each item will be in the respective position.
+//
+// By definition of ChainID, the zeroth element will always be the same before
+// and after the call.
+//
+// As an example, given the chain of ids `[A, B, C]`, the result `[A,
+// ChainID(A|B), ChainID(A|B|C)]` will be written back to the slice.
+//
+// The input is provided as a return value for convenience.
+//
+// Typically, these are a list of layer DiffIDs, with the result providing the
+// ChainID for the result of each layer application, sequentially.
+func ChainIDs(dgsts []digest.Digest) []digest.Digest {
+	if len(dgsts) < 2 {
+		return dgsts
+	}
+
+	parent := digest.FromBytes([]byte(dgsts[0] + " " + dgsts[1]))
+	next := dgsts[1:]
+	next[0] = parent
+	ChainIDs(next)
+
+	return dgsts
+}
diff --git a/vendor/github.com/opencontainers/image-spec/identity/helpers.go b/vendor/github.com/opencontainers/image-spec/identity/helpers.go
new file mode 100644
index 00000000..9d96eaa9
--- /dev/null
+++ b/vendor/github.com/opencontainers/image-spec/identity/helpers.go
@@ -0,0 +1,40 @@
+// Copyright 2016 The Linux Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package identity
+
+import (
+	_ "crypto/sha256" // side-effect to install impls, sha256
+	_ "crypto/sha512" // side-effect to install impls, sha384/sha512
+
+	"io"
+
+	digest "github.com/opencontainers/go-digest"
+)
+
+// FromReader consumes the content of rd until io.EOF, returning canonical
+// digest.
+func FromReader(rd io.Reader) (digest.Digest, error) {
+	return digest.Canonical.FromReader(rd)
+}
+
+// FromBytes digests the input and returns a Digest.
+func FromBytes(p []byte) digest.Digest {
+	return digest.Canonical.FromBytes(p)
+}
+
+// FromString digests the input and returns a Digest.
+func FromString(s string) digest.Digest { + return digest.Canonical.FromString(s) +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go b/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go index 8b199d92..a4ae8901 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go @@ -3,12 +3,13 @@ package system import ( - "bufio" - "fmt" "os" "os/exec" - "syscall" + "syscall" // only for exec "unsafe" + + "github.com/opencontainers/runc/libcontainer/user" + "golang.org/x/sys/unix" ) // If arg2 is nonzero, set the "child subreaper" attribute of the @@ -53,8 +54,8 @@ func Execv(cmd string, args []string, env []string) error { return syscall.Exec(name, args, env) } -func Prlimit(pid, resource int, limit syscall.Rlimit) error { - _, _, err := syscall.RawSyscall6(syscall.SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(&limit)), uintptr(unsafe.Pointer(&limit)), 0, 0) +func Prlimit(pid, resource int, limit unix.Rlimit) error { + _, _, err := unix.RawSyscall6(unix.SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(&limit)), uintptr(unsafe.Pointer(&limit)), 0, 0) if err != 0 { return err } @@ -62,7 +63,7 @@ func Prlimit(pid, resource int, limit syscall.Rlimit) error { } func SetParentDeathSignal(sig uintptr) error { - if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_PDEATHSIG, sig, 0); err != 0 { + if err := unix.Prctl(unix.PR_SET_PDEATHSIG, sig, 0, 0, 0); err != nil { return err } return nil @@ -70,15 +71,14 @@ func SetParentDeathSignal(sig uintptr) error { func GetParentDeathSignal() (ParentDeathSignal, error) { var sig int - _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_GET_PDEATHSIG, uintptr(unsafe.Pointer(&sig)), 0) - if err != 0 { + if err := unix.Prctl(unix.PR_GET_PDEATHSIG, uintptr(unsafe.Pointer(&sig)), 0, 0, 0); err != nil { return -1, err } return ParentDeathSignal(sig), nil } func SetKeepCaps() error { - if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_KEEPCAPS, 1, 0); err != 0 { + if err := unix.Prctl(unix.PR_SET_KEEPCAPS, 1, 0, 0, 0); err != nil { return err } @@ -86,7 +86,7 @@ func SetKeepCaps() error { } func ClearKeepCaps() error { - if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_KEEPCAPS, 0, 0); err != 0 { + if err := unix.Prctl(unix.PR_SET_KEEPCAPS, 0, 0, 0, 0); err != nil { return err } @@ -94,55 +94,62 @@ func ClearKeepCaps() error { } func Setctty() error { - if _, _, err := syscall.RawSyscall(syscall.SYS_IOCTL, 0, uintptr(syscall.TIOCSCTTY), 0); err != 0 { + if err := unix.IoctlSetInt(0, unix.TIOCSCTTY, 0); err != nil { return err } return nil } -/* - * Detect whether we are currently running in a user namespace. - * Copied from github.com/lxc/lxd/shared/util.go - */ +// RunningInUserNS detects whether we are currently running in a user namespace. 
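+// In the initial user namespace, /proc/self/uid_map contains the full
+// range "0 0 4294967295" (the exact condition UIDMapInUserNS checks below);
+// any other mapping means the process is inside a user namespace.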
+// Originally copied from github.com/lxc/lxd/shared/util.go func RunningInUserNS() bool { - file, err := os.Open("/proc/self/uid_map") + uidmap, err := user.CurrentProcessUIDMap() if err != nil { - /* - * This kernel-provided file only exists if user namespaces are - * supported - */ + // This kernel-provided file only exists if user namespaces are supported return false } - defer file.Close() + return UIDMapInUserNS(uidmap) +} - buf := bufio.NewReader(file) - l, _, err := buf.ReadLine() - if err != nil { - return false - } - - line := string(l) - var a, b, c int64 - fmt.Sscanf(line, "%d %d %d", &a, &b, &c) +func UIDMapInUserNS(uidmap []user.IDMap) bool { /* * We assume we are in the initial user namespace if we have a full * range - 4294967295 uids starting at uid 0. */ - if a == 0 && b == 0 && c == 4294967295 { + if len(uidmap) == 1 && uidmap[0].ID == 0 && uidmap[0].ParentID == 0 && uidmap[0].Count == 4294967295 { return false } return true } -// SetSubreaper sets the value i as the subreaper setting for the calling process -func SetSubreaper(i int) error { - return Prctl(PR_SET_CHILD_SUBREAPER, uintptr(i), 0, 0, 0) +// GetParentNSeuid returns the euid within the parent user namespace +func GetParentNSeuid() int64 { + euid := int64(os.Geteuid()) + uidmap, err := user.CurrentProcessUIDMap() + if err != nil { + // This kernel-provided file only exists if user namespaces are supported + return euid + } + for _, um := range uidmap { + if um.ID <= euid && euid <= um.ID+um.Count-1 { + return um.ParentID + euid - um.ID + } + } + return euid } -func Prctl(option int, arg2, arg3, arg4, arg5 uintptr) (err error) { - _, _, e1 := syscall.Syscall6(syscall.SYS_PRCTL, uintptr(option), arg2, arg3, arg4, arg5, 0) - if e1 != 0 { - err = e1 - } - return +// SetSubreaper sets the value i as the subreaper setting for the calling process +func SetSubreaper(i int) error { + return unix.Prctl(PR_SET_CHILD_SUBREAPER, uintptr(i), 0, 0, 0) +} + +// GetSubreaper returns the subreaper setting for the calling process +func GetSubreaper() (int, error) { + var i uintptr + + if err := unix.Prctl(unix.PR_GET_CHILD_SUBREAPER, uintptr(unsafe.Pointer(&i)), 0, 0, 0); err != nil { + return -1, err + } + + return int(i), nil } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/proc.go b/vendor/github.com/opencontainers/runc/libcontainer/system/proc.go index 37808a29..79232a43 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/proc.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/system/proc.go @@ -1,27 +1,113 @@ package system import ( + "fmt" "io/ioutil" "path/filepath" "strconv" "strings" ) -// look in /proc to find the process start time so that we can verify -// that this pid has started after ourself +// State is the status of a process. +type State rune + +const ( // Only values for Linux 3.14 and later are listed here + Dead State = 'X' + DiskSleep State = 'D' + Running State = 'R' + Sleeping State = 'S' + Stopped State = 'T' + TracingStop State = 't' + Zombie State = 'Z' +) + +// String forms of the state from proc(5)'s documentation for +// /proc/[pid]/status' "State" field. 
+func (s State) String() string { + switch s { + case Dead: + return "dead" + case DiskSleep: + return "disk sleep" + case Running: + return "running" + case Sleeping: + return "sleeping" + case Stopped: + return "stopped" + case TracingStop: + return "tracing stop" + case Zombie: + return "zombie" + default: + return fmt.Sprintf("unknown (%c)", s) + } +} + +// Stat_t represents the information from /proc/[pid]/stat, as +// described in proc(5) with names based on the /proc/[pid]/status +// fields. +type Stat_t struct { + // PID is the process ID. + PID uint + + // Name is the command run by the process. + Name string + + // State is the state of the process. + State State + + // StartTime is the number of clock ticks after system boot (since + // Linux 2.6). + StartTime uint64 +} + +// Stat returns a Stat_t instance for the specified process. +func Stat(pid int) (stat Stat_t, err error) { + bytes, err := ioutil.ReadFile(filepath.Join("/proc", strconv.Itoa(pid), "stat")) + if err != nil { + return stat, err + } + return parseStat(string(bytes)) +} + +// GetProcessStartTime is deprecated. Use Stat(pid) and +// Stat_t.StartTime instead. func GetProcessStartTime(pid int) (string, error) { - data, err := ioutil.ReadFile(filepath.Join("/proc", strconv.Itoa(pid), "stat")) + stat, err := Stat(pid) if err != nil { return "", err } - - parts := strings.Split(string(data), " ") - // the starttime is located at pos 22 - // from the man page - // - // starttime %llu (was %lu before Linux 2.6) - // (22) The time the process started after system boot. In kernels before Linux 2.6, this - // value was expressed in jiffies. Since Linux 2.6, the value is expressed in clock ticks - // (divide by sysconf(_SC_CLK_TCK)). - return parts[22-1], nil // starts at 1 + return fmt.Sprintf("%d", stat.StartTime), nil +} + +func parseStat(data string) (stat Stat_t, err error) { + // From proc(5), field 2 could contain space and is inside `(` and `)`. + // The following is an example: + // 89653 (gunicorn: maste) S 89630 89653 89653 0 -1 4194560 29689 28896 0 3 146 32 76 19 20 0 1 0 2971844 52965376 3920 18446744073709551615 1 1 0 0 0 0 0 16781312 137447943 0 0 0 17 1 0 0 0 0 0 0 0 0 0 0 0 0 0 + i := strings.LastIndex(data, ")") + if i <= 2 || i >= len(data)-1 { + return stat, fmt.Errorf("invalid stat data: %q", data) + } + + parts := strings.SplitN(data[:i], "(", 2) + if len(parts) != 2 { + return stat, fmt.Errorf("invalid stat data: %q", data) + } + + stat.Name = parts[1] + _, err = fmt.Sscanf(parts[0], "%d", &stat.PID) + if err != nil { + return stat, err + } + + // parts indexes should be offset by 3 from the field number given + // proc(5), because parts is zero-indexed and we've removed fields + // one (PID) and two (Name) in the paren-split. 
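+	// For example, in the sample line quoted above, parts[3-3] is "S"
+	// (State) and parts[22-3] is "2971844" (StartTime).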
+ parts = strings.Split(data[i+2:], " ") + var state int + fmt.Sscanf(parts[3-3], "%c", &state) + stat.State = State(state) + fmt.Sscanf(parts[22-3], "%d", &stat.StartTime) + return stat, nil } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/setns_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/system/setns_linux.go deleted file mode 100644 index 615ff4c8..00000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/setns_linux.go +++ /dev/null @@ -1,40 +0,0 @@ -package system - -import ( - "fmt" - "runtime" - "syscall" -) - -// Via http://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/commit/?id=7b21fddd087678a70ad64afc0f632e0f1071b092 -// -// We need different setns values for the different platforms and arch -// We are declaring the macro here because the SETNS syscall does not exist in th stdlib -var setNsMap = map[string]uintptr{ - "linux/386": 346, - "linux/arm64": 268, - "linux/amd64": 308, - "linux/arm": 375, - "linux/ppc": 350, - "linux/ppc64": 350, - "linux/ppc64le": 350, - "linux/s390x": 339, -} - -var sysSetns = setNsMap[fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH)] - -func SysSetns() uint32 { - return uint32(sysSetns) -} - -func Setns(fd uintptr, flags uintptr) error { - ns, exists := setNsMap[fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH)] - if !exists { - return fmt.Errorf("unsupported platform %s/%s", runtime.GOOS, runtime.GOARCH) - } - _, _, err := syscall.RawSyscall(ns, fd, flags, 0) - if err != 0 { - return err - } - return nil -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_arm.go b/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_32.go similarity index 61% rename from vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_arm.go rename to vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_32.go index 3f780f31..c5ca5d86 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_arm.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_32.go @@ -1,14 +1,15 @@ -// +build linux,arm +// +build linux +// +build 386 arm package system import ( - "syscall" + "golang.org/x/sys/unix" ) // Setuid sets the uid of the calling thread to the specified uid. func Setuid(uid int) (err error) { - _, _, e1 := syscall.RawSyscall(syscall.SYS_SETUID32, uintptr(uid), 0, 0) + _, _, e1 := unix.RawSyscall(unix.SYS_SETUID32, uintptr(uid), 0, 0) if e1 != 0 { err = e1 } @@ -17,7 +18,7 @@ func Setuid(uid int) (err error) { // Setgid sets the gid of the calling thread to the specified gid. func Setgid(gid int) (err error) { - _, _, e1 := syscall.RawSyscall(syscall.SYS_SETGID32, uintptr(gid), 0, 0) + _, _, e1 := unix.RawSyscall(unix.SYS_SETGID32, uintptr(gid), 0, 0) if e1 != 0 { err = e1 } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_386.go b/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_386.go deleted file mode 100644 index c9900651..00000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_386.go +++ /dev/null @@ -1,25 +0,0 @@ -// +build linux,386 - -package system - -import ( - "syscall" -) - -// Setuid sets the uid of the calling thread to the specified uid. -func Setuid(uid int) (err error) { - _, _, e1 := syscall.RawSyscall(syscall.SYS_SETUID, uintptr(uid), 0, 0) - if e1 != 0 { - err = e1 - } - return -} - -// Setgid sets the gid of the calling thread to the specified gid. 
-func Setgid(gid int) (err error) { - _, _, e1 := syscall.RawSyscall(syscall.SYS_SETGID32, uintptr(gid), 0, 0) - if e1 != 0 { - err = e1 - } - return -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_64.go b/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_64.go index 0816bf82..e05e30ad 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_64.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_64.go @@ -1,14 +1,15 @@ -// +build linux,arm64 linux,amd64 linux,ppc linux,ppc64 linux,ppc64le linux,s390x +// +build linux +// +build arm64 amd64 mips mipsle mips64 mips64le ppc ppc64 ppc64le riscv64 s390x package system import ( - "syscall" + "golang.org/x/sys/unix" ) // Setuid sets the uid of the calling thread to the specified uid. func Setuid(uid int) (err error) { - _, _, e1 := syscall.RawSyscall(syscall.SYS_SETUID, uintptr(uid), 0, 0) + _, _, e1 := unix.RawSyscall(unix.SYS_SETUID, uintptr(uid), 0, 0) if e1 != 0 { err = e1 } @@ -17,7 +18,7 @@ func Setuid(uid int) (err error) { // Setgid sets the gid of the calling thread to the specified gid. func Setgid(gid int) (err error) { - _, _, e1 := syscall.RawSyscall(syscall.SYS_SETGID, uintptr(gid), 0, 0) + _, _, e1 := unix.RawSyscall(unix.SYS_SETGID, uintptr(gid), 0, 0) if e1 != 0 { err = e1 } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig.go b/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig.go index b3a07cba..b8434f10 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig.go @@ -1,4 +1,4 @@ -// +build cgo,linux cgo,freebsd +// +build cgo,linux package system diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/system/unsupported.go index e7cfd62b..b94be74a 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/unsupported.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/system/unsupported.go @@ -2,8 +2,26 @@ package system +import ( + "os" + + "github.com/opencontainers/runc/libcontainer/user" +) + // RunningInUserNS is a stub for non-Linux systems // Always returns false func RunningInUserNS() bool { return false } + +// UIDMapInUserNS is a stub for non-Linux systems +// Always returns false +func UIDMapInUserNS(uidmap []user.IDMap) bool { + return false +} + +// GetParentNSeuid returns the euid within the parent user namespace +// Always returns os.Geteuid on non-linux +func GetParentNSeuid() int { + return os.Geteuid() +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/xattrs_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/system/xattrs_linux.go index 30f74dfb..a6823fc9 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/xattrs_linux.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/system/xattrs_linux.go @@ -1,99 +1,35 @@ package system -import ( - "syscall" - "unsafe" -) - -var _zero uintptr - -// Returns the size of xattrs and nil error -// Requires path, takes allocated []byte or nil as last argument -func Llistxattr(path string, dest []byte) (size int, err error) { - pathBytes, err := syscall.BytePtrFromString(path) - if err != nil { - return -1, err - } - var newpathBytes unsafe.Pointer - if len(dest) > 0 { - newpathBytes = unsafe.Pointer(&dest[0]) - } else { - newpathBytes = 
unsafe.Pointer(&_zero) - } - - _size, _, errno := syscall.Syscall6(syscall.SYS_LLISTXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(newpathBytes), uintptr(len(dest)), 0, 0, 0) - size = int(_size) - if errno != 0 { - return -1, errno - } - - return size, nil -} +import "golang.org/x/sys/unix" // Returns a []byte slice if the xattr is set and nil otherwise // Requires path and its attribute as arguments func Lgetxattr(path string, attr string) ([]byte, error) { var sz int - pathBytes, err := syscall.BytePtrFromString(path) - if err != nil { - return nil, err - } - attrBytes, err := syscall.BytePtrFromString(attr) - if err != nil { - return nil, err - } - // Start with a 128 length byte array - sz = 128 - dest := make([]byte, sz) - destBytes := unsafe.Pointer(&dest[0]) - _sz, _, errno := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0) + dest := make([]byte, 128) + sz, errno := unix.Lgetxattr(path, attr, dest) switch { - case errno == syscall.ENODATA: + case errno == unix.ENODATA: return nil, errno - case errno == syscall.ENOTSUP: + case errno == unix.ENOTSUP: return nil, errno - case errno == syscall.ERANGE: + case errno == unix.ERANGE: // 128 byte array might just not be good enough, - // A dummy buffer is used ``uintptr(0)`` to get real size + // A dummy buffer is used to get the real size // of the xattrs on disk - _sz, _, errno = syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(unsafe.Pointer(nil)), uintptr(0), 0, 0) - sz = int(_sz) - if sz < 0 { + sz, errno = unix.Lgetxattr(path, attr, []byte{}) + if errno != nil { return nil, errno } dest = make([]byte, sz) - destBytes := unsafe.Pointer(&dest[0]) - _sz, _, errno = syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0) - if errno != 0 { + sz, errno = unix.Lgetxattr(path, attr, dest) + if errno != nil { return nil, errno } - case errno != 0: + case errno != nil: return nil, errno } - sz = int(_sz) return dest[:sz], nil } - -func Lsetxattr(path string, attr string, data []byte, flags int) error { - pathBytes, err := syscall.BytePtrFromString(path) - if err != nil { - return err - } - attrBytes, err := syscall.BytePtrFromString(attr) - if err != nil { - return err - } - var dataBytes unsafe.Pointer - if len(data) > 0 { - dataBytes = unsafe.Pointer(&data[0]) - } else { - dataBytes = unsafe.Pointer(&_zero) - } - _, _, errno := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(dataBytes), uintptr(len(data)), uintptr(flags), 0) - if errno != 0 { - return errno - } - return nil -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/lookup.go b/vendor/github.com/opencontainers/runc/libcontainer/user/lookup.go index ab1439f3..6fd8dd0d 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/user/lookup.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/user/lookup.go @@ -2,7 +2,6 @@ package user import ( "errors" - "syscall" ) var ( @@ -13,98 +12,30 @@ var ( ErrNoGroupEntries = errors.New("no matching entries in group file") ) -func lookupUser(filter func(u User) bool) (User, error) { - // Get operating system-specific passwd reader-closer. - passwd, err := GetPasswd() - if err != nil { - return User{}, err - } - defer passwd.Close() - - // Get the users. 
- users, err := ParsePasswdFilter(passwd, filter) - if err != nil { - return User{}, err - } - - // No user entries found. - if len(users) == 0 { - return User{}, ErrNoPasswdEntries - } - - // Assume the first entry is the "correct" one. - return users[0], nil -} - -// CurrentUser looks up the current user by their user id in /etc/passwd. If the -// user cannot be found (or there is no /etc/passwd file on the filesystem), -// then CurrentUser returns an error. -func CurrentUser() (User, error) { - return LookupUid(syscall.Getuid()) -} - // LookupUser looks up a user by their username in /etc/passwd. If the user // cannot be found (or there is no /etc/passwd file on the filesystem), then // LookupUser returns an error. func LookupUser(username string) (User, error) { - return lookupUser(func(u User) bool { - return u.Name == username - }) + return lookupUser(username) } // LookupUid looks up a user by their user id in /etc/passwd. If the user cannot // be found (or there is no /etc/passwd file on the filesystem), then LookupId // returns an error. func LookupUid(uid int) (User, error) { - return lookupUser(func(u User) bool { - return u.Uid == uid - }) -} - -func lookupGroup(filter func(g Group) bool) (Group, error) { - // Get operating system-specific group reader-closer. - group, err := GetGroup() - if err != nil { - return Group{}, err - } - defer group.Close() - - // Get the users. - groups, err := ParseGroupFilter(group, filter) - if err != nil { - return Group{}, err - } - - // No user entries found. - if len(groups) == 0 { - return Group{}, ErrNoGroupEntries - } - - // Assume the first entry is the "correct" one. - return groups[0], nil -} - -// CurrentGroup looks up the current user's group by their primary group id's -// entry in /etc/passwd. If the group cannot be found (or there is no -// /etc/group file on the filesystem), then CurrentGroup returns an error. -func CurrentGroup() (Group, error) { - return LookupGid(syscall.Getgid()) + return lookupUid(uid) } // LookupGroup looks up a group by its name in /etc/group. If the group cannot // be found (or there is no /etc/group file on the filesystem), then LookupGroup // returns an error. func LookupGroup(groupname string) (Group, error) { - return lookupGroup(func(g Group) bool { - return g.Name == groupname - }) + return lookupGroup(groupname) } // LookupGid looks up a group by its group id in /etc/group. If the group cannot // be found (or there is no /etc/group file on the filesystem), then LookupGid // returns an error. func LookupGid(gid int) (Group, error) { - return lookupGroup(func(g Group) bool { - return g.Gid == gid - }) + return lookupGid(gid) } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go b/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go index 758b734c..92b5ae8d 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go @@ -5,6 +5,9 @@ package user import ( "io" "os" + "strconv" + + "golang.org/x/sys/unix" ) // Unix-specific path to the passwd and group formatted files. 
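A minimal usage sketch of these lookup helpers (illustrative only; assumes the import path github.com/opencontainers/runc/libcontainer/user and error handling elided):

	u, err := user.LookupUser("root") // on Unix, delegates to lookupUser, scanning /etc/passwd
	if err == nil {
		g, _ := user.LookupGid(u.Gid) // delegates to lookupGid, scanning /etc/group
		_ = g
	}
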
@@ -13,6 +16,76 @@ const ( unixGroupPath = "/etc/group" ) +func lookupUser(username string) (User, error) { + return lookupUserFunc(func(u User) bool { + return u.Name == username + }) +} + +func lookupUid(uid int) (User, error) { + return lookupUserFunc(func(u User) bool { + return u.Uid == uid + }) +} + +func lookupUserFunc(filter func(u User) bool) (User, error) { + // Get operating system-specific passwd reader-closer. + passwd, err := GetPasswd() + if err != nil { + return User{}, err + } + defer passwd.Close() + + // Get the users. + users, err := ParsePasswdFilter(passwd, filter) + if err != nil { + return User{}, err + } + + // No user entries found. + if len(users) == 0 { + return User{}, ErrNoPasswdEntries + } + + // Assume the first entry is the "correct" one. + return users[0], nil +} + +func lookupGroup(groupname string) (Group, error) { + return lookupGroupFunc(func(g Group) bool { + return g.Name == groupname + }) +} + +func lookupGid(gid int) (Group, error) { + return lookupGroupFunc(func(g Group) bool { + return g.Gid == gid + }) +} + +func lookupGroupFunc(filter func(g Group) bool) (Group, error) { + // Get operating system-specific group reader-closer. + group, err := GetGroup() + if err != nil { + return Group{}, err + } + defer group.Close() + + // Get the users. + groups, err := ParseGroupFilter(group, filter) + if err != nil { + return Group{}, err + } + + // No user entries found. + if len(groups) == 0 { + return Group{}, ErrNoGroupEntries + } + + // Assume the first entry is the "correct" one. + return groups[0], nil +} + func GetPasswdPath() (string, error) { return unixPasswdPath, nil } @@ -28,3 +101,44 @@ func GetGroupPath() (string, error) { func GetGroup() (io.ReadCloser, error) { return os.Open(unixGroupPath) } + +// CurrentUser looks up the current user by their user id in /etc/passwd. If the +// user cannot be found (or there is no /etc/passwd file on the filesystem), +// then CurrentUser returns an error. +func CurrentUser() (User, error) { + return LookupUid(unix.Getuid()) +} + +// CurrentGroup looks up the current user's group by their primary group id's +// entry in /etc/passwd. If the group cannot be found (or there is no +// /etc/group file on the filesystem), then CurrentGroup returns an error. 
+func CurrentGroup() (Group, error) { + return LookupGid(unix.Getgid()) +} + +func currentUserSubIDs(fileName string) ([]SubID, error) { + u, err := CurrentUser() + if err != nil { + return nil, err + } + filter := func(entry SubID) bool { + return entry.Name == u.Name || entry.Name == strconv.Itoa(u.Uid) + } + return ParseSubIDFileFilter(fileName, filter) +} + +func CurrentUserSubUIDs() ([]SubID, error) { + return currentUserSubIDs("/etc/subuid") +} + +func CurrentUserSubGIDs() ([]SubID, error) { + return currentUserSubIDs("/etc/subgid") +} + +func CurrentProcessUIDMap() ([]IDMap, error) { + return ParseIDMapFile("/proc/self/uid_map") +} + +func CurrentProcessGIDMap() ([]IDMap, error) { + return ParseIDMapFile("/proc/self/gid_map") +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unsupported.go deleted file mode 100644 index 72179488..00000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unsupported.go +++ /dev/null @@ -1,21 +0,0 @@ -// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris - -package user - -import "io" - -func GetPasswdPath() (string, error) { - return "", ErrUnsupported -} - -func GetPasswd() (io.ReadCloser, error) { - return nil, ErrUnsupported -} - -func GetGroupPath() (string, error) { - return "", ErrUnsupported -} - -func GetGroup() (io.ReadCloser, error) { - return nil, ErrUnsupported -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_windows.go b/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_windows.go new file mode 100644 index 00000000..65cd40e9 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_windows.go @@ -0,0 +1,40 @@ +// +build windows + +package user + +import ( + "fmt" + "os/user" +) + +func lookupUser(username string) (User, error) { + u, err := user.Lookup(username) + if err != nil { + return User{}, err + } + return userFromOS(u) +} + +func lookupUid(uid int) (User, error) { + u, err := user.LookupId(fmt.Sprintf("%d", uid)) + if err != nil { + return User{}, err + } + return userFromOS(u) +} + +func lookupGroup(groupname string) (Group, error) { + g, err := user.LookupGroup(groupname) + if err != nil { + return Group{}, err + } + return groupFromOS(g) +} + +func lookupGid(gid int) (Group, error) { + g, err := user.LookupGroupId(fmt.Sprintf("%d", gid)) + if err != nil { + return Group{}, err + } + return groupFromOS(g) +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/user.go b/vendor/github.com/opencontainers/runc/libcontainer/user/user.go index 43fd39ef..7b912bbf 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/user/user.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/user/user.go @@ -5,6 +5,7 @@ import ( "fmt" "io" "os" + "os/user" "strconv" "strings" ) @@ -28,6 +29,28 @@ type User struct { Shell string } +// userFromOS converts an os/user.(*User) to local User +// +// (This does not include Pass, Shell or Gecos) +func userFromOS(u *user.User) (User, error) { + newUser := User{ + Name: u.Username, + Home: u.HomeDir, + } + id, err := strconv.Atoi(u.Uid) + if err != nil { + return newUser, err + } + newUser.Uid = id + + id, err = strconv.Atoi(u.Gid) + if err != nil { + return newUser, err + } + newUser.Gid = id + return newUser, nil +} + type Group struct { Name string Pass string @@ -35,12 +58,46 @@ type Group struct { List []string } +// groupFromOS converts an os/user.(*Group) 
to local Group +// +// (This does not include Pass, Shell or Gecos) +func groupFromOS(g *user.Group) (Group, error) { + newGroup := Group{ + Name: g.Name, + } + + id, err := strconv.Atoi(g.Gid) + if err != nil { + return newGroup, err + } + newGroup.Gid = id + + return newGroup, nil +} + +// SubID represents an entry in /etc/sub{u,g}id +type SubID struct { + Name string + SubID int64 + Count int64 +} + +// IDMap represents an entry in /proc/PID/{u,g}id_map +type IDMap struct { + ID int64 + ParentID int64 + Count int64 +} + func parseLine(line string, v ...interface{}) { - if line == "" { + parseParts(strings.Split(line, ":"), v...) +} + +func parseParts(parts []string, v ...interface{}) { + if len(parts) == 0 { return } - parts := strings.Split(line, ":") for i, p := range parts { // Ignore cases where we don't have enough fields to populate the arguments. // Some configuration files like to misbehave. @@ -56,6 +113,8 @@ func parseLine(line string, v ...interface{}) { case *int: // "numbers", with conversion errors ignored because of some misbehaving configuration files. *e, _ = strconv.Atoi(p) + case *int64: + *e, _ = strconv.ParseInt(p, 10, 64) case *[]string: // Comma-separated lists. if p != "" { @@ -65,7 +124,7 @@ func parseLine(line string, v ...interface{}) { } default: // Someone goof'd when writing code using this function. Scream so they can hear us. - panic(fmt.Sprintf("parseLine only accepts {*string, *int, *[]string} as arguments! %#v is not a pointer!", e)) + panic(fmt.Sprintf("parseLine only accepts {*string, *int, *int64, *[]string} as arguments! %#v is not a pointer!", e)) } } } @@ -199,18 +258,16 @@ type ExecUser struct { // files cannot be opened for any reason, the error is ignored and a nil // io.Reader is passed instead. func GetExecUserPath(userSpec string, defaults *ExecUser, passwdPath, groupPath string) (*ExecUser, error) { - passwd, err := os.Open(passwdPath) - if err != nil { - passwd = nil - } else { - defer passwd.Close() + var passwd, group io.Reader + + if passwdFile, err := os.Open(passwdPath); err == nil { + passwd = passwdFile + defer passwdFile.Close() } - group, err := os.Open(groupPath) - if err != nil { - group = nil - } else { - defer group.Close() + if groupFile, err := os.Open(groupPath); err == nil { + group = groupFile + defer groupFile.Close() } return GetExecUser(userSpec, defaults, passwd, group) @@ -343,7 +400,7 @@ func GetExecUser(userSpec string, defaults *ExecUser, passwd, group io.Reader) ( if len(groups) > 0 { // First match wins, even if there's more than one matching entry. user.Gid = groups[0].Gid - } else if groupArg != "" { + } else { // If we can't find a group with the given name, the only other valid // option is if it's a numeric group name with no associated entry in group. @@ -433,9 +490,119 @@ func GetAdditionalGroups(additionalGroups []string, group io.Reader) ([]int, err // that opens the groupPath given and gives it as an argument to // GetAdditionalGroups. 
func GetAdditionalGroupsPath(additionalGroups []string, groupPath string) ([]int, error) { - group, err := os.Open(groupPath) - if err == nil { - defer group.Close() + var group io.Reader + + if groupFile, err := os.Open(groupPath); err == nil { + group = groupFile + defer groupFile.Close() } return GetAdditionalGroups(additionalGroups, group) } + +func ParseSubIDFile(path string) ([]SubID, error) { + subid, err := os.Open(path) + if err != nil { + return nil, err + } + defer subid.Close() + return ParseSubID(subid) +} + +func ParseSubID(subid io.Reader) ([]SubID, error) { + return ParseSubIDFilter(subid, nil) +} + +func ParseSubIDFileFilter(path string, filter func(SubID) bool) ([]SubID, error) { + subid, err := os.Open(path) + if err != nil { + return nil, err + } + defer subid.Close() + return ParseSubIDFilter(subid, filter) +} + +func ParseSubIDFilter(r io.Reader, filter func(SubID) bool) ([]SubID, error) { + if r == nil { + return nil, fmt.Errorf("nil source for subid-formatted data") + } + + var ( + s = bufio.NewScanner(r) + out = []SubID{} + ) + + for s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + + line := strings.TrimSpace(s.Text()) + if line == "" { + continue + } + + // see: man 5 subuid + p := SubID{} + parseLine(line, &p.Name, &p.SubID, &p.Count) + + if filter == nil || filter(p) { + out = append(out, p) + } + } + + return out, nil +} + +func ParseIDMapFile(path string) ([]IDMap, error) { + r, err := os.Open(path) + if err != nil { + return nil, err + } + defer r.Close() + return ParseIDMap(r) +} + +func ParseIDMap(r io.Reader) ([]IDMap, error) { + return ParseIDMapFilter(r, nil) +} + +func ParseIDMapFileFilter(path string, filter func(IDMap) bool) ([]IDMap, error) { + r, err := os.Open(path) + if err != nil { + return nil, err + } + defer r.Close() + return ParseIDMapFilter(r, filter) +} + +func ParseIDMapFilter(r io.Reader, filter func(IDMap) bool) ([]IDMap, error) { + if r == nil { + return nil, fmt.Errorf("nil source for idmap-formatted data") + } + + var ( + s = bufio.NewScanner(r) + out = []IDMap{} + ) + + for s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + + line := strings.TrimSpace(s.Text()) + if line == "" { + continue + } + + // see: man 7 user_namespaces + p := IDMap{} + parseParts(strings.Fields(line), &p.ID, &p.ParentID, &p.Count) + + if filter == nil || filter(p) { + out = append(out, p) + } + } + + return out, nil +} diff --git a/vendor/github.com/opentracing-contrib/go-stdlib/LICENSE b/vendor/github.com/opentracing-contrib/go-stdlib/LICENSE new file mode 100644 index 00000000..c259d129 --- /dev/null +++ b/vendor/github.com/opentracing-contrib/go-stdlib/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2016, opentracing-contrib +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of go-stdlib nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. 
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/opentracing-contrib/go-stdlib/nethttp/client.go b/vendor/github.com/opentracing-contrib/go-stdlib/nethttp/client.go
new file mode 100644
index 00000000..8d33bcb6
--- /dev/null
+++ b/vendor/github.com/opentracing-contrib/go-stdlib/nethttp/client.go
@@ -0,0 +1,301 @@
+// +build go1.7
+
+package nethttp
+
+import (
+	"context"
+	"io"
+	"net/http"
+	"net/http/httptrace"
+
+	"github.com/opentracing/opentracing-go"
+	"github.com/opentracing/opentracing-go/ext"
+	"github.com/opentracing/opentracing-go/log"
+)
+
+type contextKey int
+
+const (
+	keyTracer contextKey = iota
+)
+
+const defaultComponentName = "net/http"
+
+// Transport wraps a RoundTripper. If a request is being traced with
+// Tracer, Transport will inject the current span into the headers,
+// and set HTTP related tags on the span.
+type Transport struct {
+	// The actual RoundTripper to use for the request. A nil
+	// RoundTripper defaults to http.DefaultTransport.
+	http.RoundTripper
+}
+
+type clientOptions struct {
+	operationName      string
+	componentName      string
+	disableClientTrace bool
+}
+
+// ClientOption controls the behavior of TraceRequest.
+type ClientOption func(*clientOptions)
+
+// OperationName returns a ClientOption that sets the operation
+// name for the client-side span.
+func OperationName(operationName string) ClientOption {
+	return func(options *clientOptions) {
+		options.operationName = operationName
+	}
+}
+
+// ComponentName returns a ClientOption that sets the component
+// name for the client-side span.
+func ComponentName(componentName string) ClientOption {
+	return func(options *clientOptions) {
+		options.componentName = componentName
+	}
+}
+
+// ClientTrace returns a ClientOption that turns on or off
+// extra instrumentation via httptrace.WithClientTrace.
+func ClientTrace(enabled bool) ClientOption {
+	return func(options *clientOptions) {
+		options.disableClientTrace = !enabled
+	}
+}
+
+// TraceRequest adds a ClientTracer to req, tracing the request and
+// all requests caused by redirects. When tracing requests this
+// way you must also use Transport.
+// +// Example: +// +// func AskGoogle(ctx context.Context) error { +// client := &http.Client{Transport: &nethttp.Transport{}} +// req, err := http.NewRequest("GET", "http://google.com", nil) +// if err != nil { +// return err +// } +// req = req.WithContext(ctx) // extend existing trace, if any +// +// req, ht := nethttp.TraceRequest(tracer, req) +// defer ht.Finish() +// +// res, err := client.Do(req) +// if err != nil { +// return err +// } +// res.Body.Close() +// return nil +// } +func TraceRequest(tr opentracing.Tracer, req *http.Request, options ...ClientOption) (*http.Request, *Tracer) { + opts := &clientOptions{} + for _, opt := range options { + opt(opts) + } + ht := &Tracer{tr: tr, opts: opts} + ctx := req.Context() + if !opts.disableClientTrace { + ctx = httptrace.WithClientTrace(ctx, ht.clientTrace()) + } + req = req.WithContext(context.WithValue(ctx, keyTracer, ht)) + return req, ht +} + +type closeTracker struct { + io.ReadCloser + sp opentracing.Span +} + +func (c closeTracker) Close() error { + err := c.ReadCloser.Close() + c.sp.LogFields(log.String("event", "ClosedBody")) + c.sp.Finish() + return err +} + +// RoundTrip implements the RoundTripper interface. +func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { + rt := t.RoundTripper + if rt == nil { + rt = http.DefaultTransport + } + tracer, ok := req.Context().Value(keyTracer).(*Tracer) + if !ok { + return rt.RoundTrip(req) + } + + tracer.start(req) + + ext.HTTPMethod.Set(tracer.sp, req.Method) + ext.HTTPUrl.Set(tracer.sp, req.URL.String()) + + carrier := opentracing.HTTPHeadersCarrier(req.Header) + tracer.sp.Tracer().Inject(tracer.sp.Context(), opentracing.HTTPHeaders, carrier) + resp, err := rt.RoundTrip(req) + + if err != nil { + tracer.sp.Finish() + return resp, err + } + ext.HTTPStatusCode.Set(tracer.sp, uint16(resp.StatusCode)) + if req.Method == "HEAD" { + tracer.sp.Finish() + } else { + resp.Body = closeTracker{resp.Body, tracer.sp} + } + return resp, nil +} + +// Tracer holds tracing details for one HTTP request. +type Tracer struct { + tr opentracing.Tracer + root opentracing.Span + sp opentracing.Span + opts *clientOptions +} + +func (h *Tracer) start(req *http.Request) opentracing.Span { + if h.root == nil { + parent := opentracing.SpanFromContext(req.Context()) + var spanctx opentracing.SpanContext + if parent != nil { + spanctx = parent.Context() + } + operationName := h.opts.operationName + if operationName == "" { + operationName = "HTTP Client" + } + root := h.tr.StartSpan(operationName, opentracing.ChildOf(spanctx)) + h.root = root + } + + ctx := h.root.Context() + h.sp = h.tr.StartSpan("HTTP "+req.Method, opentracing.ChildOf(ctx)) + ext.SpanKindRPCClient.Set(h.sp) + + componentName := h.opts.componentName + if componentName == "" { + componentName = defaultComponentName + } + ext.Component.Set(h.sp, componentName) + + return h.sp +} + +// Finish finishes the span of the traced request. +func (h *Tracer) Finish() { + if h.root != nil { + h.root.Finish() + } +} + +// Span returns the root span of the traced request. This function +// should only be called after the request has been executed. 
+func (h *Tracer) Span() opentracing.Span { + return h.root +} + +func (h *Tracer) clientTrace() *httptrace.ClientTrace { + return &httptrace.ClientTrace{ + GetConn: h.getConn, + GotConn: h.gotConn, + PutIdleConn: h.putIdleConn, + GotFirstResponseByte: h.gotFirstResponseByte, + Got100Continue: h.got100Continue, + DNSStart: h.dnsStart, + DNSDone: h.dnsDone, + ConnectStart: h.connectStart, + ConnectDone: h.connectDone, + WroteHeaders: h.wroteHeaders, + Wait100Continue: h.wait100Continue, + WroteRequest: h.wroteRequest, + } +} + +func (h *Tracer) getConn(hostPort string) { + ext.HTTPUrl.Set(h.sp, hostPort) + h.sp.LogFields(log.String("event", "GetConn")) +} + +func (h *Tracer) gotConn(info httptrace.GotConnInfo) { + h.sp.SetTag("net/http.reused", info.Reused) + h.sp.SetTag("net/http.was_idle", info.WasIdle) + h.sp.LogFields(log.String("event", "GotConn")) +} + +func (h *Tracer) putIdleConn(error) { + h.sp.LogFields(log.String("event", "PutIdleConn")) +} + +func (h *Tracer) gotFirstResponseByte() { + h.sp.LogFields(log.String("event", "GotFirstResponseByte")) +} + +func (h *Tracer) got100Continue() { + h.sp.LogFields(log.String("event", "Got100Continue")) +} + +func (h *Tracer) dnsStart(info httptrace.DNSStartInfo) { + h.sp.LogFields( + log.String("event", "DNSStart"), + log.String("host", info.Host), + ) +} + +func (h *Tracer) dnsDone(info httptrace.DNSDoneInfo) { + fields := []log.Field{log.String("event", "DNSDone")} + for _, addr := range info.Addrs { + fields = append(fields, log.String("addr", addr.String())) + } + if info.Err != nil { + fields = append(fields, log.Error(info.Err)) + } + h.sp.LogFields(fields...) +} + +func (h *Tracer) connectStart(network, addr string) { + h.sp.LogFields( + log.String("event", "ConnectStart"), + log.String("network", network), + log.String("addr", addr), + ) +} + +func (h *Tracer) connectDone(network, addr string, err error) { + if err != nil { + h.sp.LogFields( + log.String("message", "ConnectDone"), + log.String("network", network), + log.String("addr", addr), + log.String("event", "error"), + log.Error(err), + ) + } else { + h.sp.LogFields( + log.String("event", "ConnectDone"), + log.String("network", network), + log.String("addr", addr), + ) + } +} + +func (h *Tracer) wroteHeaders() { + h.sp.LogFields(log.String("event", "WroteHeaders")) +} + +func (h *Tracer) wait100Continue() { + h.sp.LogFields(log.String("event", "Wait100Continue")) +} + +func (h *Tracer) wroteRequest(info httptrace.WroteRequestInfo) { + if info.Err != nil { + h.sp.LogFields( + log.String("message", "WroteRequest"), + log.String("event", "error"), + log.Error(info.Err), + ) + ext.Error.Set(h.sp, true) + } else { + h.sp.LogFields(log.String("event", "WroteRequest")) + } +} diff --git a/vendor/github.com/opentracing-contrib/go-stdlib/nethttp/doc.go b/vendor/github.com/opentracing-contrib/go-stdlib/nethttp/doc.go new file mode 100644 index 00000000..c853ca64 --- /dev/null +++ b/vendor/github.com/opentracing-contrib/go-stdlib/nethttp/doc.go @@ -0,0 +1,3 @@ +// Package nethttp provides OpenTracing instrumentation for the +// net/http package. 
+package nethttp
diff --git a/vendor/github.com/opentracing-contrib/go-stdlib/nethttp/server.go b/vendor/github.com/opentracing-contrib/go-stdlib/nethttp/server.go
new file mode 100644
index 00000000..6e9d8f59
--- /dev/null
+++ b/vendor/github.com/opentracing-contrib/go-stdlib/nethttp/server.go
@@ -0,0 +1,119 @@
+// +build go1.7
+
+package nethttp
+
+import (
+	"net/http"
+
+	opentracing "github.com/opentracing/opentracing-go"
+	"github.com/opentracing/opentracing-go/ext"
+)
+
+type statusCodeTracker struct {
+	http.ResponseWriter
+	status int
+}
+
+func (w *statusCodeTracker) WriteHeader(status int) {
+	w.status = status
+	w.ResponseWriter.WriteHeader(status)
+}
+
+type mwOptions struct {
+	opNameFunc    func(r *http.Request) string
+	spanObserver  func(span opentracing.Span, r *http.Request)
+	componentName string
+}
+
+// MWOption controls the behavior of the Middleware.
+type MWOption func(*mwOptions)
+
+// OperationNameFunc returns a MWOption that uses given function f
+// to generate operation name for each server-side span.
+func OperationNameFunc(f func(r *http.Request) string) MWOption {
+	return func(options *mwOptions) {
+		options.opNameFunc = f
+	}
+}
+
+// MWComponentName returns a MWOption that sets the component name
+// for the server-side span.
+func MWComponentName(componentName string) MWOption {
+	return func(options *mwOptions) {
+		options.componentName = componentName
+	}
+}
+
+// MWSpanObserver returns a MWOption that observes the span
+// for the server-side span.
+func MWSpanObserver(f func(span opentracing.Span, r *http.Request)) MWOption {
+	return func(options *mwOptions) {
+		options.spanObserver = f
+	}
+}
+
+// Middleware wraps an http.Handler and traces incoming requests.
+// Additionally, it adds the span to the request's context.
+//
+// By default, the operation name of the spans is set to "HTTP {method}".
+// This can be overridden with options.
+//
+// Example:
+//   http.ListenAndServe("localhost:80", nethttp.Middleware(tracer, http.DefaultServeMux))
+//
+// The options allow fine tuning the behavior of the middleware.
+//
+// Example:
+//   mw := nethttp.Middleware(
+//      tracer,
+//      http.DefaultServeMux,
+//      nethttp.OperationNameFunc(func(r *http.Request) string {
+//          return "HTTP " + r.Method + ":/api/customers"
+//      }),
+//      nethttp.MWSpanObserver(func(sp opentracing.Span, r *http.Request) {
+//          sp.SetTag("http.uri", r.URL.EscapedPath())
+//      }),
+//   )
func Middleware(tr opentracing.Tracer, h http.Handler, options ...MWOption) http.Handler {
+	return MiddlewareFunc(tr, h.ServeHTTP, options...)
+}
+
+// MiddlewareFunc wraps an http.HandlerFunc and traces incoming requests.
+// It behaves identically to the Middleware function above.
+// +// Example: +// http.ListenAndServe("localhost:80", nethttp.MiddlewareFunc(tracer, MyHandler)) +func MiddlewareFunc(tr opentracing.Tracer, h http.HandlerFunc, options ...MWOption) http.HandlerFunc { + opts := mwOptions{ + opNameFunc: func(r *http.Request) string { + return "HTTP " + r.Method + }, + spanObserver: func(span opentracing.Span, r *http.Request) {}, + } + for _, opt := range options { + opt(&opts) + } + fn := func(w http.ResponseWriter, r *http.Request) { + ctx, _ := tr.Extract(opentracing.HTTPHeaders, opentracing.HTTPHeadersCarrier(r.Header)) + sp := tr.StartSpan(opts.opNameFunc(r), ext.RPCServerOption(ctx)) + ext.HTTPMethod.Set(sp, r.Method) + ext.HTTPUrl.Set(sp, r.URL.String()) + opts.spanObserver(sp, r) + + // set component name, use "net/http" if caller does not specify + componentName := opts.componentName + if componentName == "" { + componentName = defaultComponentName + } + ext.Component.Set(sp, componentName) + + w = &statusCodeTracker{w, 200} + r = r.WithContext(opentracing.ContextWithSpan(r.Context(), sp)) + + h(w, r) + + ext.HTTPStatusCode.Set(sp, uint16(w.(*statusCodeTracker).status)) + sp.Finish() + } + return http.HandlerFunc(fn) +} diff --git a/vendor/github.com/opentracing/opentracing-go/.gitignore b/vendor/github.com/opentracing/opentracing-go/.gitignore new file mode 100644 index 00000000..c57100a5 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/.gitignore @@ -0,0 +1 @@ +coverage.txt diff --git a/vendor/github.com/opentracing/opentracing-go/.travis.yml b/vendor/github.com/opentracing/opentracing-go/.travis.yml new file mode 100644 index 00000000..8d5b75e4 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/.travis.yml @@ -0,0 +1,20 @@ +language: go + +matrix: + include: + - go: "1.11.x" + - go: "1.12.x" + - go: "tip" + env: + - LINT=true + - COVERAGE=true + +install: + - if [ "$LINT" == true ]; then go get -u golang.org/x/lint/golint/... ; else echo 'skipping lint'; fi + - go get -u github.com/stretchr/testify/... + +script: + - make test + - go build ./... 
+ - if [ "$LINT" == true ]; then make lint ; else echo 'skipping lint'; fi + - if [ "$COVERAGE" == true ]; then make cover && bash <(curl -s https://codecov.io/bash) ; else echo 'skipping coverage'; fi diff --git a/vendor/github.com/opentracing/opentracing-go/CHANGELOG.md b/vendor/github.com/opentracing/opentracing-go/CHANGELOG.md new file mode 100644 index 00000000..7c14febe --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/CHANGELOG.md @@ -0,0 +1,46 @@ +Changes by Version +================== + +1.1.0 (2019-03-23) +------------------- + +Notable changes: +- The library is now released under Apache 2.0 license +- Use Set() instead of Add() in HTTPHeadersCarrier is functionally a breaking change (fixes issue [#159](https://github.com/opentracing/opentracing-go/issues/159)) +- 'golang.org/x/net/context' is replaced with 'context' from the standard library + +List of all changes: + +- Export StartSpanFromContextWithTracer (#214) +- Add IsGlobalTracerRegistered() to indicate if a tracer has been registered (#201) +- Use Set() instead of Add() in HTTPHeadersCarrier (#191) +- Update license to Apache 2.0 (#181) +- Replace 'golang.org/x/net/context' with 'context' (#176) +- Port of Python opentracing/harness/api_check.py to Go (#146) +- Fix race condition in MockSpan.Context() (#170) +- Add PeerHostIPv4.SetString() (#155) +- Add a Noop log field type to log to allow for optional fields (#150) + + +1.0.2 (2017-04-26) +------------------- + +- Add more semantic tags (#139) + + +1.0.1 (2017-02-06) +------------------- + +- Correct spelling in comments +- Address race in nextMockID() (#123) +- log: avoid panic marshaling nil error (#131) +- Deprecate InitGlobalTracer in favor of SetGlobalTracer (#128) +- Drop Go 1.5 that fails in Travis (#129) +- Add convenience methods Key() and Value() to log.Field +- Add convenience methods to log.Field (2 years, 6 months ago) + +1.0.0 (2016-09-26) +------------------- + +- This release implements OpenTracing Specification 1.0 (https://opentracing.io/spec) + diff --git a/vendor/github.com/opentracing/opentracing-go/LICENSE b/vendor/github.com/opentracing/opentracing-go/LICENSE new file mode 100644 index 00000000..f0027349 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016 The OpenTracing Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/opentracing/opentracing-go/Makefile b/vendor/github.com/opentracing/opentracing-go/Makefile new file mode 100644 index 00000000..62abb63f --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/Makefile @@ -0,0 +1,20 @@ +.DEFAULT_GOAL := test-and-lint + +.PHONY: test-and-lint +test-and-lint: test lint + +.PHONY: test +test: + go test -v -cover -race ./... + +.PHONY: cover +cover: + go test -v -coverprofile=coverage.txt -covermode=atomic -race ./... + +.PHONY: lint +lint: + go fmt ./... + golint ./... + @# Run again with magic to exit non-zero if golint outputs anything. + @! (golint ./... | read dummy) + go vet ./... 
diff --git a/vendor/github.com/opentracing/opentracing-go/README.md b/vendor/github.com/opentracing/opentracing-go/README.md
new file mode 100644
index 00000000..6ef1d7c9
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/README.md
@@ -0,0 +1,171 @@
+[![Gitter chat](http://img.shields.io/badge/gitter-join%20chat%20%E2%86%92-brightgreen.svg)](https://gitter.im/opentracing/public) [![Build Status](https://travis-ci.org/opentracing/opentracing-go.svg?branch=master)](https://travis-ci.org/opentracing/opentracing-go) [![GoDoc](https://godoc.org/github.com/opentracing/opentracing-go?status.svg)](http://godoc.org/github.com/opentracing/opentracing-go)
+[![Sourcegraph Badge](https://sourcegraph.com/github.com/opentracing/opentracing-go/-/badge.svg)](https://sourcegraph.com/github.com/opentracing/opentracing-go?badge)
+
+# OpenTracing API for Go
+
+This package is a Go platform API for OpenTracing.
+
+## Required Reading
+
+In order to understand the Go platform API, one must first be familiar with the
+[OpenTracing project](https://opentracing.io) and, more specifically, its
+[terminology](https://opentracing.io/specification/).
+
+## API overview for those adding instrumentation
+
+Everyday consumers of this `opentracing` package really only need to worry
+about a couple of key abstractions: the `StartSpan` function, the `Span`
+interface, and binding a `Tracer` at `main()`-time. Here are code snippets
+demonstrating some important use cases.
+
+#### Singleton initialization
+
+The simplest starting point is `./default_tracer.go`. As early as possible, call
+
+```go
+    import "github.com/opentracing/opentracing-go"
+    import ".../some_tracing_impl"
+
+    func main() {
+        opentracing.SetGlobalTracer(
+            // tracing impl specific:
+            some_tracing_impl.New(...),
+        )
+        ...
+    }
+```
+
+#### Non-Singleton initialization
+
+If you prefer direct control to singletons, manage ownership of the
+`opentracing.Tracer` implementation explicitly.
+
+#### Creating a Span given an existing Go `context.Context`
+
+If you use `context.Context` in your application, OpenTracing's Go library will
+happily rely on it for `Span` propagation. To start a new (blocking child)
+`Span`, you can use `StartSpanFromContext`.
+
+```go
+    func xyz(ctx context.Context, ...) {
+        ...
+        span, ctx := opentracing.StartSpanFromContext(ctx, "operation_name")
+        defer span.Finish()
+        span.LogFields(
+            log.String("event", "soft error"),
+            log.String("type", "cache timeout"),
+            log.Int("waited.millis", 1500))
+        ...
+    }
+```
+
+#### Starting an empty trace by creating a "root span"
+
+It's always possible to create a "root" `Span` with no parent or other causal
+reference.
+
+```go
+    func xyz() {
+        ...
+        sp := opentracing.StartSpan("operation_name")
+        defer sp.Finish()
+        ...
+    }
+```
+
+#### Creating a (child) Span given an existing (parent) Span
+
+```go
+    func xyz(parentSpan opentracing.Span, ...) {
+        ...
+        sp := opentracing.StartSpan(
+            "operation_name",
+            opentracing.ChildOf(parentSpan.Context()))
+        defer sp.Finish()
+        ...
+    }
+```
+
+#### Serializing to the wire
+
+```go
+    func makeSomeRequest(ctx context.Context) ... {
+        if span := opentracing.SpanFromContext(ctx); span != nil {
+            httpClient := &http.Client{}
+            httpReq, _ := http.NewRequest("GET", "http://myservice/", nil)
+
+            // Transmit the span's TraceContext as HTTP headers on our
+            // outbound request.
+            opentracing.GlobalTracer().Inject(
+                span.Context(),
+                opentracing.HTTPHeaders,
+                opentracing.HTTPHeadersCarrier(httpReq.Header))
+
+            resp, err := httpClient.Do(httpReq)
+            ...
+ } + ... + } +``` + +#### Deserializing from the wire + +```go + http.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) { + var serverSpan opentracing.Span + appSpecificOperationName := ... + wireContext, err := opentracing.GlobalTracer().Extract( + opentracing.HTTPHeaders, + opentracing.HTTPHeadersCarrier(req.Header)) + if err != nil { + // Optionally record something about err here + } + + // Create the span referring to the RPC client if available. + // If wireContext == nil, a root span will be created. + serverSpan = opentracing.StartSpan( + appSpecificOperationName, + ext.RPCServerOption(wireContext)) + + defer serverSpan.Finish() + + ctx := opentracing.ContextWithSpan(context.Background(), serverSpan) + ... + } +``` + +#### Conditionally capture a field using `log.Noop` + +In some situations, you may want to dynamically decide whether or not +to log a field. For example, you may want to capture additional data, +such as a customer ID, in non-production environments: + +```go + func Customer(order *Order) log.Field { + if os.Getenv("ENVIRONMENT") == "dev" { + return log.String("customer", order.Customer.ID) + } + return log.Noop() + } +``` + +#### Goroutine-safety + +The entire public API is goroutine-safe and does not require external +synchronization. + +## API pointers for those implementing a tracing system + +Tracing system implementors may be able to reuse or copy-paste-modify the `basictracer` package, found [here](https://github.com/opentracing/basictracer-go). In particular, see `basictracer.New(...)`. + +## API compatibility + +For the time being, "mild" backwards-incompatible changes may be made without changing the major version number. As OpenTracing and `opentracing-go` mature, backwards compatibility will become more of a priority. + +## Tracer test suite + +A test suite is available in the [harness](https://godoc.org/github.com/opentracing/opentracing-go/harness) package that can assist Tracer implementors to assert that their Tracer is working correctly. + +## Licensing + +[Apache 2.0 License](./LICENSE). diff --git a/vendor/github.com/opentracing/opentracing-go/ext/tags.go b/vendor/github.com/opentracing/opentracing-go/ext/tags.go new file mode 100644 index 00000000..52e88958 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/ext/tags.go @@ -0,0 +1,210 @@ +package ext + +import "github.com/opentracing/opentracing-go" + +// These constants define common tag names recommended for better portability across +// tracing systems and languages/platforms. +// +// The tag names are defined as typed strings, so that in addition to the usual use +// +// span.setTag(TagName, value) +// +// they also support value type validation via this additional syntax: +// +// TagName.Set(span, value) +// +var ( + ////////////////////////////////////////////////////////////////////// + // SpanKind (client/server or producer/consumer) + ////////////////////////////////////////////////////////////////////// + + // SpanKind hints at relationship between spans, e.g. 
client/server
+	SpanKind = spanKindTagName("span.kind")
+
+	// SpanKindRPCClient marks a span representing the client-side of an RPC
+	// or other remote call
+	SpanKindRPCClientEnum = SpanKindEnum("client")
+	SpanKindRPCClient = opentracing.Tag{Key: string(SpanKind), Value: SpanKindRPCClientEnum}
+
+	// SpanKindRPCServer marks a span representing the server-side of an RPC
+	// or other remote call
+	SpanKindRPCServerEnum = SpanKindEnum("server")
+	SpanKindRPCServer = opentracing.Tag{Key: string(SpanKind), Value: SpanKindRPCServerEnum}
+
+	// SpanKindProducer marks a span representing the producer-side of a
+	// message bus
+	SpanKindProducerEnum = SpanKindEnum("producer")
+	SpanKindProducer = opentracing.Tag{Key: string(SpanKind), Value: SpanKindProducerEnum}
+
+	// SpanKindConsumer marks a span representing the consumer-side of a
+	// message bus
+	SpanKindConsumerEnum = SpanKindEnum("consumer")
+	SpanKindConsumer = opentracing.Tag{Key: string(SpanKind), Value: SpanKindConsumerEnum}
+
+	//////////////////////////////////////////////////////////////////////
+	// Component name
+	//////////////////////////////////////////////////////////////////////
+
+	// Component is a low-cardinality identifier of the module, library,
+	// or package that is generating a span.
+	Component = stringTagName("component")
+
+	//////////////////////////////////////////////////////////////////////
+	// Sampling hint
+	//////////////////////////////////////////////////////////////////////
+
+	// SamplingPriority determines the priority of sampling this Span.
+	SamplingPriority = uint16TagName("sampling.priority")
+
+	//////////////////////////////////////////////////////////////////////
+	// Peer tags. These tags can be emitted by either the client side or
+	// the server side to describe the other side/service in peer-to-peer
+	// communications, like an RPC call.
+	//////////////////////////////////////////////////////////////////////
+
+	// PeerService records the service name of the peer.
+	PeerService = stringTagName("peer.service")
+
+	// PeerAddress records the address name of the peer. This may be an "ip:port",
+	// a bare "hostname", a FQDN, or even a database DSN substring
+	// like "mysql://username@127.0.0.1:3306/dbname"
+	PeerAddress = stringTagName("peer.address")
+
+	// PeerHostname records the host name of the peer
+	PeerHostname = stringTagName("peer.hostname")
+
+	// PeerHostIPv4 records IP v4 host address of the peer
+	PeerHostIPv4 = ipv4Tag("peer.ipv4")
+
+	// PeerHostIPv6 records IP v6 host address of the peer
+	PeerHostIPv6 = stringTagName("peer.ipv6")
+
+	// PeerPort records port number of the peer
+	PeerPort = uint16TagName("peer.port")
+
+	//////////////////////////////////////////////////////////////////////
+	// HTTP Tags
+	//////////////////////////////////////////////////////////////////////
+
+	// HTTPUrl should be the URL of the request being handled in this segment
+	// of the trace, in standard URI format. The protocol is optional.
+	HTTPUrl = stringTagName("http.url")
+
+	// HTTPMethod is the HTTP method of the request, and is case-insensitive.
+	HTTPMethod = stringTagName("http.method")
+
+	// HTTPStatusCode is the numeric HTTP status code (200, 404, etc) of the
+	// HTTP response.
+	HTTPStatusCode = uint16TagName("http.status_code")
+
+	//////////////////////////////////////////////////////////////////////
+	// DB Tags
+	//////////////////////////////////////////////////////////////////////
+
+	// DBInstance is the database instance name.
+	DBInstance = stringTagName("db.instance")
+
+	// DBStatement is a database statement for the given database type.
+	// It can be a query or a prepared statement (i.e., before substitution).
+	DBStatement = stringTagName("db.statement")
+
+	// DBType is a database type. For any SQL database, "sql".
+	// For others, the lower-case database category, e.g. "redis"
+	DBType = stringTagName("db.type")
+
+	// DBUser is a username for accessing the database.
+	DBUser = stringTagName("db.user")
+
+	//////////////////////////////////////////////////////////////////////
+	// Message Bus Tag
+	//////////////////////////////////////////////////////////////////////
+
+	// MessageBusDestination is an address at which messages can be exchanged
+	MessageBusDestination = stringTagName("message_bus.destination")
+
+	//////////////////////////////////////////////////////////////////////
+	// Error Tag
+	//////////////////////////////////////////////////////////////////////
+
+	// Error indicates that the operation represented by the span resulted in an error.
+	Error = boolTagName("error")
+)
+
+// ---
+
+// SpanKindEnum represents common span types
+type SpanKindEnum string
+
+type spanKindTagName string
+
+// Set adds a string tag to the `span`
+func (tag spanKindTagName) Set(span opentracing.Span, value SpanKindEnum) {
+	span.SetTag(string(tag), value)
+}
+
+type rpcServerOption struct {
+	clientContext opentracing.SpanContext
+}
+
+func (r rpcServerOption) Apply(o *opentracing.StartSpanOptions) {
+	if r.clientContext != nil {
+		opentracing.ChildOf(r.clientContext).Apply(o)
+	}
+	SpanKindRPCServer.Apply(o)
+}
+
+// RPCServerOption returns a StartSpanOption appropriate for an RPC server span
+// with `client` representing the metadata for the remote peer Span if available.
+// In case client == nil, due to the client not being instrumented, this RPC
+// server span will be a root span.
+func RPCServerOption(client opentracing.SpanContext) opentracing.StartSpanOption {
+	return rpcServerOption{client}
+}
+
+// ---
+
+type stringTagName string
+
+// Set adds a string tag to the `span`
+func (tag stringTagName) Set(span opentracing.Span, value string) {
+	span.SetTag(string(tag), value)
+}
+
+// ---
+
+type uint32TagName string
+
+// Set adds a uint32 tag to the `span`
+func (tag uint32TagName) Set(span opentracing.Span, value uint32) {
+	span.SetTag(string(tag), value)
+}
+
+// ---
+
+type uint16TagName string
+
+// Set adds a uint16 tag to the `span`
+func (tag uint16TagName) Set(span opentracing.Span, value uint16) {
+	span.SetTag(string(tag), value)
+}
+
+// ---
+
+type boolTagName string
+
+// Set adds a bool tag to the `span`
+func (tag boolTagName) Set(span opentracing.Span, value bool) {
+	span.SetTag(string(tag), value)
+}
+
+type ipv4Tag string
+
+// Set adds the IP v4 host address of the peer as a uint32 value to the `span`;
+// kept for backward and Zipkin compatibility
+func (tag ipv4Tag) Set(span opentracing.Span, value uint32) {
+	span.SetTag(string(tag), value)
+}
+
+// SetString records the IP v4 host address of the peer as a .-separated tuple to the `span`.
E.g., "127.0.0.1" +func (tag ipv4Tag) SetString(span opentracing.Span, value string) { + span.SetTag(string(tag), value) +} diff --git a/vendor/github.com/opentracing/opentracing-go/globaltracer.go b/vendor/github.com/opentracing/opentracing-go/globaltracer.go new file mode 100644 index 00000000..4f7066a9 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/globaltracer.go @@ -0,0 +1,42 @@ +package opentracing + +type registeredTracer struct { + tracer Tracer + isRegistered bool +} + +var ( + globalTracer = registeredTracer{NoopTracer{}, false} +) + +// SetGlobalTracer sets the [singleton] opentracing.Tracer returned by +// GlobalTracer(). Those who use GlobalTracer (rather than directly manage an +// opentracing.Tracer instance) should call SetGlobalTracer as early as +// possible in main(), prior to calling the `StartSpan` global func below. +// Prior to calling `SetGlobalTracer`, any Spans started via the `StartSpan` +// (etc) globals are noops. +func SetGlobalTracer(tracer Tracer) { + globalTracer = registeredTracer{tracer, true} +} + +// GlobalTracer returns the global singleton `Tracer` implementation. +// Before `SetGlobalTracer()` is called, the `GlobalTracer()` is a noop +// implementation that drops all data handed to it. +func GlobalTracer() Tracer { + return globalTracer.tracer +} + +// StartSpan defers to `Tracer.StartSpan`. See `GlobalTracer()`. +func StartSpan(operationName string, opts ...StartSpanOption) Span { + return globalTracer.tracer.StartSpan(operationName, opts...) +} + +// InitGlobalTracer is deprecated. Please use SetGlobalTracer. +func InitGlobalTracer(tracer Tracer) { + SetGlobalTracer(tracer) +} + +// IsGlobalTracerRegistered returns a `bool` to indicate if a tracer has been globally registered +func IsGlobalTracerRegistered() bool { + return globalTracer.isRegistered +} diff --git a/vendor/github.com/opentracing/opentracing-go/gocontext.go b/vendor/github.com/opentracing/opentracing-go/gocontext.go new file mode 100644 index 00000000..08c00c04 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/gocontext.go @@ -0,0 +1,60 @@ +package opentracing + +import "context" + +type contextKey struct{} + +var activeSpanKey = contextKey{} + +// ContextWithSpan returns a new `context.Context` that holds a reference to +// `span`'s SpanContext. +func ContextWithSpan(ctx context.Context, span Span) context.Context { + return context.WithValue(ctx, activeSpanKey, span) +} + +// SpanFromContext returns the `Span` previously associated with `ctx`, or +// `nil` if no such `Span` could be found. +// +// NOTE: context.Context != SpanContext: the former is Go's intra-process +// context propagation mechanism, and the latter houses OpenTracing's per-Span +// identity and baggage information. +func SpanFromContext(ctx context.Context) Span { + val := ctx.Value(activeSpanKey) + if sp, ok := val.(Span); ok { + return sp + } + return nil +} + +// StartSpanFromContext starts and returns a Span with `operationName`, using +// any Span found within `ctx` as a ChildOfRef. If no such parent could be +// found, StartSpanFromContext creates a root (parentless) Span. +// +// The second return value is a context.Context object built around the +// returned Span. +// +// Example usage: +// +// SomeFunction(ctx context.Context, ...) { +// sp, ctx := opentracing.StartSpanFromContext(ctx, "SomeFunction") +// defer sp.Finish() +// ... 
+//    }
+func StartSpanFromContext(ctx context.Context, operationName string, opts ...StartSpanOption) (Span, context.Context) {
+	return StartSpanFromContextWithTracer(ctx, GlobalTracer(), operationName, opts...)
+}
+
+// StartSpanFromContextWithTracer starts and returns a span with `operationName`
+// using a span found within the context as a ChildOfRef. If that doesn't exist
+// it creates a root span. It also returns a context.Context object built
+// around the returned span.
+//
+// Its behavior is identical to StartSpanFromContext except that it takes an explicit
+// tracer as opposed to using the global tracer.
func StartSpanFromContextWithTracer(ctx context.Context, tracer Tracer, operationName string, opts ...StartSpanOption) (Span, context.Context) {
+	if parentSpan := SpanFromContext(ctx); parentSpan != nil {
+		opts = append(opts, ChildOf(parentSpan.Context()))
+	}
+	span := tracer.StartSpan(operationName, opts...)
+	return span, ContextWithSpan(ctx, span)
+}
diff --git a/vendor/github.com/opentracing/opentracing-go/log/field.go b/vendor/github.com/opentracing/opentracing-go/log/field.go
new file mode 100644
index 00000000..50feea34
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/log/field.go
@@ -0,0 +1,269 @@
+package log
+
+import (
+	"fmt"
+	"math"
+)
+
+type fieldType int
+
+const (
+	stringType fieldType = iota
+	boolType
+	intType
+	int32Type
+	uint32Type
+	int64Type
+	uint64Type
+	float32Type
+	float64Type
+	errorType
+	objectType
+	lazyLoggerType
+	noopType
+)
+
+// Field instances are constructed via Bool, String, and so on.
+// Tracing implementations may then handle them via the Field.Marshal
+// method.
+//
+// "heavily influenced by" (i.e., partially stolen from)
+// https://github.com/uber-go/zap
+type Field struct {
+	key          string
+	fieldType    fieldType
+	numericVal   int64
+	stringVal    string
+	interfaceVal interface{}
+}
+
+// String adds a string-valued key:value pair to a Span.LogFields() record
+func String(key, val string) Field {
+	return Field{
+		key:       key,
+		fieldType: stringType,
+		stringVal: val,
+	}
+}
+
+// Bool adds a bool-valued key:value pair to a Span.LogFields() record
+func Bool(key string, val bool) Field {
+	var numericVal int64
+	if val {
+		numericVal = 1
+	}
+	return Field{
+		key:        key,
+		fieldType:  boolType,
+		numericVal: numericVal,
+	}
+}
+
+// Int adds an int-valued key:value pair to a Span.LogFields() record
+func Int(key string, val int) Field {
+	return Field{
+		key:        key,
+		fieldType:  intType,
+		numericVal: int64(val),
+	}
+}
+
+// Int32 adds an int32-valued key:value pair to a Span.LogFields() record
+func Int32(key string, val int32) Field {
+	return Field{
+		key:        key,
+		fieldType:  int32Type,
+		numericVal: int64(val),
+	}
+}
+
+// Int64 adds an int64-valued key:value pair to a Span.LogFields() record
+func Int64(key string, val int64) Field {
+	return Field{
+		key:        key,
+		fieldType:  int64Type,
+		numericVal: val,
+	}
+}
+
+// Uint32 adds a uint32-valued key:value pair to a Span.LogFields() record
+func Uint32(key string, val uint32) Field {
+	return Field{
+		key:        key,
+		fieldType:  uint32Type,
+		numericVal: int64(val),
+	}
+}
+
+// Uint64 adds a uint64-valued key:value pair to a Span.LogFields() record
+func Uint64(key string, val uint64) Field {
+	return Field{
+		key:        key,
+		fieldType:  uint64Type,
+		numericVal: int64(val),
+	}
+}
+
+// Float32 adds a float32-valued key:value pair to a Span.LogFields() record
+func Float32(key string, val float32) Field {
+	return Field{
+		key:        key,
+		fieldType:  float32Type,
numericVal: int64(math.Float32bits(val)), + } +} + +// Float64 adds a float64-valued key:value pair to a Span.LogFields() record +func Float64(key string, val float64) Field { + return Field{ + key: key, + fieldType: float64Type, + numericVal: int64(math.Float64bits(val)), + } +} + +// Error adds an error with the key "error" to a Span.LogFields() record +func Error(err error) Field { + return Field{ + key: "error", + fieldType: errorType, + interfaceVal: err, + } +} + +// Object adds an object-valued key:value pair to a Span.LogFields() record +func Object(key string, obj interface{}) Field { + return Field{ + key: key, + fieldType: objectType, + interfaceVal: obj, + } +} + +// LazyLogger allows for user-defined, late-bound logging of arbitrary data +type LazyLogger func(fv Encoder) + +// Lazy adds a LazyLogger to a Span.LogFields() record; the tracing +// implementation will call the LazyLogger function at an indefinite time in +// the future (after Lazy() returns). +func Lazy(ll LazyLogger) Field { + return Field{ + fieldType: lazyLoggerType, + interfaceVal: ll, + } +} + +// Noop creates a no-op log field that should be ignored by the tracer. +// It can be used to capture optional fields, for example those that should +// only be logged in non-production environment: +// +// func customerField(order *Order) log.Field { +// if os.Getenv("ENVIRONMENT") == "dev" { +// return log.String("customer", order.Customer.ID) +// } +// return log.Noop() +// } +// +// span.LogFields(log.String("event", "purchase"), customerField(order)) +// +func Noop() Field { + return Field{ + fieldType: noopType, + } +} + +// Encoder allows access to the contents of a Field (via a call to +// Field.Marshal). +// +// Tracer implementations typically provide an implementation of Encoder; +// OpenTracing callers typically do not need to concern themselves with it. +type Encoder interface { + EmitString(key, value string) + EmitBool(key string, value bool) + EmitInt(key string, value int) + EmitInt32(key string, value int32) + EmitInt64(key string, value int64) + EmitUint32(key string, value uint32) + EmitUint64(key string, value uint64) + EmitFloat32(key string, value float32) + EmitFloat64(key string, value float64) + EmitObject(key string, value interface{}) + EmitLazyLogger(value LazyLogger) +} + +// Marshal passes a Field instance through to the appropriate +// field-type-specific method of an Encoder. +func (lf Field) Marshal(visitor Encoder) { + switch lf.fieldType { + case stringType: + visitor.EmitString(lf.key, lf.stringVal) + case boolType: + visitor.EmitBool(lf.key, lf.numericVal != 0) + case intType: + visitor.EmitInt(lf.key, int(lf.numericVal)) + case int32Type: + visitor.EmitInt32(lf.key, int32(lf.numericVal)) + case int64Type: + visitor.EmitInt64(lf.key, int64(lf.numericVal)) + case uint32Type: + visitor.EmitUint32(lf.key, uint32(lf.numericVal)) + case uint64Type: + visitor.EmitUint64(lf.key, uint64(lf.numericVal)) + case float32Type: + visitor.EmitFloat32(lf.key, math.Float32frombits(uint32(lf.numericVal))) + case float64Type: + visitor.EmitFloat64(lf.key, math.Float64frombits(uint64(lf.numericVal))) + case errorType: + if err, ok := lf.interfaceVal.(error); ok { + visitor.EmitString(lf.key, err.Error()) + } else { + visitor.EmitString(lf.key, "") + } + case objectType: + visitor.EmitObject(lf.key, lf.interfaceVal) + case lazyLoggerType: + visitor.EmitLazyLogger(lf.interfaceVal.(LazyLogger)) + case noopType: + // intentionally left blank + } +} + +// Key returns the field's key. 
+func (lf Field) Key() string { + return lf.key +} + +// Value returns the field's value as interface{}. +func (lf Field) Value() interface{} { + switch lf.fieldType { + case stringType: + return lf.stringVal + case boolType: + return lf.numericVal != 0 + case intType: + return int(lf.numericVal) + case int32Type: + return int32(lf.numericVal) + case int64Type: + return int64(lf.numericVal) + case uint32Type: + return uint32(lf.numericVal) + case uint64Type: + return uint64(lf.numericVal) + case float32Type: + return math.Float32frombits(uint32(lf.numericVal)) + case float64Type: + return math.Float64frombits(uint64(lf.numericVal)) + case errorType, objectType, lazyLoggerType: + return lf.interfaceVal + case noopType: + return nil + default: + return nil + } +} + +// String returns a string representation of the key and value. +func (lf Field) String() string { + return fmt.Sprint(lf.key, ":", lf.Value()) +} diff --git a/vendor/github.com/opentracing/opentracing-go/log/util.go b/vendor/github.com/opentracing/opentracing-go/log/util.go new file mode 100644 index 00000000..3832feb5 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/log/util.go @@ -0,0 +1,54 @@ +package log + +import "fmt" + +// InterleavedKVToFields converts keyValues a la Span.LogKV() to a Field slice +// a la Span.LogFields(). +func InterleavedKVToFields(keyValues ...interface{}) ([]Field, error) { + if len(keyValues)%2 != 0 { + return nil, fmt.Errorf("non-even keyValues len: %d", len(keyValues)) + } + fields := make([]Field, len(keyValues)/2) + for i := 0; i*2 < len(keyValues); i++ { + key, ok := keyValues[i*2].(string) + if !ok { + return nil, fmt.Errorf( + "non-string key (pair #%d): %T", + i, keyValues[i*2]) + } + switch typedVal := keyValues[i*2+1].(type) { + case bool: + fields[i] = Bool(key, typedVal) + case string: + fields[i] = String(key, typedVal) + case int: + fields[i] = Int(key, typedVal) + case int8: + fields[i] = Int32(key, int32(typedVal)) + case int16: + fields[i] = Int32(key, int32(typedVal)) + case int32: + fields[i] = Int32(key, typedVal) + case int64: + fields[i] = Int64(key, typedVal) + case uint: + fields[i] = Uint64(key, uint64(typedVal)) + case uint64: + fields[i] = Uint64(key, typedVal) + case uint8: + fields[i] = Uint32(key, uint32(typedVal)) + case uint16: + fields[i] = Uint32(key, uint32(typedVal)) + case uint32: + fields[i] = Uint32(key, typedVal) + case float32: + fields[i] = Float32(key, typedVal) + case float64: + fields[i] = Float64(key, typedVal) + default: + // When in doubt, coerce to a string + fields[i] = String(key, fmt.Sprint(typedVal)) + } + } + return fields, nil +} diff --git a/vendor/github.com/opentracing/opentracing-go/noop.go b/vendor/github.com/opentracing/opentracing-go/noop.go new file mode 100644 index 00000000..0d32f692 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/noop.go @@ -0,0 +1,64 @@ +package opentracing + +import "github.com/opentracing/opentracing-go/log" + +// A NoopTracer is a trivial, minimum overhead implementation of Tracer +// for which all operations are no-ops. +// +// The primary use of this implementation is in libraries, such as RPC +// frameworks, that make tracing an optional feature controlled by the +// end user. A no-op implementation allows said libraries to use it +// as the default Tracer and to write instrumentation that does +// not need to keep checking if the tracer instance is nil. 
+// +// For the same reason, the NoopTracer is the default "global" tracer +// (see GlobalTracer and SetGlobalTracer functions). +// +// WARNING: NoopTracer does not support baggage propagation. +type NoopTracer struct{} + +type noopSpan struct{} +type noopSpanContext struct{} + +var ( + defaultNoopSpanContext = noopSpanContext{} + defaultNoopSpan = noopSpan{} + defaultNoopTracer = NoopTracer{} +) + +const ( + emptyString = "" +) + +// noopSpanContext: +func (n noopSpanContext) ForeachBaggageItem(handler func(k, v string) bool) {} + +// noopSpan: +func (n noopSpan) Context() SpanContext { return defaultNoopSpanContext } +func (n noopSpan) SetBaggageItem(key, val string) Span { return defaultNoopSpan } +func (n noopSpan) BaggageItem(key string) string { return emptyString } +func (n noopSpan) SetTag(key string, value interface{}) Span { return n } +func (n noopSpan) LogFields(fields ...log.Field) {} +func (n noopSpan) LogKV(keyVals ...interface{}) {} +func (n noopSpan) Finish() {} +func (n noopSpan) FinishWithOptions(opts FinishOptions) {} +func (n noopSpan) SetOperationName(operationName string) Span { return n } +func (n noopSpan) Tracer() Tracer { return defaultNoopTracer } +func (n noopSpan) LogEvent(event string) {} +func (n noopSpan) LogEventWithPayload(event string, payload interface{}) {} +func (n noopSpan) Log(data LogData) {} + +// StartSpan belongs to the Tracer interface. +func (n NoopTracer) StartSpan(operationName string, opts ...StartSpanOption) Span { + return defaultNoopSpan +} + +// Inject belongs to the Tracer interface. +func (n NoopTracer) Inject(sp SpanContext, format interface{}, carrier interface{}) error { + return nil +} + +// Extract belongs to the Tracer interface. +func (n NoopTracer) Extract(format interface{}, carrier interface{}) (SpanContext, error) { + return nil, ErrSpanContextNotFound +} diff --git a/vendor/github.com/opentracing/opentracing-go/propagation.go b/vendor/github.com/opentracing/opentracing-go/propagation.go new file mode 100644 index 00000000..b0c275eb --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/propagation.go @@ -0,0 +1,176 @@ +package opentracing + +import ( + "errors" + "net/http" +) + +/////////////////////////////////////////////////////////////////////////////// +// CORE PROPAGATION INTERFACES: +/////////////////////////////////////////////////////////////////////////////// + +var ( + // ErrUnsupportedFormat occurs when the `format` passed to Tracer.Inject() or + // Tracer.Extract() is not recognized by the Tracer implementation. + ErrUnsupportedFormat = errors.New("opentracing: Unknown or unsupported Inject/Extract format") + + // ErrSpanContextNotFound occurs when the `carrier` passed to + // Tracer.Extract() is valid and uncorrupted but has insufficient + // information to extract a SpanContext. + ErrSpanContextNotFound = errors.New("opentracing: SpanContext not found in Extract carrier") + + // ErrInvalidSpanContext errors occur when Tracer.Inject() is asked to + // operate on a SpanContext which it is not prepared to handle (for + // example, since it was created by a different tracer implementation). + ErrInvalidSpanContext = errors.New("opentracing: SpanContext type incompatible with tracer") + + // ErrInvalidCarrier errors occur when Tracer.Inject() or Tracer.Extract() + // implementations expect a different type of `carrier` than they are + // given. 
+ ErrInvalidCarrier = errors.New("opentracing: Invalid Inject/Extract carrier") + + // ErrSpanContextCorrupted occurs when the `carrier` passed to + // Tracer.Extract() is of the expected type but is corrupted. + ErrSpanContextCorrupted = errors.New("opentracing: SpanContext data corrupted in Extract carrier") +) + +/////////////////////////////////////////////////////////////////////////////// +// BUILTIN PROPAGATION FORMATS: +/////////////////////////////////////////////////////////////////////////////// + +// BuiltinFormat is used to demarcate the values within package `opentracing` +// that are intended for use with the Tracer.Inject() and Tracer.Extract() +// methods. +type BuiltinFormat byte + +const ( + // Binary represents SpanContexts as opaque binary data. + // + // For Tracer.Inject(): the carrier must be an `io.Writer`. + // + // For Tracer.Extract(): the carrier must be an `io.Reader`. + Binary BuiltinFormat = iota + + // TextMap represents SpanContexts as key:value string pairs. + // + // Unlike HTTPHeaders, the TextMap format does not restrict the key or + // value character sets in any way. + // + // For Tracer.Inject(): the carrier must be a `TextMapWriter`. + // + // For Tracer.Extract(): the carrier must be a `TextMapReader`. + TextMap + + // HTTPHeaders represents SpanContexts as HTTP header string pairs. + // + // Unlike TextMap, the HTTPHeaders format requires that the keys and values + // be valid as HTTP headers as-is (i.e., character casing may be unstable + // and special characters are disallowed in keys, values should be + // URL-escaped, etc). + // + // For Tracer.Inject(): the carrier must be a `TextMapWriter`. + // + // For Tracer.Extract(): the carrier must be a `TextMapReader`. + // + // See HTTPHeadersCarrier for an implementation of both TextMapWriter + // and TextMapReader that defers to an http.Header instance for storage. + // For example, Inject(): + // + // carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) + // err := span.Tracer().Inject( + // span.Context(), opentracing.HTTPHeaders, carrier) + // + // Or Extract(): + // + // carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) + // clientContext, err := tracer.Extract( + // opentracing.HTTPHeaders, carrier) + // + HTTPHeaders +) + +// TextMapWriter is the Inject() carrier for the TextMap builtin format. With +// it, the caller can encode a SpanContext for propagation as entries in a map +// of unicode strings. +type TextMapWriter interface { + // Set a key:value pair to the carrier. Multiple calls to Set() for the + // same key leads to undefined behavior. + // + // NOTE: The backing store for the TextMapWriter may contain data unrelated + // to SpanContext. As such, Inject() and Extract() implementations that + // call the TextMapWriter and TextMapReader interfaces must agree on a + // prefix or other convention to distinguish their own key:value pairs. + Set(key, val string) +} + +// TextMapReader is the Extract() carrier for the TextMap builtin format. With it, +// the caller can decode a propagated SpanContext as entries in a map of +// unicode strings. +type TextMapReader interface { + // ForeachKey returns TextMap contents via repeated calls to the `handler` + // function. If any call to `handler` returns a non-nil error, ForeachKey + // terminates and returns that error. + // + // NOTE: The backing store for the TextMapReader may contain data unrelated + // to SpanContext. 
As such, Inject() and Extract() implementations that + // call the TextMapWriter and TextMapReader interfaces must agree on a + // prefix or other convention to distinguish their own key:value pairs. + // + // The "foreach" callback pattern reduces unnecessary copying in some cases + // and also allows implementations to hold locks while the map is read. + ForeachKey(handler func(key, val string) error) error +} + +// TextMapCarrier allows the use of regular map[string]string +// as both TextMapWriter and TextMapReader. +type TextMapCarrier map[string]string + +// ForeachKey conforms to the TextMapReader interface. +func (c TextMapCarrier) ForeachKey(handler func(key, val string) error) error { + for k, v := range c { + if err := handler(k, v); err != nil { + return err + } + } + return nil +} + +// Set implements Set() of opentracing.TextMapWriter +func (c TextMapCarrier) Set(key, val string) { + c[key] = val +} + +// HTTPHeadersCarrier satisfies both TextMapWriter and TextMapReader. +// +// Example usage for server side: +// +// carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) +// clientContext, err := tracer.Extract(opentracing.HTTPHeaders, carrier) +// +// Example usage for client side: +// +// carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) +// err := tracer.Inject( +// span.Context(), +// opentracing.HTTPHeaders, +// carrier) +// +type HTTPHeadersCarrier http.Header + +// Set conforms to the TextMapWriter interface. +func (c HTTPHeadersCarrier) Set(key, val string) { + h := http.Header(c) + h.Set(key, val) +} + +// ForeachKey conforms to the TextMapReader interface. +func (c HTTPHeadersCarrier) ForeachKey(handler func(key, val string) error) error { + for k, vals := range c { + for _, v := range vals { + if err := handler(k, v); err != nil { + return err + } + } + } + return nil +} diff --git a/vendor/github.com/opentracing/opentracing-go/span.go b/vendor/github.com/opentracing/opentracing-go/span.go new file mode 100644 index 00000000..0d3fb534 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/span.go @@ -0,0 +1,189 @@ +package opentracing + +import ( + "time" + + "github.com/opentracing/opentracing-go/log" +) + +// SpanContext represents Span state that must propagate to descendant Spans and across process +// boundaries (e.g., a tuple). +type SpanContext interface { + // ForeachBaggageItem grants access to all baggage items stored in the + // SpanContext. + // The handler function will be called for each baggage key/value pair. + // The ordering of items is not guaranteed. + // + // The bool return value indicates if the handler wants to continue iterating + // through the rest of the baggage items; for example if the handler is trying to + // find some baggage item by pattern matching the name, it can return false + // as soon as the item is found to stop further iterations. + ForeachBaggageItem(handler func(k, v string) bool) +} + +// Span represents an active, un-finished span in the OpenTracing system. +// +// Spans are created by the Tracer interface. +type Span interface { + // Sets the end timestamp and finalizes Span state. + // + // With the exception of calls to Context() (which are always allowed), + // Finish() must be the last call made to any span instance, and to do + // otherwise leads to undefined behavior. + Finish() + // FinishWithOptions is like Finish() but with explicit control over + // timestamps and log data. + FinishWithOptions(opts FinishOptions) + + // Context() yields the SpanContext for this Span. 
Note that the return + // value of Context() is still valid after a call to Span.Finish(), as is + // a call to Span.Context() after a call to Span.Finish(). + Context() SpanContext + + // Sets or changes the operation name. + // + // Returns a reference to this Span for chaining. + SetOperationName(operationName string) Span + + // Adds a tag to the span. + // + // If there is a pre-existing tag set for `key`, it is overwritten. + // + // Tag values can be numeric types, strings, or bools. The behavior of + // other tag value types is undefined at the OpenTracing level. If a + // tracing system does not know how to handle a particular value type, it + // may ignore the tag, but shall not panic. + // + // Returns a reference to this Span for chaining. + SetTag(key string, value interface{}) Span + + // LogFields is an efficient and type-checked way to record key:value + // logging data about a Span, though the programming interface is a little + // more verbose than LogKV(). Here's an example: + // + // span.LogFields( + // log.String("event", "soft error"), + // log.String("type", "cache timeout"), + // log.Int("waited.millis", 1500)) + // + // Also see Span.FinishWithOptions() and FinishOptions.BulkLogData. + LogFields(fields ...log.Field) + + // LogKV is a concise, readable way to record key:value logging data about + // a Span, though unfortunately this also makes it less efficient and less + // type-safe than LogFields(). Here's an example: + // + // span.LogKV( + // "event", "soft error", + // "type", "cache timeout", + // "waited.millis", 1500) + // + // For LogKV (as opposed to LogFields()), the parameters must appear as + // key-value pairs, like + // + // span.LogKV(key1, val1, key2, val2, key3, val3, ...) + // + // The keys must all be strings. The values may be strings, numeric types, + // bools, Go error instances, or arbitrary structs. + // + // (Note to implementors: consider the log.InterleavedKVToFields() helper) + LogKV(alternatingKeyValues ...interface{}) + + // SetBaggageItem sets a key:value pair on this Span and its SpanContext + // that also propagates to descendants of this Span. + // + // SetBaggageItem() enables powerful functionality given a full-stack + // opentracing integration (e.g., arbitrary application data from a mobile + // app can make it, transparently, all the way into the depths of a storage + // system), and with it some powerful costs: use this feature with care. + // + // IMPORTANT NOTE #1: SetBaggageItem() will only propagate baggage items to + // *future* causal descendants of the associated Span. + // + // IMPORTANT NOTE #2: Use this thoughtfully and with care. Every key and + // value is copied into every local *and remote* child of the associated + // Span, and that can add up to a lot of network and cpu overhead. + // + // Returns a reference to this Span for chaining. + SetBaggageItem(restrictedKey, value string) Span + + // Gets the value for a baggage item given its key. Returns the empty string + // if the value isn't found in this Span. + BaggageItem(restrictedKey string) string + + // Provides access to the Tracer that created this Span. + Tracer() Tracer + + // Deprecated: use LogFields or LogKV + LogEvent(event string) + // Deprecated: use LogFields or LogKV + LogEventWithPayload(event string, payload interface{}) + // Deprecated: use LogFields or LogKV + Log(data LogData) +} + +// LogRecord is data associated with a single Span log. Every LogRecord +// instance must specify at least one Field. 
+type LogRecord struct { + Timestamp time.Time + Fields []log.Field +} + +// FinishOptions allows Span.FinishWithOptions callers to override the finish +// timestamp and provide log data via a bulk interface. +type FinishOptions struct { + // FinishTime overrides the Span's finish time, or implicitly becomes + // time.Now() if FinishTime.IsZero(). + // + // FinishTime must resolve to a timestamp that's >= the Span's StartTime + // (per StartSpanOptions). + FinishTime time.Time + + // LogRecords allows the caller to specify the contents of many LogFields() + // calls with a single slice. May be nil. + // + // None of the LogRecord.Timestamp values may be .IsZero() (i.e., they must + // be set explicitly). Also, they must be >= the Span's start timestamp and + // <= the FinishTime (or time.Now() if FinishTime.IsZero()). Otherwise the + // behavior of FinishWithOptions() is undefined. + // + // If specified, the caller hands off ownership of LogRecords at + // FinishWithOptions() invocation time. + // + // If specified, the (deprecated) BulkLogData must be nil or empty. + LogRecords []LogRecord + + // BulkLogData is DEPRECATED. + BulkLogData []LogData +} + +// LogData is DEPRECATED +type LogData struct { + Timestamp time.Time + Event string + Payload interface{} +} + +// ToLogRecord converts a deprecated LogData to a non-deprecated LogRecord +func (ld *LogData) ToLogRecord() LogRecord { + var literalTimestamp time.Time + if ld.Timestamp.IsZero() { + literalTimestamp = time.Now() + } else { + literalTimestamp = ld.Timestamp + } + rval := LogRecord{ + Timestamp: literalTimestamp, + } + if ld.Payload == nil { + rval.Fields = []log.Field{ + log.String("event", ld.Event), + } + } else { + rval.Fields = []log.Field{ + log.String("event", ld.Event), + log.Object("payload", ld.Payload), + } + } + return rval +} diff --git a/vendor/github.com/opentracing/opentracing-go/tracer.go b/vendor/github.com/opentracing/opentracing-go/tracer.go new file mode 100644 index 00000000..715f0ced --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/tracer.go @@ -0,0 +1,304 @@ +package opentracing + +import "time" + +// Tracer is a simple, thin interface for Span creation and SpanContext +// propagation. +type Tracer interface { + + // Create, start, and return a new Span with the given `operationName` and + // incorporate the given StartSpanOption `opts`. (Note that `opts` borrows + // from the "functional options" pattern, per + // http://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis) + // + // A Span with no SpanReference options (e.g., opentracing.ChildOf() or + // opentracing.FollowsFrom()) becomes the root of its own trace. + // + // Examples: + // + // var tracer opentracing.Tracer = ... + // + // // The root-span case: + // sp := tracer.StartSpan("GetFeed") + // + // // The vanilla child span case: + // sp := tracer.StartSpan( + // "GetFeed", + // opentracing.ChildOf(parentSpan.Context())) + // + // // All the bells and whistles: + // sp := tracer.StartSpan( + // "GetFeed", + // opentracing.ChildOf(parentSpan.Context()), + // opentracing.Tag{"user_agent", loggedReq.UserAgent}, + // opentracing.StartTime(loggedReq.Timestamp), + // ) + // + StartSpan(operationName string, opts ...StartSpanOption) Span + + // Inject() takes the `sm` SpanContext instance and injects it for + // propagation within `carrier`. The actual type of `carrier` depends on + // the value of `format`. 
+	//
+	// OpenTracing defines a common set of `format` values (see BuiltinFormat),
+	// and each has an expected carrier type.
+	//
+	// Other packages may declare their own `format` values, much like the keys
+	// used by `context.Context` (see https://godoc.org/context#WithValue).
+	//
+	// Example usage (sans error handling):
+	//
+	//     carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
+	//     err := tracer.Inject(
+	//         span.Context(),
+	//         opentracing.HTTPHeaders,
+	//         carrier)
+	//
+	// NOTE: All opentracing.Tracer implementations MUST support all
+	// BuiltinFormats.
+	//
+	// Implementations may return opentracing.ErrUnsupportedFormat if `format`
+	// is not supported by (or not known by) the implementation.
+	//
+	// Implementations may return opentracing.ErrInvalidCarrier or any other
+	// implementation-specific error if the format is supported but injection
+	// fails anyway.
+	//
+	// See Tracer.Extract().
+	Inject(sm SpanContext, format interface{}, carrier interface{}) error
+
+	// Extract() returns a SpanContext instance given `format` and `carrier`.
+	//
+	// OpenTracing defines a common set of `format` values (see BuiltinFormat),
+	// and each has an expected carrier type.
+	//
+	// Other packages may declare their own `format` values, much like the keys
+	// used by `context.Context` (see
+	// https://godoc.org/golang.org/x/net/context#WithValue).
+	//
+	// Example usage (with StartSpan):
+	//
+	//
+	//     carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
+	//     clientContext, err := tracer.Extract(opentracing.HTTPHeaders, carrier)
+	//
+	//     // ... assuming the ultimate goal here is to resume the trace with a
+	//     // server-side Span:
+	//     var serverSpan opentracing.Span
+	//     if err == nil {
+	//         serverSpan = tracer.StartSpan(
+	//             rpcMethodName, ext.RPCServerOption(clientContext))
+	//     } else {
+	//         serverSpan = tracer.StartSpan(rpcMethodName)
+	//     }
+	//
+	//
+	// NOTE: All opentracing.Tracer implementations MUST support all
+	// BuiltinFormats.
+	//
+	// Return values:
+	//  - A successful Extract returns a SpanContext instance and a nil error
+	//  - If there was simply no SpanContext to extract in `carrier`, Extract()
+	//    returns (nil, opentracing.ErrSpanContextNotFound)
+	//  - If `format` is unsupported or unrecognized, Extract() returns (nil,
+	//    opentracing.ErrUnsupportedFormat)
+	//  - If there are more fundamental problems with the `carrier` object,
+	//    Extract() may return opentracing.ErrInvalidCarrier,
+	//    opentracing.ErrSpanContextCorrupted, or implementation-specific
+	//    errors.
+	//
+	// See Tracer.Inject().
+	Extract(format interface{}, carrier interface{}) (SpanContext, error)
+}
+
+// StartSpanOptions gives Tracer.StartSpan() callers and implementors a
+// mechanism to override the start timestamp, specify Span References, and make
+// a single Tag or multiple Tags available at Span start time.
+//
+// StartSpan() callers should look at the StartSpanOption interface and
+// implementations available in this package.
+//
+// Tracer implementations can convert a slice of `StartSpanOption` instances
+// into a `StartSpanOptions` struct like so:
+//
+//     func StartSpan(opName string, opts ...opentracing.StartSpanOption) {
+//         sso := opentracing.StartSpanOptions{}
+//         for _, o := range opts {
+//             o.Apply(&sso)
+//         }
+//         ...
+//     }
+//
+type StartSpanOptions struct {
+	// Zero or more causal references to other Spans (via their SpanContext).
+	// If empty, start a "root" Span (i.e., start a new trace).
+	References []SpanReference
+
+	// StartTime overrides the Span's start time, or implicitly becomes
+	// time.Now() if StartTime.IsZero().
+	StartTime time.Time
+
+	// Tags may have zero or more entries; the restrictions on map values are
+	// identical to those for Span.SetTag(). May be nil.
+	//
+	// If specified, the caller hands off ownership of Tags at
+	// StartSpan() invocation time.
+	Tags map[string]interface{}
+}
+
+// StartSpanOption instances (zero or more) may be passed to Tracer.StartSpan.
+//
+// StartSpanOption borrows from the "functional options" pattern, per
+// http://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis
+type StartSpanOption interface {
+	Apply(*StartSpanOptions)
+}
+
+// SpanReferenceType is an enum type describing different categories of
+// relationships between two Spans. If Span-2 refers to Span-1, the
+// SpanReferenceType describes Span-1 from Span-2's perspective. For example,
+// ChildOfRef means that Span-1 created Span-2.
+//
+// NOTE: Span-1 and Span-2 do *not* necessarily depend on each other for
+// completion; e.g., Span-2 may be part of a background job enqueued by Span-1,
+// or Span-2 may be sitting in a distributed queue behind Span-1.
+type SpanReferenceType int
+
+const (
+	// ChildOfRef refers to a parent Span that caused *and* somehow depends
+	// upon the new child Span. Often (but not always), the parent Span cannot
+	// finish until the child Span does.
+	//
+	// A timing diagram for a ChildOfRef that's blocked on the new Span:
+	//
+	//     [-Parent Span---------]
+	//          [-Child Span----]
+	//
+	// See http://opentracing.io/spec/
+	//
+	// See opentracing.ChildOf()
+	ChildOfRef SpanReferenceType = iota
+
+	// FollowsFromRef refers to a parent Span that does not depend in any way
+	// on the result of the new child Span. For instance, one might use
+	// FollowsFromRefs to describe pipeline stages separated by queues,
+	// or a fire-and-forget cache insert at the tail end of a web request.
+	//
+	// A FollowsFromRef Span is part of the same logical trace as the new Span:
+	// i.e., the new Span is somehow caused by the work of its FollowsFromRef.
+	//
+	// All of the following could be valid timing diagrams for children that
+	// "FollowFrom" a parent.
+	//
+	//     [-Parent Span-]  [-Child Span-]
+	//
+	//
+	//     [-Parent Span--]
+	//      [-Child Span-]
+	//
+	//
+	//     [-Parent Span-]
+	//                 [-Child Span-]
+	//
+	// See http://opentracing.io/spec/
+	//
+	// See opentracing.FollowsFrom()
+	FollowsFromRef
+)
+
+// SpanReference is a StartSpanOption that pairs a SpanReferenceType and a
+// referenced SpanContext. See the SpanReferenceType documentation for
+// supported relationships. If SpanReference is created with
+// ReferencedContext==nil, it has no effect. Thus it allows for a more concise
+// syntax for starting spans:
+//
+//     sc, _ := tracer.Extract(someFormat, someCarrier)
+//     span := tracer.StartSpan("operation", opentracing.ChildOf(sc))
+//
+// The `ChildOf(sc)` option above will not panic if sc == nil; it will just
+// not add the parent span reference to the options.
+type SpanReference struct {
+	Type              SpanReferenceType
+	ReferencedContext SpanContext
+}
+
+// Apply satisfies the StartSpanOption interface.
+func (r SpanReference) Apply(o *StartSpanOptions) {
+	if r.ReferencedContext != nil {
+		o.References = append(o.References, r)
+	}
+}
+
+// ChildOf returns a StartSpanOption pointing to a dependent parent span.
+// If sc == nil, the option has no effect.
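+//
+// A minimal usage sketch (the operation names are illustrative only):
+//
+//     parent := tracer.StartSpan("parent-op")
+//     defer parent.Finish()
+//     child := tracer.StartSpan("child-op", opentracing.ChildOf(parent.Context()))
+//     defer child.Finish()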
+// +// See ChildOfRef, SpanReference +func ChildOf(sc SpanContext) SpanReference { + return SpanReference{ + Type: ChildOfRef, + ReferencedContext: sc, + } +} + +// FollowsFrom returns a StartSpanOption pointing to a parent Span that caused +// the child Span but does not directly depend on its result in any way. +// If sc == nil, the option has no effect. +// +// See FollowsFromRef, SpanReference +func FollowsFrom(sc SpanContext) SpanReference { + return SpanReference{ + Type: FollowsFromRef, + ReferencedContext: sc, + } +} + +// StartTime is a StartSpanOption that sets an explicit start timestamp for the +// new Span. +type StartTime time.Time + +// Apply satisfies the StartSpanOption interface. +func (t StartTime) Apply(o *StartSpanOptions) { + o.StartTime = time.Time(t) +} + +// Tags are a generic map from an arbitrary string key to an opaque value type. +// The underlying tracing system is responsible for interpreting and +// serializing the values. +type Tags map[string]interface{} + +// Apply satisfies the StartSpanOption interface. +func (t Tags) Apply(o *StartSpanOptions) { + if o.Tags == nil { + o.Tags = make(map[string]interface{}) + } + for k, v := range t { + o.Tags[k] = v + } +} + +// Tag may be passed as a StartSpanOption to add a tag to new spans, +// or its Set method may be used to apply the tag to an existing Span, +// for example: +// +// tracer.StartSpan("opName", Tag{"Key", value}) +// +// or +// +// Tag{"key", value}.Set(span) +type Tag struct { + Key string + Value interface{} +} + +// Apply satisfies the StartSpanOption interface. +func (t Tag) Apply(o *StartSpanOptions) { + if o.Tags == nil { + o.Tags = make(map[string]interface{}) + } + o.Tags[t.Key] = t.Value +} + +// Set applies the tag to an existing Span. +func (t Tag) Set(s Span) { + s.SetTag(t.Key, t.Value) +} diff --git a/vendor/github.com/pelletier/go-toml/.dockerignore b/vendor/github.com/pelletier/go-toml/.dockerignore deleted file mode 100644 index 7b588347..00000000 --- a/vendor/github.com/pelletier/go-toml/.dockerignore +++ /dev/null @@ -1,2 +0,0 @@ -cmd/tomll/tomll -cmd/tomljson/tomljson diff --git a/vendor/github.com/pelletier/go-toml/.gitignore b/vendor/github.com/pelletier/go-toml/.gitignore index e6ba63a5..99e38bbc 100644 --- a/vendor/github.com/pelletier/go-toml/.gitignore +++ b/vendor/github.com/pelletier/go-toml/.gitignore @@ -1,5 +1,2 @@ test_program/test_program_bin fuzz/ -cmd/tomll/tomll -cmd/tomljson/tomljson -cmd/tomltestgen/tomltestgen diff --git a/vendor/github.com/pelletier/go-toml/.travis.yml b/vendor/github.com/pelletier/go-toml/.travis.yml new file mode 100644 index 00000000..c9fbf304 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/.travis.yml @@ -0,0 +1,23 @@ +sudo: false +language: go +go: + - 1.8.x + - 1.9.x + - 1.10.x + - tip +matrix: + allow_failures: + - go: tip + fast_finish: true +script: + - if [ -n "$(go fmt ./...)" ]; then exit 1; fi + - ./test.sh + - ./benchmark.sh $TRAVIS_BRANCH https://github.com/$TRAVIS_REPO_SLUG.git +before_install: + - go get github.com/axw/gocov/gocov + - go get github.com/mattn/goveralls + - if ! 
go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi +branches: + only: [master] +after_success: + - $HOME/gopath/bin/goveralls -service=travis-ci -coverprofile=coverage.out -repotoken $COVERALLS_TOKEN diff --git a/vendor/github.com/pelletier/go-toml/CONTRIBUTING.md b/vendor/github.com/pelletier/go-toml/CONTRIBUTING.md deleted file mode 100644 index 405c911c..00000000 --- a/vendor/github.com/pelletier/go-toml/CONTRIBUTING.md +++ /dev/null @@ -1,132 +0,0 @@ -## Contributing - -Thank you for your interest in go-toml! We appreciate you considering -contributing to go-toml! - -The main goal is the project is to provide an easy-to-use TOML -implementation for Go that gets the job done and gets out of your way – -dealing with TOML is probably not the central piece of your project. - -As the single maintainer of go-toml, time is scarce. All help, big or -small, is more than welcomed! - -### Ask questions - -Any question you may have, somebody else might have it too. Always feel -free to ask them on the [issues tracker][issues-tracker]. We will try to -answer them as clearly and quickly as possible, time permitting. - -Asking questions also helps us identify areas where the documentation needs -improvement, or new features that weren't envisioned before. Sometimes, a -seemingly innocent question leads to the fix of a bug. Don't hesitate and -ask away! - -### Improve the documentation - -The best way to share your knowledge and experience with go-toml is to -improve the documentation. Fix a typo, clarify an interface, add an -example, anything goes! - -The documentation is present in the [README][readme] and thorough the -source code. On release, it gets updated on [GoDoc][godoc]. To make a -change to the documentation, create a pull request with your proposed -changes. For simple changes like that, the easiest way to go is probably -the "Fork this project and edit the file" button on Github, displayed at -the top right of the file. Unless it's a trivial change (for example a -typo), provide a little bit of context in your pull request description or -commit message. - -### Report a bug - -Found a bug! Sorry to hear that :(. Help us and other track them down and -fix by reporting it. [File a new bug report][bug-report] on the [issues -tracker][issues-tracker]. The template should provide enough guidance on -what to include. When in doubt: add more details! By reducing ambiguity and -providing more information, it decreases back and forth and saves everyone -time. - -### Code changes - -Want to contribute a patch? Very happy to hear that! - -First, some high-level rules: - -* A short proposal with some POC code is better than a lengthy piece of - text with no code. Code speaks louder than words. -* No backward-incompatible patch will be accepted unless discussed. - Sometimes it's hard, and Go's lack of versioning by default does not - help, but we try not to break people's programs unless we absolutely have - to. -* If you are writing a new feature or extending an existing one, make sure - to write some documentation. -* Bug fixes need to be accompanied with regression tests. -* New code needs to be tested. -* Your commit messages need to explain why the change is needed, even if - already included in the PR description. - -It does sound like a lot, but those best practices are here to save time -overall and continuously improve the quality of the project, which is -something everyone benefits from. 
- -#### Get started - -The fairly standard code contribution process looks like that: - -1. [Fork the project][fork]. -2. Make your changes, commit on any branch you like. -3. [Open up a pull request][pull-request] -4. Review, potential ask for changes. -5. Merge. You're in! - -Feel free to ask for help! You can create draft pull requests to gather -some early feedback! - -#### Run the tests - -You can run tests for go-toml using Go's test tool: `go test ./...`. -When creating a pull requests, all tests will be ran on Linux on a few Go -versions (Travis CI), and on Windows using the latest Go version -(AppVeyor). - -#### Style - -Try to look around and follow the same format and structure as the rest of -the code. We enforce using `go fmt` on the whole code base. - ---- - -### Maintainers-only - -#### Merge pull request - -Checklist: - -* Passing CI. -* Does not introduce backward-incompatible changes (unless discussed). -* Has relevant doc changes. -* Has relevant unit tests. - -1. Merge using "squash and merge". -2. Make sure to edit the commit message to keep all the useful information - nice and clean. -3. Make sure the commit title is clear and contains the PR number (#123). - -#### New release - -1. Go to [releases][releases]. Click on "X commits to master since this - release". -2. Make note of all the changes. Look for backward incompatible changes, - new features, and bug fixes. -3. Pick the new version using the above and semver. -4. Create a [new release][new-release]. -5. Follow the same format as [1.1.0][release-110]. - -[issues-tracker]: https://github.com/pelletier/go-toml/issues -[bug-report]: https://github.com/pelletier/go-toml/issues/new?template=bug_report.md -[godoc]: https://godoc.org/github.com/pelletier/go-toml -[readme]: ./README.md -[fork]: https://help.github.com/articles/fork-a-repo -[pull-request]: https://help.github.com/en/articles/creating-a-pull-request -[releases]: https://github.com/pelletier/go-toml/releases -[new-release]: https://github.com/pelletier/go-toml/releases/new -[release-110]: https://github.com/pelletier/go-toml/releases/tag/v1.1.0 diff --git a/vendor/github.com/pelletier/go-toml/Dockerfile b/vendor/github.com/pelletier/go-toml/Dockerfile deleted file mode 100644 index fffdb016..00000000 --- a/vendor/github.com/pelletier/go-toml/Dockerfile +++ /dev/null @@ -1,11 +0,0 @@ -FROM golang:1.12-alpine3.9 as builder -WORKDIR /go/src/github.com/pelletier/go-toml -COPY . . -ENV CGO_ENABLED=0 -ENV GOOS=linux -RUN go install ./... - -FROM scratch -COPY --from=builder /go/bin/tomll /usr/bin/tomll -COPY --from=builder /go/bin/tomljson /usr/bin/tomljson -COPY --from=builder /go/bin/jsontoml /usr/bin/jsontoml diff --git a/vendor/github.com/pelletier/go-toml/PULL_REQUEST_TEMPLATE.md b/vendor/github.com/pelletier/go-toml/PULL_REQUEST_TEMPLATE.md deleted file mode 100644 index 041cdc4a..00000000 --- a/vendor/github.com/pelletier/go-toml/PULL_REQUEST_TEMPLATE.md +++ /dev/null @@ -1,5 +0,0 @@ -**Issue:** add link to pelletier/go-toml issue here - -Explanation of what this pull request does. - -More detailed description of the decisions being made and the reasons why (if the patch is non-trivial). diff --git a/vendor/github.com/pelletier/go-toml/README.md b/vendor/github.com/pelletier/go-toml/README.md index 4ef303af..0d357acf 100644 --- a/vendor/github.com/pelletier/go-toml/README.md +++ b/vendor/github.com/pelletier/go-toml/README.md @@ -3,14 +3,13 @@ Go library for the [TOML](https://github.com/mojombo/toml) format. 
 This library supports TOML version
-[v0.5.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.5.0.md)
+[v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md)
 
 [![GoDoc](https://godoc.org/github.com/pelletier/go-toml?status.svg)](http://godoc.org/github.com/pelletier/go-toml)
 [![license](https://img.shields.io/github/license/pelletier/go-toml.svg)](https://github.com/pelletier/go-toml/blob/master/LICENSE)
-[![Build Status](https://dev.azure.com/pelletierthomas/go-toml-ci/_apis/build/status/pelletier.go-toml?branchName=master)](https://dev.azure.com/pelletierthomas/go-toml-ci/_build/latest?definitionId=1&branchName=master)
-[![codecov](https://codecov.io/gh/pelletier/go-toml/branch/master/graph/badge.svg)](https://codecov.io/gh/pelletier/go-toml)
+[![Build Status](https://travis-ci.org/pelletier/go-toml.svg?branch=master)](https://travis-ci.org/pelletier/go-toml)
+[![Coverage Status](https://coveralls.io/repos/github/pelletier/go-toml/badge.svg?branch=master)](https://coveralls.io/github/pelletier/go-toml?branch=master)
 [![Go Report Card](https://goreportcard.com/badge/github.com/pelletier/go-toml)](https://goreportcard.com/report/github.com/pelletier/go-toml)
-[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fpelletier%2Fgo-toml.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fpelletier%2Fgo-toml?ref=badge_shield)
 
 ## Features
 
@@ -99,30 +98,6 @@ Go-toml provides two handy command line tools:
     go install github.com/pelletier/go-toml/cmd/tomljson
     tomljson --help
     ```
-
-  * `jsontoml`: Reads a JSON file and outputs a TOML representation.
-
-    ```
-    go install github.com/pelletier/go-toml/cmd/jsontoml
-    jsontoml --help
-    ```
-
-### Docker image
-
-Those tools are also availble as a Docker image from
-[dockerhub](https://hub.docker.com/r/pelletier/go-toml). For example, to
-use `tomljson`:
-
-```
-docker run -v $PWD:/workdir pelletier/go-toml tomljson /workdir/example.toml
-```
-
-Only master (`latest`) and tagged versions are published to dockerhub. You
-can build your own image as usual:
-
-```
-docker build -t go-toml .
-```
 
 ## Contribute
 
@@ -132,7 +107,12 @@ much appreciated!
 
 ### Run tests
 
-`go test ./...`
+You have to make sure two kinds of tests run:
+
+1. The Go unit tests
+2. The TOML examples base
+
+You can run both of them using `./test.sh`.
 
 ### Fuzzing
 
diff --git a/vendor/github.com/pelletier/go-toml/azure-pipelines.yml b/vendor/github.com/pelletier/go-toml/azure-pipelines.yml
deleted file mode 100644
index d8d7944b..00000000
--- a/vendor/github.com/pelletier/go-toml/azure-pipelines.yml
+++ /dev/null
@@ -1,167 +0,0 @@
-trigger:
-- master
-
-stages:
-- stage: fuzzit
-  displayName: "Run Fuzzit"
-  dependsOn: []
-  condition: and(succeeded(), eq(variables['Build.SourceBranchName'], 'master'))
-  jobs:
-  - job: submit
-    displayName: "Submit"
-    pool:
-      vmImage: ubuntu-latest
-    steps:
-    - task: GoTool@0
-      displayName: "Install Go 1.13"
-      inputs:
-        version: "1.13"
-    - script: echo "##vso[task.setvariable variable=PATH]${PATH}:/home/vsts/go/bin/"
-    - script: mkdir -p ${HOME}/go/src/github.com/pelletier/go-toml
-    - script: cp -R .
${HOME}/go/src/github.com/pelletier/go-toml - - task: Bash@3 - inputs: - filePath: './fuzzit.sh' - env: - TYPE: fuzzing - FUZZIT_API_KEY: $(FUZZIT_API_KEY) - -- stage: run_checks - displayName: "Check" - dependsOn: [] - jobs: - - job: fmt - displayName: "fmt" - pool: - vmImage: ubuntu-latest - steps: - - task: GoTool@0 - displayName: "Install Go 1.13" - inputs: - version: "1.13" - - task: Go@0 - displayName: "go fmt ./..." - inputs: - command: 'custom' - customCommand: 'fmt' - arguments: './...' - - job: coverage - displayName: "coverage" - pool: - vmImage: ubuntu-latest - steps: - - task: GoTool@0 - displayName: "Install Go 1.13" - inputs: - version: "1.13" - - task: Go@0 - displayName: "Generate coverage" - inputs: - command: 'test' - arguments: "-race -coverprofile=coverage.txt -covermode=atomic" - - task: Bash@3 - inputs: - targetType: 'inline' - script: 'bash <(curl -s https://codecov.io/bash) -t $(CODECOV_TOKEN)' - - job: benchmark - displayName: "benchmark" - pool: - vmImage: ubuntu-latest - steps: - - task: GoTool@0 - displayName: "Install Go 1.13" - inputs: - version: "1.13" - - script: echo "##vso[task.setvariable variable=PATH]${PATH}:/home/vsts/go/bin/" - - task: Bash@3 - inputs: - filePath: './benchmark.sh' - arguments: "master $(Build.Repository.Uri)" - - - job: fuzzing - displayName: "fuzzing" - pool: - vmImage: ubuntu-latest - steps: - - task: GoTool@0 - displayName: "Install Go 1.13" - inputs: - version: "1.13" - - script: echo "##vso[task.setvariable variable=PATH]${PATH}:/home/vsts/go/bin/" - - script: mkdir -p ${HOME}/go/src/github.com/pelletier/go-toml - - script: cp -R . ${HOME}/go/src/github.com/pelletier/go-toml - - task: Bash@3 - inputs: - filePath: './fuzzit.sh' - env: - TYPE: local-regression - - - job: go_unit_tests - displayName: "unit tests" - strategy: - matrix: - linux 1.13: - goVersion: '1.13' - imageName: 'ubuntu-latest' - mac 1.13: - goVersion: '1.13' - imageName: 'macos-10.13' - windows 1.13: - goVersion: '1.13' - imageName: 'vs2017-win2016' - linux 1.12: - goVersion: '1.12' - imageName: 'ubuntu-latest' - mac 1.12: - goVersion: '1.12' - imageName: 'macos-10.13' - windows 1.12: - goVersion: '1.12' - imageName: 'vs2017-win2016' - pool: - vmImage: $(imageName) - steps: - - task: GoTool@0 - displayName: "Install Go $(goVersion)" - inputs: - version: $(goVersion) - - task: Go@0 - displayName: "go test ./..." - inputs: - command: 'test' - arguments: './...' - -- stage: build_docker_image - displayName: "Build Docker image" - dependsOn: run_checks - jobs: - - job: build - displayName: "Build" - pool: - vmImage: ubuntu-latest - steps: - - task: Docker@2 - inputs: - command: 'build' - Dockerfile: 'Dockerfile' - buildContext: '.' - addPipelineData: false - -- stage: publish_docker_image - displayName: "Publish Docker image" - dependsOn: build_docker_image - condition: and(succeeded(), eq(variables['Build.SourceBranchName'], 'master')) - jobs: - - job: publish - displayName: "Publish" - pool: - vmImage: ubuntu-latest - steps: - - task: Docker@2 - inputs: - containerRegistry: 'DockerHub' - repository: 'pelletier/go-toml' - command: 'buildAndPush' - Dockerfile: 'Dockerfile' - buildContext: '.' 
- tags: 'latest' \ No newline at end of file diff --git a/vendor/github.com/pelletier/go-toml/benchmark.sh b/vendor/github.com/pelletier/go-toml/benchmark.sh index 7914fff4..8b8bb528 100644 --- a/vendor/github.com/pelletier/go-toml/benchmark.sh +++ b/vendor/github.com/pelletier/go-toml/benchmark.sh @@ -1,6 +1,6 @@ #!/bin/bash -set -ex +set -e reference_ref=${1:-master} reference_git=${2:-.} @@ -8,6 +8,7 @@ reference_git=${2:-.} if ! `hash benchstat 2>/dev/null`; then echo "Installing benchstat" go get golang.org/x/perf/cmd/benchstat + go install golang.org/x/perf/cmd/benchstat fi tempdir=`mktemp -d /tmp/go-toml-benchmark-XXXXXX` @@ -28,4 +29,4 @@ go test -bench=. -benchmem | tee ${local_benchmark} echo "" echo "=== diff" -benchstat -delta-test=none ${ref_benchmark} ${local_benchmark} +benchstat -delta-test=none ${ref_benchmark} ${local_benchmark} \ No newline at end of file diff --git a/vendor/github.com/pelletier/go-toml/doc.go b/vendor/github.com/pelletier/go-toml/doc.go index a1406a32..d5fd98c0 100644 --- a/vendor/github.com/pelletier/go-toml/doc.go +++ b/vendor/github.com/pelletier/go-toml/doc.go @@ -1,7 +1,7 @@ // Package toml is a TOML parser and manipulation library. // // This version supports the specification as described in -// https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.5.0.md +// https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md // // Marshaling // diff --git a/vendor/github.com/pelletier/go-toml/fuzzit.sh b/vendor/github.com/pelletier/go-toml/fuzzit.sh deleted file mode 100644 index b575a608..00000000 --- a/vendor/github.com/pelletier/go-toml/fuzzit.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/bash -set -xe - -# go-fuzz doesn't support modules yet, so ensure we do everything -# in the old style GOPATH way -export GO111MODULE="off" - -# install go-fuzz -go get -u github.com/dvyukov/go-fuzz/go-fuzz github.com/dvyukov/go-fuzz/go-fuzz-build - -# target name can only contain lower-case letters (a-z), digits (0-9) and a dash (-) -# to add another target, make sure to create it with `fuzzit create target` -# before using `fuzzit create job` -TARGET=toml-fuzzer - -go-fuzz-build -libfuzzer -o ${TARGET}.a github.com/pelletier/go-toml -clang -fsanitize=fuzzer ${TARGET}.a -o ${TARGET} - -# install fuzzit for talking to fuzzit.dev service -# or latest version: -# https://github.com/fuzzitdev/fuzzit/releases/latest/download/fuzzit_Linux_x86_64 -wget -q -O fuzzit https://github.com/fuzzitdev/fuzzit/releases/download/v2.4.52/fuzzit_Linux_x86_64 -chmod a+x fuzzit - -# TODO: change kkowalczyk to go-toml and create toml-fuzzer target there -./fuzzit create job --type $TYPE go-toml/${TARGET} ${TARGET} diff --git a/vendor/github.com/pelletier/go-toml/go.mod b/vendor/github.com/pelletier/go-toml/go.mod deleted file mode 100644 index a17e9331..00000000 --- a/vendor/github.com/pelletier/go-toml/go.mod +++ /dev/null @@ -1,9 +0,0 @@ -module github.com/pelletier/go-toml - -go 1.12 - -require ( - github.com/BurntSushi/toml v0.3.1 - github.com/davecgh/go-spew v1.1.1 - gopkg.in/yaml.v2 v2.2.4 -) diff --git a/vendor/github.com/pelletier/go-toml/go.sum b/vendor/github.com/pelletier/go-toml/go.sum deleted file mode 100644 index 1cd2613d..00000000 --- a/vendor/github.com/pelletier/go-toml/go.sum +++ /dev/null @@ -1,11 +0,0 @@ -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/davecgh/go-spew v1.1.1 
h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3 h1:fvjTMHxHEw/mxHbtzPi3JCcKXQRAnQTBRo6YCJSVHKI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/pelletier/go-toml/keysparsing.go b/vendor/github.com/pelletier/go-toml/keysparsing.go index e923bc4f..284db646 100644 --- a/vendor/github.com/pelletier/go-toml/keysparsing.go +++ b/vendor/github.com/pelletier/go-toml/keysparsing.go @@ -3,107 +3,79 @@ package toml import ( + "bytes" "errors" "fmt" "unicode" ) // Convert the bare key group string to an array. -// The input supports double quotation and single quotation, +// The input supports double quotation to allow "." inside the key name, // but escape sequences are not supported. Lexers must unescape them beforehand. func parseKey(key string) ([]string, error) { - runes := []rune(key) - var groups []string + groups := []string{} + var buffer bytes.Buffer + inQuotes := false + wasInQuotes := false + ignoreSpace := true + expectDot := false - if len(key) == 0 { - return nil, errors.New("empty key") + for _, char := range key { + if ignoreSpace { + if char == ' ' { + continue + } + ignoreSpace = false + } + switch char { + case '"': + if inQuotes { + groups = append(groups, buffer.String()) + buffer.Reset() + wasInQuotes = true + } + inQuotes = !inQuotes + expectDot = false + case '.': + if inQuotes { + buffer.WriteRune(char) + } else { + if !wasInQuotes { + if buffer.Len() == 0 { + return nil, errors.New("empty table key") + } + groups = append(groups, buffer.String()) + buffer.Reset() + } + ignoreSpace = true + expectDot = false + wasInQuotes = false + } + case ' ': + if inQuotes { + buffer.WriteRune(char) + } else { + expectDot = true + } + default: + if !inQuotes && !isValidBareChar(char) { + return nil, fmt.Errorf("invalid bare character: %c", char) + } + if !inQuotes && expectDot { + return nil, errors.New("what?") + } + buffer.WriteRune(char) + expectDot = false + } } - - idx := 0 - for idx < len(runes) { - for ; idx < len(runes) && isSpace(runes[idx]); idx++ { - // skip leading whitespace - } - if idx >= len(runes) { - break - } - r := runes[idx] - if isValidBareChar(r) { - // parse bare key - startIdx := idx - endIdx := -1 - idx++ - for idx < len(runes) { - r = runes[idx] - if isValidBareChar(r) { - idx++ - } else if r == '.' { - endIdx = idx - break - } else if isSpace(r) { - endIdx = idx - for ; idx < len(runes) && isSpace(runes[idx]); idx++ { - // skip trailing whitespace - } - if idx < len(runes) && runes[idx] != '.' 
{ - return nil, fmt.Errorf("invalid key character after whitespace: %c", runes[idx]) - } - break - } else { - return nil, fmt.Errorf("invalid bare key character: %c", r) - } - } - if endIdx == -1 { - endIdx = idx - } - groups = append(groups, string(runes[startIdx:endIdx])) - } else if r == '\'' { - // parse single quoted key - idx++ - startIdx := idx - for { - if idx >= len(runes) { - return nil, fmt.Errorf("unclosed single-quoted key") - } - r = runes[idx] - if r == '\'' { - groups = append(groups, string(runes[startIdx:idx])) - idx++ - break - } - idx++ - } - } else if r == '"' { - // parse double quoted key - idx++ - startIdx := idx - for { - if idx >= len(runes) { - return nil, fmt.Errorf("unclosed double-quoted key") - } - r = runes[idx] - if r == '"' { - groups = append(groups, string(runes[startIdx:idx])) - idx++ - break - } - idx++ - } - } else if r == '.' { - idx++ - if idx >= len(runes) { - return nil, fmt.Errorf("unexpected end of key") - } - r = runes[idx] - if !isValidBareChar(r) && r != '\'' && r != '"' && r != ' ' { - return nil, fmt.Errorf("expecting key part after dot") - } - } else { - return nil, fmt.Errorf("invalid key character: %c", r) - } + if inQuotes { + return nil, errors.New("mismatched quotes") + } + if buffer.Len() > 0 { + groups = append(groups, buffer.String()) } if len(groups) == 0 { - return nil, fmt.Errorf("empty key") + return nil, errors.New("empty key") } return groups, nil } diff --git a/vendor/github.com/pelletier/go-toml/lexer.go b/vendor/github.com/pelletier/go-toml/lexer.go index 735673bd..d11de428 100644 --- a/vendor/github.com/pelletier/go-toml/lexer.go +++ b/vendor/github.com/pelletier/go-toml/lexer.go @@ -223,12 +223,9 @@ func (l *tomlLexer) lexRvalue() tomlLexStateFn { } possibleDate := l.peekString(35) - dateSubmatches := dateRegexp.FindStringSubmatch(possibleDate) - if dateSubmatches != nil && dateSubmatches[0] != "" { - l.fastForward(len(dateSubmatches[0])) - if dateSubmatches[2] == "" { // no timezone information => local date - return l.lexLocalDate - } + dateMatch := dateRegexp.FindString(possibleDate) + if dateMatch != "" { + l.fastForward(len(dateMatch)) return l.lexDate } @@ -250,13 +247,13 @@ func (l *tomlLexer) lexRvalue() tomlLexStateFn { func (l *tomlLexer) lexLeftCurlyBrace() tomlLexStateFn { l.next() l.emit(tokenLeftCurlyBrace) - return l.lexVoid + return l.lexRvalue } func (l *tomlLexer) lexRightCurlyBrace() tomlLexStateFn { l.next() l.emit(tokenRightCurlyBrace) - return l.lexVoid + return l.lexRvalue } func (l *tomlLexer) lexDate() tomlLexStateFn { @@ -264,11 +261,6 @@ func (l *tomlLexer) lexDate() tomlLexStateFn { return l.lexRvalue } -func (l *tomlLexer) lexLocalDate() tomlLexStateFn { - l.emit(tokenLocalDate) - return l.lexRvalue -} - func (l *tomlLexer) lexTrue() tomlLexStateFn { l.fastForward(4) l.emit(tokenTrue) @@ -317,7 +309,7 @@ func (l *tomlLexer) lexKey() tomlLexStateFn { if err != nil { return l.errorf(err.Error()) } - growingString += "\"" + str + "\"" + growingString += str l.next() continue } else if r == '\'' { @@ -326,15 +318,13 @@ func (l *tomlLexer) lexKey() tomlLexStateFn { if err != nil { return l.errorf(err.Error()) } - growingString += "'" + str + "'" + growingString += str l.next() continue } else if r == '\n' { return l.errorf("keys cannot contain new lines") } else if isSpace(r) { break - } else if r == '.' 
{ - // skip } else if !isValidBareChar(r) { return l.errorf("keys cannot contain %c character", r) } @@ -741,27 +731,7 @@ func (l *tomlLexer) run() { } func init() { - // Regexp for all date/time formats supported by TOML. - // Group 1: nano precision - // Group 2: timezone - // - // /!\ also matches the empty string - // - // Example matches: - //1979-05-27T07:32:00Z - //1979-05-27T00:32:00-07:00 - //1979-05-27T00:32:00.999999-07:00 - //1979-05-27 07:32:00Z - //1979-05-27 00:32:00-07:00 - //1979-05-27 00:32:00.999999-07:00 - //1979-05-27T07:32:00 - //1979-05-27T00:32:00.999999 - //1979-05-27 07:32:00 - //1979-05-27 00:32:00.999999 - //1979-05-27 - //07:32:00 - //00:32:00.999999 - dateRegexp = regexp.MustCompile(`^(?:\d{1,4}-\d{2}-\d{2})?(?:[T ]?\d{2}:\d{2}:\d{2}(\.\d{1,9})?(Z|[+-]\d{2}:\d{2})?)?`) + dateRegexp = regexp.MustCompile(`^\d{1,4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d{1,9})?(Z|[+-]\d{2}:\d{2})`) } // Entry point diff --git a/vendor/github.com/pelletier/go-toml/localtime.go b/vendor/github.com/pelletier/go-toml/localtime.go deleted file mode 100644 index a2149e96..00000000 --- a/vendor/github.com/pelletier/go-toml/localtime.go +++ /dev/null @@ -1,281 +0,0 @@ -// Implementation of TOML's local date/time. -// Copied over from https://github.com/googleapis/google-cloud-go/blob/master/civil/civil.go -// to avoid pulling all the Google dependencies. -// -// Copyright 2016 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package civil implements types for civil time, a time-zone-independent -// representation of time that follows the rules of the proleptic -// Gregorian calendar with exactly 24-hour days, 60-minute hours, and 60-second -// minutes. -// -// Because they lack location information, these types do not represent unique -// moments or intervals of time. Use time.Time for that purpose. -package toml - -import ( - "fmt" - "time" -) - -// A LocalDate represents a date (year, month, day). -// -// This type does not include location information, and therefore does not -// describe a unique 24-hour timespan. -type LocalDate struct { - Year int // Year (e.g., 2014). - Month time.Month // Month of the year (January = 1, ...). - Day int // Day of the month, starting at 1. -} - -// LocalDateOf returns the LocalDate in which a time occurs in that time's location. -func LocalDateOf(t time.Time) LocalDate { - var d LocalDate - d.Year, d.Month, d.Day = t.Date() - return d -} - -// ParseLocalDate parses a string in RFC3339 full-date format and returns the date value it represents. -func ParseLocalDate(s string) (LocalDate, error) { - t, err := time.Parse("2006-01-02", s) - if err != nil { - return LocalDate{}, err - } - return LocalDateOf(t), nil -} - -// String returns the date in RFC3339 full-date format. -func (d LocalDate) String() string { - return fmt.Sprintf("%04d-%02d-%02d", d.Year, d.Month, d.Day) -} - -// IsValid reports whether the date is valid. 
-func (d LocalDate) IsValid() bool { - return LocalDateOf(d.In(time.UTC)) == d -} - -// In returns the time corresponding to time 00:00:00 of the date in the location. -// -// In is always consistent with time.LocalDate, even when time.LocalDate returns a time -// on a different day. For example, if loc is America/Indiana/Vincennes, then both -// time.LocalDate(1955, time.May, 1, 0, 0, 0, 0, loc) -// and -// civil.LocalDate{Year: 1955, Month: time.May, Day: 1}.In(loc) -// return 23:00:00 on April 30, 1955. -// -// In panics if loc is nil. -func (d LocalDate) In(loc *time.Location) time.Time { - return time.Date(d.Year, d.Month, d.Day, 0, 0, 0, 0, loc) -} - -// AddDays returns the date that is n days in the future. -// n can also be negative to go into the past. -func (d LocalDate) AddDays(n int) LocalDate { - return LocalDateOf(d.In(time.UTC).AddDate(0, 0, n)) -} - -// DaysSince returns the signed number of days between the date and s, not including the end day. -// This is the inverse operation to AddDays. -func (d LocalDate) DaysSince(s LocalDate) (days int) { - // We convert to Unix time so we do not have to worry about leap seconds: - // Unix time increases by exactly 86400 seconds per day. - deltaUnix := d.In(time.UTC).Unix() - s.In(time.UTC).Unix() - return int(deltaUnix / 86400) -} - -// Before reports whether d1 occurs before d2. -func (d1 LocalDate) Before(d2 LocalDate) bool { - if d1.Year != d2.Year { - return d1.Year < d2.Year - } - if d1.Month != d2.Month { - return d1.Month < d2.Month - } - return d1.Day < d2.Day -} - -// After reports whether d1 occurs after d2. -func (d1 LocalDate) After(d2 LocalDate) bool { - return d2.Before(d1) -} - -// MarshalText implements the encoding.TextMarshaler interface. -// The output is the result of d.String(). -func (d LocalDate) MarshalText() ([]byte, error) { - return []byte(d.String()), nil -} - -// UnmarshalText implements the encoding.TextUnmarshaler interface. -// The date is expected to be a string in a format accepted by ParseLocalDate. -func (d *LocalDate) UnmarshalText(data []byte) error { - var err error - *d, err = ParseLocalDate(string(data)) - return err -} - -// A LocalTime represents a time with nanosecond precision. -// -// This type does not include location information, and therefore does not -// describe a unique moment in time. -// -// This type exists to represent the TIME type in storage-based APIs like BigQuery. -// Most operations on Times are unlikely to be meaningful. Prefer the LocalDateTime type. -type LocalTime struct { - Hour int // The hour of the day in 24-hour format; range [0-23] - Minute int // The minute of the hour; range [0-59] - Second int // The second of the minute; range [0-59] - Nanosecond int // The nanosecond of the second; range [0-999999999] -} - -// LocalTimeOf returns the LocalTime representing the time of day in which a time occurs -// in that time's location. It ignores the date. -func LocalTimeOf(t time.Time) LocalTime { - var tm LocalTime - tm.Hour, tm.Minute, tm.Second = t.Clock() - tm.Nanosecond = t.Nanosecond() - return tm -} - -// ParseLocalTime parses a string and returns the time value it represents. -// ParseLocalTime accepts an extended form of the RFC3339 partial-time format. After -// the HH:MM:SS part of the string, an optional fractional part may appear, -// consisting of a decimal point followed by one to nine decimal digits. -// (RFC3339 admits only one digit after the decimal point). 
-func ParseLocalTime(s string) (LocalTime, error) { - t, err := time.Parse("15:04:05.999999999", s) - if err != nil { - return LocalTime{}, err - } - return LocalTimeOf(t), nil -} - -// String returns the date in the format described in ParseLocalTime. If Nanoseconds -// is zero, no fractional part will be generated. Otherwise, the result will -// end with a fractional part consisting of a decimal point and nine digits. -func (t LocalTime) String() string { - s := fmt.Sprintf("%02d:%02d:%02d", t.Hour, t.Minute, t.Second) - if t.Nanosecond == 0 { - return s - } - return s + fmt.Sprintf(".%09d", t.Nanosecond) -} - -// IsValid reports whether the time is valid. -func (t LocalTime) IsValid() bool { - // Construct a non-zero time. - tm := time.Date(2, 2, 2, t.Hour, t.Minute, t.Second, t.Nanosecond, time.UTC) - return LocalTimeOf(tm) == t -} - -// MarshalText implements the encoding.TextMarshaler interface. -// The output is the result of t.String(). -func (t LocalTime) MarshalText() ([]byte, error) { - return []byte(t.String()), nil -} - -// UnmarshalText implements the encoding.TextUnmarshaler interface. -// The time is expected to be a string in a format accepted by ParseLocalTime. -func (t *LocalTime) UnmarshalText(data []byte) error { - var err error - *t, err = ParseLocalTime(string(data)) - return err -} - -// A LocalDateTime represents a date and time. -// -// This type does not include location information, and therefore does not -// describe a unique moment in time. -type LocalDateTime struct { - Date LocalDate - Time LocalTime -} - -// Note: We deliberately do not embed LocalDate into LocalDateTime, to avoid promoting AddDays and Sub. - -// LocalDateTimeOf returns the LocalDateTime in which a time occurs in that time's location. -func LocalDateTimeOf(t time.Time) LocalDateTime { - return LocalDateTime{ - Date: LocalDateOf(t), - Time: LocalTimeOf(t), - } -} - -// ParseLocalDateTime parses a string and returns the LocalDateTime it represents. -// ParseLocalDateTime accepts a variant of the RFC3339 date-time format that omits -// the time offset but includes an optional fractional time, as described in -// ParseLocalTime. Informally, the accepted format is -// YYYY-MM-DDTHH:MM:SS[.FFFFFFFFF] -// where the 'T' may be a lower-case 't'. -func ParseLocalDateTime(s string) (LocalDateTime, error) { - t, err := time.Parse("2006-01-02T15:04:05.999999999", s) - if err != nil { - t, err = time.Parse("2006-01-02t15:04:05.999999999", s) - if err != nil { - return LocalDateTime{}, err - } - } - return LocalDateTimeOf(t), nil -} - -// String returns the date in the format described in ParseLocalDate. -func (dt LocalDateTime) String() string { - return dt.Date.String() + "T" + dt.Time.String() -} - -// IsValid reports whether the datetime is valid. -func (dt LocalDateTime) IsValid() bool { - return dt.Date.IsValid() && dt.Time.IsValid() -} - -// In returns the time corresponding to the LocalDateTime in the given location. -// -// If the time is missing or ambigous at the location, In returns the same -// result as time.LocalDate. For example, if loc is America/Indiana/Vincennes, then -// both -// time.LocalDate(1955, time.May, 1, 0, 30, 0, 0, loc) -// and -// civil.LocalDateTime{ -// civil.LocalDate{Year: 1955, Month: time.May, Day: 1}}, -// civil.LocalTime{Minute: 30}}.In(loc) -// return 23:30:00 on April 30, 1955. -// -// In panics if loc is nil. 
-func (dt LocalDateTime) In(loc *time.Location) time.Time { - return time.Date(dt.Date.Year, dt.Date.Month, dt.Date.Day, dt.Time.Hour, dt.Time.Minute, dt.Time.Second, dt.Time.Nanosecond, loc) -} - -// Before reports whether dt1 occurs before dt2. -func (dt1 LocalDateTime) Before(dt2 LocalDateTime) bool { - return dt1.In(time.UTC).Before(dt2.In(time.UTC)) -} - -// After reports whether dt1 occurs after dt2. -func (dt1 LocalDateTime) After(dt2 LocalDateTime) bool { - return dt2.Before(dt1) -} - -// MarshalText implements the encoding.TextMarshaler interface. -// The output is the result of dt.String(). -func (dt LocalDateTime) MarshalText() ([]byte, error) { - return []byte(dt.String()), nil -} - -// UnmarshalText implements the encoding.TextUnmarshaler interface. -// The datetime is expected to be a string in a format accepted by ParseLocalDateTime -func (dt *LocalDateTime) UnmarshalText(data []byte) error { - var err error - *dt, err = ParseLocalDateTime(string(data)) - return err -} diff --git a/vendor/github.com/pelletier/go-toml/marshal.go b/vendor/github.com/pelletier/go-toml/marshal.go index 2a6cfbae..671da556 100644 --- a/vendor/github.com/pelletier/go-toml/marshal.go +++ b/vendor/github.com/pelletier/go-toml/marshal.go @@ -6,28 +6,20 @@ import ( "fmt" "io" "reflect" - "sort" "strconv" "strings" "time" ) -const ( - tagFieldName = "toml" - tagFieldComment = "comment" - tagCommented = "commented" - tagMultiline = "multiline" - tagDefault = "default" -) +const tagKeyMultiline = "multiline" type tomlOpts struct { - name string - comment string - commented bool - multiline bool - include bool - omitempty bool - defaultValue string + name string + comment string + commented bool + multiline bool + include bool + omitempty bool } type encOpts struct { @@ -39,40 +31,10 @@ var encOptsDefaults = encOpts{ quoteMapKeys: false, } -type annotation struct { - tag string - comment string - commented string - multiline string - defaultValue string -} - -var annotationDefault = annotation{ - tag: tagFieldName, - comment: tagFieldComment, - commented: tagCommented, - multiline: tagMultiline, - defaultValue: tagDefault, -} - -type marshalOrder int - -// Orders the Encoder can write the fields to the output stream. -const ( - // Sort fields alphabetically. - OrderAlphabetical marshalOrder = iota + 1 - // Preserve the order the fields are encountered. For example, the order of fields in - // a struct. 
-	OrderPreserve
-)
-
 var timeType = reflect.TypeOf(time.Time{})
 var marshalerType = reflect.TypeOf(new(Marshaler)).Elem()
-var localDateType = reflect.TypeOf(LocalDate{})
-var localTimeType = reflect.TypeOf(LocalTime{})
-var localDateTimeType = reflect.TypeOf(LocalDateTime{})
 
-// Check if the given marshal type maps to a Tree primitive
+// Check if the given marshal type maps to a Tree primitive
 func isPrimitive(mtype reflect.Type) bool {
 	switch mtype.Kind() {
 	case reflect.Ptr:
@@ -88,41 +50,37 @@ func isPrimitive(mtype reflect.Type) bool {
 	case reflect.String:
 		return true
 	case reflect.Struct:
-		return mtype == timeType || mtype == localDateType || mtype == localDateTimeType || mtype == localTimeType || isCustomMarshaler(mtype)
+		return mtype == timeType || isCustomMarshaler(mtype)
 	default:
 		return false
 	}
 }
 
-// Check if the given marshal type maps to a Tree slice or array
-func isTreeSequence(mtype reflect.Type) bool {
+// Check if the given marshal type maps to a Tree slice
+func isTreeSlice(mtype reflect.Type) bool {
+	switch mtype.Kind() {
+	case reflect.Slice:
+		return !isOtherSlice(mtype)
+	default:
+		return false
+	}
+}
+
+// Check if the given marshal type maps to a non-Tree slice
+func isOtherSlice(mtype reflect.Type) bool {
 	switch mtype.Kind() {
 	case reflect.Ptr:
-		return isTreeSequence(mtype.Elem())
-	case reflect.Slice, reflect.Array:
-		return isTree(mtype.Elem())
+		return isOtherSlice(mtype.Elem())
+	case reflect.Slice:
+		return isPrimitive(mtype.Elem()) || isOtherSlice(mtype.Elem())
 	default:
 		return false
 	}
 }
 
-// Check if the given marshal type maps to a non-Tree slice or array
-func isOtherSequence(mtype reflect.Type) bool {
-	switch mtype.Kind() {
-	case reflect.Ptr:
-		return isOtherSequence(mtype.Elem())
-	case reflect.Slice, reflect.Array:
-		return !isTreeSequence(mtype)
-	default:
-		return false
-	}
-}
-
-// Check if the given marshal type maps to a Tree
+// Check if the given marshal type maps to a Tree
 func isTree(mtype reflect.Type) bool {
 	switch mtype.Kind() {
-	case reflect.Ptr:
-		return isTree(mtype.Elem())
 	case reflect.Map:
 		return true
 	case reflect.Struct:
@@ -177,9 +135,7 @@ Tree primitive types and corresponding marshal types:
   float64     float32, float64, pointers to same
   string      string, pointers to same
   bool        bool, pointers to same
-  time.LocalTime  time.LocalTime{}, pointers to same
-
-For additional flexibility, use the Encoder API.
+  time.Time   time.Time{}, pointers to same
 */
 func Marshal(v interface{}) ([]byte, error) {
 	return NewEncoder(nil).marshal(v)
@@ -189,21 +145,13 @@ func Marshal(v interface{}) ([]byte, error) {
 type Encoder struct {
 	w io.Writer
 	encOpts
-	annotation
-	line  int
-	col   int
-	order marshalOrder
 }
 
 // NewEncoder returns a new encoder that writes to w.
 func NewEncoder(w io.Writer) *Encoder {
 	return &Encoder{
-		w:          w,
-		encOpts:    encOptsDefaults,
-		annotation: annotationDefault,
-		line:       0,
-		col:        1,
-		order:      OrderAlphabetical,
+		w:       w,
+		encOpts: encOptsDefaults,
 	}
 }
 
@@ -249,49 +197,11 @@ func (e *Encoder) ArraysWithOneElementPerLine(v bool) *Encoder {
 	return e
 }
 
-// Order allows to change in which order fields will be written to the output stream.
-func (e *Encoder) Order(ord marshalOrder) *Encoder { - e.order = ord - return e -} - -// SetTagName allows changing default tag "toml" -func (e *Encoder) SetTagName(v string) *Encoder { - e.tag = v - return e -} - -// SetTagComment allows changing default tag "comment" -func (e *Encoder) SetTagComment(v string) *Encoder { - e.comment = v - return e -} - -// SetTagCommented allows changing default tag "commented" -func (e *Encoder) SetTagCommented(v string) *Encoder { - e.commented = v - return e -} - -// SetTagMultiline allows changing default tag "multiline" -func (e *Encoder) SetTagMultiline(v string) *Encoder { - e.multiline = v - return e -} - func (e *Encoder) marshal(v interface{}) ([]byte, error) { mtype := reflect.TypeOf(v) - - switch mtype.Kind() { - case reflect.Struct, reflect.Map: - case reflect.Ptr: - if mtype.Elem().Kind() != reflect.Struct { - return []byte{}, errors.New("Only pointer to struct can be marshaled to TOML") - } - default: - return []byte{}, errors.New("Only a struct or map can be marshaled to TOML") + if mtype.Kind() != reflect.Struct { + return []byte{}, errors.New("Only a struct can be marshaled to TOML") } - sval := reflect.ValueOf(v) if isCustomMarshaler(mtype) { return callCustomMarshaler(sval) @@ -302,27 +212,22 @@ func (e *Encoder) marshal(v interface{}) ([]byte, error) { } var buf bytes.Buffer - _, err = t.writeToOrdered(&buf, "", "", 0, e.arraysOneElementPerLine, e.order) + _, err = t.writeTo(&buf, "", "", 0, e.arraysOneElementPerLine) return buf.Bytes(), err } -// Create next tree with a position based on Encoder.line -func (e *Encoder) nextTree() *Tree { - return newTreeWithPosition(Position{Line: e.line, Col: 1}) -} - // Convert given marshal struct or map value to toml tree func (e *Encoder) valueToTree(mtype reflect.Type, mval reflect.Value) (*Tree, error) { if mtype.Kind() == reflect.Ptr { return e.valueToTree(mtype.Elem(), mval.Elem()) } - tval := e.nextTree() + tval := newTree() switch mtype.Kind() { case reflect.Struct: for i := 0; i < mtype.NumField(); i++ { mtypef, mvalf := mtype.Field(i), mval.Field(i) - opts := tomlOptions(mtypef, e.annotation) + opts := tomlOptions(mtypef) if opts.include && (!opts.omitempty || !isZero(mvalf)) { val, err := e.valueToToml(mtypef.Type, mvalf) if err != nil { @@ -337,26 +242,7 @@ func (e *Encoder) valueToTree(mtype reflect.Type, mval reflect.Value) (*Tree, er } } case reflect.Map: - keys := mval.MapKeys() - if e.order == OrderPreserve && len(keys) > 0 { - // Sorting []reflect.Value is not straight forward. - // - // OrderPreserve will support deterministic results when string is used - // as the key to maps. 
- typ := keys[0].Type() - kind := keys[0].Kind() - if kind == reflect.String { - ikeys := make([]string, len(keys)) - for i := range keys { - ikeys[i] = keys[i].Interface().(string) - } - sort.Strings(ikeys) - for i := range ikeys { - keys[i] = reflect.ValueOf(ikeys[i]).Convert(typ) - } - } - } - for _, key := range keys { + for _, key := range mval.MapKeys() { mvalf := mval.MapIndex(key) val, err := e.valueToToml(mtype.Elem(), mvalf) if err != nil { @@ -404,7 +290,6 @@ func (e *Encoder) valueToOtherSlice(mtype reflect.Type, mval reflect.Value) (int // Convert given marshal value to toml value func (e *Encoder) valueToToml(mtype reflect.Type, mval reflect.Value) (interface{}, error) { - e.line++ if mtype.Kind() == reflect.Ptr { return e.valueToToml(mtype.Elem(), mval.Elem()) } @@ -413,18 +298,15 @@ func (e *Encoder) valueToToml(mtype reflect.Type, mval reflect.Value) (interface return callCustomMarshaler(mval) case isTree(mtype): return e.valueToTree(mtype, mval) - case isTreeSequence(mtype): + case isTreeSlice(mtype): return e.valueToTreeSlice(mtype, mval) - case isOtherSequence(mtype): + case isOtherSlice(mtype): return e.valueToOtherSlice(mtype, mval) default: switch mtype.Kind() { case reflect.Bool: return mval.Bool(), nil case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - if mtype.Kind() == reflect.Int64 && mtype == reflect.TypeOf(time.Duration(1)) { - return fmt.Sprint(mval), nil - } return mval.Int(), nil case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: return mval.Uint(), nil @@ -433,7 +315,7 @@ func (e *Encoder) valueToToml(mtype reflect.Type, mval reflect.Value) (interface case reflect.String: return mval.String(), nil case reflect.Struct: - return mval.Interface(), nil + return mval.Interface().(time.Time), nil default: return nil, fmt.Errorf("Marshal can't handle %v(%v)", mtype, mtype.Kind()) } @@ -444,7 +326,7 @@ func (e *Encoder) valueToToml(mtype reflect.Type, mval reflect.Value) (interface // Neither Unmarshaler interfaces nor UnmarshalTOML functions are supported for // sub-structs, and only definite types can be unmarshaled. func (t *Tree) Unmarshal(v interface{}) error { - d := Decoder{tval: t, tagName: tagFieldName} + d := Decoder{tval: t} return d.unmarshal(v) } @@ -452,11 +334,8 @@ func (t *Tree) Unmarshal(v interface{}) error { // See Marshal() documentation for types mapping table. func (t *Tree) Marshal() ([]byte, error) { var buf bytes.Buffer - _, err := t.WriteTo(&buf) - if err != nil { - return nil, err - } - return buf.Bytes(), nil + err := NewEncoder(&buf).Encode(t) + return buf.Bytes(), err } // Unmarshal parses the TOML-encoded data and stores the result in the value @@ -468,14 +347,6 @@ func (t *Tree) Marshal() ([]byte, error) { // The following struct annotations are supported: // // toml:"Field" Overrides the field's name to map to. -// default:"foo" Provides a default value. -// -// For default values, only fields of the following types are supported: -// * string -// * bool -// * int -// * int64 -// * float64 // // See Marshal() documentation for types mapping table. func Unmarshal(data []byte, v interface{}) error { @@ -491,7 +362,6 @@ type Decoder struct { r io.Reader tval *Tree encOpts - tagName string } // NewDecoder returns a new decoder that reads from r. 
@@ -499,7 +369,6 @@ func NewDecoder(r io.Reader) *Decoder { return &Decoder{ r: r, encOpts: encOptsDefaults, - tagName: tagFieldName, } } @@ -516,29 +385,13 @@ func (d *Decoder) Decode(v interface{}) error { return d.unmarshal(v) } -// SetTagName allows changing default tag "toml" -func (d *Decoder) SetTagName(v string) *Decoder { - d.tagName = v - return d -} - func (d *Decoder) unmarshal(v interface{}) error { mtype := reflect.TypeOf(v) - if mtype.Kind() != reflect.Ptr { - return errors.New("only a pointer to struct or map can be unmarshaled from TOML") + if mtype.Kind() != reflect.Ptr || mtype.Elem().Kind() != reflect.Struct { + return errors.New("Only a pointer to struct can be unmarshaled from TOML") } - elem := mtype.Elem() - - switch elem.Kind() { - case reflect.Struct, reflect.Map: - default: - return errors.New("only a pointer to struct or map can be unmarshaled from TOML") - } - - vv := reflect.ValueOf(v).Elem() - - sval, err := d.valueFromTree(elem, d.tval, &vv) + sval, err := d.valueFromTree(mtype.Elem(), d.tval) if err != nil { return err } @@ -546,92 +399,34 @@ func (d *Decoder) unmarshal(v interface{}) error { return nil } -// Convert toml tree to marshal struct or map, using marshal type. When mval1 -// is non-nil, merge fields into the given value instead of allocating a new one. -func (d *Decoder) valueFromTree(mtype reflect.Type, tval *Tree, mval1 *reflect.Value) (reflect.Value, error) { +// Convert toml tree to marshal struct or map, using marshal type +func (d *Decoder) valueFromTree(mtype reflect.Type, tval *Tree) (reflect.Value, error) { if mtype.Kind() == reflect.Ptr { - return d.unwrapPointer(mtype, tval, mval1) + return d.unwrapPointer(mtype, tval) } var mval reflect.Value switch mtype.Kind() { case reflect.Struct: - if mval1 != nil { - mval = *mval1 - } else { - mval = reflect.New(mtype).Elem() - } - + mval = reflect.New(mtype).Elem() for i := 0; i < mtype.NumField(); i++ { mtypef := mtype.Field(i) - an := annotation{tag: d.tagName} - opts := tomlOptions(mtypef, an) + opts := tomlOptions(mtypef) if opts.include { baseKey := opts.name - keysToTry := []string{ - baseKey, - strings.ToLower(baseKey), - strings.ToTitle(baseKey), - strings.ToLower(string(baseKey[0])) + baseKey[1:], - } - - found := false + keysToTry := []string{baseKey, strings.ToLower(baseKey), strings.ToTitle(baseKey)} for _, key := range keysToTry { exists := tval.Has(key) if !exists { continue } val := tval.Get(key) - fval := mval.Field(i) - mvalf, err := d.valueFromToml(mtypef.Type, val, &fval) + mvalf, err := d.valueFromToml(mtypef.Type, val) if err != nil { return mval, formatError(err, tval.GetPosition(key)) } mval.Field(i).Set(mvalf) - found = true break } - - if !found && opts.defaultValue != "" { - mvalf := mval.Field(i) - var val interface{} - var err error - switch mvalf.Kind() { - case reflect.Bool: - val, err = strconv.ParseBool(opts.defaultValue) - if err != nil { - return mval.Field(i), err - } - case reflect.Int: - val, err = strconv.Atoi(opts.defaultValue) - if err != nil { - return mval.Field(i), err - } - case reflect.String: - val = opts.defaultValue - case reflect.Int64: - val, err = strconv.ParseInt(opts.defaultValue, 10, 64) - if err != nil { - return mval.Field(i), err - } - case reflect.Float64: - val, err = strconv.ParseFloat(opts.defaultValue, 64) - if err != nil { - return mval.Field(i), err - } - default: - return mval.Field(i), fmt.Errorf("unsuported field type for default option") - } - mval.Field(i).Set(reflect.ValueOf(val)) - } - - // save the old behavior above and 
try to check anonymous structs - if !found && opts.defaultValue == "" && mtypef.Anonymous && mtypef.Type.Kind() == reflect.Struct { - v, err := d.valueFromTree(mtypef.Type, tval, nil) - if err != nil { - return v, err - } - mval.Field(i).Set(v) - } } } case reflect.Map: @@ -639,11 +434,11 @@ func (d *Decoder) valueFromTree(mtype reflect.Type, tval *Tree, mval1 *reflect.V for _, key := range tval.Keys() { // TODO: path splits key val := tval.GetPath([]string{key}) - mvalf, err := d.valueFromToml(mtype.Elem(), val, nil) + mvalf, err := d.valueFromToml(mtype.Elem(), val) if err != nil { return mval, formatError(err, tval.GetPosition(key)) } - mval.SetMapIndex(reflect.ValueOf(key).Convert(mtype.Key()), mvalf) + mval.SetMapIndex(reflect.ValueOf(key), mvalf) } } return mval, nil @@ -653,7 +448,7 @@ func (d *Decoder) valueFromTree(mtype reflect.Type, tval *Tree, mval1 *reflect.V func (d *Decoder) valueFromTreeSlice(mtype reflect.Type, tval []*Tree) (reflect.Value, error) { mval := reflect.MakeSlice(mtype, len(tval), len(tval)) for i := 0; i < len(tval); i++ { - val, err := d.valueFromTree(mtype.Elem(), tval[i], nil) + val, err := d.valueFromTree(mtype.Elem(), tval[i]) if err != nil { return mval, err } @@ -666,7 +461,7 @@ func (d *Decoder) valueFromTreeSlice(mtype reflect.Type, tval []*Tree) (reflect. func (d *Decoder) valueFromOtherSlice(mtype reflect.Type, tval []interface{}) (reflect.Value, error) { mval := reflect.MakeSlice(mtype, len(tval), len(tval)) for i := 0; i < len(tval); i++ { - val, err := d.valueFromToml(mtype.Elem(), tval[i], nil) + val, err := d.valueFromToml(mtype.Elem(), tval[i]) if err != nil { return mval, err } @@ -675,63 +470,33 @@ func (d *Decoder) valueFromOtherSlice(mtype reflect.Type, tval []interface{}) (r return mval, nil } -// Convert toml value to marshal value, using marshal type. When mval1 is non-nil -// and the given type is a struct value, merge fields into it. 
-func (d *Decoder) valueFromToml(mtype reflect.Type, tval interface{}, mval1 *reflect.Value) (reflect.Value, error) { +// Convert toml value to marshal value, using marshal type +func (d *Decoder) valueFromToml(mtype reflect.Type, tval interface{}) (reflect.Value, error) { if mtype.Kind() == reflect.Ptr { - return d.unwrapPointer(mtype, tval, mval1) + return d.unwrapPointer(mtype, tval) } - switch t := tval.(type) { + switch tval.(type) { case *Tree: - var mval11 *reflect.Value - if mtype.Kind() == reflect.Struct { - mval11 = mval1 - } - if isTree(mtype) { - return d.valueFromTree(mtype, t, mval11) + return d.valueFromTree(mtype, tval.(*Tree)) } return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to a tree", tval, tval) case []*Tree: - if isTreeSequence(mtype) { - return d.valueFromTreeSlice(mtype, t) + if isTreeSlice(mtype) { + return d.valueFromTreeSlice(mtype, tval.([]*Tree)) } return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to trees", tval, tval) case []interface{}: - if isOtherSequence(mtype) { - return d.valueFromOtherSlice(mtype, t) + if isOtherSlice(mtype) { + return d.valueFromOtherSlice(mtype, tval.([]interface{})) } return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to a slice", tval, tval) default: switch mtype.Kind() { case reflect.Bool, reflect.Struct: val := reflect.ValueOf(tval) - - switch val.Type() { - case localDateType: - localDate := val.Interface().(LocalDate) - switch mtype { - case timeType: - return reflect.ValueOf(time.Date(localDate.Year, localDate.Month, localDate.Day, 0, 0, 0, 0, time.Local)), nil - } - case localDateTimeType: - localDateTime := val.Interface().(LocalDateTime) - switch mtype { - case timeType: - return reflect.ValueOf(time.Date( - localDateTime.Date.Year, - localDateTime.Date.Month, - localDateTime.Date.Day, - localDateTime.Time.Hour, - localDateTime.Time.Minute, - localDateTime.Time.Second, - localDateTime.Time.Nanosecond, - time.Local)), nil - } - } - - // if this passes for when mtype is reflect.Struct, tval is a time.LocalTime + // if this passes for when mtype is reflect.Struct, tval is a time.Time if !val.Type().ConvertibleTo(mtype) { return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) } @@ -747,17 +512,10 @@ func (d *Decoder) valueFromToml(mtype reflect.Type, tval interface{}, mval1 *ref return val.Convert(mtype), nil case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: val := reflect.ValueOf(tval) - if mtype.Kind() == reflect.Int64 && mtype == reflect.TypeOf(time.Duration(1)) && val.Kind() == reflect.String { - d, err := time.ParseDuration(val.String()) - if err != nil { - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v. 
%s", tval, tval, mtype.String(), err) - } - return reflect.ValueOf(d), nil - } if !val.Type().ConvertibleTo(mtype) { return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) } - if reflect.Indirect(reflect.New(mtype)).OverflowInt(val.Convert(mtype).Int()) { + if reflect.Indirect(reflect.New(mtype)).OverflowInt(val.Int()) { return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String()) } @@ -767,11 +525,10 @@ func (d *Decoder) valueFromToml(mtype reflect.Type, tval interface{}, mval1 *ref if !val.Type().ConvertibleTo(mtype) { return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) } - - if val.Convert(reflect.TypeOf(int(1))).Int() < 0 { + if val.Int() < 0 { return reflect.ValueOf(nil), fmt.Errorf("%v(%T) is negative so does not fit in %v", tval, tval, mtype.String()) } - if reflect.Indirect(reflect.New(mtype)).OverflowUint(uint64(val.Convert(mtype).Uint())) { + if reflect.Indirect(reflect.New(mtype)).OverflowUint(uint64(val.Int())) { return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String()) } @@ -781,7 +538,7 @@ func (d *Decoder) valueFromToml(mtype reflect.Type, tval interface{}, mval1 *ref if !val.Type().ConvertibleTo(mtype) { return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) } - if reflect.Indirect(reflect.New(mtype)).OverflowFloat(val.Convert(mtype).Float()) { + if reflect.Indirect(reflect.New(mtype)).OverflowFloat(val.Float()) { return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String()) } @@ -792,15 +549,8 @@ func (d *Decoder) valueFromToml(mtype reflect.Type, tval interface{}, mval1 *ref } } -func (d *Decoder) unwrapPointer(mtype reflect.Type, tval interface{}, mval1 *reflect.Value) (reflect.Value, error) { - var melem *reflect.Value - - if mval1 != nil && !mval1.IsNil() && mtype.Elem().Kind() == reflect.Struct { - elem := mval1.Elem() - melem = &elem - } - - val, err := d.valueFromToml(mtype.Elem(), tval, melem) +func (d *Decoder) unwrapPointer(mtype reflect.Type, tval interface{}) (reflect.Value, error) { + val, err := d.valueFromToml(mtype.Elem(), tval) if err != nil { return reflect.ValueOf(nil), err } @@ -809,25 +559,16 @@ func (d *Decoder) unwrapPointer(mtype reflect.Type, tval interface{}, mval1 *ref return mval, nil } -func tomlOptions(vf reflect.StructField, an annotation) tomlOpts { - tag := vf.Tag.Get(an.tag) +func tomlOptions(vf reflect.StructField) tomlOpts { + tag := vf.Tag.Get("toml") parse := strings.Split(tag, ",") var comment string - if c := vf.Tag.Get(an.comment); c != "" { + if c := vf.Tag.Get("comment"); c != "" { comment = c } - commented, _ := strconv.ParseBool(vf.Tag.Get(an.commented)) - multiline, _ := strconv.ParseBool(vf.Tag.Get(an.multiline)) - defaultValue := vf.Tag.Get(tagDefault) - result := tomlOpts{ - name: vf.Name, - comment: comment, - commented: commented, - multiline: multiline, - include: true, - omitempty: false, - defaultValue: defaultValue, - } + commented, _ := strconv.ParseBool(vf.Tag.Get("commented")) + multiline, _ := strconv.ParseBool(vf.Tag.Get(tagKeyMultiline)) + result := tomlOpts{name: vf.Name, comment: comment, commented: commented, multiline: multiline, include: true, omitempty: false} if parse[0] != "" { if parse[0] == "-" && len(parse) == 1 { result.include = false diff --git a/vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_test.toml 
b/vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_test.toml deleted file mode 100644 index 792b72ed..00000000 --- a/vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_test.toml +++ /dev/null @@ -1,39 +0,0 @@ -title = "TOML Marshal Testing" - -[basic_lists] - floats = [12.3,45.6,78.9] - bools = [true,false,true] - dates = [1979-05-27T07:32:00Z,1980-05-27T07:32:00Z] - ints = [8001,8001,8002] - uints = [5002,5003] - strings = ["One","Two","Three"] - -[[subdocptrs]] - name = "Second" - -[basic_map] - one = "one" - two = "two" - -[subdoc] - - [subdoc.second] - name = "Second" - - [subdoc.first] - name = "First" - -[basic] - uint = 5001 - bool = true - float = 123.4 - float64 = 123.456782132399 - int = 5000 - string = "Bite me" - date = 1979-05-27T07:32:00Z - -[[subdoclist]] - name = "List.First" - -[[subdoclist]] - name = "List.Second" diff --git a/vendor/github.com/pelletier/go-toml/marshal_test.toml b/vendor/github.com/pelletier/go-toml/marshal_test.toml index ba5e110b..1c5f98e7 100644 --- a/vendor/github.com/pelletier/go-toml/marshal_test.toml +++ b/vendor/github.com/pelletier/go-toml/marshal_test.toml @@ -4,7 +4,6 @@ title = "TOML Marshal Testing" bool = true date = 1979-05-27T07:32:00Z float = 123.4 - float64 = 123.456782132399 int = 5000 string = "Bite me" uint = 5001 diff --git a/vendor/github.com/pelletier/go-toml/parser.go b/vendor/github.com/pelletier/go-toml/parser.go index 1b344fee..2d27599a 100644 --- a/vendor/github.com/pelletier/go-toml/parser.go +++ b/vendor/github.com/pelletier/go-toml/parser.go @@ -77,10 +77,8 @@ func (p *tomlParser) parseStart() tomlParserStateFn { return p.parseAssign case tokenEOF: return nil - case tokenError: - p.raiseError(tok, "parsing error: %s", tok.String()) default: - p.raiseError(tok, "unexpected token %s", tok.typ) + p.raiseError(tok, "unexpected token") } return nil } @@ -167,11 +165,6 @@ func (p *tomlParser) parseAssign() tomlParserStateFn { key := p.getToken() p.assume(tokenEqual) - parsedKey, err := parseKey(key.val) - if err != nil { - p.raiseError(key, "invalid key: %s", err.Error()) - } - value := p.parseRvalue() var tableKey []string if len(p.currentTable) > 0 { @@ -180,9 +173,6 @@ func (p *tomlParser) parseAssign() tomlParserStateFn { tableKey = []string{} } - prefixKey := parsedKey[0 : len(parsedKey)-1] - tableKey = append(tableKey, prefixKey...) 
- // find the table to assign, looking out for arrays of tables var targetNode *Tree switch node := p.tree.GetPath(tableKey).(type) { @@ -190,19 +180,17 @@ func (p *tomlParser) parseAssign() tomlParserStateFn { targetNode = node[len(node)-1] case *Tree: targetNode = node - case nil: - // create intermediate - if err := p.tree.createSubTree(tableKey, key.Position); err != nil { - p.raiseError(key, "could not create intermediate group: %s", err) - } - targetNode = p.tree.GetPath(tableKey).(*Tree) default: p.raiseError(key, "Unknown table type for path: %s", strings.Join(tableKey, ".")) } // assign value to the found table - keyVal := parsedKey[len(parsedKey)-1] + keyVals := []string{key.val} + if len(keyVals) != 1 { + p.raiseError(key, "Invalid key") + } + keyVal := keyVals[0] localKey := []string{keyVal} finalKey := append(tableKey, keyVal) if targetNode.GetPath(localKey) != nil { @@ -313,41 +301,7 @@ func (p *tomlParser) parseRvalue() interface{} { } return val case tokenDate: - layout := time.RFC3339Nano - if !strings.Contains(tok.val, "T") { - layout = strings.Replace(layout, "T", " ", 1) - } - val, err := time.ParseInLocation(layout, tok.val, time.UTC) - if err != nil { - p.raiseError(tok, "%s", err) - } - return val - case tokenLocalDate: - v := strings.Replace(tok.val, " ", "T", -1) - isDateTime := false - isTime := false - for _, c := range v { - if c == 'T' || c == 't' { - isDateTime = true - break - } - if c == ':' { - isTime = true - break - } - } - - var val interface{} - var err error - - if isDateTime { - val, err = ParseLocalDateTime(v) - } else if isTime { - val, err = ParseLocalTime(v) - } else { - val, err = ParseLocalDate(v) - } - + val, err := time.ParseInLocation(time.RFC3339Nano, tok.val, time.UTC) if err != nil { p.raiseError(tok, "%s", err) } @@ -384,21 +338,18 @@ Loop: case tokenRightCurlyBrace: p.getToken() break Loop - case tokenKey, tokenInteger, tokenString: + case tokenKey: if !tokenIsComma(previous) && previous != nil { p.raiseError(follow, "comma expected between fields in inline table") } key := p.getToken() p.assume(tokenEqual) - - parsedKey, err := parseKey(key.val) - if err != nil { - p.raiseError(key, "invalid key: %s", err) - } - value := p.parseRvalue() - tree.SetPath(parsedKey, value) + tree.Set(key.val, value) case tokenComma: + if previous == nil { + p.raiseError(follow, "inline table cannot start with a comma") + } if tokenIsComma(previous) { p.raiseError(follow, "need field between two commas in inline table") } diff --git a/vendor/github.com/pelletier/go-toml/test.sh b/vendor/github.com/pelletier/go-toml/test.sh new file mode 100644 index 00000000..ba6adf3f --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/test.sh @@ -0,0 +1,88 @@ +#!/bin/bash +# fail out of the script if anything here fails +set -e +set -o pipefail + +# set the path to the present working directory +export GOPATH=`pwd` + +function git_clone() { + path=$1 + branch=$2 + version=$3 + if [ ! 
-d "src/$path" ]; then + mkdir -p src/$path + git clone https://$path.git src/$path + fi + pushd src/$path + git checkout "$branch" + git reset --hard "$version" + popd +} + +# Remove potential previous runs +rm -rf src test_program_bin toml-test + +go get github.com/pelletier/go-buffruneio +go get github.com/davecgh/go-spew/spew +go get gopkg.in/yaml.v2 +go get github.com/BurntSushi/toml + +# get code for BurntSushi TOML validation +# pinning all to 'HEAD' for version 0.3.x work (TODO: pin to commit hash when tests stabilize) +git_clone github.com/BurntSushi/toml master HEAD +git_clone github.com/BurntSushi/toml-test master HEAD #was: 0.2.0 HEAD + +# build the BurntSushi test application +go build -o toml-test github.com/BurntSushi/toml-test + +# vendorize the current lib for testing +# NOTE: this basically mocks an install without having to go back out to github for code +mkdir -p src/github.com/pelletier/go-toml/cmd +mkdir -p src/github.com/pelletier/go-toml/query +cp *.go *.toml src/github.com/pelletier/go-toml +cp -R cmd/* src/github.com/pelletier/go-toml/cmd +cp -R query/* src/github.com/pelletier/go-toml/query +go build -o test_program_bin src/github.com/pelletier/go-toml/cmd/test_program.go + +# Run basic unit tests +go test github.com/pelletier/go-toml -covermode=count -coverprofile=coverage.out +go test github.com/pelletier/go-toml/cmd/tomljson +go test github.com/pelletier/go-toml/query + +# run the entire BurntSushi test suite +if [[ $# -eq 0 ]] ; then + echo "Running all BurntSushi tests" + ./toml-test ./test_program_bin | tee test_out +else + # run a specific test + test=$1 + test_path='src/github.com/BurntSushi/toml-test/tests' + valid_test="$test_path/valid/$test" + invalid_test="$test_path/invalid/$test" + + if [ -e "$valid_test.toml" ]; then + echo "Valid Test TOML for $test:" + echo "====" + cat "$valid_test.toml" + + echo "Valid Test JSON for $test:" + echo "====" + cat "$valid_test.json" + + echo "Go-TOML Output for $test:" + echo "====" + cat "$valid_test.toml" | ./test_program_bin + fi + + if [ -e "$invalid_test.toml" ]; then + echo "Invalid Test TOML for $test:" + echo "====" + cat "$invalid_test.toml" + + echo "Go-TOML Output for $test:" + echo "====" + echo "go-toml Output:" + cat "$invalid_test.toml" | ./test_program_bin + fi +fi diff --git a/vendor/github.com/pelletier/go-toml/token.go b/vendor/github.com/pelletier/go-toml/token.go index 36a3fc88..1a908134 100644 --- a/vendor/github.com/pelletier/go-toml/token.go +++ b/vendor/github.com/pelletier/go-toml/token.go @@ -2,6 +2,7 @@ package toml import ( "fmt" + "strconv" "unicode" ) @@ -34,7 +35,6 @@ const ( tokenDoubleLeftBracket tokenDoubleRightBracket tokenDate - tokenLocalDate tokenKeyGroup tokenKeyGroupArray tokenComma @@ -68,8 +68,7 @@ var tokenTypeNames = []string{ ")", "]]", "[[", - "LocalDate", - "LocalDate", + "Date", "KeyGroup", "KeyGroupArray", ",", @@ -96,6 +95,14 @@ func (tt tokenType) String() string { return "Unknown" } +func (t token) Int() int { + if result, err := strconv.Atoi(t.val); err != nil { + panic(err) + } else { + return result + } +} + func (t token) String() string { switch t.typ { case tokenEOF: diff --git a/vendor/github.com/pelletier/go-toml/toml.go b/vendor/github.com/pelletier/go-toml/toml.go index 358a9be5..98c185ad 100644 --- a/vendor/github.com/pelletier/go-toml/toml.go +++ b/vendor/github.com/pelletier/go-toml/toml.go @@ -27,13 +27,9 @@ type Tree struct { } func newTree() *Tree { - return newTreeWithPosition(Position{}) -} - -func newTreeWithPosition(pos Position) *Tree { 
return &Tree{ values: make(map[string]interface{}), - position: pos, + position: Position{}, } } @@ -198,10 +194,10 @@ func (t *Tree) SetWithOptions(key string, opts SetOptions, value interface{}) { // formatting instructions to the key, that will be reused by Marshal(). func (t *Tree) SetPathWithOptions(keys []string, opts SetOptions, value interface{}) { subtree := t - for i, intermediateKey := range keys[:len(keys)-1] { + for _, intermediateKey := range keys[:len(keys)-1] { nextTree, exists := subtree.values[intermediateKey] if !exists { - nextTree = newTreeWithPosition(Position{Line: t.position.Line + i, Col: t.position.Col}) + nextTree = newTree() subtree.values[intermediateKey] = nextTree // add new element here } switch node := nextTree.(type) { @@ -211,7 +207,7 @@ func (t *Tree) SetPathWithOptions(keys []string, opts SetOptions, value interfac // go to most recent element if len(node) == 0 { // create element if it does not exist - subtree.values[intermediateKey] = append(node, newTreeWithPosition(Position{Line: t.position.Line + i, Col: t.position.Col})) + subtree.values[intermediateKey] = append(node, newTree()) } subtree = node[len(node)-1] } @@ -219,21 +215,19 @@ func (t *Tree) SetPathWithOptions(keys []string, opts SetOptions, value interfac var toInsert interface{} - switch v := value.(type) { + switch value.(type) { case *Tree: - v.comment = opts.Comment + tt := value.(*Tree) + tt.comment = opts.Comment toInsert = value case []*Tree: toInsert = value case *tomlValue: - v.comment = opts.Comment - toInsert = v + tt := value.(*tomlValue) + tt.comment = opts.Comment + toInsert = tt default: - toInsert = &tomlValue{value: value, - comment: opts.Comment, - commented: opts.Commented, - multiline: opts.Multiline, - position: Position{Line: subtree.position.Line + len(subtree.values) + 1, Col: subtree.position.Col}} + toInsert = &tomlValue{value: value, comment: opts.Comment, commented: opts.Commented, multiline: opts.Multiline} } subtree.values[keys[len(keys)-1]] = toInsert @@ -262,35 +256,44 @@ func (t *Tree) SetPath(keys []string, value interface{}) { // SetPathWithComment is the same as SetPath, but allows you to provide comment // information to the key, that will be reused by Marshal(). func (t *Tree) SetPathWithComment(keys []string, comment string, commented bool, value interface{}) { - t.SetPathWithOptions(keys, SetOptions{Comment: comment, Commented: commented}, value) -} - -// Delete removes a key from the tree. -// Key is a dot-separated path (e.g. a.b.c). -func (t *Tree) Delete(key string) error { - keys, err := parseKey(key) - if err != nil { - return err + subtree := t + for _, intermediateKey := range keys[:len(keys)-1] { + nextTree, exists := subtree.values[intermediateKey] + if !exists { + nextTree = newTree() + subtree.values[intermediateKey] = nextTree // add new element here + } + switch node := nextTree.(type) { + case *Tree: + subtree = node + case []*Tree: + // go to most recent element + if len(node) == 0 { + // create element if it does not exist + subtree.values[intermediateKey] = append(node, newTree()) + } + subtree = node[len(node)-1] + } } - return t.DeletePath(keys) -} -// DeletePath removes a key from the tree. -// Keys is an array of path elements (e.g. {"a","b","c"}). 
-func (t *Tree) DeletePath(keys []string) error { - keyLen := len(keys) - if keyLen == 1 { - delete(t.values, keys[0]) - return nil - } - tree := t.GetPath(keys[:keyLen-1]) - item := keys[keyLen-1] - switch node := tree.(type) { + var toInsert interface{} + + switch value.(type) { case *Tree: - delete(node.values, item) - return nil + tt := value.(*Tree) + tt.comment = comment + toInsert = value + case []*Tree: + toInsert = value + case *tomlValue: + tt := value.(*tomlValue) + tt.comment = comment + toInsert = tt + default: + toInsert = &tomlValue{value: value, comment: comment, commented: commented} } - return errors.New("no such key to delete") + + subtree.values[keys[len(keys)-1]] = toInsert } // createSubTree takes a tree and a key and create the necessary intermediate @@ -302,10 +305,10 @@ func (t *Tree) DeletePath(keys []string) error { // Returns nil on success, error object on failure func (t *Tree) createSubTree(keys []string, pos Position) error { subtree := t - for i, intermediateKey := range keys { + for _, intermediateKey := range keys { nextTree, exists := subtree.values[intermediateKey] if !exists { - tree := newTreeWithPosition(Position{Line: t.position.Line + i, Col: t.position.Col}) + tree := newTree() tree.position = pos subtree.values[intermediateKey] = tree nextTree = tree @@ -334,39 +337,10 @@ func LoadBytes(b []byte) (tree *Tree, err error) { err = errors.New(r.(string)) } }() - - if len(b) >= 4 && (hasUTF32BigEndianBOM4(b) || hasUTF32LittleEndianBOM4(b)) { - b = b[4:] - } else if len(b) >= 3 && hasUTF8BOM3(b) { - b = b[3:] - } else if len(b) >= 2 && (hasUTF16BigEndianBOM2(b) || hasUTF16LittleEndianBOM2(b)) { - b = b[2:] - } - tree = parseToml(lexToml(b)) return } -func hasUTF16BigEndianBOM2(b []byte) bool { - return b[0] == 0xFE && b[1] == 0xFF -} - -func hasUTF16LittleEndianBOM2(b []byte) bool { - return b[0] == 0xFF && b[1] == 0xFE -} - -func hasUTF8BOM3(b []byte) bool { - return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF -} - -func hasUTF32BigEndianBOM4(b []byte) bool { - return b[0] == 0x00 && b[1] == 0x00 && b[2] == 0xFE && b[3] == 0xFF -} - -func hasUTF32LittleEndianBOM4(b []byte) bool { - return b[0] == 0xFF && b[1] == 0xFE && b[2] == 0x00 && b[3] == 0x00 -} - // LoadReader creates a Tree from any io.Reader. func LoadReader(reader io.Reader) (tree *Tree, err error) { inputBytes, err := ioutil.ReadAll(reader) diff --git a/vendor/github.com/pelletier/go-toml/tomltree_write.go b/vendor/github.com/pelletier/go-toml/tomltree_write.go index 43c63030..e4049e29 100644 --- a/vendor/github.com/pelletier/go-toml/tomltree_write.go +++ b/vendor/github.com/pelletier/go-toml/tomltree_write.go @@ -5,7 +5,6 @@ import ( "fmt" "io" "math" - "math/big" "reflect" "sort" "strconv" @@ -13,18 +12,6 @@ import ( "time" ) -type valueComplexity int - -const ( - valueSimple valueComplexity = iota + 1 - valueComplex -) - -type sortNode struct { - key string - complexity valueComplexity -} - // Encodes a string to a TOML-compliant multi-line string value // This function is a clone of the existing encodeTomlString function, except that whitespace characters // are preserved. Quotation marks and backslashes are also not escaped. 
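The hunk that follows reverts the float serialization in `tomlValueStringRepresentation`: the `math/big` 32-vs-64-bit probing goes away and formatting drops back to a fixed 32-bit width, while round floats still get one forced decimal. A standalone Go sketch (not part of the patch) of what the restored `strconv.FormatFloat` calls imply; the sample values are illustrative, and the float32 rounding is presumably why this same patch drops the `float64 = 123.456782132399` line from marshal_test.toml:

```go
package main

import (
	"fmt"
	"math"
	"strconv"
)

func main() {
	round := 123.0
	// A round float keeps a decimal point so that feeding the output
	// back to the parser yields a float, not a TOML integer.
	if math.Trunc(round) == round {
		fmt.Println(strconv.FormatFloat(round, 'f', 1, 32)) // "123.0"
	}

	// bitSize 32 rounds to float32 precision on output, so a float64
	// like this no longer survives a write/parse round trip.
	fmt.Println(strconv.FormatFloat(123.456782132399, 'f', -1, 32)) // "123.45678"
}
```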
@@ -107,20 +94,12 @@ func tomlValueStringRepresentation(v interface{}, indent string, arraysOneElemen case int64: return strconv.FormatInt(value, 10), nil case float64: - // Default bit length is full 64 - bits := 64 - // Float panics if nan is used - if !math.IsNaN(value) { - // if 32 bit accuracy is enough to exactly show, use 32 - _, acc := big.NewFloat(value).Float32() - if acc == big.Exact { - bits = 32 - } - } + // Ensure a round float does contain a decimal point. Otherwise feeding + // the output back to the parser would convert to an integer. if math.Trunc(value) == value { - return strings.ToLower(strconv.FormatFloat(value, 'f', 1, bits)), nil + return strings.ToLower(strconv.FormatFloat(value, 'f', 1, 32)), nil } - return strings.ToLower(strconv.FormatFloat(value, 'f', -1, bits)), nil + return strings.ToLower(strconv.FormatFloat(value, 'f', -1, 32)), nil case string: if tv.multiline { return "\"\"\"\n" + encodeMultilineTomlString(value) + "\"\"\"", nil @@ -136,12 +115,6 @@ func tomlValueStringRepresentation(v interface{}, indent string, arraysOneElemen return "false", nil case time.Time: return value.Format(time.RFC3339), nil - case LocalDate: - return value.String(), nil - case LocalDateTime: - return value.String(), nil - case LocalTime: - return value.String(), nil case nil: return "", nil } @@ -180,233 +153,117 @@ func tomlValueStringRepresentation(v interface{}, indent string, arraysOneElemen return "", fmt.Errorf("unsupported value type %T: %v", v, v) } -func getTreeArrayLine(trees []*Tree) (line int) { - // get lowest line number that is not 0 - for _, tv := range trees { - if tv.position.Line < line || line == 0 { - line = tv.position.Line - } - } - return -} - -func sortByLines(t *Tree) (vals []sortNode) { - var ( - line int - lines []int - tv *Tree - tom *tomlValue - node sortNode - ) - vals = make([]sortNode, 0) - m := make(map[int]sortNode) - - for k := range t.values { - v := t.values[k] - switch v.(type) { - case *Tree: - tv = v.(*Tree) - line = tv.position.Line - node = sortNode{key: k, complexity: valueComplex} - case []*Tree: - line = getTreeArrayLine(v.([]*Tree)) - node = sortNode{key: k, complexity: valueComplex} - default: - tom = v.(*tomlValue) - line = tom.position.Line - node = sortNode{key: k, complexity: valueSimple} - } - lines = append(lines, line) - vals = append(vals, node) - m[line] = node - } - sort.Ints(lines) - - for i, line := range lines { - vals[i] = m[line] - } - - return vals -} - -func sortAlphabetical(t *Tree) (vals []sortNode) { - var ( - node sortNode - simpVals []string - compVals []string - ) - vals = make([]sortNode, 0) - m := make(map[string]sortNode) +func (t *Tree) writeTo(w io.Writer, indent, keyspace string, bytesCount int64, arraysOneElementPerLine bool) (int64, error) { + simpleValuesKeys := make([]string, 0) + complexValuesKeys := make([]string, 0) for k := range t.values { v := t.values[k] switch v.(type) { case *Tree, []*Tree: - node = sortNode{key: k, complexity: valueComplex} - compVals = append(compVals, node.key) + complexValuesKeys = append(complexValuesKeys, k) default: - node = sortNode{key: k, complexity: valueSimple} - simpVals = append(simpVals, node.key) + simpleValuesKeys = append(simpleValuesKeys, k) } - vals = append(vals, node) - m[node.key] = node } - // Simples first to match previous implementation - sort.Strings(simpVals) - i := 0 - for _, key := range simpVals { - vals[i] = m[key] - i++ - } + sort.Strings(simpleValuesKeys) + sort.Strings(complexValuesKeys) - sort.Strings(compVals) - for _, key := range 
compVals { - vals[i] = m[key] - i++ - } + for _, k := range simpleValuesKeys { + v, ok := t.values[k].(*tomlValue) + if !ok { + return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k]) + } - return vals -} + repr, err := tomlValueStringRepresentation(v, indent, arraysOneElementPerLine) + if err != nil { + return bytesCount, err + } -func (t *Tree) writeTo(w io.Writer, indent, keyspace string, bytesCount int64, arraysOneElementPerLine bool) (int64, error) { - return t.writeToOrdered(w, indent, keyspace, bytesCount, arraysOneElementPerLine, OrderAlphabetical) -} - -func (t *Tree) writeToOrdered(w io.Writer, indent, keyspace string, bytesCount int64, arraysOneElementPerLine bool, ord marshalOrder) (int64, error) { - var orderedVals []sortNode - - switch ord { - case OrderPreserve: - orderedVals = sortByLines(t) - default: - orderedVals = sortAlphabetical(t) - } - - for _, node := range orderedVals { - switch node.complexity { - case valueComplex: - k := node.key - v := t.values[k] - - combinedKey := k - if keyspace != "" { - combinedKey = keyspace + "." + combinedKey + if v.comment != "" { + comment := strings.Replace(v.comment, "\n", "\n"+indent+"#", -1) + start := "# " + if strings.HasPrefix(comment, "#") { + start = "" } - var commented string - if t.commented { - commented = "# " + writtenBytesCountComment, errc := writeStrings(w, "\n", indent, start, comment, "\n") + bytesCount += int64(writtenBytesCountComment) + if errc != nil { + return bytesCount, errc } + } - switch node := v.(type) { - // node has to be of those two types given how keys are sorted above - case *Tree: - tv, ok := t.values[k].(*Tree) - if !ok { - return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k]) - } - if tv.comment != "" { - comment := strings.Replace(tv.comment, "\n", "\n"+indent+"#", -1) - start := "# " - if strings.HasPrefix(comment, "#") { - start = "" - } - writtenBytesCountComment, errc := writeStrings(w, "\n", indent, start, comment) - bytesCount += int64(writtenBytesCountComment) - if errc != nil { - return bytesCount, errc - } - } - writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[", combinedKey, "]\n") - bytesCount += int64(writtenBytesCount) - if err != nil { - return bytesCount, err - } - bytesCount, err = node.writeToOrdered(w, indent+" ", combinedKey, bytesCount, arraysOneElementPerLine, ord) - if err != nil { - return bytesCount, err - } - case []*Tree: - for _, subTree := range node { - writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[[", combinedKey, "]]\n") - bytesCount += int64(writtenBytesCount) - if err != nil { - return bytesCount, err - } + var commented string + if v.commented { + commented = "# " + } + writtenBytesCount, err := writeStrings(w, indent, commented, k, " = ", repr, "\n") + bytesCount += int64(writtenBytesCount) + if err != nil { + return bytesCount, err + } + } - bytesCount, err = subTree.writeToOrdered(w, indent+" ", combinedKey, bytesCount, arraysOneElementPerLine, ord) - if err != nil { - return bytesCount, err - } - } - } - default: // Simple - k := node.key - v, ok := t.values[k].(*tomlValue) + for _, k := range complexValuesKeys { + v := t.values[k] + + combinedKey := k + if keyspace != "" { + combinedKey = keyspace + "." 
+ combinedKey + } + var commented string + if t.commented { + commented = "# " + } + + switch node := v.(type) { + // node has to be of those two types given how keys are sorted above + case *Tree: + tv, ok := t.values[k].(*Tree) if !ok { return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k]) } - - repr, err := tomlValueStringRepresentation(v, indent, arraysOneElementPerLine) - if err != nil { - return bytesCount, err - } - - if v.comment != "" { - comment := strings.Replace(v.comment, "\n", "\n"+indent+"#", -1) + if tv.comment != "" { + comment := strings.Replace(tv.comment, "\n", "\n"+indent+"#", -1) start := "# " if strings.HasPrefix(comment, "#") { start = "" } - writtenBytesCountComment, errc := writeStrings(w, "\n", indent, start, comment, "\n") + writtenBytesCountComment, errc := writeStrings(w, "\n", indent, start, comment) bytesCount += int64(writtenBytesCountComment) if errc != nil { return bytesCount, errc } } - - var commented string - if v.commented { - commented = "# " - } - quotedKey := quoteKeyIfNeeded(k) - writtenBytesCount, err := writeStrings(w, indent, commented, quotedKey, " = ", repr, "\n") + writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[", combinedKey, "]\n") bytesCount += int64(writtenBytesCount) if err != nil { return bytesCount, err } + bytesCount, err = node.writeTo(w, indent+" ", combinedKey, bytesCount, arraysOneElementPerLine) + if err != nil { + return bytesCount, err + } + case []*Tree: + for _, subTree := range node { + writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[[", combinedKey, "]]\n") + bytesCount += int64(writtenBytesCount) + if err != nil { + return bytesCount, err + } + + bytesCount, err = subTree.writeTo(w, indent+" ", combinedKey, bytesCount, arraysOneElementPerLine) + if err != nil { + return bytesCount, err + } + } } } return bytesCount, nil } -// quote a key if it does not fit the bare key format (A-Za-z0-9_-) -// quoted keys use the same rules as strings -func quoteKeyIfNeeded(k string) string { - // when encoding a map with the 'quoteMapKeys' option enabled, the tree will contain - // keys that have already been quoted. - // not an ideal situation, but good enough of a stop gap. - if len(k) >= 2 && k[0] == '"' && k[len(k)-1] == '"' { - return k - } - isBare := true - for _, r := range k { - if !isValidBareChar(r) { - isBare = false - break - } - } - if isBare { - return k - } - return quoteKey(k) -} - -func quoteKey(k string) string { - return "\"" + encodeTomlString(k) + "\"" -} - func writeStrings(w io.Writer, s ...string) (int, error) { var n int for i := range s { @@ -429,11 +286,12 @@ func (t *Tree) WriteTo(w io.Writer) (int64, error) { // Output spans multiple lines, and is suitable for ingest by a TOML parser. // If the conversion cannot be performed, ToString returns a non-nil error. func (t *Tree) ToTomlString() (string, error) { - b, err := t.Marshal() + var buf bytes.Buffer + _, err := t.WriteTo(&buf) if err != nil { return "", err } - return string(b), nil + return buf.String(), nil } // String generates a human-readable representation of the current tree. 
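That closes out the go-toml changes; the viper files follow. For orientation, a minimal sketch (not part of the patch) of the Tree API as these hunks leave it, assuming the vendored signatures visible in the diff (`Load`, `SetPath`, `ToTomlString`); the TOML content and values are illustrative:

```go
package main

import (
	"fmt"

	"github.com/pelletier/go-toml"
)

func main() {
	// After this patch, LoadBytes no longer strips UTF-8/16/32 BOMs,
	// so input is expected to be plain UTF-8.
	tree, err := toml.Load("[server]\nhost = \"localhost\"\n")
	if err != nil {
		panic(err)
	}

	// SetPath creates intermediate tables via the restored newTree(),
	// which always starts from an empty Position{}.
	tree.SetPath([]string{"server", "port"}, int64(8080))

	// Delete and DeletePath are removed above, so keys can only be
	// added or overwritten through this API, not dropped.
	out, err := tree.ToTomlString()
	if err != nil {
		panic(err)
	}
	fmt.Print(out)
}
```

With the writeTo rewrite above, output ordering is plain alphabetical again, simple values before sub-tables, rather than the source-order (OrderPreserve) sorting being removed.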
diff --git a/vendor/github.com/spf13/viper/.editorconfig b/vendor/github.com/spf13/viper/.editorconfig new file mode 100644 index 00000000..63afcbcd --- /dev/null +++ b/vendor/github.com/spf13/viper/.editorconfig @@ -0,0 +1,15 @@ +root = true + +[*] +charset = utf-8 +end_of_line = lf +indent_size = 4 +indent_style = space +insert_final_newline = true +trim_trailing_whitespace = true + +[*.go] +indent_style = tab + +[{Makefile, *.mk}] +indent_style = tab diff --git a/vendor/github.com/spf13/viper/.gitignore b/vendor/github.com/spf13/viper/.gitignore index d6941f32..89625083 100644 --- a/vendor/github.com/spf13/viper/.gitignore +++ b/vendor/github.com/spf13/viper/.gitignore @@ -1,20 +1,5 @@ +/.idea/ /bin/ /build/ /var/ /vendor/ - -# IDE integration -/.vscode/* -!/.vscode/launch.json -!/.vscode/tasks.json -/.idea/* -!/.idea/codeStyles/ -!/.idea/copyright/ -!/.idea/dataSources.xml -!/.idea/*.iml -!/.idea/externalDependencies.xml -!/.idea/go.imports.xml -!/.idea/modules.xml -!/.idea/runConfigurations/ -!/.idea/scopes/ -!/.idea/sqldialects.xml diff --git a/vendor/github.com/spf13/viper/.golangci.yml b/vendor/github.com/spf13/viper/.golangci.yml index 0ea9249e..a0755ce7 100644 --- a/vendor/github.com/spf13/viper/.golangci.yml +++ b/vendor/github.com/spf13/viper/.golangci.yml @@ -21,4 +21,7 @@ linters: - scopelint - gocyclo - gocognit - - gocritic \ No newline at end of file + - gocritic + +service: + golangci-lint-version: 1.21.x diff --git a/vendor/github.com/spf13/viper/.travis.yml b/vendor/github.com/spf13/viper/.travis.yml deleted file mode 100644 index ed677bbb..00000000 --- a/vendor/github.com/spf13/viper/.travis.yml +++ /dev/null @@ -1,32 +0,0 @@ -go_import_path: github.com/spf13/viper - -language: go - -env: - global: - - GO111MODULE="on" - - GOFLAGS="-mod=readonly" - -go: - - 1.11.x - - 1.12.x - - 1.13.x - - tip - -os: - - linux - - osx - -matrix: - allow_failures: - - go: tip - fast_finish: true - -script: - - go install ./... - - diff -u <(echo -n) <(gofmt -d .) - - go test -v ./... - -after_success: - - go get -u -d github.com/spf13/hugo - - cd $GOPATH/src/github.com/spf13/hugo && make && ./hugo -s docs && cd - diff --git a/vendor/github.com/spf13/viper/README.md b/vendor/github.com/spf13/viper/README.md index a574677f..dfd8034f 100644 --- a/vendor/github.com/spf13/viper/README.md +++ b/vendor/github.com/spf13/viper/README.md @@ -1,10 +1,13 @@ -![viper logo](https://cloud.githubusercontent.com/assets/173412/10886745/998df88a-8151-11e5-9448-4736db51020d.png) +![Viper](.github/logo.png?raw=true) -Go configuration with fangs! 
+[![Mentioned in Awesome Go](https://awesome.re/mentioned-badge-flat.svg)](https://github.com/avelino/awesome-go#configuration) -[![Actions](https://github.com/spf13/viper/workflows/CI/badge.svg)](https://github.com/spf13/viper) +[![GitHub Workflow Status](https://img.shields.io/github/workflow/status/spf13/viper/CI?style=flat-square)](https://github.com/spf13/viper/actions?query=workflow%3ACI) [![Join the chat at https://gitter.im/spf13/viper](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/spf13/viper?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) -[![GoDoc](https://godoc.org/github.com/spf13/viper?status.svg)](https://godoc.org/github.com/spf13/viper) +[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/viper?style=flat-square)](https://goreportcard.com/report/github.com/spf13/viper) +[![go.dev reference](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/mod/github.com/spf13/viper) + +**Go configuration with fangs!** Many Go projects are built using Viper including: @@ -400,7 +403,7 @@ in a Key/Value store such as etcd or Consul. These values take precedence over default values, but are overridden by configuration values retrieved from disk, flags, or environment variables. -Viper uses [crypt](https://github.com/xordataexchange/crypt) to retrieve +Viper uses [crypt](https://github.com/bketelsen/crypt) to retrieve configuration from the K/V store, which means that you can store your configuration values encrypted and have them automatically decrypted if you have the correct gpg keyring. Encryption is optional. @@ -412,7 +415,7 @@ independently of it. K/V store. `crypt` defaults to etcd on http://127.0.0.1:4001. ```bash -$ go get github.com/xordataexchange/crypt/bin/crypt +$ go get github.com/bketelsen/crypt/bin/crypt $ crypt set -plaintext /config/hugo.json /Users/hugo/settings/config.json ``` @@ -435,7 +438,7 @@ err := viper.ReadRemoteConfig() ``` #### Consul -You need to set a key to Consul key/value storage with JSON value containing your desired config. +You need to set a key to Consul key/value storage with JSON value containing your desired config. For example, create a Consul key/value store key `MY_CONSUL_KEY` with value: ```json @@ -454,6 +457,16 @@ fmt.Println(viper.Get("port")) // 8080 fmt.Println(viper.Get("hostname")) // myhostname.com ``` +#### Firestore + +```go +viper.AddRemoteProvider("firestore", "google-cloud-project-id", "collection/document") +viper.SetConfigType("json") // Config's format: "json", "toml", "yaml", "yml" +err := viper.ReadRemoteConfig() +``` + +Of course, you're allowed to use `SecureRemoteProvider` also + ### Remote Key/Value Store Example - Encrypted ```go @@ -728,14 +741,14 @@ Viper uses [github.com/mitchellh/mapstructure](https://github.com/mitchellh/maps ### Marshalling to string -You may need to marshal all the settings held in viper into a string rather than write them to a file. +You may need to marshal all the settings held in viper into a string rather than write them to a file. You can use your favorite format's marshaller with the config returned by `AllSettings()`. ```go import ( yaml "gopkg.in/yaml.v2" // ... 
-) +) func yamlStringSettings() string { c := viper.AllSettings() diff --git a/vendor/github.com/spf13/viper/go.mod b/vendor/github.com/spf13/viper/go.mod index 3d51b8c0..7d108dcc 100644 --- a/vendor/github.com/spf13/viper/go.mod +++ b/vendor/github.com/spf13/viper/go.mod @@ -3,27 +3,22 @@ module github.com/spf13/viper go 1.12 require ( - github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6 // indirect + github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c github.com/coreos/bbolt v1.3.2 // indirect - github.com/coreos/etcd v3.3.13+incompatible // indirect - github.com/coreos/go-semver v0.2.0 // indirect github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e // indirect github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f // indirect github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect github.com/fsnotify/fsnotify v1.4.7 github.com/gogo/protobuf v1.2.1 // indirect github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef // indirect - github.com/google/btree v1.0.0 // indirect - github.com/gorilla/websocket v1.4.0 // indirect + github.com/gorilla/websocket v1.4.2 // indirect github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 // indirect github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect github.com/grpc-ecosystem/grpc-gateway v1.9.0 // indirect github.com/hashicorp/hcl v1.0.0 github.com/jonboulle/clockwork v0.1.0 // indirect - github.com/json-iterator/go v1.1.9 // indirect github.com/magiconair/properties v1.8.1 github.com/mitchellh/mapstructure v1.1.2 - github.com/modern-go/reflect2 v1.0.1 // indirect github.com/pelletier/go-toml v1.2.0 github.com/prometheus/client_golang v0.9.3 // indirect github.com/smartystreets/goconvey v1.6.4 // indirect @@ -36,14 +31,10 @@ require ( github.com/subosito/gotenv v1.2.0 github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 // indirect github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 // indirect - github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77 go.etcd.io/bbolt v1.3.2 // indirect go.uber.org/atomic v1.4.0 // indirect go.uber.org/multierr v1.1.0 // indirect go.uber.org/zap v1.10.0 // indirect - golang.org/x/net v0.0.0-20190522155817-f3200d17e092 // indirect - golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 // indirect - google.golang.org/grpc v1.21.0 // indirect gopkg.in/ini.v1 v1.51.0 gopkg.in/yaml.v2 v2.2.4 ) diff --git a/vendor/github.com/spf13/viper/go.sum b/vendor/github.com/spf13/viper/go.sum index 098f5d3a..463aa7db 100644 --- a/vendor/github.com/spf13/viper/go.sum +++ b/vendor/github.com/spf13/viper/go.sum @@ -1,22 +1,46 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3 h1:AVXDdKsrtX33oR9fbCMu/+c1o8Ofjq6Ku/MInaLVg5Y= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go/bigquery v1.0.1 h1:hL+ycaJpVE9M7nLoiXb/Pn10ENE2u+oddxbD8uu0ZVU= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/datastore v1.0.0 h1:Kt+gOPPp2LEPWp8CSfxhsM8ik9CcyE/gYu+0r+RnZvM= 
+cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
+cloud.google.com/go/firestore v1.1.0 h1:9x7Bx0A9R5/M9jibeJeZWqjeVEIxYW9fZYqB9a70/bY=
+cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
+cloud.google.com/go/pubsub v1.0.1 h1:W9tAK3E57P75u0XLLR82LZyw8VpAnhmyTOxW9qzmyj8=
+cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
+cloud.google.com/go/storage v1.0.0 h1:VV2nUM3wwLLGh9lSABFgZMjInyUbJeaRSE64WuAIQ+4=
+cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
+dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
 github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
 github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
 github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6 h1:G1bPvciwNyF7IUmKXNt9Ak3m6u9DE1rF+RmtIkBpVdA=
-github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
+github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
+github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da h1:8GUt8eRujhVEGZFFEjBj46YV4rDjvGrNxb0KMWYkL2I=
+github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
+github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
 github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0=
 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
+github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
+github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c h1:+0HFd5KSZ/mm3JmhmrDukiId5iR6w4+BdFtfSy4yWIc=
+github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
 github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
 github.com/coreos/bbolt v1.3.2 h1:wZwiHHUieZCquLkDL0B8UhzreNWsPHooDAG3q34zk0s=
 github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
 github.com/coreos/etcd v3.3.13+incompatible h1:8F3hqu9fGYLBifCmRCJsicFqDx/D68Rt3q1JMazcgBQ=
 github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
-github.com/coreos/go-semver v0.2.0 h1:3Jm3tLmsgAYcjC+4Up7hJrFBPr+n7rAqYeSw/SZazuY=
-github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
+github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg= @@ -27,10 +51,12 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= @@ -43,29 +69,77 @@ github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfU github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef h1:veQD95Isof8w9/WXiA+pa3tz3fJXkt5B7QaRBrM62gk= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof 
v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q= -github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 h1:Iju5GlWwrvL6UBg4zJJt3btmonfrMlCDdsejg4CZE7c= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0 h1:bM6ZAFZmc/wPFaRDi0d5L7hGEZEx/2u+Tmr2evNHDiI= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/hashicorp/consul/api v1.1.0 h1:BNQPM9ytxj6jbjjdRPioQ94T6YXriSopn0i8COv6SRA= +github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= +github.com/hashicorp/consul/sdk v0.1.1 h1:LnuDWGNsoajlhGyHJvuWW6FVqRl8JOTPqS6CPTsYjhY= +github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3 h1:zKjpN5BK/P5lMYrLmBHdBULWbJ0XpYR+7NGzqkZzoD4= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-rootcerts v1.0.0 h1:Rqb66Oo1X/eSV1x66xbDccZjhJigjg0+e82kpwzSwCI= +github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-sockaddr v1.0.0 h1:GeH6tui99pF4NJgfnhp+L6+FfobzVW3Ah46sLo0ICXs= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1 
h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/memberlist v0.1.3 h1:EmmoJme1matNzb+hMpDuR/0sbJSUisxyqBGG676r31M= +github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/serf v0.8.2 h1:YZ7UKsJv+hKjqGVUUbtE3HNj79Eln2oQ75tniF6iPt0= +github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/json-iterator/go v1.1.9 h1:9yzud/Ht36ygwatGx56VwCZtlI/2AD15T1X2sjSuGns= -github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024 h1:rBMNdlhTLzJjJSDIjNEXX1Pz3Hmwmz91v+zycvx9PJc= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= @@ -81,23 +155,39 @@ github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/miekg/dns v1.0.14 h1:9jZdLNd/P4+SfEJ0TNyxYpsK8N4GtfylBLqtbYN1sbA= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/go-homedir v1.0.0 h1:vKb8ShqSby24Yrqr/yDYkuFz8d0WUjys40rvnGC8aR0= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.0.0 
h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c h1:Lgl0gzECD8GnQ5QCWA8o6BtfL6mDH5rQgM4/fX3avOs= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3 h1:9iH4JKXLzFbOAdtqv/a+j8aewx2Y8lAjAydhbaScPF8= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= @@ -112,6 +202,10 @@ github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084 h1:sofwID9zm4tzr github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= 
+github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= @@ -141,10 +235,11 @@ github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77 h1:ESFSdwYZvkeru3RtdrYueztKhOBCSAAzS4Gf+k0tEow= -github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= go.etcd.io/bbolt v1.3.2 h1:Z/90sZLPOeCy2PwprqkFa25PdkusRzaj9P8zm/KNyvk= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= @@ -152,46 +247,132 @@ go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/ go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5 h1:58fnuSXlxZmFdJyvtTFVmVhcMLU6v5fEb/ok4wyqtNU= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136 h1:A1gGSx58LAGVHUUsOf7IiR0u8Xb6W51gRwfDBhkdcaw= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod 
h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190522155817-f3200d17e092 h1:4QSRKanuywn15aTZvI/mIDEgPQpswuFndXpOj3rKEco= -golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 
h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0 h1:HyfiK1WMnHj5FXFXatD+Qs1A/xC2Run6RzeW1SyHxpc= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod 
h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384 h1:TFlARGu6Czu1z7q93HTxcP1P+/ZFC/IKythI5RzrnRg= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc h1:NCy3Ohtk6Iny5V/reW2Ktypo4zIpWBdRJ1uFMjBxdg8= +golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0 h1:Q3Ui3V3/CVinFWFiW39Iw0kMuVrRzYX0wN6OPFp0lTA= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3I= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod 
h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a h1:Ob5/580gVHBJZgXnff1cZDbG+xLtMVE5mDRTe+nIsX4= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.21.0 h1:G+97AoqBnmZIT91cLG/EkCoK9NSelj64P8bOHHNmGn0= -google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1 h1:j6XxA85m/6txkUCHvzlV5f+HBNl/1r5cZ2A/3IEFOO8= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno= gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= @@ -200,3 +381,8 @@ gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= diff --git a/vendor/github.com/spf13/viper/viper.go b/vendor/github.com/spf13/viper/viper.go index 265eae7f..f61f4ed7 100644 --- a/vendor/github.com/spf13/viper/viper.go +++ b/vendor/github.com/spf13/viper/viper.go @@ -287,7 +287,7 @@ func NewWithOptions(opts ...Option) *Viper { func Reset() { v = New() SupportedExts = []string{"json", "toml", "yaml", "yml", "properties", "props", "prop", "hcl", "dotenv", "env", "ini"} - SupportedRemoteProviders = []string{"etcd", "consul"} + SupportedRemoteProviders = []string{"etcd", "consul", "firestore"} } type defaultRemoteProvider struct { @@ -328,7 +328,7 @@ type RemoteProvider interface { var SupportedExts = []string{"json", "toml", "yaml", "yml", "properties", "props", "prop", "hcl", "dotenv", "env", "ini"} // SupportedRemoteProviders are universally supported remote providers. 
-var SupportedRemoteProviders = []string{"etcd", "consul"} +var SupportedRemoteProviders = []string{"etcd", "consul", "firestore"} func OnConfigChange(run func(in fsnotify.Event)) { v.OnConfigChange(run) } func (v *Viper) OnConfigChange(run func(in fsnotify.Event)) { @@ -477,7 +477,7 @@ func (v *Viper) AddConfigPath(in string) { // AddRemoteProvider adds a remote configuration source. // Remote Providers are searched in the order they are added. -// provider is a string value, "etcd" or "consul" are currently supported. +// provider is a string value: "etcd", "consul" or "firestore" are currently supported. // endpoint is the url. etcd requires http://ip:port consul requires ip:port // path is the path in the k/v store to retrieve configuration // To retrieve a config file called myapp.json from /configs/myapp.json @@ -506,14 +506,14 @@ func (v *Viper) AddRemoteProvider(provider, endpoint, path string) error { // AddSecureRemoteProvider adds a remote configuration source. // Secure Remote Providers are searched in the order they are added. -// provider is a string value, "etcd" or "consul" are currently supported. +// provider is a string value: "etcd", "consul" or "firestore" are currently supported. // endpoint is the url. etcd requires http://ip:port consul requires ip:port // secretkeyring is the filepath to your openpgp secret keyring. e.g. /etc/secrets/myring.gpg // path is the path in the k/v store to retrieve configuration // To retrieve a config file called myapp.json from /configs/myapp.json // you should set path to /configs and set config name (SetConfigName()) to // "myapp" -// Secure Remote Providers are implemented with github.com/xordataexchange/crypt +// Secure Remote Providers are implemented with github.com/bketelsen/crypt func AddSecureRemoteProvider(provider, endpoint, path, secretkeyring string) error { return v.AddSecureRemoteProvider(provider, endpoint, path, secretkeyring) } @@ -996,11 +996,6 @@ func (v *Viper) BindFlagValues(flags FlagValueSet) (err error) { } // BindFlagValue binds a specific key to a FlagValue. 
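The hunks above and below extend Viper's remote configuration support: "firestore" joins "etcd" and "consul" as a provider, backed by the github.com/bketelsen/crypt fork. A minimal usage sketch, assuming a hypothetical GCP project "my-project" holding a JSON document at "configs/myapp"; the blank import of viper/remote is what registers the remote backends:

	package main

	import (
		"log"

		"github.com/spf13/viper"
		_ "github.com/spf13/viper/remote" // registers etcd/consul/firestore support
	)

	func main() {
		v := viper.New()
		v.SetConfigType("json") // format of the stored document
		// For firestore, endpoint is the GCP project id and path is the document path.
		if err := v.AddRemoteProvider("firestore", "my-project", "configs/myapp"); err != nil {
			log.Fatal(err)
		}
		if err := v.ReadRemoteConfig(); err != nil {
			log.Fatal(err)
		}
		log.Println(v.Get("port"))
	}

Two smaller changes ride along further down in this file: the new stringToStringConv helper surfaces pflag StringToString values, so a flag string such as "[a=1,b=2]" comes back from Get as map[string]interface{}{"a": "1", "b": "2"}, and writeConfig now falls back to an explicitly set config type when the target filename has no extension.
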
-// Example (where serverCmd is a Cobra instance): -// -// serverCmd.Flags().Int("port", 1138, "Port to run Application server on") -// Viper.BindFlagValue("port", serverCmd.Flags().Lookup("port")) -// func BindFlagValue(key string, flag FlagValue) error { return v.BindFlagValue(key, flag) } func (v *Viper) BindFlagValue(key string, flag FlagValue) error { if flag == nil { @@ -1088,6 +1083,8 @@ func (v *Viper) find(lcaseKey string, flagDefault bool) interface{} { s = strings.TrimSuffix(s, "]") res, _ := readAsCSV(s) return cast.ToIntSlice(res) + case "stringToString": + return stringToStringConv(flag.ValueString()) default: return flag.ValueString() } @@ -1163,6 +1160,8 @@ func (v *Viper) find(lcaseKey string, flagDefault bool) interface{} { s = strings.TrimSuffix(s, "]") res, _ := readAsCSV(s) return cast.ToIntSlice(res) + case "stringToString": + return stringToStringConv(flag.ValueString()) default: return flag.ValueString() } @@ -1182,6 +1181,30 @@ func readAsCSV(val string) ([]string, error) { return csvReader.Read() } +// mostly copied from pflag's implementation of this operation here https://github.com/spf13/pflag/blob/master/string_to_string.go#L79 +// alterations are: errors are swallowed, map[string]interface{} is returned in order to enable cast.ToStringMap +func stringToStringConv(val string) interface{} { + val = strings.Trim(val, "[]") + // An empty string would cause an empty map + if len(val) == 0 { + return map[string]interface{}{} + } + r := csv.NewReader(strings.NewReader(val)) + ss, err := r.Read() + if err != nil { + return nil + } + out := make(map[string]interface{}, len(ss)) + for _, pair := range ss { + kv := strings.SplitN(pair, "=", 2) + if len(kv) != 2 { + return nil + } + out[kv[0]] = kv[1] + } + return out +} + // IsSet checks to see if the key has been set in any of the data locations. // IsSet is case-insensitive for a key. func IsSet(key string) bool { return v.IsSet(key) } @@ -1418,11 +1441,18 @@ func (v *Viper) SafeWriteConfigAs(filename string) error { func (v *Viper) writeConfig(filename string, force bool) error { jww.INFO.Println("Attempting to write configuration to file.") + var configType string + ext := filepath.Ext(filename) - if len(ext) <= 1 { - return fmt.Errorf("filename: %s requires valid extension", filename) + if ext != "" { + configType = ext[1:] + } else { + configType = v.configType } - configType := ext[1:] + if configType == "" { + return fmt.Errorf("config type could not be determined for %s", filename) + } + if !stringInSlice(configType, SupportedExts) { return UnsupportedConfigError(configType) } @@ -1619,7 +1649,7 @@ func (v *Viper) marshalWriter(f afero.File, configType string) error { if sectionName == "default" { sectionName = "" } - cfg.Section(sectionName).Key(keyName).SetValue(Get(key).(string)) + cfg.Section(sectionName).Key(keyName).SetValue(v.Get(key).(string)) } cfg.WriteTo(f) } diff --git a/vendor/github.com/syndtr/gocapability/LICENSE b/vendor/github.com/syndtr/gocapability/LICENSE new file mode 100644 index 00000000..80dd96de --- /dev/null +++ b/vendor/github.com/syndtr/gocapability/LICENSE @@ -0,0 +1,24 @@ +Copyright 2013 Suryandaru Triandana +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/syndtr/gocapability/capability/capability.go b/vendor/github.com/syndtr/gocapability/capability/capability.go new file mode 100644 index 00000000..61a90775 --- /dev/null +++ b/vendor/github.com/syndtr/gocapability/capability/capability.go @@ -0,0 +1,133 @@ +// Copyright (c) 2013, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Package capability provides utilities for manipulating POSIX capabilities. +package capability + +type Capabilities interface { + // Get check whether a capability present in the given + // capabilities set. The 'which' value should be one of EFFECTIVE, + // PERMITTED, INHERITABLE, BOUNDING or AMBIENT. + Get(which CapType, what Cap) bool + + // Empty check whether all capability bits of the given capabilities + // set are zero. The 'which' value should be one of EFFECTIVE, + // PERMITTED, INHERITABLE, BOUNDING or AMBIENT. + Empty(which CapType) bool + + // Full check whether all capability bits of the given capabilities + // set are one. The 'which' value should be one of EFFECTIVE, + // PERMITTED, INHERITABLE, BOUNDING or AMBIENT. + Full(which CapType) bool + + // Set sets capabilities of the given capabilities sets. The + // 'which' value should be one or combination (OR'ed) of EFFECTIVE, + // PERMITTED, INHERITABLE, BOUNDING or AMBIENT. + Set(which CapType, caps ...Cap) + + // Unset unsets capabilities of the given capabilities sets. The + // 'which' value should be one or combination (OR'ed) of EFFECTIVE, + // PERMITTED, INHERITABLE, BOUNDING or AMBIENT. + Unset(which CapType, caps ...Cap) + + // Fill sets all bits of the given capabilities kind to one. The + // 'kind' value should be one or combination (OR'ed) of CAPS, + // BOUNDS or AMBS. + Fill(kind CapType) + + // Clear sets all bits of the given capabilities kind to zero. The + // 'kind' value should be one or combination (OR'ed) of CAPS, + // BOUNDS or AMBS. + Clear(kind CapType) + + // String return current capabilities state of the given capabilities + // set as string. The 'which' value should be one of EFFECTIVE, + // PERMITTED, INHERITABLE BOUNDING or AMBIENT + StringCap(which CapType) string + + // String return current capabilities state as string. + String() string + + // Load load actual capabilities value. This will overwrite all + // outstanding changes. + Load() error + + // Apply apply the capabilities settings, so all changes will take + // effect. 
+ Apply(kind CapType) error +} + +// NewPid initializes a new Capabilities object for given pid when +// it is nonzero, or for the current process if pid is 0. +// +// Deprecated: Replace with NewPid2. For example, replace: +// +// c, err := NewPid(0) +// if err != nil { +// return err +// } +// +// with: +// +// c, err := NewPid2(0) +// if err != nil { +// return err +// } +// err = c.Load() +// if err != nil { +// return err +// } +func NewPid(pid int) (Capabilities, error) { + c, err := newPid(pid) + if err != nil { + return c, err + } + err = c.Load() + return c, err +} + +// NewPid2 initializes a new Capabilities object for given pid when +// it is nonzero, or for the current process if pid is 0. This +// does not load the process's current capabilities; to do that you +// must call Load explicitly. +func NewPid2(pid int) (Capabilities, error) { + return newPid(pid) +} + +// NewFile initializes a new Capabilities object for given file path. +// +// Deprecated: Replace with NewFile2. For example, replace: +// +// c, err := NewFile(path) +// if err != nil { +// return err +// } +// +// with: +// +// c, err := NewFile2(path) +// if err != nil { +// return err +// } +// err = c.Load() +// if err != nil { +// return err +// } +func NewFile(path string) (Capabilities, error) { + c, err := newFile(path) + if err != nil { + return c, err + } + err = c.Load() + return c, err +} + +// NewFile2 creates a new initialized Capabilities object for given +// file path. This does not load the process's current capabilities; +// to do that you must call Load explicitly. +func NewFile2(path string) (Capabilities, error) { + return newFile(path) +} diff --git a/vendor/github.com/syndtr/gocapability/capability/capability_linux.go b/vendor/github.com/syndtr/gocapability/capability/capability_linux.go new file mode 100644 index 00000000..1567dc81 --- /dev/null +++ b/vendor/github.com/syndtr/gocapability/capability/capability_linux.go @@ -0,0 +1,642 @@ +// Copyright (c) 2013, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
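Taken together with the deprecation notes above, a minimal sketch of driving this vendored API, where pid 0 means the current process and, unlike NewPid, NewPid2 leaves the Load step explicit:

	package main

	import (
		"fmt"
		"log"

		"github.com/syndtr/gocapability/capability"
	)

	func main() {
		caps, err := capability.NewPid2(0) // 0 = current process; no implicit Load
		if err != nil {
			log.Fatal(err)
		}
		if err := caps.Load(); err != nil { // read the actual capability sets
			log.Fatal(err)
		}
		fmt.Println("effective:", caps.StringCap(capability.EFFECTIVE))
		fmt.Println("can bind low ports:", caps.Get(capability.EFFECTIVE, capability.CAP_NET_BIND_SERVICE))
	}
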
+ +package capability + +import ( + "bufio" + "errors" + "fmt" + "io" + "os" + "strings" + "syscall" +) + +var errUnknownVers = errors.New("unknown capability version") + +const ( + linuxCapVer1 = 0x19980330 + linuxCapVer2 = 0x20071026 + linuxCapVer3 = 0x20080522 +) + +var ( + capVers uint32 + capLastCap Cap +) + +func init() { + var hdr capHeader + capget(&hdr, nil) + capVers = hdr.version + + if initLastCap() == nil { + CAP_LAST_CAP = capLastCap + if capLastCap > 31 { + capUpperMask = (uint32(1) << (uint(capLastCap) - 31)) - 1 + } else { + capUpperMask = 0 + } + } +} + +func initLastCap() error { + if capLastCap != 0 { + return nil + } + + f, err := os.Open("/proc/sys/kernel/cap_last_cap") + if err != nil { + return err + } + defer f.Close() + + var b []byte = make([]byte, 11) + _, err = f.Read(b) + if err != nil { + return err + } + + fmt.Sscanf(string(b), "%d", &capLastCap) + + return nil +} + +func mkStringCap(c Capabilities, which CapType) (ret string) { + for i, first := Cap(0), true; i <= CAP_LAST_CAP; i++ { + if !c.Get(which, i) { + continue + } + if first { + first = false + } else { + ret += ", " + } + ret += i.String() + } + return +} + +func mkString(c Capabilities, max CapType) (ret string) { + ret = "{" + for i := CapType(1); i <= max; i <<= 1 { + ret += " " + i.String() + "=\"" + if c.Empty(i) { + ret += "empty" + } else if c.Full(i) { + ret += "full" + } else { + ret += c.StringCap(i) + } + ret += "\"" + } + ret += " }" + return +} + +func newPid(pid int) (c Capabilities, err error) { + switch capVers { + case linuxCapVer1: + p := new(capsV1) + p.hdr.version = capVers + p.hdr.pid = int32(pid) + c = p + case linuxCapVer2, linuxCapVer3: + p := new(capsV3) + p.hdr.version = capVers + p.hdr.pid = int32(pid) + c = p + default: + err = errUnknownVers + return + } + return +} + +type capsV1 struct { + hdr capHeader + data capData +} + +func (c *capsV1) Get(which CapType, what Cap) bool { + if what > 32 { + return false + } + + switch which { + case EFFECTIVE: + return (1<<uint(what))&c.data.effective != 0 + case PERMITTED: + return (1<<uint(what))&c.data.permitted != 0 + case INHERITABLE: + return (1<<uint(what))&c.data.inheritable != 0 + } + + return false +} + +func (c *capsV1) getData(which CapType) (ret uint32) { + switch which { + case EFFECTIVE: + ret = c.data.effective + case PERMITTED: + ret = c.data.permitted + case INHERITABLE: + ret = c.data.inheritable + } + return +} + +func (c *capsV1) Empty(which CapType) bool { + return c.getData(which) == 0 +} + +func (c *capsV1) Full(which CapType) bool { + return (c.getData(which) & 0x7fffffff) == 0x7fffffff +} + +func (c *capsV1) Set(which CapType, caps ...Cap) { + for _, what := range caps { + if what > 32 { + continue + } + + if which&EFFECTIVE != 0 { + c.data.effective |= 1 << uint(what) + } + if which&PERMITTED != 0 { + c.data.permitted |= 1 << uint(what) + } + if which&INHERITABLE != 0 { + c.data.inheritable |= 1 << uint(what) + } + } +} + +func (c *capsV1) Unset(which CapType, caps ...Cap) { + for _, what := range caps { + if what > 32 { + continue + } + + if which&EFFECTIVE != 0 { + c.data.effective &= ^(1 << uint(what)) + } + if which&PERMITTED != 0 { + c.data.permitted &= ^(1 << uint(what)) + } + if which&INHERITABLE != 0 { + c.data.inheritable &= ^(1 << uint(what)) + } + } +} + +func (c *capsV1) Fill(kind CapType) { + if kind&CAPS == CAPS { + c.data.effective = 0x7fffffff + c.data.permitted = 0x7fffffff + c.data.inheritable = 0 + } +} + +func (c *capsV1) Clear(kind CapType) { + if kind&CAPS == CAPS { + c.data.effective = 0 + c.data.permitted = 0 + c.data.inheritable = 0 + } +} + +func (c *capsV1) StringCap(which CapType) (ret string) { + return mkStringCap(c, which) +} + +func (c *capsV1) String() (ret string) { + return mkString(c, BOUNDING) +} + +func (c *capsV1) Load() (err error) { + return capget(&c.hdr, &c.data) +} + +func (c *capsV1) Apply(kind CapType) error { + if kind&CAPS == CAPS { + return capset(&c.hdr, &c.data) + } + return nil +} + +type capsV3 struct { + hdr capHeader + data [2]capData + bounds [2]uint32 + ambient [2]uint32 +} + +func (c *capsV3) Get(which CapType, what Cap) bool { + var i uint + if what > 31 { + i = uint(what) >> 5 + what %= 32 + } + + switch which { + case EFFECTIVE: 
+ return (1<<uint(what))&c.data[i].effective != 0 + case PERMITTED: + return (1<<uint(what))&c.data[i].permitted != 0 + case INHERITABLE: + return (1<<uint(what))&c.data[i].inheritable != 0 + case BOUNDING: + return (1<<uint(what))&c.bounds[i] != 0 + case AMBIENT: + return (1<<uint(what))&c.ambient[i] != 0 + } + + return false +} + +func (c *capsV3) getData(which CapType, dest []uint32) { + switch which { + case EFFECTIVE: + dest[0] = c.data[0].effective + dest[1] = c.data[1].effective + case PERMITTED: + dest[0] = c.data[0].permitted + dest[1] = c.data[1].permitted + case INHERITABLE: + dest[0] = c.data[0].inheritable + dest[1] = c.data[1].inheritable + case BOUNDING: + dest[0] = c.bounds[0] + dest[1] = c.bounds[1] + case AMBIENT: + dest[0] = c.ambient[0] + dest[1] = c.ambient[1] + } +} + +func (c *capsV3) Empty(which CapType) bool { + var data [2]uint32 + c.getData(which, data[:]) + return data[0] == 0 && data[1] == 0 +} + +func (c *capsV3) Full(which CapType) bool { + var data [2]uint32 + c.getData(which, data[:]) + if (data[0] & 0xffffffff) != 0xffffffff { + return false + } + return (data[1] & capUpperMask) == capUpperMask +} + +func (c *capsV3) Set(which CapType, caps ...Cap) { + for _, what := range caps { + var i uint + if what > 31 { + i = uint(what) >> 5 + what %= 32 + } + + if which&EFFECTIVE != 0 { + c.data[i].effective |= 1 << uint(what) + } + if which&PERMITTED != 0 { + c.data[i].permitted |= 1 << uint(what) + } + if which&INHERITABLE != 0 { + c.data[i].inheritable |= 1 << uint(what) + } + if which&BOUNDING != 0 { + c.bounds[i] |= 1 << uint(what) + } + if which&AMBIENT != 0 { + c.ambient[i] |= 1 << uint(what) + } + } +} + +func (c *capsV3) Unset(which CapType, caps ...Cap) { + for _, what := range caps { + var i uint + if what > 31 { + i = uint(what) >> 5 + what %= 32 + } + + if which&EFFECTIVE != 0 { + c.data[i].effective &= ^(1 << uint(what)) + } + if which&PERMITTED != 0 { + c.data[i].permitted &= ^(1 << uint(what)) + } + if which&INHERITABLE != 0 { + c.data[i].inheritable &= ^(1 << uint(what)) + } + if which&BOUNDING != 0 { + c.bounds[i] &= ^(1 << uint(what)) + } + if which&AMBIENT != 0 { + c.ambient[i] &= ^(1 << uint(what)) + } + } +} + +func (c *capsV3) Fill(kind CapType) { + if kind&CAPS == CAPS { + c.data[0].effective = 0xffffffff + c.data[0].permitted = 0xffffffff + c.data[0].inheritable = 0 + c.data[1].effective = 0xffffffff + c.data[1].permitted = 0xffffffff + c.data[1].inheritable = 0 + } + + if kind&BOUNDS == BOUNDS { + c.bounds[0] = 0xffffffff + c.bounds[1] = 0xffffffff + } + if kind&AMBS == AMBS { + c.ambient[0] = 0xffffffff + c.ambient[1] = 0xffffffff + } +} + +func (c *capsV3) Clear(kind CapType) { + if kind&CAPS == CAPS { + c.data[0].effective = 0 + c.data[0].permitted = 0 + c.data[0].inheritable = 0 + c.data[1].effective = 0 + c.data[1].permitted = 0 + c.data[1].inheritable = 0 + } + + if kind&BOUNDS == BOUNDS { + c.bounds[0] = 0 + c.bounds[1] = 0 + } + if kind&AMBS == AMBS { + c.ambient[0] = 0 + c.ambient[1] = 0 + } +} + +func (c *capsV3) StringCap(which CapType) (ret string) { + return mkStringCap(c, which) +} + +func (c *capsV3) String() (ret string) { + return mkString(c, BOUNDING) +} + +func (c *capsV3) Load() (err error) { + err = capget(&c.hdr, &c.data[0]) + if err != nil { + return + } + + var status_path string + + if c.hdr.pid == 0 { + status_path = fmt.Sprintf("/proc/self/status") + } else { + status_path = fmt.Sprintf("/proc/%d/status", c.hdr.pid) + } + + f, err := os.Open(status_path) + if err != nil { + return + } + b := bufio.NewReader(f) + for { + line, e := b.ReadString('\n') + if e != nil { + if e != io.EOF { + err = e + } + break + } + if strings.HasPrefix(line, "CapB") { + fmt.Sscanf(line[4:], "nd: %08x%08x", &c.bounds[1], &c.bounds[0]) + continue + } + if strings.HasPrefix(line, "CapA") { + fmt.Sscanf(line[4:], "mb: %08x%08x", &c.ambient[1], &c.ambient[0]) + continue + } + } + f.Close() + + return +} + +func (c *capsV3) Apply(kind CapType) (err error) { + if kind&BOUNDS == BOUNDS { + var data [2]capData + err = capget(&c.hdr, &data[0]) + if err != nil { + return + } + if (1<<uint(CAP_SETPCAP))&data[0].effective != 0 { + for i := Cap(0); i <= CAP_LAST_CAP; i++ { + if c.Get(BOUNDING, i) { + continue + } + err = prctl(syscall.PR_CAPBSET_DROP, uintptr(i), 0, 0, 0) + if err != nil { + // Ignore EINVAL since the capability may not be supported in this system. + if errno, ok := err.(syscall.Errno); ok && errno == syscall.EINVAL { + err = nil + continue + } + return + } + } + } + } + + if kind&CAPS == CAPS { + err = capset(&c.hdr, &c.data[0]) + if err != nil { + return + } + } + + if kind&AMBS == AMBS { + for i := Cap(0); i <= CAP_LAST_CAP; i++ { + action := pr_CAP_AMBIENT_LOWER + if c.Get(AMBIENT, i) { + action = pr_CAP_AMBIENT_RAISE + } + err := prctl(pr_CAP_AMBIENT, action, uintptr(i), 0, 0) + // Ignore EINVAL as not supported on kernels before 4.3 + if errno, ok := err.(syscall.Errno); ok && errno == syscall.EINVAL { + err = nil + continue + } + } + } + + return +} + +func newFile(path string) (c Capabilities, err error) { + c = &capsFile{path: path} + return +} + +type capsFile struct { + path string + data vfscapData +} + +func (c *capsFile) Get(which CapType, what Cap) bool { + var i uint + if what > 31 { + if c.data.version == 1 { + return false + } + i = uint(what) >> 5 + what %= 32 + } + + switch which { + case EFFECTIVE: + return (1<<uint(what))&c.data.effective[i] != 0 + case PERMITTED: + return (1<<uint(what))&c.data.data[i].permitted != 0 + case INHERITABLE: + return (1<<uint(what))&c.data.data[i].inheritable != 0 + } + + return false +} + +func (c *capsFile) getData(which CapType, dest []uint32) { + switch which { + case EFFECTIVE: + dest[0] = c.data.effective[0] + dest[1] = c.data.effective[1] + case PERMITTED: + dest[0] = c.data.data[0].permitted + dest[1] = c.data.data[1].permitted + case INHERITABLE: + dest[0] = c.data.data[0].inheritable + dest[1] = c.data.data[1].inheritable + } +} + +func (c *capsFile) Empty(which CapType) bool { + var data [2]uint32 + c.getData(which, data[:]) + return data[0] == 0 && data[1] == 0 +} + +func (c *capsFile) Full(which CapType) bool { + var data [2]uint32 + c.getData(which, data[:]) + if c.data.version == 0 { + return (data[0] & 0x7fffffff) == 0x7fffffff + } + if (data[0] & 0xffffffff) != 0xffffffff { + return false + } + return (data[1] & capUpperMask) == capUpperMask +} + +func (c *capsFile) Set(which CapType, caps ...Cap) { + for _, what := range caps { + var i uint + if what > 31 { + if c.data.version == 1 { + continue + } + i = uint(what) >> 5 + what %= 32 + } + + if which&EFFECTIVE != 0 { + c.data.effective[i] |= 1 << uint(what) + } + if which&PERMITTED != 0 { + c.data.data[i].permitted |= 1 << uint(what) + } + if which&INHERITABLE != 0 { + c.data.data[i].inheritable |= 1 << uint(what) + } + } +} + +func (c *capsFile) Unset(which CapType, caps ...Cap) { + for _, what := range caps { + var i uint + if what > 31 { + if c.data.version == 1 { + continue + } + i = uint(what) >> 5 + what %= 32 + } + + if which&EFFECTIVE != 0 
{ + c.data.effective[i] &= ^(1 << uint(what)) + } + if which&PERMITTED != 0 { + c.data.data[i].permitted &= ^(1 << uint(what)) + } + if which&INHERITABLE != 0 { + c.data.data[i].inheritable &= ^(1 << uint(what)) + } + } +} + +func (c *capsFile) Fill(kind CapType) { + if kind&CAPS == CAPS { + c.data.effective[0] = 0xffffffff + c.data.data[0].permitted = 0xffffffff + c.data.data[0].inheritable = 0 + if c.data.version == 2 { + c.data.effective[1] = 0xffffffff + c.data.data[1].permitted = 0xffffffff + c.data.data[1].inheritable = 0 + } + } +} + +func (c *capsFile) Clear(kind CapType) { + if kind&CAPS == CAPS { + c.data.effective[0] = 0 + c.data.data[0].permitted = 0 + c.data.data[0].inheritable = 0 + if c.data.version == 2 { + c.data.effective[1] = 0 + c.data.data[1].permitted = 0 + c.data.data[1].inheritable = 0 + } + } +} + +func (c *capsFile) StringCap(which CapType) (ret string) { + return mkStringCap(c, which) +} + +func (c *capsFile) String() (ret string) { + return mkString(c, INHERITABLE) +} + +func (c *capsFile) Load() (err error) { + return getVfsCap(c.path, &c.data) +} + +func (c *capsFile) Apply(kind CapType) (err error) { + if kind&CAPS == CAPS { + return setVfsCap(c.path, &c.data) + } + return +} diff --git a/vendor/github.com/syndtr/gocapability/capability/capability_noop.go b/vendor/github.com/syndtr/gocapability/capability/capability_noop.go new file mode 100644 index 00000000..9bb3070c --- /dev/null +++ b/vendor/github.com/syndtr/gocapability/capability/capability_noop.go @@ -0,0 +1,19 @@ +// Copyright (c) 2013, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// +build !linux + +package capability + +import "errors" + +func newPid(pid int) (Capabilities, error) { + return nil, errors.New("not supported") +} + +func newFile(path string) (Capabilities, error) { + return nil, errors.New("not supported") +} diff --git a/vendor/github.com/syndtr/gocapability/capability/enum.go b/vendor/github.com/syndtr/gocapability/capability/enum.go new file mode 100644 index 00000000..69381731 --- /dev/null +++ b/vendor/github.com/syndtr/gocapability/capability/enum.go @@ -0,0 +1,268 @@ +// Copyright (c) 2013, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package capability + +type CapType uint + +func (c CapType) String() string { + switch c { + case EFFECTIVE: + return "effective" + case PERMITTED: + return "permitted" + case INHERITABLE: + return "inheritable" + case BOUNDING: + return "bounding" + case CAPS: + return "caps" + case AMBIENT: + return "ambient" + } + return "unknown" +} + +const ( + EFFECTIVE CapType = 1 << iota + PERMITTED + INHERITABLE + BOUNDING + AMBIENT + + CAPS = EFFECTIVE | PERMITTED | INHERITABLE + BOUNDS = BOUNDING + AMBS = AMBIENT +) + +//go:generate go run enumgen/gen.go +type Cap int + +// POSIX-draft defined capabilities. +const ( + // In a system with the [_POSIX_CHOWN_RESTRICTED] option defined, this + // overrides the restriction of changing file ownership and group + // ownership. + CAP_CHOWN = Cap(0) + + // Override all DAC access, including ACL execute access if + // [_POSIX_ACL] is defined. Excluding DAC access covered by + // CAP_LINUX_IMMUTABLE. + CAP_DAC_OVERRIDE = Cap(1) + + // Overrides all DAC restrictions regarding read and search on files + // and directories, including ACL restrictions if [_POSIX_ACL] is + // defined. 
Excluding DAC access covered by CAP_LINUX_IMMUTABLE. + CAP_DAC_READ_SEARCH = Cap(2) + + // Overrides all restrictions about allowed operations on files, where + // file owner ID must be equal to the user ID, except where CAP_FSETID + // is applicable. It doesn't override MAC and DAC restrictions. + CAP_FOWNER = Cap(3) + + // Overrides the following restrictions that the effective user ID + // shall match the file owner ID when setting the S_ISUID and S_ISGID + // bits on that file; that the effective group ID (or one of the + // supplementary group IDs) shall match the file owner ID when setting + // the S_ISGID bit on that file; that the S_ISUID and S_ISGID bits are + // cleared on successful return from chown(2) (not implemented). + CAP_FSETID = Cap(4) + + // Overrides the restriction that the real or effective user ID of a + // process sending a signal must match the real or effective user ID + // of the process receiving the signal. + CAP_KILL = Cap(5) + + // Allows setgid(2) manipulation + // Allows setgroups(2) + // Allows forged gids on socket credentials passing. + CAP_SETGID = Cap(6) + + // Allows set*uid(2) manipulation (including fsuid). + // Allows forged pids on socket credentials passing. + CAP_SETUID = Cap(7) + + // Linux-specific capabilities + + // Without VFS support for capabilities: + // Transfer any capability in your permitted set to any pid, + // remove any capability in your permitted set from any pid + // With VFS support for capabilities (neither of above, but) + // Add any capability from current's capability bounding set + // to the current process' inheritable set + // Allow taking bits out of capability bounding set + // Allow modification of the securebits for a process + CAP_SETPCAP = Cap(8) + + // Allow modification of S_IMMUTABLE and S_APPEND file attributes + CAP_LINUX_IMMUTABLE = Cap(9) + + // Allows binding to TCP/UDP sockets below 1024 + // Allows binding to ATM VCIs below 32 + CAP_NET_BIND_SERVICE = Cap(10) + + // Allow broadcasting, listen to multicast + CAP_NET_BROADCAST = Cap(11) + + // Allow interface configuration + // Allow administration of IP firewall, masquerading and accounting + // Allow setting debug option on sockets + // Allow modification of routing tables + // Allow setting arbitrary process / process group ownership on + // sockets + // Allow binding to any address for transparent proxying (also via NET_RAW) + // Allow setting TOS (type of service) + // Allow setting promiscuous mode + // Allow clearing driver statistics + // Allow multicasting + // Allow read/write of device-specific registers + // Allow activation of ATM control sockets + CAP_NET_ADMIN = Cap(12) + + // Allow use of RAW sockets + // Allow use of PACKET sockets + // Allow binding to any address for transparent proxying (also via NET_ADMIN) + CAP_NET_RAW = Cap(13) + + // Allow locking of shared memory segments + // Allow mlock and mlockall (which doesn't really have anything to do + // with IPC) + CAP_IPC_LOCK = Cap(14) + + // Override IPC ownership checks + CAP_IPC_OWNER = Cap(15) + + // Insert and remove kernel modules - modify kernel without limit + CAP_SYS_MODULE = Cap(16) + + // Allow ioperm/iopl access + // Allow sending USB messages to any device via /proc/bus/usb + CAP_SYS_RAWIO = Cap(17) + + // Allow use of chroot() + CAP_SYS_CHROOT = Cap(18) + + // Allow ptrace() of any process + CAP_SYS_PTRACE = Cap(19) + + // Allow configuration of process accounting + CAP_SYS_PACCT = Cap(20) + + // Allow configuration of the secure attention key + // Allow administration 
of the random device + // Allow examination and configuration of disk quotas + // Allow setting the domainname + // Allow setting the hostname + // Allow calling bdflush() + // Allow mount() and umount(), setting up new smb connection + // Allow some autofs root ioctls + // Allow nfsservctl + // Allow VM86_REQUEST_IRQ + // Allow to read/write pci config on alpha + // Allow irix_prctl on mips (setstacksize) + // Allow flushing all cache on m68k (sys_cacheflush) + // Allow removing semaphores + // Used instead of CAP_CHOWN to "chown" IPC message queues, semaphores + // and shared memory + // Allow locking/unlocking of shared memory segment + // Allow turning swap on/off + // Allow forged pids on socket credentials passing + // Allow setting readahead and flushing buffers on block devices + // Allow setting geometry in floppy driver + // Allow turning DMA on/off in xd driver + // Allow administration of md devices (mostly the above, but some + // extra ioctls) + // Allow tuning the ide driver + // Allow access to the nvram device + // Allow administration of apm_bios, serial and bttv (TV) device + // Allow manufacturer commands in isdn CAPI support driver + // Allow reading non-standardized portions of pci configuration space + // Allow DDI debug ioctl on sbpcd driver + // Allow setting up serial ports + // Allow sending raw qic-117 commands + // Allow enabling/disabling tagged queuing on SCSI controllers and sending + // arbitrary SCSI commands + // Allow setting encryption key on loopback filesystem + // Allow setting zone reclaim policy + CAP_SYS_ADMIN = Cap(21) + + // Allow use of reboot() + CAP_SYS_BOOT = Cap(22) + + // Allow raising priority and setting priority on other (different + // UID) processes + // Allow use of FIFO and round-robin (realtime) scheduling on own + // processes and setting the scheduling algorithm used by another + // process. + // Allow setting cpu affinity on other processes + CAP_SYS_NICE = Cap(23) + + // Override resource limits. Set resource limits. + // Override quota limits. + // Override reserved space on ext2 filesystem + // Modify data journaling mode on ext3 filesystem (uses journaling + // resources) + // NOTE: ext2 honors fsuid when checking for resource overrides, so + // you can override using fsuid too + // Override size restrictions on IPC message queues + // Allow more than 64hz interrupts from the real-time clock + // Override max number of consoles on console allocation + // Override max number of keymaps + CAP_SYS_RESOURCE = Cap(24) + + // Allow manipulation of system clock + // Allow irix_stime on mips + // Allow setting the real-time clock + CAP_SYS_TIME = Cap(25) + + // Allow configuration of tty devices + // Allow vhangup() of tty + CAP_SYS_TTY_CONFIG = Cap(26) + + // Allow the privileged aspects of mknod() + CAP_MKNOD = Cap(27) + + // Allow taking of leases on files + CAP_LEASE = Cap(28) + + CAP_AUDIT_WRITE = Cap(29) + CAP_AUDIT_CONTROL = Cap(30) + CAP_SETFCAP = Cap(31) + + // Override MAC access. + // The base kernel enforces no MAC policy. + // An LSM may enforce a MAC policy, and if it does and it chooses + // to implement capability based overrides of that policy, this is + // the capability it should use to do so. + CAP_MAC_OVERRIDE = Cap(32) + + // Allow MAC configuration or state changes. + // The base kernel requires no MAC configuration. 
+ // An LSM may enforce a MAC policy, and if it does and it chooses + // to implement capability based checks on modifications to that + // policy or the data required to maintain it, this is the + // capability it should use to do so. + CAP_MAC_ADMIN = Cap(33) + + // Allow configuring the kernel's syslog (printk behaviour) + CAP_SYSLOG = Cap(34) + + // Allow triggering something that will wake the system + CAP_WAKE_ALARM = Cap(35) + + // Allow preventing system suspends + CAP_BLOCK_SUSPEND = Cap(36) + + // Allow reading audit messages from the kernel + CAP_AUDIT_READ = Cap(37) +) + +var ( + // Highest valid capability of the running kernel. + CAP_LAST_CAP = Cap(63) + + capUpperMask = ^uint32(0) +) diff --git a/vendor/github.com/syndtr/gocapability/capability/enum_gen.go b/vendor/github.com/syndtr/gocapability/capability/enum_gen.go new file mode 100644 index 00000000..b9e6d2d5 --- /dev/null +++ b/vendor/github.com/syndtr/gocapability/capability/enum_gen.go @@ -0,0 +1,129 @@ +// generated file; DO NOT EDIT - use go generate in directory with source + +package capability + +func (c Cap) String() string { + switch c { + case CAP_CHOWN: + return "chown" + case CAP_DAC_OVERRIDE: + return "dac_override" + case CAP_DAC_READ_SEARCH: + return "dac_read_search" + case CAP_FOWNER: + return "fowner" + case CAP_FSETID: + return "fsetid" + case CAP_KILL: + return "kill" + case CAP_SETGID: + return "setgid" + case CAP_SETUID: + return "setuid" + case CAP_SETPCAP: + return "setpcap" + case CAP_LINUX_IMMUTABLE: + return "linux_immutable" + case CAP_NET_BIND_SERVICE: + return "net_bind_service" + case CAP_NET_BROADCAST: + return "net_broadcast" + case CAP_NET_ADMIN: + return "net_admin" + case CAP_NET_RAW: + return "net_raw" + case CAP_IPC_LOCK: + return "ipc_lock" + case CAP_IPC_OWNER: + return "ipc_owner" + case CAP_SYS_MODULE: + return "sys_module" + case CAP_SYS_RAWIO: + return "sys_rawio" + case CAP_SYS_CHROOT: + return "sys_chroot" + case CAP_SYS_PTRACE: + return "sys_ptrace" + case CAP_SYS_PACCT: + return "sys_pacct" + case CAP_SYS_ADMIN: + return "sys_admin" + case CAP_SYS_BOOT: + return "sys_boot" + case CAP_SYS_NICE: + return "sys_nice" + case CAP_SYS_RESOURCE: + return "sys_resource" + case CAP_SYS_TIME: + return "sys_time" + case CAP_SYS_TTY_CONFIG: + return "sys_tty_config" + case CAP_MKNOD: + return "mknod" + case CAP_LEASE: + return "lease" + case CAP_AUDIT_WRITE: + return "audit_write" + case CAP_AUDIT_CONTROL: + return "audit_control" + case CAP_SETFCAP: + return "setfcap" + case CAP_MAC_OVERRIDE: + return "mac_override" + case CAP_MAC_ADMIN: + return "mac_admin" + case CAP_SYSLOG: + return "syslog" + case CAP_WAKE_ALARM: + return "wake_alarm" + case CAP_BLOCK_SUSPEND: + return "block_suspend" + case CAP_AUDIT_READ: + return "audit_read" + } + return "unknown" +} + +// List returns list of all supported capabilities +func List() []Cap { + return []Cap{ + CAP_CHOWN, + CAP_DAC_OVERRIDE, + CAP_DAC_READ_SEARCH, + CAP_FOWNER, + CAP_FSETID, + CAP_KILL, + CAP_SETGID, + CAP_SETUID, + CAP_SETPCAP, + CAP_LINUX_IMMUTABLE, + CAP_NET_BIND_SERVICE, + CAP_NET_BROADCAST, + CAP_NET_ADMIN, + CAP_NET_RAW, + CAP_IPC_LOCK, + CAP_IPC_OWNER, + CAP_SYS_MODULE, + CAP_SYS_RAWIO, + CAP_SYS_CHROOT, + CAP_SYS_PTRACE, + CAP_SYS_PACCT, + CAP_SYS_ADMIN, + CAP_SYS_BOOT, + CAP_SYS_NICE, + CAP_SYS_RESOURCE, + CAP_SYS_TIME, + CAP_SYS_TTY_CONFIG, + CAP_MKNOD, + CAP_LEASE, + CAP_AUDIT_WRITE, + CAP_AUDIT_CONTROL, + CAP_SETFCAP, + CAP_MAC_OVERRIDE, + CAP_MAC_ADMIN, + CAP_SYSLOG, + CAP_WAKE_ALARM, + CAP_BLOCK_SUSPEND, + 
CAP_AUDIT_READ, + } +} diff --git a/vendor/github.com/syndtr/gocapability/capability/syscall_linux.go b/vendor/github.com/syndtr/gocapability/capability/syscall_linux.go new file mode 100644 index 00000000..3d2bf692 --- /dev/null +++ b/vendor/github.com/syndtr/gocapability/capability/syscall_linux.go @@ -0,0 +1,154 @@ +// Copyright (c) 2013, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package capability + +import ( + "syscall" + "unsafe" +) + +type capHeader struct { + version uint32 + pid int32 +} + +type capData struct { + effective uint32 + permitted uint32 + inheritable uint32 +} + +func capget(hdr *capHeader, data *capData) (err error) { + _, _, e1 := syscall.Syscall(syscall.SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = e1 + } + return +} + +func capset(hdr *capHeader, data *capData) (err error) { + _, _, e1 := syscall.Syscall(syscall.SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = e1 + } + return +} + +// not yet in syscall +const ( + pr_CAP_AMBIENT = 47 + pr_CAP_AMBIENT_IS_SET = uintptr(1) + pr_CAP_AMBIENT_RAISE = uintptr(2) + pr_CAP_AMBIENT_LOWER = uintptr(3) + pr_CAP_AMBIENT_CLEAR_ALL = uintptr(4) +) + +func prctl(option int, arg2, arg3, arg4, arg5 uintptr) (err error) { + _, _, e1 := syscall.Syscall6(syscall.SYS_PRCTL, uintptr(option), arg2, arg3, arg4, arg5, 0) + if e1 != 0 { + err = e1 + } + return +} + +const ( + vfsXattrName = "security.capability" + + vfsCapVerMask = 0xff000000 + vfsCapVer1 = 0x01000000 + vfsCapVer2 = 0x02000000 + + vfsCapFlagMask = ^vfsCapVerMask + vfsCapFlageffective = 0x000001 + + vfscapDataSizeV1 = 4 * (1 + 2*1) + vfscapDataSizeV2 = 4 * (1 + 2*2) +) + +type vfscapData struct { + magic uint32 + data [2]struct { + permitted uint32 + inheritable uint32 + } + effective [2]uint32 + version int8 +} + +var ( + _vfsXattrName *byte +) + +func init() { + _vfsXattrName, _ = syscall.BytePtrFromString(vfsXattrName) +} + +func getVfsCap(path string, dest *vfscapData) (err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := syscall.Syscall6(syscall.SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_vfsXattrName)), uintptr(unsafe.Pointer(dest)), vfscapDataSizeV2, 0, 0) + if e1 != 0 { + if e1 == syscall.ENODATA { + dest.version = 2 + return + } + err = e1 + } + switch dest.magic & vfsCapVerMask { + case vfsCapVer1: + dest.version = 1 + if r0 != vfscapDataSizeV1 { + return syscall.EINVAL + } + dest.data[1].permitted = 0 + dest.data[1].inheritable = 0 + case vfsCapVer2: + dest.version = 2 + if r0 != vfscapDataSizeV2 { + return syscall.EINVAL + } + default: + return syscall.EINVAL + } + if dest.magic&vfsCapFlageffective != 0 { + dest.effective[0] = dest.data[0].permitted | dest.data[0].inheritable + dest.effective[1] = dest.data[1].permitted | dest.data[1].inheritable + } else { + dest.effective[0] = 0 + dest.effective[1] = 0 + } + return +} + +func setVfsCap(path string, data *vfscapData) (err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(path) + if err != nil { + return + } + var size uintptr + if data.version == 1 { + data.magic = vfsCapVer1 + size = vfscapDataSizeV1 + } else if data.version == 2 { + data.magic = vfsCapVer2 + if data.effective[0] != 0 || data.effective[1] != 0 { + data.magic |= vfsCapFlageffective + } + size = vfscapDataSizeV2 + } else { + 
return syscall.EINVAL + } + _, _, e1 := syscall.Syscall6(syscall.SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_vfsXattrName)), uintptr(unsafe.Pointer(data)), size, 0, 0) + if e1 != 0 { + err = e1 + } + return +} diff --git a/vendor/github.com/tonistiigi/fsutil/.travis.yml b/vendor/github.com/tonistiigi/fsutil/.travis.yml new file mode 100644 index 00000000..4109d9c8 --- /dev/null +++ b/vendor/github.com/tonistiigi/fsutil/.travis.yml @@ -0,0 +1,23 @@ +dist: trusty +sudo: required + +services: + - docker + +language: go + +go: + - "1.12.x" + +install: + - export GO111MODULE=on + +script: + - go build ./... + - go test -c -o test ./ + - sudo ./test -test.v + - go test -c -o test ./copy + - sudo ./test -test.v + - GOOS=darwin go build ./... + - GOOS=windows go build ./... + - GOARCH=arm GOARM=7 go build ./... diff --git a/vendor/github.com/tonistiigi/fsutil/LICENSE b/vendor/github.com/tonistiigi/fsutil/LICENSE new file mode 100644 index 00000000..7df441d9 --- /dev/null +++ b/vendor/github.com/tonistiigi/fsutil/LICENSE @@ -0,0 +1,22 @@ +MIT + +Copyright 2017 Tõnis Tiigi + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
\ No newline at end of file diff --git a/vendor/github.com/tonistiigi/fsutil/chtimes_linux.go b/vendor/github.com/tonistiigi/fsutil/chtimes_linux.go new file mode 100644 index 00000000..74f08a15 --- /dev/null +++ b/vendor/github.com/tonistiigi/fsutil/chtimes_linux.go @@ -0,0 +1,20 @@ +// +build linux + +package fsutil + +import ( + "github.com/pkg/errors" + "golang.org/x/sys/unix" +) + +func chtimes(path string, un int64) error { + var utimes [2]unix.Timespec + utimes[0] = unix.NsecToTimespec(un) + utimes[1] = utimes[0] + + if err := unix.UtimesNanoAt(unix.AT_FDCWD, path, utimes[0:], unix.AT_SYMLINK_NOFOLLOW); err != nil { + return errors.Wrap(err, "failed call to UtimesNanoAt") + } + + return nil +} diff --git a/vendor/github.com/tonistiigi/fsutil/chtimes_nolinux.go b/vendor/github.com/tonistiigi/fsutil/chtimes_nolinux.go new file mode 100644 index 00000000..cdd80ec9 --- /dev/null +++ b/vendor/github.com/tonistiigi/fsutil/chtimes_nolinux.go @@ -0,0 +1,20 @@ +// +build !linux + +package fsutil + +import ( + "os" + "time" +) + +func chtimes(path string, un int64) error { + mtime := time.Unix(0, un) + fi, err := os.Lstat(path) + if err != nil { + return err + } + if fi.Mode()&os.ModeSymlink != 0 { + return nil + } + return os.Chtimes(path, mtime, mtime) +} diff --git a/vendor/github.com/tonistiigi/fsutil/copy/copy.go b/vendor/github.com/tonistiigi/fsutil/copy/copy.go new file mode 100644 index 00000000..27b7d565 --- /dev/null +++ b/vendor/github.com/tonistiigi/fsutil/copy/copy.go @@ -0,0 +1,412 @@ +package fs + +import ( + "context" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strings" + "sync" + "time" + + "github.com/containerd/continuity/fs" + "github.com/pkg/errors" +) + +var bufferPool = &sync.Pool{ + New: func() interface{} { + buffer := make([]byte, 32*1024) + return &buffer + }, +} + +func rootPath(root, p string, followLinks bool) (string, error) { + p = filepath.Join("/", p) + if p == "/" { + return root, nil + } + if followLinks { + return fs.RootPath(root, p) + } + d, f := filepath.Split(p) + ppath, err := fs.RootPath(root, d) + if err != nil { + return "", err + } + return filepath.Join(ppath, f), nil +} + +func ResolveWildcards(root, src string, followLinks bool) ([]string, error) { + d1, d2 := splitWildcards(src) + if d2 != "" { + p, err := rootPath(root, d1, followLinks) + if err != nil { + return nil, err + } + matches, err := resolveWildcards(p, d2) + if err != nil { + return nil, err + } + for i, m := range matches { + p, err := rel(root, m) + if err != nil { + return nil, err + } + matches[i] = p + } + return matches, nil + } + return []string{d1}, nil +} + +// Copy copies files using `cp -a` semantics. +// Copy is likely unsafe to be used in non-containerized environments. +func Copy(ctx context.Context, srcRoot, src, dstRoot, dst string, opts ...Opt) error { + var ci CopyInfo + for _, o := range opts { + o(&ci) + } + ensureDstPath := dst + if d, f := filepath.Split(dst); f != "" && f != "." 
{ + ensureDstPath = d + } + if ensureDstPath != "" { + ensureDstPath, err := fs.RootPath(dstRoot, ensureDstPath) + if err != nil { + return err + } + if err := MkdirAll(ensureDstPath, 0755, ci.Chown, ci.Utime); err != nil { + return err + } + } + + dst, err := fs.RootPath(dstRoot, filepath.Clean(dst)) + if err != nil { + return err + } + + c := newCopier(ci.Chown, ci.Utime, ci.Mode, ci.XAttrErrorHandler) + srcs := []string{src} + + if ci.AllowWildcards { + matches, err := ResolveWildcards(srcRoot, src, ci.FollowLinks) + if err != nil { + return err + } + if len(matches) == 0 { + return errors.Errorf("no matches found: %s", src) + } + srcs = matches + } + + for _, src := range srcs { + srcFollowed, err := rootPath(srcRoot, src, ci.FollowLinks) + if err != nil { + return err + } + dst, err := c.prepareTargetDir(srcFollowed, src, dst, ci.CopyDirContents) + if err != nil { + return err + } + if err := c.copy(ctx, srcFollowed, dst, false); err != nil { + return err + } + } + + return nil +} + +func (c *copier) prepareTargetDir(srcFollowed, src, destPath string, copyDirContents bool) (string, error) { + fiSrc, err := os.Lstat(srcFollowed) + if err != nil { + return "", err + } + + fiDest, err := os.Stat(destPath) + if err != nil { + if !os.IsNotExist(err) { + return "", errors.Wrap(err, "failed to stat destination path") + } + } + + if (!copyDirContents && fiSrc.IsDir() && fiDest != nil) || (!fiSrc.IsDir() && fiDest != nil && fiDest.IsDir()) { + destPath = filepath.Join(destPath, filepath.Base(src)) + } + + target := filepath.Dir(destPath) + + if copyDirContents && fiSrc.IsDir() && fiDest == nil { + target = destPath + } + if err := MkdirAll(target, 0755, c.chown, c.utime); err != nil { + return "", err + } + + return destPath, nil +} + +type User struct { + Uid, Gid int +} + +type Chowner func(*User) (*User, error) + +type XAttrErrorHandler func(dst, src, xattrKey string, err error) error + +type CopyInfo struct { + Chown Chowner + Utime *time.Time + AllowWildcards bool + Mode *int + XAttrErrorHandler XAttrErrorHandler + CopyDirContents bool + FollowLinks bool +} + +type Opt func(*CopyInfo) + +func WithCopyInfo(ci CopyInfo) func(*CopyInfo) { + return func(c *CopyInfo) { + *c = ci + } +} + +func WithChown(uid, gid int) Opt { + return func(ci *CopyInfo) { + ci.Chown = func(*User) (*User, error) { + return &User{Uid: uid, Gid: gid}, nil + } + } +} + +func AllowWildcards(ci *CopyInfo) { + ci.AllowWildcards = true +} + +func WithXAttrErrorHandler(h XAttrErrorHandler) Opt { + return func(ci *CopyInfo) { + ci.XAttrErrorHandler = h + } +} + +func AllowXAttrErrors(ci *CopyInfo) { + h := func(string, string, string, error) error { + return nil + } + WithXAttrErrorHandler(h)(ci) +} + +type copier struct { + chown Chowner + utime *time.Time + mode *int + inodes map[uint64]string + xattrErrorHandler XAttrErrorHandler +} + +func newCopier(chown Chowner, tm *time.Time, mode *int, xeh XAttrErrorHandler) *copier { + if xeh == nil { + xeh = func(dst, src, key string, err error) error { + return err + } + } + return &copier{inodes: map[uint64]string{}, chown: chown, utime: tm, xattrErrorHandler: xeh, mode: mode} +} + +// dest is always clean +func (c *copier) copy(ctx context.Context, src, target string, overwriteTargetMetadata bool) error { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + fi, err := os.Lstat(src) + if err != nil { + return errors.Wrapf(err, "failed to stat %s", src) + } + + if !fi.IsDir() { + if err := ensureEmptyFileTarget(target); err != nil { + return err + } + } + +
copyFileInfo := true + + switch { + case fi.IsDir(): + if created, err := c.copyDirectory(ctx, src, target, fi, overwriteTargetMetadata); err != nil { + return err + } else if !overwriteTargetMetadata { + copyFileInfo = created + } + case (fi.Mode() & os.ModeType) == 0: + link, err := getLinkSource(target, fi, c.inodes) + if err != nil { + return errors.Wrap(err, "failed to get hardlink") + } + if link != "" { + if err := os.Link(link, target); err != nil { + return errors.Wrap(err, "failed to create hard link") + } + } else if err := copyFile(src, target); err != nil { + return errors.Wrap(err, "failed to copy files") + } + case (fi.Mode() & os.ModeSymlink) == os.ModeSymlink: + link, err := os.Readlink(src) + if err != nil { + return errors.Wrapf(err, "failed to read link: %s", src) + } + if err := os.Symlink(link, target); err != nil { + return errors.Wrapf(err, "failed to create symlink: %s", target) + } + case (fi.Mode() & os.ModeDevice) == os.ModeDevice: + if err := copyDevice(target, fi); err != nil { + return errors.Wrapf(err, "failed to create device") + } + default: + // TODO: Support pipes and sockets + // err is nil on this path, so use Errorf: wrapping a nil error would + // silently return nil and skip the unsupported entry. + return errors.Errorf("unsupported mode %s", fi.Mode()) + } + + if copyFileInfo { + if err := c.copyFileInfo(fi, target); err != nil { + return errors.Wrap(err, "failed to copy file info") + } + + if err := copyXAttrs(target, src, c.xattrErrorHandler); err != nil { + return errors.Wrap(err, "failed to copy xattrs") + } + } + return nil +} + +func (c *copier) copyDirectory(ctx context.Context, src, dst string, stat os.FileInfo, overwriteTargetMetadata bool) (bool, error) { + if !stat.IsDir() { + return false, errors.Errorf("source is not directory") + } + + created := false + + if st, err := os.Lstat(dst); err != nil { + if !os.IsNotExist(err) { + return false, err + } + created = true + if err := os.Mkdir(dst, stat.Mode()); err != nil { + return created, errors.Wrapf(err, "failed to mkdir %s", dst) + } + } else if !st.IsDir() { + return false, errors.Errorf("cannot copy to non-directory: %s", dst) + } else if overwriteTargetMetadata { + if err := os.Chmod(dst, stat.Mode()); err != nil { + return false, errors.Wrapf(err, "failed to chmod on %s", dst) + } + } + + fis, err := ioutil.ReadDir(src) + if err != nil { + return false, errors.Wrapf(err, "failed to read %s", src) + } + + for _, fi := range fis { + if err := c.copy(ctx, filepath.Join(src, fi.Name()), filepath.Join(dst, fi.Name()), true); err != nil { + return false, err + } + } + + return created, nil +} + +func ensureEmptyFileTarget(dst string) error { + fi, err := os.Lstat(dst) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return errors.Wrap(err, "failed to lstat file target") + } + if fi.IsDir() { + return errors.Errorf("cannot replace directory %s with file", dst) + } + return os.Remove(dst) +} + +func containsWildcards(name string) bool { + isWindows := runtime.GOOS == "windows" + for i := 0; i < len(name); i++ { + ch := name[i] + if ch == '\\' && !isWindows { + i++ + } else if ch == '*' || ch == '?' || ch == '[' { + return true + } + } + return false +} + +func splitWildcards(p string) (d1, d2 string) { + parts := strings.Split(filepath.Join(p), string(filepath.Separator)) + var p1, p2 []string + var found bool + for _, p := range parts { + if !found && containsWildcards(p) { + found = true + } + if p == "" { + p = "/" + } + if !found { + p1 = append(p1, p) + } else { + p2 = append(p2, p) + } + } + return filepath.Join(p1...), filepath.Join(p2...)
+} + +func resolveWildcards(basePath, comp string) ([]string, error) { + var out []string + err := filepath.Walk(basePath, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + rel, err := rel(basePath, path) + if err != nil { + return err + } + if rel == "." { + return nil + } + if match, _ := filepath.Match(comp, rel); !match { + return nil + } + out = append(out, path) + if info.IsDir() { + return filepath.SkipDir + } + return nil + }) + if err != nil { + return nil, err + } + return out, nil +} + +// rel makes a path relative to base path. Same as `filepath.Rel` but can also +// handle UUID paths in windows. +func rel(basepath, targpath string) (string, error) { + // filepath.Rel can't handle UUID paths in windows + if runtime.GOOS == "windows" { + pfx := basepath + `\` + if strings.HasPrefix(targpath, pfx) { + p := strings.TrimPrefix(targpath, pfx) + if p == "" { + p = "." + } + return p, nil + } + } + return filepath.Rel(basepath, targpath) +} diff --git a/vendor/github.com/tonistiigi/fsutil/copy/copy_darwin.go b/vendor/github.com/tonistiigi/fsutil/copy/copy_darwin.go new file mode 100644 index 00000000..2882dfda --- /dev/null +++ b/vendor/github.com/tonistiigi/fsutil/copy/copy_darwin.go @@ -0,0 +1,84 @@ +// +build darwin + +package fs + +import ( + "io" + "math" + "os" + "syscall" + + "github.com/pkg/errors" + "golang.org/x/sys/unix" +) + +func copyFileContent(dst, src *os.File) error { + st, err := src.Stat() + if err != nil { + return errors.Wrap(err, "unable to stat source") + } + + size := st.Size() + var written int64 + first := true + + for written < size { + // Copy at most math.MaxInt32 bytes per syscall so the size argument + // never overflows. + var desired int + if size-written > math.MaxInt32 { + desired = int(math.MaxInt32) + } else { + desired = int(size - written) + } + + n, err := unix.CopyFileRange(int(src.Fd()), nil, int(dst.Fd()), nil, desired, 0) + if err != nil { + if (err != unix.ENOSYS && err != unix.EXDEV && err != unix.EPERM) || !first { + return errors.Wrap(err, "copy file range failed") + } + + buf := bufferPool.Get().(*[]byte) + _, err = io.CopyBuffer(dst, src, *buf) + bufferPool.Put(buf) + return errors.Wrap(err, "userspace copy failed") + } + + first = false + written += int64(n) + } + return nil +} + +func copyDevice(dst string, fi os.FileInfo) error { + st, ok := fi.Sys().(*syscall.Stat_t) + if !ok { + return errors.New("unsupported stat type") + } + return unix.Mknod(dst, uint32(fi.Mode()), int(st.Rdev)) +} diff --git a/vendor/github.com/tonistiigi/fsutil/copy/copy_nowindows.go b/vendor/github.com/tonistiigi/fsutil/copy/copy_nowindows.go new file mode 100644 index 00000000..cbd784e5 --- /dev/null +++ b/vendor/github.com/tonistiigi/fsutil/copy/copy_nowindows.go @@ -0,0 +1,28 @@ +// +build !windows + +package fs + +import ( + "github.com/pkg/errors" + + "github.com/containerd/continuity/sysx" +) + +// copyXAttrs requires xeh to be non-nil +func copyXAttrs(dst, src string, xeh XAttrErrorHandler) error { + xattrKeys, err := sysx.LListxattr(src) + if err != nil { + return xeh(dst, src, "", errors.Wrapf(err, "failed to list xattrs on %s", src)) + } + for _, xattr := range xattrKeys { + data, err := sysx.LGetxattr(src, xattr) + if err != nil { + return xeh(dst, src, xattr, errors.Wrapf(err, "failed to get xattr %q on %s", xattr, src)) + } + if err := sysx.LSetxattr(dst, xattr, data, 0); err != nil { + return xeh(dst, src, xattr, errors.Wrapf(err, "failed to set xattr %q on %s", xattr, dst)) + } + } + + return nil +} diff --git a/vendor/github.com/tonistiigi/fsutil/copy/copy_unix.go b/vendor/github.com/tonistiigi/fsutil/copy/copy_unix.go new file mode 100644 index 00000000..e80ee789 --- /dev/null +++ b/vendor/github.com/tonistiigi/fsutil/copy/copy_unix.go @@ -0,0 +1,61 @@ +// +build solaris darwin freebsd + +package fs + +import ( + "os" + "syscall" + + 
"github.com/pkg/errors" + "golang.org/x/sys/unix" +) + +func getUidGid(fi os.FileInfo) (uid, gid int) { + st := fi.Sys().(*syscall.Stat_t) + return int(st.Uid), int(st.Gid) +} + +func (c *copier) copyFileInfo(fi os.FileInfo, name string) error { + st := fi.Sys().(*syscall.Stat_t) + chown := c.chown + uid, gid := getUidGid(fi) + old := &User{Uid: uid, Gid: gid} + if chown == nil { + chown = func(u *User) (*User, error) { + return u, nil + } + } + if err := Chown(name, old, chown); err != nil { + return errors.Wrapf(err, "failed to chown %s", name) + } + + m := fi.Mode() + if c.mode != nil { + m = (m & ^os.FileMode(0777)) | os.FileMode(*c.mode&0777) + } + if (fi.Mode() & os.ModeSymlink) != os.ModeSymlink { + if err := os.Chmod(name, m); err != nil { + return errors.Wrapf(err, "failed to chmod %s", name) + } + } + + if c.utime != nil { + if err := Utimes(name, c.utime); err != nil { + return err + } + } else { + timespec := []unix.Timespec{unix.Timespec(StatAtime(st)), unix.Timespec(StatMtime(st))} + if err := unix.UtimesNanoAt(unix.AT_FDCWD, name, timespec, unix.AT_SYMLINK_NOFOLLOW); err != nil { + return errors.Wrapf(err, "failed to utime %s", name) + } + } + return nil +} + +func copyDevice(dst string, fi os.FileInfo) error { + st, ok := fi.Sys().(*syscall.Stat_t) + if !ok { + return errors.New("unsupported stat type") + } + return unix.Mknod(dst, uint32(fi.Mode()), int(st.Rdev)) +} diff --git a/vendor/github.com/tonistiigi/fsutil/copy/copy_windows.go b/vendor/github.com/tonistiigi/fsutil/copy/copy_windows.go new file mode 100644 index 00000000..330c0e3f --- /dev/null +++ b/vendor/github.com/tonistiigi/fsutil/copy/copy_windows.go @@ -0,0 +1,48 @@ +package fs + +import ( + "io" + "os" + + "github.com/pkg/errors" +) + +func (c *copier) copyFileInfo(fi os.FileInfo, name string) error { + if err := os.Chmod(name, fi.Mode()); err != nil { + return errors.Wrapf(err, "failed to chmod %s", name) + } + + // TODO: copy windows specific metadata + + return nil +} + +func copyFile(source, target string) error { + src, err := os.Open(source) + if err != nil { + return errors.Wrapf(err, "failed to open source %s", source) + } + defer src.Close() + tgt, err := os.Create(target) + if err != nil { + return errors.Wrapf(err, "failed to open target %s", target) + } + defer tgt.Close() + + return copyFileContent(tgt, src) +} + +func copyFileContent(dst, src *os.File) error { + buf := bufferPool.Get().(*[]byte) + _, err := io.CopyBuffer(dst, src, *buf) + bufferPool.Put(buf) + return err +} + +func copyXAttrs(dst, src string, xeh XAttrErrorHandler) error { + return nil +} + +func copyDevice(dst string, fi os.FileInfo) error { + return errors.New("device copy not supported") +} diff --git a/vendor/github.com/tonistiigi/fsutil/copy/hardlink.go b/vendor/github.com/tonistiigi/fsutil/copy/hardlink.go new file mode 100644 index 00000000..38da9381 --- /dev/null +++ b/vendor/github.com/tonistiigi/fsutil/copy/hardlink.go @@ -0,0 +1,27 @@ +package fs + +import "os" + +// GetLinkInfo returns an identifier representing the node a hardlink is pointing +// to. If the file is not hard linked then 0 will be returned. +func GetLinkInfo(fi os.FileInfo) (uint64, bool) { + return getLinkInfo(fi) +} + +// getLinkSource returns a path for the given name and +// file info to its link source in the provided inode +// map. If the given file name is not in the map and +// has other links, it is added to the inode map +// to be a source for other link locations. 
+func getLinkSource(name string, fi os.FileInfo, inodes map[uint64]string) (string, error) { + inode, isHardlink := getLinkInfo(fi) + if !isHardlink { + return "", nil + } + + path, ok := inodes[inode] + if !ok { + inodes[inode] = name + } + return path, nil +} diff --git a/vendor/github.com/tonistiigi/fsutil/copy/hardlink_unix.go b/vendor/github.com/tonistiigi/fsutil/copy/hardlink_unix.go new file mode 100644 index 00000000..3b825c94 --- /dev/null +++ b/vendor/github.com/tonistiigi/fsutil/copy/hardlink_unix.go @@ -0,0 +1,17 @@ +// +build !windows + +package fs + +import ( + "os" + "syscall" +) + +func getLinkInfo(fi os.FileInfo) (uint64, bool) { + s, ok := fi.Sys().(*syscall.Stat_t) + if !ok { + return 0, false + } + + return uint64(s.Ino), !fi.IsDir() && s.Nlink > 1 +} diff --git a/vendor/github.com/tonistiigi/fsutil/copy/hardlink_windows.go b/vendor/github.com/tonistiigi/fsutil/copy/hardlink_windows.go new file mode 100644 index 00000000..ad8845a7 --- /dev/null +++ b/vendor/github.com/tonistiigi/fsutil/copy/hardlink_windows.go @@ -0,0 +1,7 @@ +package fs + +import "os" + +func getLinkInfo(fi os.FileInfo) (uint64, bool) { + return 0, false +} diff --git a/vendor/github.com/tonistiigi/fsutil/copy/mkdir.go b/vendor/github.com/tonistiigi/fsutil/copy/mkdir.go new file mode 100644 index 00000000..b5eeb90d --- /dev/null +++ b/vendor/github.com/tonistiigi/fsutil/copy/mkdir.go @@ -0,0 +1,83 @@ +package fs + +import ( + "os" + "syscall" + "time" + + "github.com/pkg/errors" +) + +func Chown(p string, old *User, fn Chowner) error { + if fn == nil { + return nil + } + user, err := fn(old) + if err != nil { + return errors.WithStack(err) + } + if user != nil { + if err := os.Lchown(p, user.Uid, user.Gid); err != nil { + return err + } + } + return nil +} + +// MkdirAll is forked os.MkdirAll +func MkdirAll(path string, perm os.FileMode, user Chowner, tm *time.Time) error { + // Fast path: if we can tell whether path is a directory or file, stop with success or error. + dir, err := os.Stat(path) + if err == nil { + if dir.IsDir() { + return nil + } + return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR} + } + + // Slow path: make sure parent exists and then call Mkdir for path. + i := len(path) + for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator. + i-- + } + + j := i + for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element. + j-- + } + + if j > 1 { + // Create parent. + err = MkdirAll(fixRootDirectory(path[:j-1]), perm, user, tm) + if err != nil { + return err + } + } + + dir, err1 := os.Lstat(path) + if err1 == nil && dir.IsDir() { + return nil + } + + // Parent now exists; invoke Mkdir and use its result. + err = os.Mkdir(path, perm) + if err != nil { + // Handle arguments like "foo/." by + // double-checking that directory doesn't exist. 
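+ // (Mkdir can also fail with EEXIST when a concurrent MkdirAll wins the + // race; in either case an existing directory counts as success.)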
+ dir, err1 := os.Lstat(path) + if err1 == nil && dir.IsDir() { + return nil + } + return err + } + + if err := Chown(path, nil, user); err != nil { + return err + } + + if err := Utimes(path, tm); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/tonistiigi/fsutil/copy/mkdir_unix.go b/vendor/github.com/tonistiigi/fsutil/copy/mkdir_unix.go new file mode 100644 index 00000000..8fb0f6bc --- /dev/null +++ b/vendor/github.com/tonistiigi/fsutil/copy/mkdir_unix.go @@ -0,0 +1,32 @@ +// +build !windows + +package fs + +import ( + "time" + + "github.com/pkg/errors" + "golang.org/x/sys/unix" +) + +func fixRootDirectory(p string) string { + return p +} + +func Utimes(p string, tm *time.Time) error { + if tm == nil { + return nil + } + + ts, err := unix.TimeToTimespec(*tm) + if err != nil { + return err + } + + timespec := []unix.Timespec{ts, ts} + if err := unix.UtimesNanoAt(unix.AT_FDCWD, p, timespec, unix.AT_SYMLINK_NOFOLLOW); err != nil { + return errors.Wrapf(err, "failed to utime %s", p) + } + + return nil +} diff --git a/vendor/github.com/tonistiigi/fsutil/copy/mkdir_windows.go b/vendor/github.com/tonistiigi/fsutil/copy/mkdir_windows.go new file mode 100644 index 00000000..6bd17e81 --- /dev/null +++ b/vendor/github.com/tonistiigi/fsutil/copy/mkdir_windows.go @@ -0,0 +1,21 @@ +// +build windows + +package fs + +import ( + "os" + "time" +) + +func fixRootDirectory(p string) string { + if len(p) == len(`\\?\c:`) { + if os.IsPathSeparator(p[0]) && os.IsPathSeparator(p[1]) && p[2] == '?' && os.IsPathSeparator(p[3]) && p[5] == ':' { + return p + `\` + } + } + return p +} + +func Utimes(p string, tm *time.Time) error { + return nil +} diff --git a/vendor/github.com/tonistiigi/fsutil/copy/stat_bsd.go b/vendor/github.com/tonistiigi/fsutil/copy/stat_bsd.go new file mode 100644 index 00000000..362142de --- /dev/null +++ b/vendor/github.com/tonistiigi/fsutil/copy/stat_bsd.go @@ -0,0 +1,17 @@ +// +build darwin freebsd netbsd openbsd + +package fs + +import ( + "syscall" +) + +// Returns the last-accessed time +func StatAtime(st *syscall.Stat_t) syscall.Timespec { + return st.Atimespec +} + +// Returns the last-modified time +func StatMtime(st *syscall.Stat_t) syscall.Timespec { + return st.Mtimespec +} diff --git a/vendor/github.com/tonistiigi/fsutil/copy/stat_sysv.go b/vendor/github.com/tonistiigi/fsutil/copy/stat_sysv.go new file mode 100644 index 00000000..59accf05 --- /dev/null +++ b/vendor/github.com/tonistiigi/fsutil/copy/stat_sysv.go @@ -0,0 +1,17 @@ +// +build dragonfly linux solaris + +package fs + +import ( + "syscall" +) + +// Returns the last-accessed time +func StatAtime(st *syscall.Stat_t) syscall.Timespec { + return st.Atim +} + +// Returns the last-modified time +func StatMtime(st *syscall.Stat_t) syscall.Timespec { + return st.Mtim +} diff --git a/vendor/github.com/tonistiigi/fsutil/diff.go b/vendor/github.com/tonistiigi/fsutil/diff.go new file mode 100644 index 00000000..1cbc32b3 --- /dev/null +++ b/vendor/github.com/tonistiigi/fsutil/diff.go @@ -0,0 +1,51 @@ +package fsutil + +import ( + "context" + "hash" + "os" + + "github.com/pkg/errors" + "github.com/tonistiigi/fsutil/types" +) + +type walkerFn func(ctx context.Context, pathC chan<- *currentPath) error + +func Changes(ctx context.Context, a, b walkerFn, changeFn ChangeFunc) error { + return nil +} + +type HandleChangeFn func(ChangeKind, string, os.FileInfo, error) error + +type ContentHasher func(*types.Stat) (hash.Hash, error) + +func GetWalkerFn(root string) walkerFn { + return func(ctx context.Context, 
pathC chan<- *currentPath) error { + return Walk(ctx, root, nil, func(path string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + stat, ok := f.Sys().(*types.Stat) + if !ok { + return errors.Errorf("%T invalid file without stat information", f.Sys()) + } + + p := &currentPath{ + path: path, + stat: stat, + } + + select { + case <-ctx.Done(): + return ctx.Err() + case pathC <- p: + return nil + } + }) + } +} + +func emptyWalker(ctx context.Context, pathC chan<- *currentPath) error { + return nil +} diff --git a/vendor/github.com/tonistiigi/fsutil/diff_containerd.go b/vendor/github.com/tonistiigi/fsutil/diff_containerd.go new file mode 100644 index 00000000..37027152 --- /dev/null +++ b/vendor/github.com/tonistiigi/fsutil/diff_containerd.go @@ -0,0 +1,197 @@ +package fsutil + +import ( + "context" + "os" + "strings" + + "github.com/tonistiigi/fsutil/types" + "golang.org/x/sync/errgroup" +) + +// Everything below is copied from containerd/fs. TODO: remove duplication @dmcgowan + +// Const redefined because containerd/fs doesn't build on !linux + +// ChangeKind is the type of modification that +// a change is making. +type ChangeKind int + +const ( + // ChangeKindAdd represents an addition of + // a file + ChangeKindAdd ChangeKind = iota + + // ChangeKindModify represents a change to + // an existing file + ChangeKindModify + + // ChangeKindDelete represents a delete of + // a file + ChangeKindDelete +) + +// ChangeFunc is the type of function called for each change +// computed during a directory changes calculation. +type ChangeFunc func(ChangeKind, string, os.FileInfo, error) error + +type currentPath struct { + path string + stat *types.Stat + // fullPath string +} + +// doubleWalkDiff walks both directories to create a diff +func doubleWalkDiff(ctx context.Context, changeFn ChangeFunc, a, b walkerFn, filter FilterFunc) (err error) { + g, ctx := errgroup.WithContext(ctx) + + var ( + c1 = make(chan *currentPath, 128) + c2 = make(chan *currentPath, 128) + + f1, f2 *currentPath + rmdir string + ) + g.Go(func() error { + defer close(c1) + return a(ctx, c1) + }) + g.Go(func() error { + defer close(c2) + return b(ctx, c2) + }) + g.Go(func() error { + loop0: + for c1 != nil || c2 != nil { + if f1 == nil && c1 != nil { + f1, err = nextPath(ctx, c1) + if err != nil { + return err + } + if f1 == nil { + c1 = nil + } + } + + if f2 == nil && c2 != nil { + f2, err = nextPath(ctx, c2) + if err != nil { + return err + } + if f2 == nil { + c2 = nil + } + } + if f1 == nil && f2 == nil { + continue + } + + var f *types.Stat + var f2copy *currentPath + if f2 != nil { + statCopy := *f2.stat + if filter != nil { + filter(f2.path, &statCopy) + } + f2copy = &currentPath{path: f2.path, stat: &statCopy} + } + k, p := pathChange(f1, f2copy) + switch k { + case ChangeKindAdd: + if rmdir != "" { + rmdir = "" + } + f = f2.stat + f2 = nil + case ChangeKindDelete: + // Check if this file is already removed by being + // under of a removed directory + if rmdir != "" && strings.HasPrefix(f1.path, rmdir) { + f1 = nil + continue + } else if rmdir == "" && f1.stat.IsDir() { + rmdir = f1.path + string(os.PathSeparator) + } else if rmdir != "" { + rmdir = "" + } + f1 = nil + case ChangeKindModify: + same, err := sameFile(f1, f2copy) + if err != nil { + return err + } + if f1.stat.IsDir() && !f2copy.stat.IsDir() { + rmdir = f1.path + string(os.PathSeparator) + } else if rmdir != "" { + rmdir = "" + } + f = f2.stat + f1 = nil + f2 = nil + if same { + continue loop0 + } + } + if err := changeFn(k, p, &StatInfo{f},
nil); err != nil { + return err + } + } + return nil + }) + + return g.Wait() +} + +func pathChange(lower, upper *currentPath) (ChangeKind, string) { + if lower == nil { + if upper == nil { + panic("cannot compare nil paths") + } + return ChangeKindAdd, upper.path + } + if upper == nil { + return ChangeKindDelete, lower.path + } + + switch i := ComparePath(lower.path, upper.path); { + case i < 0: + // File in lower that is not in upper + return ChangeKindDelete, lower.path + case i > 0: + // File in upper that is not in lower + return ChangeKindAdd, upper.path + default: + return ChangeKindModify, upper.path + } +} + +func sameFile(f1, f2 *currentPath) (same bool, retErr error) { + // If not a directory also check size, modtime, and content + if !f1.stat.IsDir() { + if f1.stat.Size_ != f2.stat.Size_ { + return false, nil + } + + if f1.stat.ModTime != f2.stat.ModTime { + return false, nil + } + } + + return compareStat(f1.stat, f2.stat) +} + +// compareStat returns whether the stats are equivalent, +// whether the files are considered the same file, and +// an error +func compareStat(ls1, ls2 *types.Stat) (bool, error) { + return ls1.Mode == ls2.Mode && ls1.Uid == ls2.Uid && ls1.Gid == ls2.Gid && ls1.Devmajor == ls2.Devmajor && ls1.Devminor == ls2.Devminor && ls1.Linkname == ls2.Linkname, nil +} + +func nextPath(ctx context.Context, pathC <-chan *currentPath) (*currentPath, error) { + select { + case <-ctx.Done(): + return nil, ctx.Err() + case p := <-pathC: + return p, nil + } +} diff --git a/vendor/github.com/tonistiigi/fsutil/diff_containerd_linux.go b/vendor/github.com/tonistiigi/fsutil/diff_containerd_linux.go new file mode 100644 index 00000000..4ac7ec5e --- /dev/null +++ b/vendor/github.com/tonistiigi/fsutil/diff_containerd_linux.go @@ -0,0 +1,37 @@ +package fsutil + +import ( + "bytes" + "syscall" + + "github.com/containerd/continuity/sysx" + "github.com/pkg/errors" +) + +// compareSysStat returns whether the stats are equivalent, +// whether the files are considered the same file, and +// an error +func compareSysStat(s1, s2 interface{}) (bool, error) { + ls1, ok := s1.(*syscall.Stat_t) + if !ok { + return false, nil + } + ls2, ok := s2.(*syscall.Stat_t) + if !ok { + return false, nil + } + + return ls1.Mode == ls2.Mode && ls1.Uid == ls2.Uid && ls1.Gid == ls2.Gid && ls1.Rdev == ls2.Rdev, nil +} + +func compareCapabilities(p1, p2 string) (bool, error) { + c1, err := sysx.LGetxattr(p1, "security.capability") + if err != nil && err != syscall.ENODATA { + return false, errors.Wrapf(err, "failed to get xattr for %s", p1) + } + c2, err := sysx.LGetxattr(p2, "security.capability") + if err != nil && err != syscall.ENODATA { + return false, errors.Wrapf(err, "failed to get xattr for %s", p2) + } + return bytes.Equal(c1, c2), nil +} diff --git a/vendor/github.com/tonistiigi/fsutil/diskwriter.go b/vendor/github.com/tonistiigi/fsutil/diskwriter.go new file mode 100644 index 00000000..70323c88 --- /dev/null +++ b/vendor/github.com/tonistiigi/fsutil/diskwriter.go @@ -0,0 +1,352 @@ +package fsutil + +import ( + "context" + "hash" + "io" + "os" + "path/filepath" + "strconv" + "sync" + "time" + + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/tonistiigi/fsutil/types" + "golang.org/x/sync/errgroup" +) + +type WriteToFunc func(context.Context, string, io.WriteCloser) error + +type DiskWriterOpt struct { + AsyncDataCb WriteToFunc + SyncDataCb WriteToFunc + NotifyCb func(ChangeKind, string, os.FileInfo, error) error + ContentHasher ContentHasher + Filter FilterFunc +} + 
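+// FilterFunc can mutate the *types.Stat for a path before it is applied; +// DiskWriter.HandleChange below skips any entry for which it returns false, +// while the differ only uses it to rewrite stats before comparison.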
+type FilterFunc func(string, *types.Stat) bool + +type DiskWriter struct { + opt DiskWriterOpt + dest string + + wg sync.WaitGroup + ctx context.Context + cancel func() + eg *errgroup.Group + filter FilterFunc +} + +func NewDiskWriter(ctx context.Context, dest string, opt DiskWriterOpt) (*DiskWriter, error) { + if opt.SyncDataCb == nil && opt.AsyncDataCb == nil { + return nil, errors.New("no data callback specified") + } + if opt.SyncDataCb != nil && opt.AsyncDataCb != nil { + return nil, errors.New("can't specify both sync and async data callbacks") + } + + ctx, cancel := context.WithCancel(ctx) + eg, ctx := errgroup.WithContext(ctx) + + return &DiskWriter{ + opt: opt, + dest: dest, + eg: eg, + ctx: ctx, + cancel: cancel, + filter: opt.Filter, + }, nil +} + +func (dw *DiskWriter) Wait(ctx context.Context) error { + return dw.eg.Wait() +} + +func (dw *DiskWriter) HandleChange(kind ChangeKind, p string, fi os.FileInfo, err error) (retErr error) { + if err != nil { + return err + } + + select { + case <-dw.ctx.Done(): + return dw.ctx.Err() + default: + } + + defer func() { + if retErr != nil { + dw.cancel() + } + }() + + destPath := filepath.Join(dw.dest, filepath.FromSlash(p)) + + if kind == ChangeKindDelete { + if dw.filter != nil { + var empty types.Stat + if ok := dw.filter(p, &empty); !ok { + return nil + } + } + // todo: no need to validate if diff is trusted but is it always? + if err := os.RemoveAll(destPath); err != nil { + return errors.Wrapf(err, "failed to remove: %s", destPath) + } + if dw.opt.NotifyCb != nil { + if err := dw.opt.NotifyCb(kind, p, nil, nil); err != nil { + return err + } + } + return nil + } + + stat, ok := fi.Sys().(*types.Stat) + if !ok { + return errors.Errorf("%s invalid change without stat information", p) + } + + statCopy := *stat + + if dw.filter != nil { + if ok := dw.filter(p, &statCopy); !ok { + return nil + } + } + + rename := true + oldFi, err := os.Lstat(destPath) + if err != nil { + if os.IsNotExist(err) { + if kind != ChangeKindAdd { + return errors.Wrapf(err, "invalid addition: %s", destPath) + } + rename = false + } else { + return errors.Wrapf(err, "failed to stat %s", destPath) + } + } + + if oldFi != nil && fi.IsDir() && oldFi.IsDir() { + if err := rewriteMetadata(destPath, &statCopy); err != nil { + return errors.Wrapf(err, "error setting dir metadata for %s", destPath) + } + return nil + } + + newPath := destPath + if rename { + newPath = filepath.Join(filepath.Dir(destPath), ".tmp."+nextSuffix()) + } + + isRegularFile := false + + switch { + case fi.IsDir(): + if err := os.Mkdir(newPath, fi.Mode()); err != nil { + return errors.Wrapf(err, "failed to create dir %s", newPath) + } + case fi.Mode()&os.ModeDevice != 0 || fi.Mode()&os.ModeNamedPipe != 0: + if err := handleTarTypeBlockCharFifo(newPath, &statCopy); err != nil { + return errors.Wrapf(err, "failed to create device %s", newPath) + } + case fi.Mode()&os.ModeSymlink != 0: + if err := os.Symlink(statCopy.Linkname, newPath); err != nil { + return errors.Wrapf(err, "failed to symlink %s", newPath) + } + case statCopy.Linkname != "": + if err := os.Link(filepath.Join(dw.dest, statCopy.Linkname), newPath); err != nil { + return errors.Wrapf(err, "failed to link %s to %s", newPath, statCopy.Linkname) + } + default: + isRegularFile = true + file, err := os.OpenFile(newPath, os.O_CREATE|os.O_WRONLY, fi.Mode()) //todo: windows + if err != nil { + return errors.Wrapf(err, "failed to create %s", newPath) + } + if dw.opt.SyncDataCb != nil { + if err := dw.processChange(ChangeKindAdd, p, fi, 
file); err != nil { + file.Close() + return err + } + break + } + if err := file.Close(); err != nil { + return errors.Wrapf(err, "failed to close %s", newPath) + } + } + + if err := rewriteMetadata(newPath, &statCopy); err != nil { + return errors.Wrapf(err, "error setting metadata for %s", newPath) + } + + if rename { + if oldFi.IsDir() != fi.IsDir() { + if err := os.RemoveAll(destPath); err != nil { + return errors.Wrapf(err, "failed to remove %s", destPath) + } + } + if err := os.Rename(newPath, destPath); err != nil { + return errors.Wrapf(err, "failed to rename %s to %s", newPath, destPath) + } + } + + if isRegularFile { + if dw.opt.AsyncDataCb != nil { + dw.requestAsyncFileData(p, destPath, fi, &statCopy) + } + } else { + return dw.processChange(kind, p, fi, nil) + } + + return nil +} + +func (dw *DiskWriter) requestAsyncFileData(p, dest string, fi os.FileInfo, st *types.Stat) { + // todo: limit worker threads + dw.eg.Go(func() error { + if err := dw.processChange(ChangeKindAdd, p, fi, &lazyFileWriter{ + dest: dest, + }); err != nil { + return err + } + return chtimes(dest, st.ModTime) // TODO: parent dirs + }) +} + +func (dw *DiskWriter) processChange(kind ChangeKind, p string, fi os.FileInfo, w io.WriteCloser) error { + origw := w + var hw *hashedWriter + if dw.opt.NotifyCb != nil { + var err error + if hw, err = newHashWriter(dw.opt.ContentHasher, fi, w); err != nil { + return err + } + w = hw + } + if origw != nil { + fn := dw.opt.SyncDataCb + if fn == nil && dw.opt.AsyncDataCb != nil { + fn = dw.opt.AsyncDataCb + } + if err := fn(dw.ctx, p, w); err != nil { + return err + } + } else { + if hw != nil { + hw.Close() + } + } + if hw != nil { + return dw.opt.NotifyCb(kind, p, hw, nil) + } + return nil +} + +type hashedWriter struct { + os.FileInfo + io.Writer + h hash.Hash + w io.WriteCloser + dgst digest.Digest +} + +func newHashWriter(ch ContentHasher, fi os.FileInfo, w io.WriteCloser) (*hashedWriter, error) { + stat, ok := fi.Sys().(*types.Stat) + if !ok { + return nil, errors.Errorf("invalid change without stat information") + } + + h, err := ch(stat) + if err != nil { + return nil, err + } + hw := &hashedWriter{ + FileInfo: fi, + Writer: io.MultiWriter(w, h), + h: h, + w: w, + } + return hw, nil +} + +func (hw *hashedWriter) Close() error { + hw.dgst = digest.NewDigest(digest.SHA256, hw.h) + if hw.w != nil { + return hw.w.Close() + } + return nil +} + +func (hw *hashedWriter) Digest() digest.Digest { + return hw.dgst +} + +type lazyFileWriter struct { + dest string + ctx context.Context + f *os.File + fileMode *os.FileMode +} + +func (lfw *lazyFileWriter) Write(dt []byte) (int, error) { + if lfw.f == nil { + file, err := os.OpenFile(lfw.dest, os.O_WRONLY, 0) //todo: windows + if os.IsPermission(err) { + // retry after chmod + fi, er := os.Stat(lfw.dest) + if er == nil { + mode := fi.Mode() + lfw.fileMode = &mode + er = os.Chmod(lfw.dest, mode|0222) + if er == nil { + file, err = os.OpenFile(lfw.dest, os.O_WRONLY, 0) + } + } + } + if err != nil { + return 0, errors.Wrapf(err, "failed to open %s", lfw.dest) + } + lfw.f = file + } + return lfw.f.Write(dt) +} + +func (lfw *lazyFileWriter) Close() error { + var err error + if lfw.f != nil { + err = lfw.f.Close() + } + if err == nil && lfw.fileMode != nil { + err = os.Chmod(lfw.dest, *lfw.fileMode) + } + return err +} + +func mkdev(major int64, minor int64) uint32 { + return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff)) +} + +// Random number state. 
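+// This is the temp-name generator historically used by Go's ioutil.TempFile: +// an LCG reseeded from the time and pid, unpredictable enough to avoid +// collisions but not cryptographically random.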
+// We generate random temporary file names so that there's a good +// chance the file doesn't exist yet - keeps the number of tries in +// TempFile to a minimum. +var rand uint32 +var randmu sync.Mutex + +func reseed() uint32 { + return uint32(time.Now().UnixNano() + int64(os.Getpid())) +} + +func nextSuffix() string { + randmu.Lock() + r := rand + if r == 0 { + r = reseed() + } + r = r*1664525 + 1013904223 // constants from Numerical Recipes + rand = r + randmu.Unlock() + return strconv.Itoa(int(1e9 + r%1e9))[1:] +} diff --git a/vendor/github.com/tonistiigi/fsutil/diskwriter_unix.go b/vendor/github.com/tonistiigi/fsutil/diskwriter_unix.go new file mode 100644 index 00000000..ff0a22e3 --- /dev/null +++ b/vendor/github.com/tonistiigi/fsutil/diskwriter_unix.go @@ -0,0 +1,52 @@ +// +build !windows + +package fsutil + +import ( + "os" + "syscall" + + "github.com/containerd/continuity/sysx" + "github.com/pkg/errors" + "github.com/tonistiigi/fsutil/types" +) + +func rewriteMetadata(p string, stat *types.Stat) error { + for key, value := range stat.Xattrs { + // best effort: the Setxattr error is discarded + sysx.Setxattr(p, key, value, 0) + } + + if err := os.Lchown(p, int(stat.Uid), int(stat.Gid)); err != nil { + return errors.Wrapf(err, "failed to lchown %s", p) + } + + if os.FileMode(stat.Mode)&os.ModeSymlink == 0 { + if err := os.Chmod(p, os.FileMode(stat.Mode)); err != nil { + return errors.Wrapf(err, "failed to chmod %s", p) + } + } + + if err := chtimes(p, stat.ModTime); err != nil { + return errors.Wrapf(err, "failed to chtimes %s", p) + } + + return nil +} + +// handleTarTypeBlockCharFifo is an OS-specific helper function used by +// createTarFile to handle the following types of header: Block; Char; Fifo +func handleTarTypeBlockCharFifo(path string, stat *types.Stat) error { + mode := uint32(stat.Mode & 07777) + if os.FileMode(stat.Mode)&os.ModeCharDevice != 0 { + mode |= syscall.S_IFCHR + } else if os.FileMode(stat.Mode)&os.ModeNamedPipe != 0 { + mode |= syscall.S_IFIFO + } else { + mode |= syscall.S_IFBLK + } + + if err := syscall.Mknod(path, mode, int(mkdev(stat.Devmajor, stat.Devminor))); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/tonistiigi/fsutil/diskwriter_windows.go b/vendor/github.com/tonistiigi/fsutil/diskwriter_windows.go new file mode 100644 index 00000000..036544f0 --- /dev/null +++ b/vendor/github.com/tonistiigi/fsutil/diskwriter_windows.go @@ -0,0 +1,18 @@ +// +build windows + +package fsutil + +import ( + "github.com/pkg/errors" + "github.com/tonistiigi/fsutil/types" +) + +func rewriteMetadata(p string, stat *types.Stat) error { + return chtimes(p, stat.ModTime) +} + +// handleTarTypeBlockCharFifo is an OS-specific helper function used by +// createTarFile to handle the following types of header: Block; Char; Fifo +func handleTarTypeBlockCharFifo(path string, stat *types.Stat) error { + return errors.New("Not implemented on windows") +} diff --git a/vendor/github.com/tonistiigi/fsutil/followlinks.go b/vendor/github.com/tonistiigi/fsutil/followlinks.go new file mode 100644 index 00000000..ed4af6e8 --- /dev/null +++ b/vendor/github.com/tonistiigi/fsutil/followlinks.go @@ -0,0 +1,150 @@ +package fsutil + +import ( + "io/ioutil" + "os" + "path/filepath" + "runtime" + "sort" + "strings" + + "github.com/pkg/errors" +) + +func FollowLinks(root string, paths []string) ([]string, error) { + r := &symlinkResolver{root: root, resolved: map[string]struct{}{}} + for _, p := range paths { + if err := r.append(p); err != nil { + return nil, err + } + } + res := make([]string, 0,
len(r.resolved)) + for r := range r.resolved { + res = append(res, r) + } + sort.Strings(res) + return dedupePaths(res), nil +} + +type symlinkResolver struct { + root string + resolved map[string]struct{} +} + +func (r *symlinkResolver) append(p string) error { + p = filepath.Join(".", p) + current := "." + for { + parts := strings.SplitN(p, string(filepath.Separator), 2) + current = filepath.Join(current, parts[0]) + + targets, err := r.readSymlink(current, true) + if err != nil { + return err + } + + p = "" + if len(parts) == 2 { + p = parts[1] + } + + if p == "" || targets != nil { + if _, ok := r.resolved[current]; ok { + return nil + } + } + + if targets != nil { + r.resolved[current] = struct{}{} + for _, target := range targets { + if err := r.append(filepath.Join(target, p)); err != nil { + return err + } + } + return nil + } + + if p == "" { + r.resolved[current] = struct{}{} + return nil + } + } +} + +func (r *symlinkResolver) readSymlink(p string, allowWildcard bool) ([]string, error) { + realPath := filepath.Join(r.root, p) + base := filepath.Base(p) + if allowWildcard && containsWildcards(base) { + fis, err := ioutil.ReadDir(filepath.Dir(realPath)) + if err != nil { + if os.IsNotExist(err) { + return nil, nil + } + return nil, errors.Wrapf(err, "failed to read dir %s", filepath.Dir(realPath)) + } + var out []string + for _, f := range fis { + if ok, _ := filepath.Match(base, f.Name()); ok { + res, err := r.readSymlink(filepath.Join(filepath.Dir(p), f.Name()), false) + if err != nil { + return nil, err + } + out = append(out, res...) + } + } + return out, nil + } + + fi, err := os.Lstat(realPath) + if err != nil { + if os.IsNotExist(err) { + return nil, nil + } + return nil, errors.Wrapf(err, "failed to lstat %s", realPath) + } + if fi.Mode()&os.ModeSymlink == 0 { + return nil, nil + } + link, err := os.Readlink(realPath) + if err != nil { + return nil, errors.Wrapf(err, "failed to readlink %s", realPath) + } + link = filepath.Clean(link) + if filepath.IsAbs(link) { + return []string{link}, nil + } + return []string{ + filepath.Join(string(filepath.Separator), filepath.Join(filepath.Dir(p), link)), + }, nil +} + +func containsWildcards(name string) bool { + isWindows := runtime.GOOS == "windows" + for i := 0; i < len(name); i++ { + ch := name[i] + if ch == '\\' && !isWindows { + i++ + } else if ch == '*' || ch == '?' || ch == '[' { + return true + } + } + return false +} + +// dedupePaths expects input as a sorted list +func dedupePaths(in []string) []string { + out := make([]string, 0, len(in)) + var last string + for _, s := range in { + // if one of the paths is root there is no filter + if s == "." 
{ + return nil + } + if strings.HasPrefix(s, last+string(filepath.Separator)) { + continue + } + out = append(out, s) + last = s + } + return out +} diff --git a/vendor/github.com/tonistiigi/fsutil/fs.go b/vendor/github.com/tonistiigi/fsutil/fs.go new file mode 100644 index 00000000..a9467e94 --- /dev/null +++ b/vendor/github.com/tonistiigi/fsutil/fs.go @@ -0,0 +1,118 @@ +package fsutil + +import ( + "context" + "io" + "io/ioutil" + "os" + "path" + "path/filepath" + "sort" + "strings" + + "github.com/pkg/errors" + "github.com/tonistiigi/fsutil/types" +) + +type FS interface { + Walk(context.Context, filepath.WalkFunc) error + Open(string) (io.ReadCloser, error) +} + +func NewFS(root string, opt *WalkOpt) FS { + return &fs{ + root: root, + opt: opt, + } +} + +type fs struct { + root string + opt *WalkOpt +} + +func (fs *fs) Walk(ctx context.Context, fn filepath.WalkFunc) error { + return Walk(ctx, fs.root, fs.opt, fn) +} + +func (fs *fs) Open(p string) (io.ReadCloser, error) { + return os.Open(filepath.Join(fs.root, p)) +} + +type Dir struct { + Stat types.Stat + FS FS +} + +func SubDirFS(dirs []Dir) (FS, error) { + sort.Slice(dirs, func(i, j int) bool { + return dirs[i].Stat.Path < dirs[j].Stat.Path + }) + m := map[string]Dir{} + for _, d := range dirs { + if path.Base(d.Stat.Path) != d.Stat.Path { + return nil, errors.Errorf("subdir %s must be single file", d.Stat.Path) + } + if _, ok := m[d.Stat.Path]; ok { + return nil, errors.Errorf("invalid path %s", d.Stat.Path) + } + m[d.Stat.Path] = d + } + return &subDirFS{m: m, dirs: dirs}, nil +} + +type subDirFS struct { + m map[string]Dir + dirs []Dir +} + +func (fs *subDirFS) Walk(ctx context.Context, fn filepath.WalkFunc) error { + for _, d := range fs.dirs { + fi := &StatInfo{Stat: &d.Stat} + if !fi.IsDir() { + return errors.Errorf("fs subdir %s not mode directory", d.Stat.Path) + } + if err := fn(d.Stat.Path, fi, nil); err != nil { + return err + } + if err := d.FS.Walk(ctx, func(p string, fi os.FileInfo, err error) error { + if err != nil { + return err + } + stat, ok := fi.Sys().(*types.Stat) + if !ok { + // err is nil here when the type assertion fails, so Errorf is + // used instead of wrapping a nil error. + return errors.Errorf("invalid fileinfo without stat info: %s", p) + } + stat.Path = path.Join(d.Stat.Path, stat.Path) + if stat.Linkname != "" { + if fi.Mode()&os.ModeSymlink != 0 { + if strings.HasPrefix(stat.Linkname, "/") { + stat.Linkname = path.Join("/"+d.Stat.Path, stat.Linkname) + } + } else { + stat.Linkname = path.Join(d.Stat.Path, stat.Linkname) + } + } + return fn(filepath.Join(d.Stat.Path, p), &StatInfo{stat}, nil) + }); err != nil { + return err + } + } + return nil +} + +func (fs *subDirFS) Open(p string) (io.ReadCloser, error) { + parts := strings.SplitN(filepath.Clean(p), string(filepath.Separator), 2) + if len(parts) == 0 { + return ioutil.NopCloser(&emptyReader{}), nil + } + d, ok := fs.m[parts[0]] + if !ok { + return nil, os.ErrNotExist + } + return d.FS.Open(parts[1]) +} + +type emptyReader struct { +} + +func (*emptyReader) Read([]byte) (int, error) { + return 0, io.EOF +} diff --git a/vendor/github.com/tonistiigi/fsutil/go.mod b/vendor/github.com/tonistiigi/fsutil/go.mod new file mode 100644 index 00000000..fe8cc86b --- /dev/null +++ b/vendor/github.com/tonistiigi/fsutil/go.mod @@ -0,0 +1,28 @@ +module github.com/tonistiigi/fsutil + +require ( + github.com/Microsoft/go-winio v0.4.11 // indirect + github.com/containerd/continuity v0.0.0-20181001140422-bd77b46c8352 + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/docker/docker v0.0.0-20180531152204-71cd53e4a197 + github.com/docker/go-units v0.3.1 // indirect + github.com/gogo/protobuf
v1.3.1 + github.com/google/go-cmp v0.2.0 // indirect + github.com/gotestyourself/gotestyourself v2.2.0+incompatible // indirect + github.com/onsi/ginkgo v1.7.0 // indirect + github.com/onsi/gomega v1.4.3 // indirect + github.com/opencontainers/go-digest v1.0.0-rc1 + github.com/opencontainers/image-spec v1.0.1 // indirect + github.com/opencontainers/runc v1.0.0-rc6 // indirect + github.com/pkg/errors v0.8.1 + github.com/sirupsen/logrus v1.0.3 // indirect + github.com/stretchr/testify v1.3.0 + golang.org/x/crypto v0.0.0-20180904163835-0709b304e793 // indirect + golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f + golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e + gopkg.in/airbrake/gobrake.v2 v2.0.9 // indirect + gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2 // indirect + gotest.tools v2.1.0+incompatible // indirect +) + +go 1.13 diff --git a/vendor/github.com/tonistiigi/fsutil/go.sum b/vendor/github.com/tonistiigi/fsutil/go.sum new file mode 100644 index 00000000..4adcb662 --- /dev/null +++ b/vendor/github.com/tonistiigi/fsutil/go.sum @@ -0,0 +1,70 @@ +github.com/Microsoft/go-winio v0.4.11 h1:zoIOcVf0xPN1tnMVbTtEdI+P8OofVk3NObnwOQ6nK2Q= +github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= +github.com/containerd/continuity v0.0.0-20181001140422-bd77b46c8352 h1:CdBoaTKPl60tksFVWYc5QnLWwXBcU+XcLiXx8+NcZ9o= +github.com/containerd/continuity v0.0.0-20181001140422-bd77b46c8352/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/docker/docker v0.0.0-20180531152204-71cd53e4a197 h1:raQhUHOMIAZAWHmo3hLEwoIy0aVkKb2uxZdWw/Up+HI= +github.com/docker/docker v0.0.0-20180531152204-71cd53e4a197/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-units v0.3.1 h1:QAFdsA6jLCnglbqE6mUsHuPcJlntY94DkxHf4deHKIU= +github.com/docker/go-units v0.3.1/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/gotestyourself/gotestyourself v2.2.0+incompatible h1:AQwinXlbQR2HvPjQZOmDhRqsv5mZf+Jb1RnSLxcqZcI= +github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0 
h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ= +github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI= +github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/runc v1.0.0-rc6 h1:7AoN22rYxxkmsJS48wFaziH/n0OvrZVqL/TglgHKbKQ= +github.com/opencontainers/runc v1.0.0-rc6/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/sirupsen/logrus v1.0.3 h1:B5C/igNWoiULof20pKfY4VntcIPqKuwEmoLZrabbUrc= +github.com/sirupsen/logrus v1.0.3/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793 h1:u+LnwYTOOW7Ukr/fppxEb1Nwz0AtPflrblfvUudpo+I= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e h1:o3PsSEY8E4eXWkXrIP9YJALUkVZqzHJT5DOasTyn8Vs= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +gopkg.in/airbrake/gobrake.v2 v2.0.9 h1:7z2uVWwn7oVeeugY1DtlPAy5H+KYgB1KeKTnqjNatLo= +gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2 h1:OAj3g0cR6Dx/R07QgQe8wkA9RNjB2u4i700xBkIT4e0= +gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= +gopkg.in/tomb.v1 
v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gotest.tools v2.1.0+incompatible h1:5USw7CrJBYKqjg9R7QlA6jzqZKEAtvW82aNmsxxGPxw= +gotest.tools v2.1.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= diff --git a/vendor/github.com/tonistiigi/fsutil/hardlinks.go b/vendor/github.com/tonistiigi/fsutil/hardlinks.go new file mode 100644 index 00000000..d977f0d6 --- /dev/null +++ b/vendor/github.com/tonistiigi/fsutil/hardlinks.go @@ -0,0 +1,47 @@ +package fsutil + +import ( + "os" + + "github.com/pkg/errors" + "github.com/tonistiigi/fsutil/types" +) + +// Hardlinks validates that all targets for links were part of the changes + +type Hardlinks struct { + seenFiles map[string]struct{} +} + +func (v *Hardlinks) HandleChange(kind ChangeKind, p string, fi os.FileInfo, err error) error { + if err != nil { + return err + } + + if v.seenFiles == nil { + v.seenFiles = make(map[string]struct{}) + } + + if kind == ChangeKindDelete { + return nil + } + + stat, ok := fi.Sys().(*types.Stat) + if !ok { + return errors.Errorf("invalid change without stat info: %s", p) + } + + if fi.IsDir() || fi.Mode()&os.ModeSymlink != 0 { + return nil + } + + if len(stat.Linkname) > 0 { + if _, ok := v.seenFiles[stat.Linkname]; !ok { + return errors.Errorf("invalid link %s to unknown path: %q", p, stat.Linkname) + } + } else { + v.seenFiles[p] = struct{}{} + } + + return nil +} diff --git a/vendor/github.com/tonistiigi/fsutil/readme.md b/vendor/github.com/tonistiigi/fsutil/readme.md new file mode 100644 index 00000000..5ce685b7 --- /dev/null +++ b/vendor/github.com/tonistiigi/fsutil/readme.md @@ -0,0 +1,45 @@ +Incremental file directory sync tools in golang. + +``` +BENCH_FILE_SIZE=10000 ./bench.test --test.bench . 
+BenchmarkCopyWithTar10-4 2000 995242 ns/op +BenchmarkCopyWithTar50-4 300 4710021 ns/op +BenchmarkCopyWithTar200-4 100 16627260 ns/op +BenchmarkCopyWithTar1000-4 20 60031459 ns/op +BenchmarkCPA10-4 1000 1678367 ns/op +BenchmarkCPA50-4 500 3690306 ns/op +BenchmarkCPA200-4 200 9495066 ns/op +BenchmarkCPA1000-4 50 29769289 ns/op +BenchmarkDiffCopy10-4 2000 943889 ns/op +BenchmarkDiffCopy50-4 500 3285950 ns/op +BenchmarkDiffCopy200-4 200 8563792 ns/op +BenchmarkDiffCopy1000-4 50 29511340 ns/op +BenchmarkDiffCopyProto10-4 2000 944615 ns/op +BenchmarkDiffCopyProto50-4 500 3334940 ns/op +BenchmarkDiffCopyProto200-4 200 9420038 ns/op +BenchmarkDiffCopyProto1000-4 50 30632429 ns/op +BenchmarkIncrementalDiffCopy10-4 2000 691993 ns/op +BenchmarkIncrementalDiffCopy50-4 1000 1304253 ns/op +BenchmarkIncrementalDiffCopy200-4 500 3306519 ns/op +BenchmarkIncrementalDiffCopy1000-4 200 10211343 ns/op +BenchmarkIncrementalDiffCopy5000-4 20 55194427 ns/op +BenchmarkIncrementalDiffCopy10000-4 20 91759289 ns/op +BenchmarkIncrementalCopyWithTar10-4 2000 1020258 ns/op +BenchmarkIncrementalCopyWithTar50-4 300 5348786 ns/op +BenchmarkIncrementalCopyWithTar200-4 100 19495000 ns/op +BenchmarkIncrementalCopyWithTar1000-4 20 70338507 ns/op +BenchmarkIncrementalRsync10-4 30 45215754 ns/op +BenchmarkIncrementalRsync50-4 30 45837260 ns/op +BenchmarkIncrementalRsync200-4 30 48780614 ns/op +BenchmarkIncrementalRsync1000-4 20 54801892 ns/op +BenchmarkIncrementalRsync5000-4 20 84782542 ns/op +BenchmarkIncrementalRsync10000-4 10 103355108 ns/op +BenchmarkRsync10-4 30 46776470 ns/op +BenchmarkRsync50-4 30 48601555 ns/op +BenchmarkRsync200-4 20 59642691 ns/op +BenchmarkRsync1000-4 20 101343010 ns/op +BenchmarkGnuTar10-4 500 3171448 ns/op +BenchmarkGnuTar50-4 300 5030296 ns/op +BenchmarkGnuTar200-4 100 10464313 ns/op +BenchmarkGnuTar1000-4 50 30375257 ns/op +``` \ No newline at end of file diff --git a/vendor/github.com/tonistiigi/fsutil/receive.go b/vendor/github.com/tonistiigi/fsutil/receive.go new file mode 100644 index 00000000..0210dcdb --- /dev/null +++ b/vendor/github.com/tonistiigi/fsutil/receive.go @@ -0,0 +1,276 @@ +package fsutil + +import ( + "context" + "io" + "os" + "sync" + + "github.com/pkg/errors" + "github.com/tonistiigi/fsutil/types" + "golang.org/x/sync/errgroup" +) + +type ReceiveOpt struct { + NotifyHashed ChangeFunc + ContentHasher ContentHasher + ProgressCb func(int, bool) + Merge bool + Filter FilterFunc +} + +func Receive(ctx context.Context, conn Stream, dest string, opt ReceiveOpt) error { + // derive the cancellable context from the caller's ctx so that external + // cancellation also stops the receive loop + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + r := &receiver{ + conn: &syncStream{Stream: conn}, + dest: dest, + files: make(map[string]uint32), + pipes: make(map[uint32]io.WriteCloser), + notifyHashed: opt.NotifyHashed, + contentHasher: opt.ContentHasher, + progressCb: opt.ProgressCb, + merge: opt.Merge, + filter: opt.Filter, + } + return r.run(ctx) +} + +type receiver struct { + dest string + conn Stream + files map[string]uint32 + pipes map[uint32]io.WriteCloser + mu sync.RWMutex + muPipes sync.RWMutex + progressCb func(int, bool) + merge bool + filter FilterFunc + + notifyHashed ChangeFunc + contentHasher ContentHasher + orderValidator Validator + hlValidator Hardlinks +} + +type dynamicWalker struct { + walkChan chan *currentPath + err error + closeCh chan struct{} +} + +func newDynamicWalker() *dynamicWalker { + return &dynamicWalker{ + walkChan: make(chan *currentPath, 128), + closeCh: make(chan struct{}), + } +} + +func (w *dynamicWalker) update(p *currentPath) error { + select
{ + case <-w.closeCh: + return errors.Wrap(w.err, "walker is closed") + default: + } + if p == nil { + close(w.walkChan) + return nil + } + select { + case w.walkChan <- p: + return nil + case <-w.closeCh: + return errors.Wrap(w.err, "walker is closed") + } +} + +func (w *dynamicWalker) fill(ctx context.Context, pathC chan<- *currentPath) error { + for { + select { + case p, ok := <-w.walkChan: + if !ok { + return nil + } + select { + case pathC <- p: + case <-ctx.Done(): + w.err = ctx.Err() + close(w.closeCh) + return ctx.Err() + } + case <-ctx.Done(): + w.err = ctx.Err() + close(w.closeCh) + return ctx.Err() + } + } + return nil +} + +func (r *receiver) run(ctx context.Context) error { + g, ctx := errgroup.WithContext(ctx) + + dw, err := NewDiskWriter(ctx, r.dest, DiskWriterOpt{ + AsyncDataCb: r.asyncDataFunc, + NotifyCb: r.notifyHashed, + ContentHasher: r.contentHasher, + Filter: r.filter, + }) + if err != nil { + return err + } + + w := newDynamicWalker() + + g.Go(func() (retErr error) { + defer func() { + if retErr != nil { + r.conn.SendMsg(&types.Packet{Type: types.PACKET_ERR, Data: []byte(retErr.Error())}) + } + }() + destWalker := emptyWalker + if !r.merge { + destWalker = GetWalkerFn(r.dest) + } + err := doubleWalkDiff(ctx, dw.HandleChange, destWalker, w.fill, r.filter) + if err != nil { + return err + } + if err := dw.Wait(ctx); err != nil { + return err + } + r.conn.SendMsg(&types.Packet{Type: types.PACKET_FIN}) + return nil + }) + + g.Go(func() error { + var i uint32 = 0 + + size := 0 + if r.progressCb != nil { + defer func() { + r.progressCb(size, true) + }() + } + var p types.Packet + for { + p = types.Packet{Data: p.Data[:0]} + if err := r.conn.RecvMsg(&p); err != nil { + return err + } + if r.progressCb != nil { + size += p.Size() + r.progressCb(size, false) + } + + switch p.Type { + case types.PACKET_ERR: + return errors.Errorf("error from sender: %s", p.Data) + case types.PACKET_STAT: + if p.Stat == nil { + if err := w.update(nil); err != nil { + return err + } + break + } + if fileCanRequestData(os.FileMode(p.Stat.Mode)) { + r.mu.Lock() + r.files[p.Stat.Path] = i + r.mu.Unlock() + } + i++ + cp := ¤tPath{path: p.Stat.Path, stat: p.Stat} + if err := r.orderValidator.HandleChange(ChangeKindAdd, cp.path, &StatInfo{cp.stat}, nil); err != nil { + return err + } + if err := r.hlValidator.HandleChange(ChangeKindAdd, cp.path, &StatInfo{cp.stat}, nil); err != nil { + return err + } + if err := w.update(cp); err != nil { + return err + } + case types.PACKET_DATA: + r.muPipes.Lock() + pw, ok := r.pipes[p.ID] + r.muPipes.Unlock() + if !ok { + return errors.Errorf("invalid file request %d", p.ID) + } + if len(p.Data) == 0 { + if err := pw.Close(); err != nil { + return err + } + } else { + if _, err := pw.Write(p.Data); err != nil { + return err + } + } + case types.PACKET_FIN: + for { + var p types.Packet + if err := r.conn.RecvMsg(&p); err != nil { + if err == io.EOF { + return nil + } + return err + } + } + } + } + }) + return g.Wait() +} + +func (r *receiver) asyncDataFunc(ctx context.Context, p string, wc io.WriteCloser) error { + r.mu.Lock() + id, ok := r.files[p] + if !ok { + r.mu.Unlock() + return errors.Errorf("invalid file request %s", p) + } + delete(r.files, p) + r.mu.Unlock() + + wwc := newWrappedWriteCloser(wc) + r.muPipes.Lock() + r.pipes[id] = wwc + r.muPipes.Unlock() + if err := r.conn.SendMsg(&types.Packet{Type: types.PACKET_REQ, ID: id}); err != nil { + return err + } + err := wwc.Wait(ctx) + if err != nil { + return err + } + r.muPipes.Lock() + delete(r.pipes, id) 
+ r.muPipes.Unlock() + return nil +} + +type wrappedWriteCloser struct { + io.WriteCloser + err error + once sync.Once + done chan struct{} +} + +func newWrappedWriteCloser(wc io.WriteCloser) *wrappedWriteCloser { + return &wrappedWriteCloser{WriteCloser: wc, done: make(chan struct{})} +} + +func (w *wrappedWriteCloser) Close() error { + w.err = w.WriteCloser.Close() + w.once.Do(func() { close(w.done) }) + return w.err +} + +func (w *wrappedWriteCloser) Wait(ctx context.Context) error { + select { + case <-ctx.Done(): + return ctx.Err() + case <-w.done: + return w.err + } +} diff --git a/vendor/github.com/tonistiigi/fsutil/send.go b/vendor/github.com/tonistiigi/fsutil/send.go new file mode 100644 index 00000000..e7c5a37d --- /dev/null +++ b/vendor/github.com/tonistiigi/fsutil/send.go @@ -0,0 +1,206 @@ +package fsutil + +import ( + "context" + "io" + "os" + "sync" + + "github.com/pkg/errors" + "github.com/tonistiigi/fsutil/types" + "golang.org/x/sync/errgroup" +) + +var bufPool = sync.Pool{ + New: func() interface{} { + return make([]byte, 32*1<<10) + }, +} + +type Stream interface { + RecvMsg(interface{}) error + SendMsg(m interface{}) error + Context() context.Context +} + +func Send(ctx context.Context, conn Stream, fs FS, progressCb func(int, bool)) error { + s := &sender{ + conn: &syncStream{Stream: conn}, + fs: fs, + files: make(map[uint32]string), + progressCb: progressCb, + sendpipeline: make(chan *sendHandle, 128), + } + return s.run(ctx) +} + +type sendHandle struct { + id uint32 + path string +} + +type sender struct { + conn Stream + fs FS + files map[uint32]string + mu sync.RWMutex + progressCb func(int, bool) + progressCurrent int + sendpipeline chan *sendHandle +} + +func (s *sender) run(ctx context.Context) error { + g, ctx := errgroup.WithContext(ctx) + + defer s.updateProgress(0, true) + + g.Go(func() error { + err := s.walk(ctx) + if err != nil { + s.conn.SendMsg(&types.Packet{Type: types.PACKET_ERR, Data: []byte(err.Error())}) + } + return err + }) + + for i := 0; i < 4; i++ { + g.Go(func() error { + for h := range s.sendpipeline { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + if err := s.sendFile(h); err != nil { + return err + } + } + return nil + }) + } + + g.Go(func() error { + defer close(s.sendpipeline) + + for { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + var p types.Packet + if err := s.conn.RecvMsg(&p); err != nil { + return err + } + switch p.Type { + case types.PACKET_ERR: + return errors.Errorf("error from receiver: %s", p.Data) + case types.PACKET_REQ: + if err := s.queue(p.ID); err != nil { + return err + } + case types.PACKET_FIN: + return s.conn.SendMsg(&types.Packet{Type: types.PACKET_FIN}) + } + } + }) + + return g.Wait() +} + +func (s *sender) updateProgress(size int, last bool) { + if s.progressCb != nil { + s.progressCurrent += size + s.progressCb(s.progressCurrent, last) + } +} + +func (s *sender) queue(id uint32) error { + s.mu.Lock() + p, ok := s.files[id] + if !ok { + s.mu.Unlock() + return errors.Errorf("invalid file id %d", id) + } + delete(s.files, id) + s.mu.Unlock() + s.sendpipeline <- &sendHandle{id, p} + return nil +} + +func (s *sender) sendFile(h *sendHandle) error { + f, err := s.fs.Open(h.path) + if err == nil { + defer f.Close() + buf := bufPool.Get().([]byte) + defer bufPool.Put(buf) + if _, err := io.CopyBuffer(&fileSender{sender: s, id: h.id}, f, buf); err != nil { + return err + } + } + return s.conn.SendMsg(&types.Packet{ID: h.id, Type: types.PACKET_DATA}) +} + +func (s *sender) 
walk(ctx context.Context) error { + var i uint32 = 0 + err := s.fs.Walk(ctx, func(path string, fi os.FileInfo, err error) error { + if err != nil { + return err + } + stat, ok := fi.Sys().(*types.Stat) + if !ok { + return errors.Wrapf(err, "invalid fileinfo without stat info: %s", path) + } + + p := &types.Packet{ + Type: types.PACKET_STAT, + Stat: stat, + } + if fileCanRequestData(os.FileMode(stat.Mode)) { + s.mu.Lock() + s.files[i] = stat.Path + s.mu.Unlock() + } + i++ + s.updateProgress(p.Size(), false) + return errors.Wrapf(s.conn.SendMsg(p), "failed to send stat %s", path) + }) + if err != nil { + return err + } + return errors.Wrapf(s.conn.SendMsg(&types.Packet{Type: types.PACKET_STAT}), "failed to send last stat") +} + +func fileCanRequestData(m os.FileMode) bool { + // avoid updating this function as it needs to match between sender/receiver. + // version if needed + return m&os.ModeType == 0 +} + +type fileSender struct { + sender *sender + id uint32 +} + +func (fs *fileSender) Write(dt []byte) (int, error) { + if len(dt) == 0 { + return 0, nil + } + p := &types.Packet{Type: types.PACKET_DATA, ID: fs.id, Data: dt} + if err := fs.sender.conn.SendMsg(p); err != nil { + return 0, err + } + fs.sender.updateProgress(p.Size(), false) + return len(dt), nil +} + +type syncStream struct { + Stream + mu sync.Mutex +} + +func (ss *syncStream) SendMsg(m interface{}) error { + ss.mu.Lock() + err := ss.Stream.SendMsg(m) + ss.mu.Unlock() + return err +} diff --git a/vendor/github.com/tonistiigi/fsutil/stat.go b/vendor/github.com/tonistiigi/fsutil/stat.go new file mode 100644 index 00000000..789dce3d --- /dev/null +++ b/vendor/github.com/tonistiigi/fsutil/stat.go @@ -0,0 +1,64 @@ +package fsutil + +import ( + "os" + "path/filepath" + "runtime" + + "github.com/pkg/errors" + "github.com/tonistiigi/fsutil/types" +) + +// constructs a Stat object. path is where the path can be found right +// now, relpath is the desired path to be recorded in the stat (so +// relative to whatever base dir is relevant). fi is the os.Stat +// info. inodemap is used to calculate hardlinks over a series of +// mkstat calls and maps inode to the canonical (aka "first") path for +// a set of hardlinks to that inode. 
+func mkstat(path, relpath string, fi os.FileInfo, inodemap map[uint64]string) (*types.Stat, error) { + relpath = filepath.ToSlash(relpath) + + stat := &types.Stat{ + Path: relpath, + Mode: uint32(fi.Mode()), + ModTime: fi.ModTime().UnixNano(), + } + + setUnixOpt(fi, stat, relpath, inodemap) + + if !fi.IsDir() { + stat.Size_ = fi.Size() + if fi.Mode()&os.ModeSymlink != 0 { + link, err := os.Readlink(path) + if err != nil { + return nil, errors.Wrapf(err, "failed to readlink %s", path) + } + stat.Linkname = link + } + } + if err := loadXattr(path, stat); err != nil { + return nil, errors.Wrapf(err, "failed to xattr %s", relpath) + } + + if runtime.GOOS == "windows" { + permPart := stat.Mode & uint32(os.ModePerm) + noPermPart := stat.Mode &^ uint32(os.ModePerm) + // Add the x bit: make everything +x from windows + permPart |= 0111 + permPart &= 0755 + stat.Mode = noPermPart | permPart + } + + // Clear the socket bit since archive/tar.FileInfoHeader does not handle it + stat.Mode &^= uint32(os.ModeSocket) + + return stat, nil +} + +func Stat(path string) (*types.Stat, error) { + fi, err := os.Lstat(path) + if err != nil { + return nil, errors.Wrap(err, "os stat") + } + return mkstat(path, filepath.Base(path), fi, nil) +} diff --git a/vendor/github.com/tonistiigi/fsutil/stat_unix.go b/vendor/github.com/tonistiigi/fsutil/stat_unix.go new file mode 100644 index 00000000..af08522c --- /dev/null +++ b/vendor/github.com/tonistiigi/fsutil/stat_unix.go @@ -0,0 +1,71 @@ +// +build !windows + +package fsutil + +import ( + "os" + "syscall" + + "github.com/containerd/continuity/sysx" + "github.com/pkg/errors" + "github.com/tonistiigi/fsutil/types" +) + +func loadXattr(origpath string, stat *types.Stat) error { + xattrs, err := sysx.LListxattr(origpath) + if err != nil { + if errors.Cause(err) == syscall.ENOTSUP { + return nil + } + return errors.Wrapf(err, "failed to xattr %s", origpath) + } + if len(xattrs) > 0 { + m := make(map[string][]byte) + for _, key := range xattrs { + v, err := sysx.LGetxattr(origpath, key) + if err == nil { + m[key] = v + } + } + stat.Xattrs = m + } + return nil +} + +func setUnixOpt(fi os.FileInfo, stat *types.Stat, path string, seenFiles map[uint64]string) { + s := fi.Sys().(*syscall.Stat_t) + + stat.Uid = s.Uid + stat.Gid = s.Gid + + if !fi.IsDir() { + if s.Mode&syscall.S_IFBLK != 0 || + s.Mode&syscall.S_IFCHR != 0 { + stat.Devmajor = int64(major(uint64(s.Rdev))) + stat.Devminor = int64(minor(uint64(s.Rdev))) + } + + ino := s.Ino + linked := false + if seenFiles != nil { + if s.Nlink > 1 { + if oldpath, ok := seenFiles[ino]; ok { + stat.Linkname = oldpath + stat.Size_ = 0 + linked = true + } + } + if !linked { + seenFiles[ino] = path + } + } + } +} + +func major(device uint64) uint64 { + return (device >> 8) & 0xfff +} + +func minor(device uint64) uint64 { + return (device & 0xff) | ((device >> 12) & 0xfff00) +} diff --git a/vendor/github.com/tonistiigi/fsutil/stat_windows.go b/vendor/github.com/tonistiigi/fsutil/stat_windows.go new file mode 100644 index 00000000..66379bd8 --- /dev/null +++ b/vendor/github.com/tonistiigi/fsutil/stat_windows.go @@ -0,0 +1,16 @@ +// +build windows + +package fsutil + +import ( + "os" + + "github.com/tonistiigi/fsutil/types" +) + +func loadXattr(_ string, _ *types.Stat) error { + return nil +} + +func setUnixOpt(_ os.FileInfo, _ *types.Stat, _ string, _ map[uint64]string) { +} diff --git a/vendor/github.com/tonistiigi/fsutil/tarwriter.go b/vendor/github.com/tonistiigi/fsutil/tarwriter.go new file mode 100644 index 00000000..06f28c55 --- 
/dev/null +++ b/vendor/github.com/tonistiigi/fsutil/tarwriter.go @@ -0,0 +1,76 @@ +package fsutil + +import ( + "archive/tar" + "context" + "io" + "os" + "path/filepath" + "strings" + + "github.com/pkg/errors" + "github.com/tonistiigi/fsutil/types" +) + +func WriteTar(ctx context.Context, fs FS, w io.Writer) error { + tw := tar.NewWriter(w) + err := fs.Walk(ctx, func(path string, fi os.FileInfo, err error) error { + stat, ok := fi.Sys().(*types.Stat) + if !ok { + return errors.Wrapf(err, "invalid fileinfo without stat info: %s", path) + } + hdr, err := tar.FileInfoHeader(fi, stat.Linkname) + if err != nil { + return err + } + + name := filepath.ToSlash(path) + if fi.IsDir() && !strings.HasSuffix(name, "/") { + name += "/" + } + hdr.Name = name + + hdr.Uid = int(stat.Uid) + hdr.Gid = int(stat.Gid) + hdr.Devmajor = stat.Devmajor + hdr.Devminor = stat.Devminor + hdr.Linkname = stat.Linkname + if hdr.Linkname != "" { + hdr.Size = 0 + if fi.Mode() & os.ModeSymlink != 0 { + hdr.Typeflag = tar.TypeSymlink + } else { + hdr.Typeflag = tar.TypeLink + } + } + + if len(stat.Xattrs) > 0 { + hdr.PAXRecords = map[string]string{} + } + for k, v := range stat.Xattrs { + hdr.PAXRecords["SCHILY.xattr."+k] = string(v) + } + + if err := tw.WriteHeader(hdr); err != nil { + return errors.Wrap(err, "failed to write file header") + } + + if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 && hdr.Linkname == "" { + rc, err := fs.Open(path) + if err != nil { + return err + } + if _, err := io.Copy(tw, rc); err != nil { + return err + } + if err := rc.Close(); err != nil { + return err + } + } + return nil + }) + if err != nil { + return err + } + return tw.Close() +} diff --git a/vendor/github.com/tonistiigi/fsutil/types/generate.go b/vendor/github.com/tonistiigi/fsutil/types/generate.go new file mode 100644 index 00000000..5c03178f --- /dev/null +++ b/vendor/github.com/tonistiigi/fsutil/types/generate.go @@ -0,0 +1,3 @@ +package types + +//go:generate protoc --gogoslick_out=. stat.proto wire.proto diff --git a/vendor/github.com/tonistiigi/fsutil/types/stat.go b/vendor/github.com/tonistiigi/fsutil/types/stat.go new file mode 100644 index 00000000..b79fd2bd --- /dev/null +++ b/vendor/github.com/tonistiigi/fsutil/types/stat.go @@ -0,0 +1,7 @@ +package types + +import "os" + +func (s Stat) IsDir() bool { + return os.FileMode(s.Mode).IsDir() +} diff --git a/vendor/github.com/tonistiigi/fsutil/types/stat.pb.go b/vendor/github.com/tonistiigi/fsutil/types/stat.pb.go new file mode 100644 index 00000000..91200fb7 --- /dev/null +++ b/vendor/github.com/tonistiigi/fsutil/types/stat.pb.go @@ -0,0 +1,929 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: stat.proto + +package types + +import ( + bytes "bytes" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type Stat struct { + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + Mode uint32 `protobuf:"varint,2,opt,name=mode,proto3" json:"mode,omitempty"` + Uid uint32 `protobuf:"varint,3,opt,name=uid,proto3" json:"uid,omitempty"` + Gid uint32 `protobuf:"varint,4,opt,name=gid,proto3" json:"gid,omitempty"` + Size_ int64 `protobuf:"varint,5,opt,name=size,proto3" json:"size,omitempty"` + ModTime int64 `protobuf:"varint,6,opt,name=modTime,proto3" json:"modTime,omitempty"` + // int32 typeflag = 7; + Linkname string `protobuf:"bytes,7,opt,name=linkname,proto3" json:"linkname,omitempty"` + Devmajor int64 `protobuf:"varint,8,opt,name=devmajor,proto3" json:"devmajor,omitempty"` + Devminor int64 `protobuf:"varint,9,opt,name=devminor,proto3" json:"devminor,omitempty"` + Xattrs map[string][]byte `protobuf:"bytes,10,rep,name=xattrs,proto3" json:"xattrs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *Stat) Reset() { *m = Stat{} } +func (*Stat) ProtoMessage() {} +func (*Stat) Descriptor() ([]byte, []int) { + return fileDescriptor_01fabdc1b78bd68b, []int{0} +} +func (m *Stat) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Stat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Stat.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Stat) XXX_Merge(src proto.Message) { + xxx_messageInfo_Stat.Merge(m, src) +} +func (m *Stat) XXX_Size() int { + return m.Size() +} +func (m *Stat) XXX_DiscardUnknown() { + xxx_messageInfo_Stat.DiscardUnknown(m) +} + +var xxx_messageInfo_Stat proto.InternalMessageInfo + +func (m *Stat) GetPath() string { + if m != nil { + return m.Path + } + return "" +} + +func (m *Stat) GetMode() uint32 { + if m != nil { + return m.Mode + } + return 0 +} + +func (m *Stat) GetUid() uint32 { + if m != nil { + return m.Uid + } + return 0 +} + +func (m *Stat) GetGid() uint32 { + if m != nil { + return m.Gid + } + return 0 +} + +func (m *Stat) GetSize_() int64 { + if m != nil { + return m.Size_ + } + return 0 +} + +func (m *Stat) GetModTime() int64 { + if m != nil { + return m.ModTime + } + return 0 +} + +func (m *Stat) GetLinkname() string { + if m != nil { + return m.Linkname + } + return "" +} + +func (m *Stat) GetDevmajor() int64 { + if m != nil { + return m.Devmajor + } + return 0 +} + +func (m *Stat) GetDevminor() int64 { + if m != nil { + return m.Devminor + } + return 0 +} + +func (m *Stat) GetXattrs() map[string][]byte { + if m != nil { + return m.Xattrs + } + return nil +} + +func init() { + proto.RegisterType((*Stat)(nil), "fsutil.types.Stat") + proto.RegisterMapType((map[string][]byte)(nil), "fsutil.types.Stat.XattrsEntry") +} + +func init() { proto.RegisterFile("stat.proto", fileDescriptor_01fabdc1b78bd68b) } + +var fileDescriptor_01fabdc1b78bd68b = []byte{ + // 317 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x91, 0xbd, 0x4e, 0xc3, 0x30, + 0x14, 0x85, 0x73, 0x9b, 0xfe, 0xba, 0x45, 0x42, 0x16, 0x83, 0xd5, 0xe1, 0x2a, 0x62, 0xca, 0x94, + 0x01, 0x24, 0xc4, 0xcf, 0x86, 0xc4, 0x0b, 0x04, 0x06, 0xc4, 0x66, 0x64, 0x53, 0x4c, 0x9b, 0xb8, + 0x4a, 0xdc, 0x8a, 0x32, 0xf1, 0x08, 0x3c, 0x06, 0x6f, 0x02, 0x63, 0xc7, 0x8e, 0xd4, 0x5d, 0x18, + 0xfb, 0x08, 0xc8, 0x4e, 0x5b, 
0xba, 0x9d, 0xf3, 0x9d, 0x7b, 0x95, 0x9c, 0x6b, 0x42, 0x4a, 0xc3, + 0x4d, 0x32, 0x2e, 0xb4, 0xd1, 0xb4, 0xf7, 0x54, 0x4e, 0x8c, 0x1a, 0x25, 0x66, 0x36, 0x96, 0xe5, + 0xf1, 0x57, 0x8d, 0xd4, 0x6f, 0x0d, 0x37, 0x94, 0x92, 0xfa, 0x98, 0x9b, 0x67, 0x06, 0x11, 0xc4, + 0x9d, 0xd4, 0x6b, 0xc7, 0x32, 0x2d, 0x24, 0xab, 0x45, 0x10, 0x1f, 0xa4, 0x5e, 0xd3, 0x43, 0x12, + 0x4e, 0x94, 0x60, 0xa1, 0x47, 0x4e, 0x3a, 0x32, 0x50, 0x82, 0xd5, 0x2b, 0x32, 0x50, 0xc2, 0xed, + 0x95, 0xea, 0x4d, 0xb2, 0x46, 0x04, 0x71, 0x98, 0x7a, 0x4d, 0x19, 0x69, 0x65, 0x5a, 0xdc, 0xa9, + 0x4c, 0xb2, 0xa6, 0xc7, 0x5b, 0x4b, 0xfb, 0xa4, 0x3d, 0x52, 0xf9, 0x30, 0xe7, 0x99, 0x64, 0x2d, + 0xff, 0xf5, 0x9d, 0x77, 0x99, 0x90, 0xd3, 0x8c, 0xbf, 0xe8, 0x82, 0xb5, 0xfd, 0xda, 0xce, 0x6f, + 0x33, 0x95, 0xeb, 0x82, 0x75, 0xfe, 0x33, 0xe7, 0xe9, 0x19, 0x69, 0xbe, 0x72, 0x63, 0x8a, 0x92, + 0x91, 0x28, 0x8c, 0xbb, 0x27, 0x98, 0xec, 0xb7, 0x4e, 0x5c, 0xe3, 0xe4, 0xde, 0x0f, 0xdc, 0xe4, + 0xa6, 0x98, 0xa5, 0x9b, 0xe9, 0xfe, 0x05, 0xe9, 0xee, 0x61, 0x57, 0x6d, 0x28, 0x67, 0x9b, 0x9b, + 0x38, 0x49, 0x8f, 0x48, 0x63, 0xca, 0x47, 0x93, 0xea, 0x26, 0xbd, 0xb4, 0x32, 0x97, 0xb5, 0x73, + 0xb8, 0xbe, 0x9a, 0x2f, 0x31, 0x58, 0x2c, 0x31, 0x58, 0x2f, 0x11, 0xde, 0x2d, 0xc2, 0xa7, 0x45, + 0xf8, 0xb6, 0x08, 0x73, 0x8b, 0xf0, 0x63, 0x11, 0x7e, 0x2d, 0x06, 0x6b, 0x8b, 0xf0, 0xb1, 0xc2, + 0x60, 0xbe, 0xc2, 0x60, 0xb1, 0xc2, 0xe0, 0xa1, 0xe1, 0x7f, 0xe8, 0xb1, 0xe9, 0xdf, 0xe6, 0xf4, + 0x2f, 0x00, 0x00, 0xff, 0xff, 0x06, 0x97, 0xf3, 0xd7, 0xa9, 0x01, 0x00, 0x00, +} + +func (this *Stat) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Stat) + if !ok { + that2, ok := that.(Stat) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Path != that1.Path { + return false + } + if this.Mode != that1.Mode { + return false + } + if this.Uid != that1.Uid { + return false + } + if this.Gid != that1.Gid { + return false + } + if this.Size_ != that1.Size_ { + return false + } + if this.ModTime != that1.ModTime { + return false + } + if this.Linkname != that1.Linkname { + return false + } + if this.Devmajor != that1.Devmajor { + return false + } + if this.Devminor != that1.Devminor { + return false + } + if len(this.Xattrs) != len(that1.Xattrs) { + return false + } + for i := range this.Xattrs { + if !bytes.Equal(this.Xattrs[i], that1.Xattrs[i]) { + return false + } + } + return true +} +func (this *Stat) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 14) + s = append(s, "&types.Stat{") + s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n") + s = append(s, "Mode: "+fmt.Sprintf("%#v", this.Mode)+",\n") + s = append(s, "Uid: "+fmt.Sprintf("%#v", this.Uid)+",\n") + s = append(s, "Gid: "+fmt.Sprintf("%#v", this.Gid)+",\n") + s = append(s, "Size_: "+fmt.Sprintf("%#v", this.Size_)+",\n") + s = append(s, "ModTime: "+fmt.Sprintf("%#v", this.ModTime)+",\n") + s = append(s, "Linkname: "+fmt.Sprintf("%#v", this.Linkname)+",\n") + s = append(s, "Devmajor: "+fmt.Sprintf("%#v", this.Devmajor)+",\n") + s = append(s, "Devminor: "+fmt.Sprintf("%#v", this.Devminor)+",\n") + keysForXattrs := make([]string, 0, len(this.Xattrs)) + for k, _ := range this.Xattrs { + keysForXattrs = append(keysForXattrs, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForXattrs) + mapStringForXattrs := "map[string][]byte{" + for _, k := range keysForXattrs { + mapStringForXattrs += fmt.Sprintf("%#v: %#v,", k, 
this.Xattrs[k]) + } + mapStringForXattrs += "}" + if this.Xattrs != nil { + s = append(s, "Xattrs: "+mapStringForXattrs+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringStat(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Stat) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Stat) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Stat) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Xattrs) > 0 { + for k := range m.Xattrs { + v := m.Xattrs[k] + baseI := i + if len(v) > 0 { + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintStat(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + } + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintStat(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintStat(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x52 + } + } + if m.Devminor != 0 { + i = encodeVarintStat(dAtA, i, uint64(m.Devminor)) + i-- + dAtA[i] = 0x48 + } + if m.Devmajor != 0 { + i = encodeVarintStat(dAtA, i, uint64(m.Devmajor)) + i-- + dAtA[i] = 0x40 + } + if len(m.Linkname) > 0 { + i -= len(m.Linkname) + copy(dAtA[i:], m.Linkname) + i = encodeVarintStat(dAtA, i, uint64(len(m.Linkname))) + i-- + dAtA[i] = 0x3a + } + if m.ModTime != 0 { + i = encodeVarintStat(dAtA, i, uint64(m.ModTime)) + i-- + dAtA[i] = 0x30 + } + if m.Size_ != 0 { + i = encodeVarintStat(dAtA, i, uint64(m.Size_)) + i-- + dAtA[i] = 0x28 + } + if m.Gid != 0 { + i = encodeVarintStat(dAtA, i, uint64(m.Gid)) + i-- + dAtA[i] = 0x20 + } + if m.Uid != 0 { + i = encodeVarintStat(dAtA, i, uint64(m.Uid)) + i-- + dAtA[i] = 0x18 + } + if m.Mode != 0 { + i = encodeVarintStat(dAtA, i, uint64(m.Mode)) + i-- + dAtA[i] = 0x10 + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintStat(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintStat(dAtA []byte, offset int, v uint64) int { + offset -= sovStat(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Stat) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + if l > 0 { + n += 1 + l + sovStat(uint64(l)) + } + if m.Mode != 0 { + n += 1 + sovStat(uint64(m.Mode)) + } + if m.Uid != 0 { + n += 1 + sovStat(uint64(m.Uid)) + } + if m.Gid != 0 { + n += 1 + sovStat(uint64(m.Gid)) + } + if m.Size_ != 0 { + n += 1 + sovStat(uint64(m.Size_)) + } + if m.ModTime != 0 { + n += 1 + sovStat(uint64(m.ModTime)) + } + l = len(m.Linkname) + if l > 0 { + n += 1 + l + sovStat(uint64(l)) + } + if m.Devmajor != 0 { + n += 1 + sovStat(uint64(m.Devmajor)) + } + if m.Devminor != 0 { + n += 1 + sovStat(uint64(m.Devminor)) + } + if len(m.Xattrs) > 0 { + for k, v := range m.Xattrs { + _ = k + _ = v + l = 0 + if len(v) > 0 { + l = 1 + len(v) + sovStat(uint64(len(v))) + } + mapEntrySize := 1 + len(k) + sovStat(uint64(len(k))) + l + n += mapEntrySize + 1 + sovStat(uint64(mapEntrySize)) + } + } + return n +} + +func sovStat(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozStat(x 
uint64) (n int) { + return sovStat(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Stat) String() string { + if this == nil { + return "nil" + } + keysForXattrs := make([]string, 0, len(this.Xattrs)) + for k, _ := range this.Xattrs { + keysForXattrs = append(keysForXattrs, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForXattrs) + mapStringForXattrs := "map[string][]byte{" + for _, k := range keysForXattrs { + mapStringForXattrs += fmt.Sprintf("%v: %v,", k, this.Xattrs[k]) + } + mapStringForXattrs += "}" + s := strings.Join([]string{`&Stat{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `Mode:` + fmt.Sprintf("%v", this.Mode) + `,`, + `Uid:` + fmt.Sprintf("%v", this.Uid) + `,`, + `Gid:` + fmt.Sprintf("%v", this.Gid) + `,`, + `Size_:` + fmt.Sprintf("%v", this.Size_) + `,`, + `ModTime:` + fmt.Sprintf("%v", this.ModTime) + `,`, + `Linkname:` + fmt.Sprintf("%v", this.Linkname) + `,`, + `Devmajor:` + fmt.Sprintf("%v", this.Devmajor) + `,`, + `Devminor:` + fmt.Sprintf("%v", this.Devminor) + `,`, + `Xattrs:` + mapStringForXattrs + `,`, + `}`, + }, "") + return s +} +func valueToStringStat(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Stat) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStat + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Stat: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Stat: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStat + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthStat + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthStat + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) + } + m.Mode = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStat + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Mode |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Uid", wireType) + } + m.Uid = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStat + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Uid |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Gid", wireType) + } + m.Gid = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStat + } + if iNdEx >= 
l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Gid |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Size_", wireType) + } + m.Size_ = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStat + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Size_ |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ModTime", wireType) + } + m.ModTime = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStat + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ModTime |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Linkname", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStat + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthStat + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthStat + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Linkname = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Devmajor", wireType) + } + m.Devmajor = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStat + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Devmajor |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Devminor", wireType) + } + m.Devminor = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStat + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Devminor |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Xattrs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStat + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStat + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthStat + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Xattrs == nil { + m.Xattrs = make(map[string][]byte) + } + var mapkey string + mapvalue := []byte{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStat + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStat + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + 
if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthStat + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthStat + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapbyteLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStat + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapbyteLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intMapbyteLen := int(mapbyteLen) + if intMapbyteLen < 0 { + return ErrInvalidLengthStat + } + postbytesIndex := iNdEx + intMapbyteLen + if postbytesIndex < 0 { + return ErrInvalidLengthStat + } + if postbytesIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = make([]byte, mapbyteLen) + copy(mapvalue, dAtA[iNdEx:postbytesIndex]) + iNdEx = postbytesIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipStat(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthStat + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Xattrs[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipStat(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthStat + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthStat + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipStat(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowStat + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowStat + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowStat + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthStat + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupStat + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthStat + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthStat = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowStat = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupStat = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/tonistiigi/fsutil/types/stat.proto b/vendor/github.com/tonistiigi/fsutil/types/stat.proto new file mode 100644 index 00000000..4138be69 --- /dev/null +++ 
b/vendor/github.com/tonistiigi/fsutil/types/stat.proto @@ -0,0 +1,19 @@ +syntax = "proto3"; + +package fsutil.types; + +option go_package = "types"; + +message Stat { + string path = 1; + uint32 mode = 2; + uint32 uid = 3; + uint32 gid = 4; + int64 size = 5; + int64 modTime = 6; + // int32 typeflag = 7; + string linkname = 7; + int64 devmajor = 8; + int64 devminor = 9; + map xattrs = 10; +} \ No newline at end of file diff --git a/vendor/github.com/tonistiigi/fsutil/types/wire.pb.go b/vendor/github.com/tonistiigi/fsutil/types/wire.pb.go new file mode 100644 index 00000000..9e22269e --- /dev/null +++ b/vendor/github.com/tonistiigi/fsutil/types/wire.pb.go @@ -0,0 +1,575 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: wire.proto + +package types + +import ( + bytes "bytes" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strconv "strconv" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type Packet_PacketType int32 + +const ( + PACKET_STAT Packet_PacketType = 0 + PACKET_REQ Packet_PacketType = 1 + PACKET_DATA Packet_PacketType = 2 + PACKET_FIN Packet_PacketType = 3 + PACKET_ERR Packet_PacketType = 4 +) + +var Packet_PacketType_name = map[int32]string{ + 0: "PACKET_STAT", + 1: "PACKET_REQ", + 2: "PACKET_DATA", + 3: "PACKET_FIN", + 4: "PACKET_ERR", +} + +var Packet_PacketType_value = map[string]int32{ + "PACKET_STAT": 0, + "PACKET_REQ": 1, + "PACKET_DATA": 2, + "PACKET_FIN": 3, + "PACKET_ERR": 4, +} + +func (Packet_PacketType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_f2dcdddcdf68d8e0, []int{0, 0} +} + +type Packet struct { + Type Packet_PacketType `protobuf:"varint,1,opt,name=type,proto3,enum=fsutil.types.Packet_PacketType" json:"type,omitempty"` + Stat *Stat `protobuf:"bytes,2,opt,name=stat,proto3" json:"stat,omitempty"` + ID uint32 `protobuf:"varint,3,opt,name=ID,proto3" json:"ID,omitempty"` + Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"` +} + +func (m *Packet) Reset() { *m = Packet{} } +func (*Packet) ProtoMessage() {} +func (*Packet) Descriptor() ([]byte, []int) { + return fileDescriptor_f2dcdddcdf68d8e0, []int{0} +} +func (m *Packet) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Packet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Packet.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Packet) XXX_Merge(src proto.Message) { + xxx_messageInfo_Packet.Merge(m, src) +} +func (m *Packet) XXX_Size() int { + return m.Size() +} +func (m *Packet) XXX_DiscardUnknown() { + xxx_messageInfo_Packet.DiscardUnknown(m) +} + +var xxx_messageInfo_Packet proto.InternalMessageInfo + +func (m *Packet) GetType() Packet_PacketType { + if m != nil { + return m.Type + } + return PACKET_STAT +} + +func (m *Packet) GetStat() *Stat { + if m != nil { + return m.Stat + } + return nil +} + +func (m *Packet) GetID() uint32 { + if m != nil { + 
return m.ID + } + return 0 +} + +func (m *Packet) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func init() { + proto.RegisterEnum("fsutil.types.Packet_PacketType", Packet_PacketType_name, Packet_PacketType_value) + proto.RegisterType((*Packet)(nil), "fsutil.types.Packet") +} + +func init() { proto.RegisterFile("wire.proto", fileDescriptor_f2dcdddcdf68d8e0) } + +var fileDescriptor_f2dcdddcdf68d8e0 = []byte{ + // 276 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2a, 0xcf, 0x2c, 0x4a, + 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x49, 0x2b, 0x2e, 0x2d, 0xc9, 0xcc, 0xd1, 0x2b, + 0xa9, 0x2c, 0x48, 0x2d, 0x96, 0xe2, 0x2a, 0x2e, 0x49, 0x2c, 0x81, 0xc8, 0x28, 0xbd, 0x64, 0xe4, + 0x62, 0x0b, 0x48, 0x4c, 0xce, 0x4e, 0x2d, 0x11, 0x32, 0xe6, 0x62, 0x01, 0xc9, 0x4b, 0x30, 0x2a, + 0x30, 0x6a, 0xf0, 0x19, 0xc9, 0xeb, 0x21, 0xeb, 0xd1, 0x83, 0xa8, 0x81, 0x52, 0x21, 0x95, 0x05, + 0xa9, 0x41, 0x60, 0xc5, 0x42, 0x6a, 0x5c, 0x2c, 0x20, 0xd3, 0x24, 0x98, 0x14, 0x18, 0x35, 0xb8, + 0x8d, 0x84, 0x50, 0x35, 0x05, 0x97, 0x24, 0x96, 0x04, 0x81, 0xe5, 0x85, 0xf8, 0xb8, 0x98, 0x3c, + 0x5d, 0x24, 0x98, 0x15, 0x18, 0x35, 0x78, 0x83, 0x98, 0x3c, 0x5d, 0x84, 0x84, 0xb8, 0x58, 0x52, + 0x12, 0x4b, 0x12, 0x25, 0x58, 0x14, 0x18, 0x35, 0x78, 0x82, 0xc0, 0x6c, 0xa5, 0x38, 0x2e, 0x2e, + 0x84, 0xf9, 0x42, 0xfc, 0x5c, 0xdc, 0x01, 0x8e, 0xce, 0xde, 0xae, 0x21, 0xf1, 0xc1, 0x21, 0x8e, + 0x21, 0x02, 0x0c, 0x42, 0x7c, 0x5c, 0x5c, 0x50, 0x81, 0x20, 0xd7, 0x40, 0x01, 0x46, 0x24, 0x05, + 0x2e, 0x8e, 0x21, 0x8e, 0x02, 0x4c, 0x48, 0x0a, 0xdc, 0x3c, 0xfd, 0x04, 0x98, 0x91, 0xf8, 0xae, + 0x41, 0x41, 0x02, 0x2c, 0x4e, 0xd6, 0x17, 0x1e, 0xca, 0x31, 0xdc, 0x78, 0x28, 0xc7, 0xf0, 0xe1, + 0xa1, 0x1c, 0x63, 0xc3, 0x23, 0x39, 0xc6, 0x15, 0x8f, 0xe4, 0x18, 0x4f, 0x3c, 0x92, 0x63, 0xbc, + 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, 0x17, 0x8f, 0xe4, 0x18, 0x3e, 0x3c, 0x92, 0x63, + 0x9c, 0xf0, 0x58, 0x8e, 0xe1, 0xc2, 0x63, 0x39, 0x86, 0x1b, 0x8f, 0xe5, 0x18, 0xa2, 0x58, 0xc1, + 0x7e, 0x49, 0x62, 0x03, 0x87, 0x97, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x3f, 0x9d, 0xe3, 0x51, + 0x57, 0x01, 0x00, 0x00, +} + +func (x Packet_PacketType) String() string { + s, ok := Packet_PacketType_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (this *Packet) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Packet) + if !ok { + that2, ok := that.(Packet) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Type != that1.Type { + return false + } + if !this.Stat.Equal(that1.Stat) { + return false + } + if this.ID != that1.ID { + return false + } + if !bytes.Equal(this.Data, that1.Data) { + return false + } + return true +} +func (this *Packet) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&types.Packet{") + s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n") + if this.Stat != nil { + s = append(s, "Stat: "+fmt.Sprintf("%#v", this.Stat)+",\n") + } + s = append(s, "ID: "+fmt.Sprintf("%#v", this.ID)+",\n") + s = append(s, "Data: "+fmt.Sprintf("%#v", this.Data)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringWire(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { 
return &v } ( %#v )", typ, typ, pv) +} +func (m *Packet) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Packet) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Packet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Data) > 0 { + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintWire(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0x22 + } + if m.ID != 0 { + i = encodeVarintWire(dAtA, i, uint64(m.ID)) + i-- + dAtA[i] = 0x18 + } + if m.Stat != nil { + { + size, err := m.Stat.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintWire(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Type != 0 { + i = encodeVarintWire(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintWire(dAtA []byte, offset int, v uint64) int { + offset -= sovWire(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Packet) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Type != 0 { + n += 1 + sovWire(uint64(m.Type)) + } + if m.Stat != nil { + l = m.Stat.Size() + n += 1 + l + sovWire(uint64(l)) + } + if m.ID != 0 { + n += 1 + sovWire(uint64(m.ID)) + } + l = len(m.Data) + if l > 0 { + n += 1 + l + sovWire(uint64(l)) + } + return n +} + +func sovWire(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozWire(x uint64) (n int) { + return sovWire(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Packet) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Packet{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Stat:` + strings.Replace(fmt.Sprintf("%v", this.Stat), "Stat", "Stat", 1) + `,`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `Data:` + fmt.Sprintf("%v", this.Data) + `,`, + `}`, + }, "") + return s +} +func valueToStringWire(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Packet) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWire + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Packet: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Packet: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWire + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= Packet_PacketType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stat", wireType) + } + var msglen int + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWire + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWire + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWire + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Stat == nil { + m.Stat = &Stat{} + } + if err := m.Stat.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWire + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWire + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthWire + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthWire + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWire(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWire + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWire + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipWire(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWire + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWire + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWire + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthWire + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupWire + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthWire + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthWire = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowWire = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupWire = fmt.Errorf("proto: unexpected end of group") +) 
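For reference, here is a minimal round-trip of the generated `Packet` type vendored above. This is an illustrative sketch, not part of the patch: the sample path, mode, and size are invented, but it uses only identifiers that appear in the generated code (`types.Packet`, `types.PACKET_STAT`, `types.Stat`, and their `Marshal`/`Unmarshal`/`Equal` methods). It exercises the same encode/decode cycle that `SendMsg`/`RecvMsg` perform on every frame of the send/receive protocol.

```go
package main

import (
	"fmt"

	"github.com/tonistiigi/fsutil/types"
)

func main() {
	// A PACKET_STAT frame describing one file, as the sender's walk
	// loop emits for each visited path before any data is requested.
	p := &types.Packet{
		Type: types.PACKET_STAT,
		Stat: &types.Stat{Path: "dir/file.txt", Mode: 0644, Size_: 42},
	}

	// Marshal/Unmarshal are the gogo/protobuf round-trip generated in
	// wire.pb.go; the stream implementations invoke them per message.
	dt, err := p.Marshal()
	if err != nil {
		panic(err)
	}

	var q types.Packet
	if err := q.Unmarshal(dt); err != nil {
		panic(err)
	}

	// Packet_PacketType has a generated String() method and Equal
	// compares every field, so this prints: PACKET_STAT dir/file.txt true
	fmt.Println(q.Type, q.Stat.Path, q.Equal(p))
}
```

Protocol-wise (see send.go and receive.go above): the sender streams one PACKET_STAT per path and a final empty PACKET_STAT as a terminator; the receiver requests regular-file contents by numeric ID with PACKET_REQ; contents arrive as PACKET_DATA frames, with an empty data payload marking end-of-file; PACKET_FIN closes the session and PACKET_ERR propagates failures in either direction.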
diff --git a/vendor/github.com/tonistiigi/fsutil/types/wire.proto b/vendor/github.com/tonistiigi/fsutil/types/wire.proto new file mode 100644 index 00000000..3e85000c --- /dev/null +++ b/vendor/github.com/tonistiigi/fsutil/types/wire.proto @@ -0,0 +1,21 @@ +syntax = "proto3"; + +package fsutil.types; + +option go_package = "types"; + +import "stat.proto"; + +message Packet { + enum PacketType { + PACKET_STAT = 0; + PACKET_REQ = 1; + PACKET_DATA = 2; + PACKET_FIN = 3; + PACKET_ERR = 4; + } + PacketType type = 1; + Stat stat = 2; + uint32 ID = 3; + bytes data = 4; +} diff --git a/vendor/github.com/tonistiigi/fsutil/validator.go b/vendor/github.com/tonistiigi/fsutil/validator.go new file mode 100644 index 00000000..2bd1287a --- /dev/null +++ b/vendor/github.com/tonistiigi/fsutil/validator.go @@ -0,0 +1,92 @@ +package fsutil + +import ( + "os" + "path" + "runtime" + "sort" + "strings" + + "github.com/pkg/errors" +) + +type parent struct { + dir string + last string +} + +type Validator struct { + parentDirs []parent +} + +func (v *Validator) HandleChange(kind ChangeKind, p string, fi os.FileInfo, err error) (retErr error) { + if err != nil { + return err + } + // test that all paths are in order and all parent dirs were present + if v.parentDirs == nil { + v.parentDirs = make([]parent, 1, 10) + } + if runtime.GOOS == "windows" { + p = strings.Replace(p, "\\", "", -1) + } + if p != path.Clean(p) { + return errors.Errorf("invalid unclean path %s", p) + } + if path.IsAbs(p) { + return errors.Errorf("absolute path %s not allowed", p) + } + dir := path.Dir(p) + base := path.Base(p) + if dir == "." { + dir = "" + } + if dir == ".." || strings.HasPrefix(p, "../") { + return errors.Errorf("invalid path: %s", p) + } + + // find a parent dir from saved records + i := sort.Search(len(v.parentDirs), func(i int) bool { + return ComparePath(v.parentDirs[len(v.parentDirs)-1-i].dir, dir) <= 0 + }) + i = len(v.parentDirs) - 1 - i + if i != len(v.parentDirs)-1 { // skipping back to grandparent + v.parentDirs = v.parentDirs[:i+1] + } + + if dir != v.parentDirs[len(v.parentDirs)-1].dir || v.parentDirs[i].last >= base { + return errors.Errorf("changes out of order: %q %q", p, path.Join(v.parentDirs[i].dir, v.parentDirs[i].last)) + } + v.parentDirs[i].last = base + if kind != ChangeKindDelete && fi.IsDir() { + v.parentDirs = append(v.parentDirs, parent{ + dir: path.Join(dir, base), + last: "", + }) + } + // todo: validate invalid mode combinations + return err +} + +func ComparePath(p1, p2 string) int { + // byte-by-byte comparison to be compatible with str<>str + min := min(len(p1), len(p2)) + for i := 0; i < min; i++ { + switch { + case p1[i] == p2[i]: + continue + case p2[i] != '/' && p1[i] < p2[i] || p1[i] == '/': + return -1 + default: + return 1 + } + } + return len(p1) - len(p2) +} + +func min(x, y int) int { + if x < y { + return x + } + return y +} diff --git a/vendor/github.com/tonistiigi/fsutil/walker.go b/vendor/github.com/tonistiigi/fsutil/walker.go new file mode 100644 index 00000000..e0518e23 --- /dev/null +++ b/vendor/github.com/tonistiigi/fsutil/walker.go @@ -0,0 +1,230 @@ +package fsutil + +import ( + "context" + "os" + "path/filepath" + "strings" + "syscall" + "time" + + "github.com/docker/docker/pkg/fileutils" + "github.com/pkg/errors" + "github.com/tonistiigi/fsutil/types" +) + +type WalkOpt struct { + IncludePatterns []string + ExcludePatterns []string + // FollowPaths contains symlinks that are resolved into include patterns + // before performing the fs walk + FollowPaths []string + Map
FilterFunc +} + +func Walk(ctx context.Context, p string, opt *WalkOpt, fn filepath.WalkFunc) error { + root, err := filepath.EvalSymlinks(p) + if err != nil { + return errors.Wrapf(err, "failed to resolve %s", root) + } + fi, err := os.Stat(root) + if err != nil { + return errors.Wrapf(err, "failed to stat: %s", root) + } + if !fi.IsDir() { + return errors.Errorf("%s is not a directory", root) + } + + var pm *fileutils.PatternMatcher + if opt != nil && opt.ExcludePatterns != nil { + pm, err = fileutils.NewPatternMatcher(opt.ExcludePatterns) + if err != nil { + return errors.Wrapf(err, "invalid excludepaths %s", opt.ExcludePatterns) + } + } + + var includePatterns []string + if opt != nil && opt.IncludePatterns != nil { + includePatterns = make([]string, len(opt.IncludePatterns)) + for k := range opt.IncludePatterns { + includePatterns[k] = filepath.Clean(opt.IncludePatterns[k]) + } + } + if opt != nil && opt.FollowPaths != nil { + targets, err := FollowLinks(p, opt.FollowPaths) + if err != nil { + return err + } + if targets != nil { + includePatterns = append(includePatterns, targets...) + includePatterns = dedupePaths(includePatterns) + } + } + + var lastIncludedDir string + + seenFiles := make(map[uint64]string) + return filepath.Walk(root, func(path string, fi os.FileInfo, err error) (retErr error) { + if err != nil { + if os.IsNotExist(err) { + return filepath.SkipDir + } + return err + } + defer func() { + if retErr != nil && isNotExist(retErr) { + retErr = filepath.SkipDir + } + }() + origpath := path + path, err = filepath.Rel(root, path) + if err != nil { + return err + } + // Skip root + if path == "." { + return nil + } + + if opt != nil { + if includePatterns != nil { + skip := false + if lastIncludedDir != "" { + if strings.HasPrefix(path, lastIncludedDir+string(filepath.Separator)) { + skip = true + } + } + + if !skip { + matched := false + partial := true + for _, p := range includePatterns { + if ok, p := matchPrefix(p, path); ok { + matched = true + if !p { + partial = false + break + } + } + } + if !matched { + if fi.IsDir() { + return filepath.SkipDir + } + return nil + } + if !partial && fi.IsDir() { + lastIncludedDir = path + } + } + } + if pm != nil { + m, err := pm.Matches(path) + if err != nil { + return errors.Wrap(err, "failed to match excludepatterns") + } + + if m { + if fi.IsDir() { + if !pm.Exclusions() { + return filepath.SkipDir + } + dirSlash := path + string(filepath.Separator) + for _, pat := range pm.Patterns() { + if !pat.Exclusion() { + continue + } + patStr := pat.String() + string(filepath.Separator) + if strings.HasPrefix(patStr, dirSlash) { + goto passedFilter + } + } + return filepath.SkipDir + } + return nil + } + } + } + + passedFilter: + stat, err := mkstat(origpath, path, fi, seenFiles) + if err != nil { + return err + } + + select { + case <-ctx.Done(): + return ctx.Err() + default: + if opt != nil && opt.Map != nil { + if allowed := opt.Map(stat.Path, stat); !allowed { + return nil + } + } + if err := fn(stat.Path, &StatInfo{stat}, nil); err != nil { + return err + } + } + return nil + }) +} + +type StatInfo struct { + *types.Stat +} + +func (s *StatInfo) Name() string { + return filepath.Base(s.Stat.Path) +} +func (s *StatInfo) Size() int64 { + return s.Stat.Size_ +} +func (s *StatInfo) Mode() os.FileMode { + return os.FileMode(s.Stat.Mode) +} +func (s *StatInfo) ModTime() time.Time { + return time.Unix(s.Stat.ModTime/1e9, s.Stat.ModTime%1e9) +} +func (s *StatInfo) IsDir() bool { + return s.Mode().IsDir() +} +func (s *StatInfo) Sys() 
interface{} { + return s.Stat +} + +func matchPrefix(pattern, name string) (bool, bool) { + count := strings.Count(name, string(filepath.Separator)) + partial := false + if strings.Count(pattern, string(filepath.Separator)) > count { + pattern = trimUntilIndex(pattern, string(filepath.Separator), count) + partial = true + } + m, _ := filepath.Match(pattern, name) + return m, partial +} + +func trimUntilIndex(str, sep string, count int) string { + s := str + i := 0 + c := 0 + for { + idx := strings.Index(s, sep) + s = s[idx+len(sep):] + i += idx + len(sep) + c++ + if c > count { + return str[:i-len(sep)] + } + } +} + +func isNotExist(err error) bool { + err = errors.Cause(err) + if os.IsNotExist(err) { + return true + } + if pe, ok := err.(*os.PathError); ok { + err = pe.Err + } + return err == syscall.ENOTDIR +} diff --git a/vendor/github.com/urfave/cli/.travis.yml b/vendor/github.com/urfave/cli/.travis.yml index e500b380..d36c2243 100644 --- a/vendor/github.com/urfave/cli/.travis.yml +++ b/vendor/github.com/urfave/cli/.travis.yml @@ -28,8 +28,8 @@ before_script: script: - go run build.go vet - go run build.go test - - go run build.go gfmrun - - go run build.go toc + - go run build.go gfmrun docs/v1/manual.md + - go run build.go toc docs/v1/manual.md after_success: - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/urfave/cli/CHANGELOG.md b/vendor/github.com/urfave/cli/CHANGELOG.md deleted file mode 100644 index 7c1a27ec..00000000 --- a/vendor/github.com/urfave/cli/CHANGELOG.md +++ /dev/null @@ -1,504 +0,0 @@ -# Change Log - -**ATTN**: This project uses [semantic versioning](http://semver.org/). - -## [Unreleased] - -## [1.22.1] - 2019-09-11 - -### Fixed - -* Hide output of hidden commands on man pages in [urfave/cli/pull/889](https://github.com/urfave/cli/pull/889) via [@crosbymichael](https://github.com/crosbymichael) -* Don't generate fish completion for hidden commands [urfave/cli/pull/891](https://github.com/urfave/891) via [@saschagrunert](https://github.com/saschagrunert) -* Using short flag names for required flags throws an error in [urfave/cli/pull/890](https://github.com/urfave/cli/pull/890) via [@asahasrabuddhe](https://github.com/asahasrabuddhe) - -### Changed - -* Remove flag code generation logic, legacy python test runner in [urfave/cli/pull/883](https://github.com/urfave/cli/pull/883) via [@asahasrabuddhe](https://github.com/asahasrabuddhe) -* Enable Go Modules support, drop support for `Go 1.10` add support for `Go 1.13` in [urfave/cli/pull/885](https://github.com/urfave/cli/pull/885) via [@asahasrabuddhe](https://github.com/asahasrabuddhe) - -## [1.22.0] - 2019-09-07 - -### Fixed - -* Fix Subcommands not falling back to `app.ExitEventHandler` in [urfave/cli/pull/856](https://github.com/urfave/cli/pull/856) via [@FaranIdo](https://github.com/FaranIdo) - -### Changed - -* Clarify that altsrc supports both TOML and JSON in [urfave/cli/pull/774](https://github.com/urfave/cli/pull/774) via [@whereswaldon](https://github.com/whereswaldon) -* Made the exit code example more clear in [urfave/cli/pull/823](https://github.com/urfave/cli/pull/823) via [@xordspar0](https://github.com/xordspar0) -* Removed the use of python for internal flag generation in [urfave/cli/pull/836](https://github.com/urfave/cli/pull/836) via [@asahasrabuddhe](https://github.com/asahasrabuddhe) -* Changed the supported go versions to `1.10`, `1.11`, `1.12` in [urfave/cli/pull/843](https://github.com/urfave/cli/pull/843) via [@lafriks](https://github.com/lafriks) -* Changed the v1 
releases section in the readme in [urfave/cli/pull/862](https://github.com/urfave/cli/pull/862) via [@russoj88](https://github.com/russoj88) -* Cleaned up go modules in [urfave/cli/pull/874](https://github.com/urfave/cli/pull/874) via [@saschagrunert](https://github.com/saschagrunert) - -### Added - -* Added `UseShortOptionHandling` for combining short flags in [urfave/cli/pull/735](https://github.com/urfave/cli/pull/735) via [@rliebz](https://github.com/rliebz) -* Added support for flags bash completion in [urfave/cli/pull/808](https://github.com/urfave/cli/pull/808) via [@yogeshlonkar](https://github.com/yogeshlonkar) -* Added the `TakesFile` indicator to flag in [urfave/cli/pull/851](https://github.com/urfave/cli/pull/851) via [@saschagrunert](https://github.com/saschagrunert) -* Added fish shell completion support in [urfave/cli/pull/848](https://github.com/urfave/cli/pull/848) via [@saschagrunert](https://github.com/saschagrunert) - -## [1.21.0] - 2019-08-02 - -### Fixed - -* Fix using "slice" flag types with `EnvVar` in [urfave/cli/pull/687](https://github.com/urfave/cli/pull/687) via [@joshuarubin](https://github.com/joshuarubin) -* Fix regression of `SkipFlagParsing` behavior in [urfave/cli/pull/697](https://github.com/urfave/cli/pull/697) via [@jszwedko](https://github.com/jszwedko) -* Fix handling `ShortOptions` and `SkipArgReorder` in [urfave/cli/pull/686](https://github.com/urfave/cli/pull/686) via [@baude](https://github.com/baude) -* Fix args reordering when bool flags are present in [urfave/cli/pull/712](https://github.com/urfave/cli/pull/712) via [@windler](https://github.com/windler) -* Fix parsing of short options in [urfave/cli/pull/758](https://github.com/urfave/cli/pull/758) via [@vrothberg](https://github.com/vrothberg) -* Fix unaligned indents for the command help messages in [urfave/cli/pull/806](https://github.com/urfave/cli/pull/806) via [@mingrammer](https://github.com/mingrammer) - -### Changed - -* Cleaned up help output in [urfave/cli/pull/664](https://github.com/urfave/cli/pull/664) via [@maguro](https://github.com/maguro) -* Remove redundant nil checks in [urfave/cli/pull/773](https://github.com/urfave/cli/pull/773) via [@teresy](https://github.com/teresy) -* Case is now considered when sorting strings in [urfave/cli/pull/676](https://github.com/urfave/cli/pull/676) via [@rliebz](https://github.com/rliebz) - -### Added - -* Added _"required flags"_ support in [urfave/cli/pull/819](https://github.com/urfave/cli/pull/819) via [@lynncyrin](https://github.com/lynncyrin/) -* Backport JSON `InputSource` to v1 in [urfave/cli/pull/598](https://github.com/urfave/cli/pull/598) via [@jszwedko](https://github.com/jszwedko) -* Allow more customization of flag help strings in [urfave/cli/pull/661](https://github.com/urfave/cli/pull/661) via [@rliebz](https://github.com/rliebz) -* Allow custom `ExitError` handler function in [urfave/cli/pull/628](https://github.com/urfave/cli/pull/628) via [@phinnaeus](https://github.com/phinnaeus) -* Allow loading a variable from a file in [urfave/cli/pull/675](https://github.com/urfave/cli/pull/675) via [@jmccann](https://github.com/jmccann) -* Allow combining short bool names in [urfave/cli/pull/684](https://github.com/urfave/cli/pull/684) via [@baude](https://github.com/baude) -* Added test coverage to context in [urfave/cli/pull/788](https://github.com/urfave/cli/pull/788) via [@benzvan](https://github.com/benzvan) -* Added go module support in [urfave/cli/pull/831](https://github.com/urfave/cli/pull/831) via 
[@saschagrunert](https://github.com/saschagrunert) - -## [1.20.0] - 2017-08-10 - -### Fixed - -* `HandleExitCoder` is now correctly iterates over all errors in - a `MultiError`. The exit code is the exit code of the last error or `1` if - there are no `ExitCoder`s in the `MultiError`. -* Fixed YAML file loading on Windows (previously would fail validate the file path) -* Subcommand `Usage`, `Description`, `ArgsUsage`, `OnUsageError` correctly - propogated -* `ErrWriter` is now passed downwards through command structure to avoid the - need to redefine it -* Pass `Command` context into `OnUsageError` rather than parent context so that - all fields are avaiable -* Errors occuring in `Before` funcs are no longer double printed -* Use `UsageText` in the help templates for commands and subcommands if - defined; otherwise build the usage as before (was previously ignoring this - field) -* `IsSet` and `GlobalIsSet` now correctly return whether a flag is set if - a program calls `Set` or `GlobalSet` directly after flag parsing (would - previously only return `true` if the flag was set during parsing) - -### Changed - -* No longer exit the program on command/subcommand error if the error raised is - not an `OsExiter`. This exiting behavior was introduced in 1.19.0, but was - determined to be a regression in functionality. See [the - PR](https://github.com/urfave/cli/pull/595) for discussion. - -### Added - -* `CommandsByName` type was added to make it easy to sort `Command`s by name, - alphabetically -* `altsrc` now handles loading of string and int arrays from TOML -* Support for definition of custom help templates for `App` via - `CustomAppHelpTemplate` -* Support for arbitrary key/value fields on `App` to be used with - `CustomAppHelpTemplate` via `ExtraInfo` -* `HelpFlag`, `VersionFlag`, and `BashCompletionFlag` changed to explictly be - `cli.Flag`s allowing for the use of custom flags satisfying the `cli.Flag` - interface to be used. - - -## [1.19.1] - 2016-11-21 - -### Fixed - -- Fixes regression introduced in 1.19.0 where using an `ActionFunc` as - the `Action` for a command would cause it to error rather than calling the - function. Should not have a affected declarative cases using `func(c - *cli.Context) err)`. -- Shell completion now handles the case where the user specifies - `--generate-bash-completion` immediately after a flag that takes an argument. - Previously it call the application with `--generate-bash-completion` as the - flag value. - -## [1.19.0] - 2016-11-19 -### Added -- `FlagsByName` was added to make it easy to sort flags (e.g. `sort.Sort(cli.FlagsByName(app.Flags))`) -- A `Description` field was added to `App` for a more detailed description of - the application (similar to the existing `Description` field on `Command`) -- Flag type code generation via `go generate` -- Write to stderr and exit 1 if action returns non-nil error -- Added support for TOML to the `altsrc` loader -- `SkipArgReorder` was added to allow users to skip the argument reordering. - This is useful if you want to consider all "flags" after an argument as - arguments rather than flags (the default behavior of the stdlib `flag` - library). This is backported functionality from the [removal of the flag - reordering](https://github.com/urfave/cli/pull/398) in the unreleased version - 2 -- For formatted errors (those implementing `ErrorFormatter`), the errors will - be formatted during output. Compatible with `pkg/errors`. 
- -### Changed -- Raise minimum tested/supported Go version to 1.2+ - -### Fixed -- Consider empty environment variables as set (previously environment variables - with the equivalent of `""` would be skipped rather than their value used). -- Return an error if the value in a given environment variable cannot be parsed - as the flag type. Previously these errors were silently swallowed. -- Print full error when an invalid flag is specified (which includes the invalid flag) -- `App.Writer` defaults to `stdout` when `nil` -- If no action is specified on a command or app, the help is now printed instead of `panic`ing -- `App.Metadata` is initialized automatically now (previously was `nil` unless initialized) -- Correctly show help message if `-h` is provided to a subcommand -- `context.(Global)IsSet` now respects environment variables. Previously it - would return `false` if a flag was specified in the environment rather than - as an argument -- Removed deprecation warnings to STDERR to avoid them leaking to the end-user -- `altsrc`s import paths were updated to use `gopkg.in/urfave/cli.v1`. This - fixes issues that occurred when `gopkg.in/urfave/cli.v1` was imported as well - as `altsrc` where Go would complain that the types didn't match - -## [1.18.1] - 2016-08-28 -### Fixed -- Removed deprecation warnings to STDERR to avoid them leaking to the end-user (backported) - -## [1.18.0] - 2016-06-27 -### Added -- `./runtests` test runner with coverage tracking by default -- testing on OS X -- testing on Windows -- `UintFlag`, `Uint64Flag`, and `Int64Flag` types and supporting code - -### Changed -- Use spaces for alignment in help/usage output instead of tabs, making the - output alignment consistent regardless of tab width - -### Fixed -- Printing of command aliases in help text -- Printing of visible flags for both struct and struct pointer flags -- Display the `help` subcommand when using `CommandCategories` -- No longer swallows `panic`s that occur within the `Action`s themselves when - detecting the signature of the `Action` field - -## [1.17.1] - 2016-08-28 -### Fixed -- Removed deprecation warnings to STDERR to avoid them leaking to the end-user - -## [1.17.0] - 2016-05-09 -### Added -- Pluggable flag-level help text rendering via `cli.DefaultFlagStringFunc` -- `context.GlobalBoolT` was added as an analogue to `context.GlobalBool` -- Support for hiding commands by setting `Hidden: true` -- this will hide the - commands in help output - -### Changed -- `Float64Flag`, `IntFlag`, and `DurationFlag` default values are no longer - quoted in help text output. -- All flag types now include `(default: {value})` strings following usage when a - default value can be (reasonably) detected. -- `IntSliceFlag` and `StringSliceFlag` usage strings are now more consistent - with non-slice flag types -- Apps now exit with a code of 3 if an unknown subcommand is specified - (previously they printed "No help topic for...", but still exited 0. This - makes it easier to script around apps built using `cli` since they can trust - that a 0 exit code indicated a successful execution. 
-- cleanups based on [Go Report Card - feedback](https://goreportcard.com/report/github.com/urfave/cli) - -## [1.16.1] - 2016-08-28 -### Fixed -- Removed deprecation warnings to STDERR to avoid them leaking to the end-user - -## [1.16.0] - 2016-05-02 -### Added -- `Hidden` field on all flag struct types to omit from generated help text - -### Changed -- `BashCompletionFlag` (`--enable-bash-completion`) is now omitted from -generated help text via the `Hidden` field - -### Fixed -- handling of error values in `HandleAction` and `HandleExitCoder` - -## [1.15.0] - 2016-04-30 -### Added -- This file! -- Support for placeholders in flag usage strings -- `App.Metadata` map for arbitrary data/state management -- `Set` and `GlobalSet` methods on `*cli.Context` for altering values after -parsing. -- Support for nested lookup of dot-delimited keys in structures loaded from -YAML. - -### Changed -- The `App.Action` and `Command.Action` now prefer a return signature of -`func(*cli.Context) error`, as defined by `cli.ActionFunc`. If a non-nil -`error` is returned, there may be two outcomes: - - If the error fulfills `cli.ExitCoder`, then `os.Exit` will be called - automatically - - Else the error is bubbled up and returned from `App.Run` -- Specifying an `Action` with the legacy return signature of -`func(*cli.Context)` will produce a deprecation message to stderr -- Specifying an `Action` that is not a `func` type will produce a non-zero exit -from `App.Run` -- Specifying an `Action` func that has an invalid (input) signature will -produce a non-zero exit from `App.Run` - -### Deprecated -- -`cli.App.RunAndExitOnError`, which should now be done by returning an error -that fulfills `cli.ExitCoder` to `cli.App.Run`. -- the legacy signature for -`cli.App.Action` of `func(*cli.Context)`, which should now have a return -signature of `func(*cli.Context) error`, as defined by `cli.ActionFunc`. - -### Fixed -- Added missing `*cli.Context.GlobalFloat64` method - -## [1.14.0] - 2016-04-03 (backfilled 2016-04-25) -### Added -- Codebeat badge -- Support for categorization via `CategorizedHelp` and `Categories` on app. - -### Changed -- Use `filepath.Base` instead of `path.Base` in `Name` and `HelpName`. - -### Fixed -- Ensure version is not shown in help text when `HideVersion` set. - -## [1.13.0] - 2016-03-06 (backfilled 2016-04-25) -### Added -- YAML file input support. -- `NArg` method on context. - -## [1.12.0] - 2016-02-17 (backfilled 2016-04-25) -### Added -- Custom usage error handling. -- Custom text support in `USAGE` section of help output. -- Improved help messages for empty strings. -- AppVeyor CI configuration. - -### Changed -- Removed `panic` from default help printer func. -- De-duping and optimizations. - -### Fixed -- Correctly handle `Before`/`After` at command level when no subcommands. -- Case of literal `-` argument causing flag reordering. -- Environment variable hints on Windows. -- Docs updates. - -## [1.11.1] - 2015-12-21 (backfilled 2016-04-25) -### Changed -- Use `path.Base` in `Name` and `HelpName` -- Export `GetName` on flag types. - -### Fixed -- Flag parsing when skipping is enabled. -- Test output cleanup. -- Move completion check to account for empty input case. - -## [1.11.0] - 2015-11-15 (backfilled 2016-04-25) -### Added -- Destination scan support for flags. -- Testing against `tip` in Travis CI config. - -### Changed -- Go version in Travis CI config. - -### Fixed -- Removed redundant tests. -- Use correct example naming in tests. 
- -## [1.10.2] - 2015-10-29 (backfilled 2016-04-25) -### Fixed -- Remove unused var in bash completion. - -## [1.10.1] - 2015-10-21 (backfilled 2016-04-25) -### Added -- Coverage and reference logos in README. - -### Fixed -- Use specified values in help and version parsing. -- Only display app version and help message once. - -## [1.10.0] - 2015-10-06 (backfilled 2016-04-25) -### Added -- More tests for existing functionality. -- `ArgsUsage` at app and command level for help text flexibility. - -### Fixed -- Honor `HideHelp` and `HideVersion` in `App.Run`. -- Remove juvenile word from README. - -## [1.9.0] - 2015-09-08 (backfilled 2016-04-25) -### Added -- `FullName` on command with accompanying help output update. -- Set default `$PROG` in bash completion. - -### Changed -- Docs formatting. - -### Fixed -- Removed self-referential imports in tests. - -## [1.8.0] - 2015-06-30 (backfilled 2016-04-25) -### Added -- Support for `Copyright` at app level. -- `Parent` func at context level to walk up context lineage. - -### Fixed -- Global flag processing at top level. - -## [1.7.1] - 2015-06-11 (backfilled 2016-04-25) -### Added -- Aggregate errors from `Before`/`After` funcs. -- Doc comments on flag structs. -- Include non-global flags when checking version and help. -- Travis CI config updates. - -### Fixed -- Ensure slice type flags have non-nil values. -- Collect global flags from the full command hierarchy. -- Docs prose. - -## [1.7.0] - 2015-05-03 (backfilled 2016-04-25) -### Changed -- `HelpPrinter` signature includes output writer. - -### Fixed -- Specify go 1.1+ in docs. -- Set `Writer` when running command as app. - -## [1.6.0] - 2015-03-23 (backfilled 2016-04-25) -### Added -- Multiple author support. -- `NumFlags` at context level. -- `Aliases` at command level. - -### Deprecated -- `ShortName` at command level. - -### Fixed -- Subcommand help output. -- Backward compatible support for deprecated `Author` and `Email` fields. -- Docs regarding `Names`/`Aliases`. - -## [1.5.0] - 2015-02-20 (backfilled 2016-04-25) -### Added -- `After` hook func support at app and command level. - -### Fixed -- Use parsed context when running command as subcommand. -- Docs prose. - -## [1.4.1] - 2015-01-09 (backfilled 2016-04-25) -### Added -- Support for hiding `-h / --help` flags, but not `help` subcommand. -- Stop flag parsing after `--`. - -### Fixed -- Help text for generic flags to specify single value. -- Use double quotes in output for defaults. -- Use `ParseInt` instead of `ParseUint` for int environment var values. -- Use `0` as base when parsing int environment var values. - -## [1.4.0] - 2014-12-12 (backfilled 2016-04-25) -### Added -- Support for environment variable lookup "cascade". -- Support for `Stdout` on app for output redirection. - -### Fixed -- Print command help instead of app help in `ShowCommandHelp`. - -## [1.3.1] - 2014-11-13 (backfilled 2016-04-25) -### Added -- Docs and example code updates. - -### Changed -- Default `-v / --version` flag made optional. - -## [1.3.0] - 2014-08-10 (backfilled 2016-04-25) -### Added -- `FlagNames` at context level. -- Exposed `VersionPrinter` var for more control over version output. -- Zsh completion hook. -- `AUTHOR` section in default app help template. -- Contribution guidelines. -- `DurationFlag` type. - -## [1.2.0] - 2014-08-02 -### Added -- Support for environment variable defaults on flags plus tests. - -## [1.1.0] - 2014-07-15 -### Added -- Bash completion. -- Optional hiding of built-in help command. 
-- Optional skipping of flag parsing at command level. -- `Author`, `Email`, and `Compiled` metadata on app. -- `Before` hook func support at app and command level. -- `CommandNotFound` func support at app level. -- Command reference available on context. -- `GenericFlag` type. -- `Float64Flag` type. -- `BoolTFlag` type. -- `IsSet` flag helper on context. -- More flag lookup funcs at context level. -- More tests & docs. - -### Changed -- Help template updates to account for presence/absence of flags. -- Separated subcommand help template. -- Exposed `HelpPrinter` var for more control over help output. - -## [1.0.0] - 2013-11-01 -### Added -- `help` flag in default app flag set and each command flag set. -- Custom handling of argument parsing errors. -- Command lookup by name at app level. -- `StringSliceFlag` type and supporting `StringSlice` type. -- `IntSliceFlag` type and supporting `IntSlice` type. -- Slice type flag lookups by name at context level. -- Export of app and command help functions. -- More tests & docs. - -## 0.1.0 - 2013-07-22 -### Added -- Initial implementation. - -[Unreleased]: https://github.com/urfave/cli/compare/v1.22.1...HEAD -[1.22.1]: https://github.com/urfave/cli/compare/v1.22.0...v1.22.1 -[1.22.0]: https://github.com/urfave/cli/compare/v1.21.0...v1.22.0 -[1.21.0]: https://github.com/urfave/cli/compare/v1.20.0...v1.21.0 -[1.20.0]: https://github.com/urfave/cli/compare/v1.19.1...v1.20.0 -[1.19.1]: https://github.com/urfave/cli/compare/v1.19.0...v1.19.1 -[1.19.0]: https://github.com/urfave/cli/compare/v1.18.0...v1.19.0 -[1.18.0]: https://github.com/urfave/cli/compare/v1.17.0...v1.18.0 -[1.17.0]: https://github.com/urfave/cli/compare/v1.16.0...v1.17.0 -[1.16.0]: https://github.com/urfave/cli/compare/v1.15.0...v1.16.0 -[1.15.0]: https://github.com/urfave/cli/compare/v1.14.0...v1.15.0 -[1.14.0]: https://github.com/urfave/cli/compare/v1.13.0...v1.14.0 -[1.13.0]: https://github.com/urfave/cli/compare/v1.12.0...v1.13.0 -[1.12.0]: https://github.com/urfave/cli/compare/v1.11.1...v1.12.0 -[1.11.1]: https://github.com/urfave/cli/compare/v1.11.0...v1.11.1 -[1.11.0]: https://github.com/urfave/cli/compare/v1.10.2...v1.11.0 -[1.10.2]: https://github.com/urfave/cli/compare/v1.10.1...v1.10.2 -[1.10.1]: https://github.com/urfave/cli/compare/v1.10.0...v1.10.1 -[1.10.0]: https://github.com/urfave/cli/compare/v1.9.0...v1.10.0 -[1.9.0]: https://github.com/urfave/cli/compare/v1.8.0...v1.9.0 -[1.8.0]: https://github.com/urfave/cli/compare/v1.7.1...v1.8.0 -[1.7.1]: https://github.com/urfave/cli/compare/v1.7.0...v1.7.1 -[1.7.0]: https://github.com/urfave/cli/compare/v1.6.0...v1.7.0 -[1.6.0]: https://github.com/urfave/cli/compare/v1.5.0...v1.6.0 -[1.5.0]: https://github.com/urfave/cli/compare/v1.4.1...v1.5.0 -[1.4.1]: https://github.com/urfave/cli/compare/v1.4.0...v1.4.1 -[1.4.0]: https://github.com/urfave/cli/compare/v1.3.1...v1.4.0 -[1.3.1]: https://github.com/urfave/cli/compare/v1.3.0...v1.3.1 -[1.3.0]: https://github.com/urfave/cli/compare/v1.2.0...v1.3.0 -[1.2.0]: https://github.com/urfave/cli/compare/v1.1.0...v1.2.0 -[1.1.0]: https://github.com/urfave/cli/compare/v1.0.0...v1.1.0 -[1.0.0]: https://github.com/urfave/cli/compare/v0.1.0...v1.0.0 diff --git a/vendor/github.com/urfave/cli/CONTRIBUTING.md b/vendor/github.com/urfave/cli/CONTRIBUTING.md deleted file mode 100644 index 9a4640a3..00000000 --- a/vendor/github.com/urfave/cli/CONTRIBUTING.md +++ /dev/null @@ -1,18 +0,0 @@ -## Contributing - -Use @urfave/cli to ping the maintainers. 
- -Feel free to put up a pull request to fix a bug or maybe add a feature. We will -give it a code review and make sure that it does not break backwards -compatibility. If collaborators agree that it is in line with -the vision of the project, we will work with you to get the code into -a mergeable state and merge it into the master branch. - -If you have contributed something significant to the project, we will most -likely add you as a collaborator. As a collaborator you are given the ability -to merge others pull requests. It is very important that new code does not -break existing code, so be careful about what code you do choose to merge. - -If you feel like you have contributed to the project but have not yet been added -as a collaborator, we probably forgot to add you :sweat_smile:. Please open an -issue! diff --git a/vendor/github.com/urfave/cli/README.md b/vendor/github.com/urfave/cli/README.md index 96720b62..b2abbcf9 100644 --- a/vendor/github.com/urfave/cli/README.md +++ b/vendor/github.com/urfave/cli/README.md @@ -13,60 +13,19 @@ cli is a simple, fast, and fun package for building command line apps in Go. The goal is to enable developers to write fast and distributable command line applications in an expressive way. - +## Usage Documentation -- [Overview](#overview) -- [Installation](#installation) - * [Supported platforms](#supported-platforms) - * [Using the `v2` branch](#using-the-v2-branch) - * [Using `v1` releases](#using-v1-releases) -- [Getting Started](#getting-started) -- [Examples](#examples) - * [Arguments](#arguments) - * [Flags](#flags) - + [Placeholder Values](#placeholder-values) - + [Alternate Names](#alternate-names) - + [Ordering](#ordering) - + [Values from the Environment](#values-from-the-environment) - + [Values from files](#values-from-files) - + [Values from alternate input sources (YAML, TOML, and others)](#values-from-alternate-input-sources-yaml-toml-and-others) - + [Precedence](#precedence) - * [Subcommands](#subcommands) - * [Subcommands categories](#subcommands-categories) - * [Exit code](#exit-code) - * [Combining short options](#combining-short-options) - * [Bash Completion](#bash-completion) - + [Enabling](#enabling) - + [Distribution](#distribution) - + [Customization](#customization) - * [Generated Help Text](#generated-help-text) - + [Customization](#customization-1) - * [Version Flag](#version-flag) - + [Customization](#customization-2) - + [Full API Example](#full-api-example) -- [Contribution Guidelines](#contribution-guidelines) +Usage documentation exists for each major version - - -## Overview - -Command line apps are usually so tiny that there is absolutely no reason why -your code should *not* be self-documenting. Things like generating help text and -parsing command flags/options should not hinder productivity when writing a -command line app. - -**This is where cli comes into play.** cli makes command line programming fun, -organized, and expressive! +- `v1` - [./docs/v1/manual.md](./docs/v1/manual.md) +- `v2` - 🚧 documentation for `v2` is WIP 🚧 ## Installation Make sure you have a working Go environment. Go version 1.10+ is supported. [See the install instructions for Go](http://golang.org/doc/install.html). -To install cli, simply run: -``` -$ go get github.com/urfave/cli -``` +### GOPATH Make sure your `PATH` includes the `$GOPATH/bin` directory so your commands can be easily used: @@ -80,30 +39,6 @@ cli is tested against multiple versions of Go on Linux, and against the latest released version of Go on OS X and Windows. 
For full details, see [`./.travis.yml`](./.travis.yml) and [`./appveyor.yml`](./appveyor.yml). -### Using the `v2` branch - -**Warning**: The `v2` branch is currently unreleased and considered unstable. - -There is currently a long-lived branch named `v2` that is intended to land as -the new `master` branch once development there has settled down. The current -`master` branch (mirrored as `v1`) is being manually merged into `v2` on -an irregular human-based schedule, but generally if one wants to "upgrade" to -`v2` *now* and accept the volatility (read: "awesomeness") that comes along with -that, please use whatever version pinning of your preference, such as via -`gopkg.in`: - -``` -$ go get gopkg.in/urfave/cli.v2 -``` - -``` go -... -import ( - "gopkg.in/urfave/cli.v2" // imports as package "cli" -) -... -``` - ### Using `v1` releases ``` @@ -118,1454 +53,18 @@ import ( ... ``` +### Using `v2` releases -## Getting Started - -One of the philosophies behind cli is that an API should be playful and full of -discovery. So a cli app can be as little as one line of code in `main()`. - - -``` go -package main - -import ( - "log" - "os" - - "github.com/urfave/cli" -) - -func main() { - err := cli.NewApp().Run(os.Args) - if err != nil { - log.Fatal(err) - } -} -``` - -This app will run and show help text, but is not very useful. Let's give an -action to execute and some help documentation: - - -``` go -package main - -import ( - "fmt" - "log" - "os" - - "github.com/urfave/cli" -) - -func main() { - app := cli.NewApp() - app.Name = "boom" - app.Usage = "make an explosive entrance" - app.Action = func(c *cli.Context) error { - fmt.Println("boom! I say!") - return nil - } - - err := app.Run(os.Args) - if err != nil { - log.Fatal(err) - } -} -``` - -Running this already gives you a ton of functionality, plus support for things -like subcommands and flags, which are covered below. - -## Examples - -Being a programmer can be a lonely job. Thankfully by the power of automation -that is not the case! Let's create a greeter app to fend off our demons of -loneliness! - -Start by creating a directory named `greet`, and within it, add a file, -`greet.go` with the following code in it: - - -``` go -package main - -import ( - "fmt" - "log" - "os" - - "github.com/urfave/cli" -) - -func main() { - app := cli.NewApp() - app.Name = "greet" - app.Usage = "fight the loneliness!" - app.Action = func(c *cli.Context) error { - fmt.Println("Hello friend!") - return nil - } - - err := app.Run(os.Args) - if err != nil { - log.Fatal(err) - } -} -``` - -Install our command to the `$GOPATH/bin` directory: +**Warning**: `v2` is in a pre-release state. ``` -$ go install +$ go get github.com/urfave/cli.v2 ``` -Finally run our new command: - -``` -$ greet -Hello friend! -``` - -cli also generates neat help text: - -``` -$ greet help -NAME: - greet - fight the loneliness! - -USAGE: - greet [global options] command [command options] [arguments...] 
- -VERSION: - 0.0.0 - -COMMANDS: - help, h Shows a list of commands or help for one command - -GLOBAL OPTIONS - --version Shows version information -``` - -### Arguments - -You can lookup arguments by calling the `Args` function on `cli.Context`, e.g.: - - -``` go -package main - -import ( - "fmt" - "log" - "os" - - "github.com/urfave/cli" -) - -func main() { - app := cli.NewApp() - - app.Action = func(c *cli.Context) error { - fmt.Printf("Hello %q", c.Args().Get(0)) - return nil - } - - err := app.Run(os.Args) - if err != nil { - log.Fatal(err) - } -} -``` - -### Flags - -Setting and querying flags is simple. - - -``` go -package main - -import ( - "fmt" - "log" - "os" - - "github.com/urfave/cli" -) - -func main() { - app := cli.NewApp() - - app.Flags = []cli.Flag { - cli.StringFlag{ - Name: "lang", - Value: "english", - Usage: "language for the greeting", - }, - } - - app.Action = func(c *cli.Context) error { - name := "Nefertiti" - if c.NArg() > 0 { - name = c.Args().Get(0) - } - if c.String("lang") == "spanish" { - fmt.Println("Hola", name) - } else { - fmt.Println("Hello", name) - } - return nil - } - - err := app.Run(os.Args) - if err != nil { - log.Fatal(err) - } -} -``` - -You can also set a destination variable for a flag, to which the content will be -scanned. - - -``` go -package main - -import ( - "log" - "os" - "fmt" - - "github.com/urfave/cli" -) - -func main() { - var language string - - app := cli.NewApp() - - app.Flags = []cli.Flag { - cli.StringFlag{ - Name: "lang", - Value: "english", - Usage: "language for the greeting", - Destination: &language, - }, - } - - app.Action = func(c *cli.Context) error { - name := "someone" - if c.NArg() > 0 { - name = c.Args()[0] - } - if language == "spanish" { - fmt.Println("Hola", name) - } else { - fmt.Println("Hello", name) - } - return nil - } - - err := app.Run(os.Args) - if err != nil { - log.Fatal(err) - } -} -``` - -See full list of flags at http://godoc.org/github.com/urfave/cli - -#### Placeholder Values - -Sometimes it's useful to specify a flag's value within the usage string itself. -Such placeholders are indicated with back quotes. - -For example this: - - -```go -package main - -import ( - "log" - "os" - - "github.com/urfave/cli" -) - -func main() { - app := cli.NewApp() - - app.Flags = []cli.Flag{ - cli.StringFlag{ - Name: "config, c", - Usage: "Load configuration from `FILE`", - }, - } - - err := app.Run(os.Args) - if err != nil { - log.Fatal(err) - } -} -``` - -Will result in help output like: - -``` ---config FILE, -c FILE Load configuration from FILE -``` - -Note that only the first placeholder is used. Subsequent back-quoted words will -be left as-is. - -#### Alternate Names - -You can set alternate (or short) names for flags by providing a comma-delimited -list for the `Name`. e.g. - - -``` go -package main - -import ( - "log" - "os" - - "github.com/urfave/cli" -) - -func main() { - app := cli.NewApp() - - app.Flags = []cli.Flag { - cli.StringFlag{ - Name: "lang, l", - Value: "english", - Usage: "language for the greeting", - }, - } - - err := app.Run(os.Args) - if err != nil { - log.Fatal(err) - } -} -``` - -That flag can then be set with `--lang spanish` or `-l spanish`. Note that -giving two different forms of the same flag in the same command invocation is an -error. - -#### Ordering - -Flags for the application and commands are shown in the order they are defined. -However, it's possible to sort them from outside this library by using `FlagsByName` -or `CommandsByName` with `sort`. 
- -For example this: - - -``` go -package main - -import ( - "log" - "os" - "sort" - - "github.com/urfave/cli" -) - -func main() { - app := cli.NewApp() - - app.Flags = []cli.Flag { - cli.StringFlag{ - Name: "lang, l", - Value: "english", - Usage: "Language for the greeting", - }, - cli.StringFlag{ - Name: "config, c", - Usage: "Load configuration from `FILE`", - }, - } - - app.Commands = []cli.Command{ - { - Name: "complete", - Aliases: []string{"c"}, - Usage: "complete a task on the list", - Action: func(c *cli.Context) error { - return nil - }, - }, - { - Name: "add", - Aliases: []string{"a"}, - Usage: "add a task to the list", - Action: func(c *cli.Context) error { - return nil - }, - }, - } - - sort.Sort(cli.FlagsByName(app.Flags)) - sort.Sort(cli.CommandsByName(app.Commands)) - - err := app.Run(os.Args) - if err != nil { - log.Fatal(err) - } -} -``` - -Will result in help output like: - -``` ---config FILE, -c FILE Load configuration from FILE ---lang value, -l value Language for the greeting (default: "english") -``` - -#### Values from the Environment - -You can also have the default value set from the environment via `EnvVar`. e.g. - - -``` go -package main - -import ( - "log" - "os" - - "github.com/urfave/cli" -) - -func main() { - app := cli.NewApp() - - app.Flags = []cli.Flag { - cli.StringFlag{ - Name: "lang, l", - Value: "english", - Usage: "language for the greeting", - EnvVar: "APP_LANG", - }, - } - - err := app.Run(os.Args) - if err != nil { - log.Fatal(err) - } -} -``` - -The `EnvVar` may also be given as a comma-delimited "cascade", where the first -environment variable that resolves is used as the default. - - -``` go -package main - -import ( - "log" - "os" - - "github.com/urfave/cli" -) - -func main() { - app := cli.NewApp() - - app.Flags = []cli.Flag { - cli.StringFlag{ - Name: "lang, l", - Value: "english", - Usage: "language for the greeting", - EnvVar: "LEGACY_COMPAT_LANG,APP_LANG,LANG", - }, - } - - err := app.Run(os.Args) - if err != nil { - log.Fatal(err) - } -} -``` - -#### Values from files - -You can also have the default value set from file via `FilePath`. e.g. - - -``` go -package main - -import ( - "log" - "os" - - "github.com/urfave/cli" -) - -func main() { - app := cli.NewApp() - - app.Flags = []cli.Flag { - cli.StringFlag{ - Name: "password, p", - Usage: "password for the mysql database", - FilePath: "/etc/mysql/password", - }, - } - - err := app.Run(os.Args) - if err != nil { - log.Fatal(err) - } -} -``` - -Note that default values set from file (e.g. `FilePath`) take precedence over -default values set from the environment (e.g. `EnvVar`). - -#### Values from alternate input sources (YAML, TOML, and others) - -There is a separate package altsrc that adds support for getting flag values -from other file input sources. - -Currently supported input source formats: -* YAML -* JSON -* TOML - -In order to get values for a flag from an alternate input source the following -code would be added to wrap an existing cli.Flag like below: - -``` go - altsrc.NewIntFlag(cli.IntFlag{Name: "test"}) -``` - -Initialization must also occur for these flags. Below is an example initializing -getting data from a yaml file below. - -``` go - command.Before = altsrc.InitInputSourceWithContext(command.Flags, NewYamlSourceFromFlagFunc("load")) -``` - -The code above will use the "load" string as a flag name to get the file name of -a yaml file from the cli.Context. 
It will then use that file name to initialize -the yaml input source for any flags that are defined on that command. As a note -the "load" flag used would also have to be defined on the command flags in order -for this code snipped to work. - -Currently only YAML, JSON, and TOML files are supported but developers can add support -for other input sources by implementing the altsrc.InputSourceContext for their -given sources. - -Here is a more complete sample of a command using YAML support: - - -``` go -package notmain - -import ( - "fmt" - "log" - "os" - - "github.com/urfave/cli" - "github.com/urfave/cli/altsrc" -) - -func main() { - app := cli.NewApp() - - flags := []cli.Flag{ - altsrc.NewIntFlag(cli.IntFlag{Name: "test"}), - cli.StringFlag{Name: "load"}, - } - - app.Action = func(c *cli.Context) error { - fmt.Println("yaml ist rad") - return nil - } - - app.Before = altsrc.InitInputSourceWithContext(flags, altsrc.NewYamlSourceFromFlagFunc("load")) - app.Flags = flags - - err := app.Run(os.Args) - if err != nil { - log.Fatal(err) - } -} -``` - -#### Precedence - -The precedence for flag value sources is as follows (highest to lowest): - -0. Command line flag value from user -0. Environment variable (if specified) -0. Configuration file (if specified) -0. Default defined on the flag - -### Subcommands - -Subcommands can be defined for a more git-like command line app. - - -```go -package main - -import ( - "fmt" - "log" - "os" - - "github.com/urfave/cli" -) - -func main() { - app := cli.NewApp() - - app.Commands = []cli.Command{ - { - Name: "add", - Aliases: []string{"a"}, - Usage: "add a task to the list", - Action: func(c *cli.Context) error { - fmt.Println("added task: ", c.Args().First()) - return nil - }, - }, - { - Name: "complete", - Aliases: []string{"c"}, - Usage: "complete a task on the list", - Action: func(c *cli.Context) error { - fmt.Println("completed task: ", c.Args().First()) - return nil - }, - }, - { - Name: "template", - Aliases: []string{"t"}, - Usage: "options for task templates", - Subcommands: []cli.Command{ - { - Name: "add", - Usage: "add a new template", - Action: func(c *cli.Context) error { - fmt.Println("new task template: ", c.Args().First()) - return nil - }, - }, - { - Name: "remove", - Usage: "remove an existing template", - Action: func(c *cli.Context) error { - fmt.Println("removed task template: ", c.Args().First()) - return nil - }, - }, - }, - }, - } - - err := app.Run(os.Args) - if err != nil { - log.Fatal(err) - } -} -``` - -### Subcommands categories - -For additional organization in apps that have many subcommands, you can -associate a category for each command to group them together in the help -output. - -E.g. - ```go -package main - +... import ( - "log" - "os" - - "github.com/urfave/cli" + "github.com/urfave/cli.v2" // imports as package "cli" ) - -func main() { - app := cli.NewApp() - - app.Commands = []cli.Command{ - { - Name: "noop", - }, - { - Name: "add", - Category: "Template actions", - }, - { - Name: "remove", - Category: "Template actions", - }, - } - - err := app.Run(os.Args) - if err != nil { - log.Fatal(err) - } -} +... ``` - -Will include: - -``` -COMMANDS: - noop - - Template actions: - add - remove -``` - -### Exit code - -Calling `App.Run` will not automatically call `os.Exit`, which means that by -default the exit code will "fall through" to being `0`. 
An explicit exit code -may be set by returning a non-nil error that fulfills `cli.ExitCoder`, *or* a -`cli.MultiError` that includes an error that fulfills `cli.ExitCoder`, e.g.: - -``` go -package main - -import ( - "log" - "os" - - "github.com/urfave/cli" -) - -func main() { - app := cli.NewApp() - app.Flags = []cli.Flag{ - cli.BoolFlag{ - Name: "ginger-crouton", - Usage: "Add ginger croutons to the soup", - }, - } - app.Action = func(ctx *cli.Context) error { - if !ctx.Bool("ginger-crouton") { - return cli.NewExitError("Ginger croutons are not in the soup", 86) - } - return nil - } - - err := app.Run(os.Args) - if err != nil { - log.Fatal(err) - } -} -``` - -### Combining short options - -Traditional use of options using their shortnames look like this: - -``` -$ cmd -s -o -m "Some message" -``` - -Suppose you want users to be able to combine options with their shortnames. This -can be done using the `UseShortOptionHandling` bool in your app configuration, -or for individual commands by attaching it to the command configuration. For -example: - - -``` go -package main - -import ( - "fmt" - "log" - "os" - - "github.com/urfave/cli" -) - -func main() { - app := cli.NewApp() - app.UseShortOptionHandling = true - app.Commands = []cli.Command{ - { - Name: "short", - Usage: "complete a task on the list", - Flags: []cli.Flag{ - cli.BoolFlag{Name: "serve, s"}, - cli.BoolFlag{Name: "option, o"}, - cli.StringFlag{Name: "message, m"}, - }, - Action: func(c *cli.Context) error { - fmt.Println("serve:", c.Bool("serve")) - fmt.Println("option:", c.Bool("option")) - fmt.Println("message:", c.String("message")) - return nil - }, - }, - } - - err := app.Run(os.Args) - if err != nil { - log.Fatal(err) - } -} -``` - -If your program has any number of bool flags such as `serve` and `option`, and -optionally one non-bool flag `message`, with the short options of `-s`, `-o`, -and `-m` respectively, setting `UseShortOptionHandling` will also support the -following syntax: - -``` -$ cmd -som "Some message" -``` - -If you enable `UseShortOptionHandling`, then you must not use any flags that -have a single leading `-` or this will result in failures. For example, -`-option` can no longer be used. Flags with two leading dashes (such as -`--options`) are still valid. - -### Bash Completion - -You can enable completion commands by setting the `EnableBashCompletion` -flag on the `App` object. By default, this setting will only auto-complete to -show an app's subcommands, but you can write your own completion methods for -the App or its subcommands. 
- - -``` go -package main - -import ( - "fmt" - "log" - "os" - - "github.com/urfave/cli" -) - -func main() { - tasks := []string{"cook", "clean", "laundry", "eat", "sleep", "code"} - - app := cli.NewApp() - app.EnableBashCompletion = true - app.Commands = []cli.Command{ - { - Name: "complete", - Aliases: []string{"c"}, - Usage: "complete a task on the list", - Action: func(c *cli.Context) error { - fmt.Println("completed task: ", c.Args().First()) - return nil - }, - BashComplete: func(c *cli.Context) { - // This will complete if no args are passed - if c.NArg() > 0 { - return - } - for _, t := range tasks { - fmt.Println(t) - } - }, - }, - } - - err := app.Run(os.Args) - if err != nil { - log.Fatal(err) - } -} -``` - -#### Enabling - -Source the `autocomplete/bash_autocomplete` file in your `.bashrc` file while -setting the `PROG` variable to the name of your program: - -`PROG=myprogram source /.../cli/autocomplete/bash_autocomplete` - -#### Distribution - -Copy `autocomplete/bash_autocomplete` into `/etc/bash_completion.d/` and rename -it to the name of the program you wish to add autocomplete support for (or -automatically install it there if you are distributing a package). Don't forget -to source the file to make it active in the current shell. - -``` -sudo cp src/bash_autocomplete /etc/bash_completion.d/ -source /etc/bash_completion.d/ -``` - -Alternatively, you can just document that users should source the generic -`autocomplete/bash_autocomplete` in their bash configuration with `$PROG` set -to the name of their program (as above). - -#### Customization - -The default bash completion flag (`--generate-bash-completion`) is defined as -`cli.BashCompletionFlag`, and may be redefined if desired, e.g.: - - -``` go -package main - -import ( - "log" - "os" - - "github.com/urfave/cli" -) - -func main() { - cli.BashCompletionFlag = cli.BoolFlag{ - Name: "compgen", - Hidden: true, - } - - app := cli.NewApp() - app.EnableBashCompletion = true - app.Commands = []cli.Command{ - { - Name: "wat", - }, - } - err := app.Run(os.Args) - if err != nil { - log.Fatal(err) - } -} -``` - -### Generated Help Text - -The default help flag (`-h/--help`) is defined as `cli.HelpFlag` and is checked -by the cli internals in order to print generated help text for the app, command, -or subcommand, and break execution. - -#### Customization - -All of the help text generation may be customized, and at multiple levels. The -templates are exposed as variables `AppHelpTemplate`, `CommandHelpTemplate`, and -`SubcommandHelpTemplate` which may be reassigned or augmented, and full override -is possible by assigning a compatible func to the `cli.HelpPrinter` variable, -e.g.: - - -``` go -package main - -import ( - "fmt" - "log" - "io" - "os" - - "github.com/urfave/cli" -) - -func main() { - // EXAMPLE: Append to an existing template - cli.AppHelpTemplate = fmt.Sprintf(`%s - -WEBSITE: http://awesometown.example.com - -SUPPORT: support@awesometown.example.com - -`, cli.AppHelpTemplate) - - // EXAMPLE: Override a template - cli.AppHelpTemplate = `NAME: - {{.Name}} - {{.Usage}} -USAGE: - {{.HelpName}} {{if .VisibleFlags}}[global options]{{end}}{{if .Commands}} command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}} - {{if len .Authors}} -AUTHOR: - {{range .Authors}}{{ . 
}}{{end}} - {{end}}{{if .Commands}} -COMMANDS: -{{range .Commands}}{{if not .HideHelp}} {{join .Names ", "}}{{ "\t"}}{{.Usage}}{{ "\n" }}{{end}}{{end}}{{end}}{{if .VisibleFlags}} -GLOBAL OPTIONS: - {{range .VisibleFlags}}{{.}} - {{end}}{{end}}{{if .Copyright }} -COPYRIGHT: - {{.Copyright}} - {{end}}{{if .Version}} -VERSION: - {{.Version}} - {{end}} -` - - // EXAMPLE: Replace the `HelpPrinter` func - cli.HelpPrinter = func(w io.Writer, templ string, data interface{}) { - fmt.Println("Ha HA. I pwnd the help!!1") - } - - err := cli.NewApp().Run(os.Args) - if err != nil { - log.Fatal(err) - } -} -``` - -The default flag may be customized to something other than `-h/--help` by -setting `cli.HelpFlag`, e.g.: - - -``` go -package main - -import ( - "log" - "os" - - "github.com/urfave/cli" -) - -func main() { - cli.HelpFlag = cli.BoolFlag{ - Name: "halp, haaaaalp", - Usage: "HALP", - EnvVar: "SHOW_HALP,HALPPLZ", - } - - err := cli.NewApp().Run(os.Args) - if err != nil { - log.Fatal(err) - } -} -``` - -### Version Flag - -The default version flag (`-v/--version`) is defined as `cli.VersionFlag`, which -is checked by the cli internals in order to print the `App.Version` via -`cli.VersionPrinter` and break execution. - -#### Customization - -The default flag may be customized to something other than `-v/--version` by -setting `cli.VersionFlag`, e.g.: - - -``` go -package main - -import ( - "log" - "os" - - "github.com/urfave/cli" -) - -func main() { - cli.VersionFlag = cli.BoolFlag{ - Name: "print-version, V", - Usage: "print only the version", - } - - app := cli.NewApp() - app.Name = "partay" - app.Version = "19.99.0" - err := app.Run(os.Args) - if err != nil { - log.Fatal(err) - } -} -``` - -Alternatively, the version printer at `cli.VersionPrinter` may be overridden, e.g.: - - -``` go -package main - -import ( - "fmt" - "log" - "os" - - "github.com/urfave/cli" -) - -var ( - Revision = "fafafaf" -) - -func main() { - cli.VersionPrinter = func(c *cli.Context) { - fmt.Printf("version=%s revision=%s\n", c.App.Version, Revision) - } - - app := cli.NewApp() - app.Name = "partay" - app.Version = "19.99.0" - err := app.Run(os.Args) - if err != nil { - log.Fatal(err) - } -} -``` - -#### Full API Example - -**Notice**: This is a contrived (functioning) example meant strictly for API -demonstration purposes. Use of one's imagination is encouraged. 
- - -``` go -package main - -import ( - "errors" - "flag" - "fmt" - "io" - "io/ioutil" - "os" - "time" - - "github.com/urfave/cli" -) - -func init() { - cli.AppHelpTemplate += "\nCUSTOMIZED: you bet ur muffins\n" - cli.CommandHelpTemplate += "\nYMMV\n" - cli.SubcommandHelpTemplate += "\nor something\n" - - cli.HelpFlag = cli.BoolFlag{Name: "halp"} - cli.BashCompletionFlag = cli.BoolFlag{Name: "compgen", Hidden: true} - cli.VersionFlag = cli.BoolFlag{Name: "print-version, V"} - - cli.HelpPrinter = func(w io.Writer, templ string, data interface{}) { - fmt.Fprintf(w, "best of luck to you\n") - } - cli.VersionPrinter = func(c *cli.Context) { - fmt.Fprintf(c.App.Writer, "version=%s\n", c.App.Version) - } - cli.OsExiter = func(c int) { - fmt.Fprintf(cli.ErrWriter, "refusing to exit %d\n", c) - } - cli.ErrWriter = ioutil.Discard - cli.FlagStringer = func(fl cli.Flag) string { - return fmt.Sprintf("\t\t%s", fl.GetName()) - } -} - -type hexWriter struct{} - -func (w *hexWriter) Write(p []byte) (int, error) { - for _, b := range p { - fmt.Printf("%x", b) - } - fmt.Printf("\n") - - return len(p), nil -} - -type genericType struct{ - s string -} - -func (g *genericType) Set(value string) error { - g.s = value - return nil -} - -func (g *genericType) String() string { - return g.s -} - -func main() { - app := cli.NewApp() - app.Name = "kənˈtrīv" - app.Version = "19.99.0" - app.Compiled = time.Now() - app.Authors = []cli.Author{ - cli.Author{ - Name: "Example Human", - Email: "human@example.com", - }, - } - app.Copyright = "(c) 1999 Serious Enterprise" - app.HelpName = "contrive" - app.Usage = "demonstrate available API" - app.UsageText = "contrive - demonstrating the available API" - app.ArgsUsage = "[args and such]" - app.Commands = []cli.Command{ - cli.Command{ - Name: "doo", - Aliases: []string{"do"}, - Category: "motion", - Usage: "do the doo", - UsageText: "doo - does the dooing", - Description: "no really, there is a lot of dooing to be done", - ArgsUsage: "[arrgh]", - Flags: []cli.Flag{ - cli.BoolFlag{Name: "forever, forevvarr"}, - }, - Subcommands: cli.Commands{ - cli.Command{ - Name: "wop", - Action: wopAction, - }, - }, - SkipFlagParsing: false, - HideHelp: false, - Hidden: false, - HelpName: "doo!", - BashComplete: func(c *cli.Context) { - fmt.Fprintf(c.App.Writer, "--better\n") - }, - Before: func(c *cli.Context) error { - fmt.Fprintf(c.App.Writer, "brace for impact\n") - return nil - }, - After: func(c *cli.Context) error { - fmt.Fprintf(c.App.Writer, "did we lose anyone?\n") - return nil - }, - Action: func(c *cli.Context) error { - c.Command.FullName() - c.Command.HasName("wop") - c.Command.Names() - c.Command.VisibleFlags() - fmt.Fprintf(c.App.Writer, "dodododododoodododddooooododododooo\n") - if c.Bool("forever") { - c.Command.Run(c) - } - return nil - }, - OnUsageError: func(c *cli.Context, err error, isSubcommand bool) error { - fmt.Fprintf(c.App.Writer, "for shame\n") - return err - }, - }, - } - app.Flags = []cli.Flag{ - cli.BoolFlag{Name: "fancy"}, - cli.BoolTFlag{Name: "fancier"}, - cli.DurationFlag{Name: "howlong, H", Value: time.Second * 3}, - cli.Float64Flag{Name: "howmuch"}, - cli.GenericFlag{Name: "wat", Value: &genericType{}}, - cli.Int64Flag{Name: "longdistance"}, - cli.Int64SliceFlag{Name: "intervals"}, - cli.IntFlag{Name: "distance"}, - cli.IntSliceFlag{Name: "times"}, - cli.StringFlag{Name: "dance-move, d"}, - cli.StringSliceFlag{Name: "names, N"}, - cli.UintFlag{Name: "age"}, - cli.Uint64Flag{Name: "bigage"}, - } - app.EnableBashCompletion = true - 
app.UseShortOptionHandling = true - app.HideHelp = false - app.HideVersion = false - app.BashComplete = func(c *cli.Context) { - fmt.Fprintf(c.App.Writer, "lipstick\nkiss\nme\nlipstick\nringo\n") - } - app.Before = func(c *cli.Context) error { - fmt.Fprintf(c.App.Writer, "HEEEERE GOES\n") - return nil - } - app.After = func(c *cli.Context) error { - fmt.Fprintf(c.App.Writer, "Phew!\n") - return nil - } - app.CommandNotFound = func(c *cli.Context, command string) { - fmt.Fprintf(c.App.Writer, "Thar be no %q here.\n", command) - } - app.OnUsageError = func(c *cli.Context, err error, isSubcommand bool) error { - if isSubcommand { - return err - } - - fmt.Fprintf(c.App.Writer, "WRONG: %#v\n", err) - return nil - } - app.Action = func(c *cli.Context) error { - cli.DefaultAppComplete(c) - cli.HandleExitCoder(errors.New("not an exit coder, though")) - cli.ShowAppHelp(c) - cli.ShowCommandCompletions(c, "nope") - cli.ShowCommandHelp(c, "also-nope") - cli.ShowCompletions(c) - cli.ShowSubcommandHelp(c) - cli.ShowVersion(c) - - categories := c.App.Categories() - categories.AddCommand("sounds", cli.Command{ - Name: "bloop", - }) - - for _, category := range c.App.Categories() { - fmt.Fprintf(c.App.Writer, "%s\n", category.Name) - fmt.Fprintf(c.App.Writer, "%#v\n", category.Commands) - fmt.Fprintf(c.App.Writer, "%#v\n", category.VisibleCommands()) - } - - fmt.Printf("%#v\n", c.App.Command("doo")) - if c.Bool("infinite") { - c.App.Run([]string{"app", "doo", "wop"}) - } - - if c.Bool("forevar") { - c.App.RunAsSubcommand(c) - } - c.App.Setup() - fmt.Printf("%#v\n", c.App.VisibleCategories()) - fmt.Printf("%#v\n", c.App.VisibleCommands()) - fmt.Printf("%#v\n", c.App.VisibleFlags()) - - fmt.Printf("%#v\n", c.Args().First()) - if len(c.Args()) > 0 { - fmt.Printf("%#v\n", c.Args()[1]) - } - fmt.Printf("%#v\n", c.Args().Present()) - fmt.Printf("%#v\n", c.Args().Tail()) - - set := flag.NewFlagSet("contrive", 0) - nc := cli.NewContext(c.App, set, c) - - fmt.Printf("%#v\n", nc.Args()) - fmt.Printf("%#v\n", nc.Bool("nope")) - fmt.Printf("%#v\n", nc.BoolT("nerp")) - fmt.Printf("%#v\n", nc.Duration("howlong")) - fmt.Printf("%#v\n", nc.Float64("hay")) - fmt.Printf("%#v\n", nc.Generic("bloop")) - fmt.Printf("%#v\n", nc.Int64("bonk")) - fmt.Printf("%#v\n", nc.Int64Slice("burnks")) - fmt.Printf("%#v\n", nc.Int("bips")) - fmt.Printf("%#v\n", nc.IntSlice("blups")) - fmt.Printf("%#v\n", nc.String("snurt")) - fmt.Printf("%#v\n", nc.StringSlice("snurkles")) - fmt.Printf("%#v\n", nc.Uint("flub")) - fmt.Printf("%#v\n", nc.Uint64("florb")) - fmt.Printf("%#v\n", nc.GlobalBool("global-nope")) - fmt.Printf("%#v\n", nc.GlobalBoolT("global-nerp")) - fmt.Printf("%#v\n", nc.GlobalDuration("global-howlong")) - fmt.Printf("%#v\n", nc.GlobalFloat64("global-hay")) - fmt.Printf("%#v\n", nc.GlobalGeneric("global-bloop")) - fmt.Printf("%#v\n", nc.GlobalInt("global-bips")) - fmt.Printf("%#v\n", nc.GlobalIntSlice("global-blups")) - fmt.Printf("%#v\n", nc.GlobalString("global-snurt")) - fmt.Printf("%#v\n", nc.GlobalStringSlice("global-snurkles")) - - fmt.Printf("%#v\n", nc.FlagNames()) - fmt.Printf("%#v\n", nc.GlobalFlagNames()) - fmt.Printf("%#v\n", nc.GlobalIsSet("wat")) - fmt.Printf("%#v\n", nc.GlobalSet("wat", "nope")) - fmt.Printf("%#v\n", nc.NArg()) - fmt.Printf("%#v\n", nc.NumFlags()) - fmt.Printf("%#v\n", nc.Parent()) - - nc.Set("wat", "also-nope") - - ec := cli.NewExitError("ohwell", 86) - fmt.Fprintf(c.App.Writer, "%d", ec.ExitCode()) - fmt.Printf("made it!\n") - return nil - } - - if os.Getenv("HEXY") != "" { - app.Writer = 
&hexWriter{} - app.ErrWriter = &hexWriter{} - } - - app.Metadata = map[string]interface{}{ - "layers": "many", - "explicable": false, - "whatever-values": 19.99, - } - - - // ignore error so we don't exit non-zero and break gfmrun README example tests - _ = app.Run(os.Args) -} - -func wopAction(c *cli.Context) error { - fmt.Fprintf(c.App.Writer, ":wave: over here, eh\n") - return nil -} -``` - -## Contribution Guidelines - -See [./CONTRIBUTING.md](./CONTRIBUTING.md) diff --git a/vendor/github.com/urfave/cli/app.go b/vendor/github.com/urfave/cli/app.go index 76e869d3..95d20381 100644 --- a/vendor/github.com/urfave/cli/app.go +++ b/vendor/github.com/urfave/cli/app.go @@ -199,12 +199,12 @@ func (a *App) Run(arguments []string) (err error) { // always appends the completion flag at the end of the command shellComplete, arguments := checkShellCompleteFlag(a, arguments) - _, err = a.newFlagSet() + set, err := a.newFlagSet() if err != nil { return err } - set, err := parseIter(a, arguments[1:]) + err = parseIter(set, a, arguments[1:]) nerr := normalizeFlags(a.Flags, set) context := NewContext(a, set, nil) if nerr != nil { @@ -322,12 +322,12 @@ func (a *App) RunAsSubcommand(ctx *Context) (err error) { } a.Commands = newCmds - _, err = a.newFlagSet() + set, err := a.newFlagSet() if err != nil { return err } - set, err := parseIter(a, ctx.Args().Tail()) + err = parseIter(set, a, ctx.Args().Tail()) nerr := normalizeFlags(a.Flags, set) context := NewContext(a, set, ctx) diff --git a/vendor/github.com/urfave/cli/appveyor.yml b/vendor/github.com/urfave/cli/appveyor.yml index 6d2dcee2..8ef2fea1 100644 --- a/vendor/github.com/urfave/cli/appveyor.yml +++ b/vendor/github.com/urfave/cli/appveyor.yml @@ -6,18 +6,23 @@ image: Visual Studio 2017 clone_folder: c:\gopath\src\github.com\urfave\cli +cache: + - node_modules + environment: GOPATH: C:\gopath GOVERSION: 1.11.x + GO111MODULE: on + GOPROXY: https://proxy.golang.org install: - set PATH=%GOPATH%\bin;C:\go\bin;%PATH% - go version - go env - - go get github.com/urfave/gfmrun/... - - go get -v -t ./... + - go get github.com/urfave/gfmrun/cmd/gfmrun + - go mod vendor build_script: - go run build.go vet - go run build.go test - - go run build.go gfmrun + - go run build.go gfmrun docs/v1/manual.md diff --git a/vendor/github.com/urfave/cli/command.go b/vendor/github.com/urfave/cli/command.go index 44a90de6..e7cb97a6 100644 --- a/vendor/github.com/urfave/cli/command.go +++ b/vendor/github.com/urfave/cli/command.go @@ -190,10 +190,15 @@ func (c *Command) parseFlags(args Args) (*flag.FlagSet, error) { } if !c.SkipArgReorder { - args = reorderArgs(args) + args = reorderArgs(c.Flags, args) } - set, err := parseIter(c, args) + set, err := c.newFlagSet() + if err != nil { + return nil, err + } + + err = parseIter(set, c, args) if err != nil { return nil, err } @@ -214,34 +219,73 @@ func (c *Command) useShortOptionHandling() bool { return c.UseShortOptionHandling } -// reorderArgs moves all flags before arguments as this is what flag expects -func reorderArgs(args []string) []string { - var nonflags, flags []string +// reorderArgs moves all flags (via reorderedArgs) before the rest of +// the arguments (remainingArgs) as this is what flag expects. 
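+//
+// for example, with a hypothetical command defining a --message flag, an
+// invocation such as
+//
+//	cmd task1 --message hello task2
+//
+// is reordered to
+//
+//	cmd --message hello task1 task2
+//
+// because the stdlib flag package stops parsing at the first non-flag argument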
+func reorderArgs(commandFlags []Flag, args []string) []string { + var remainingArgs, reorderedArgs []string - readFlagValue := false + nextIndexMayContainValue := false for i, arg := range args { + + // dont reorder any args after a -- + // read about -- here: + // https://unix.stackexchange.com/questions/11376/what-does-double-dash-mean-also-known-as-bare-double-dash if arg == "--" { - nonflags = append(nonflags, args[i:]...) + remainingArgs = append(remainingArgs, args[i:]...) break - } - if readFlagValue && !strings.HasPrefix(arg, "-") && !strings.HasPrefix(arg, "--") { - readFlagValue = false - flags = append(flags, arg) - continue - } - readFlagValue = false + // checks if this arg is a value that should be re-ordered next to its associated flag + } else if nextIndexMayContainValue && !strings.HasPrefix(arg, "-") { + nextIndexMayContainValue = false + reorderedArgs = append(reorderedArgs, arg) - if arg != "-" && strings.HasPrefix(arg, "-") { - flags = append(flags, arg) + // checks if this is an arg that should be re-ordered + } else if argIsFlag(commandFlags, arg) { + // we have determined that this is a flag that we should re-order + reorderedArgs = append(reorderedArgs, arg) + // if this arg does not contain a "=", then the next index may contain the value for this flag + nextIndexMayContainValue = !strings.Contains(arg, "=") - readFlagValue = !strings.Contains(arg, "=") + // simply append any remaining args } else { - nonflags = append(nonflags, arg) + remainingArgs = append(remainingArgs, arg) } } - return append(flags, nonflags...) + return append(reorderedArgs, remainingArgs...) +} + +// argIsFlag checks if an arg is one of our command flags +func argIsFlag(commandFlags []Flag, arg string) bool { + // checks if this is just a `-`, and so definitely not a flag + if arg == "-" { + return false + } + // flags always start with a - + if !strings.HasPrefix(arg, "-") { + return false + } + // this line turns `--flag` into `flag` + if strings.HasPrefix(arg, "--") { + arg = strings.Replace(arg, "-", "", 2) + } + // this line turns `-flag` into `flag` + if strings.HasPrefix(arg, "-") { + arg = strings.Replace(arg, "-", "", 1) + } + // this line turns `flag=value` into `flag` + arg = strings.Split(arg, "=")[0] + // look through all the flags, to see if the `arg` is one of our flags + for _, flag := range commandFlags { + for _, key := range strings.Split(flag.GetName(), ",") { + key := strings.TrimSpace(key) + if key == arg { + return true + } + } + } + // return false if this arg was not one of our flags + return false } // Names returns the names including short names and aliases. diff --git a/vendor/github.com/urfave/cli/flag.go b/vendor/github.com/urfave/cli/flag.go index 3c052707..1cfa1cdb 100644 --- a/vendor/github.com/urfave/cli/flag.go +++ b/vendor/github.com/urfave/cli/flag.go @@ -86,7 +86,7 @@ type RequiredFlag interface { type DocGenerationFlag interface { Flag - // TakesValue returns true of the flag takes a value, otherwise false + // TakesValue returns true if the flag takes a value, otherwise false TakesValue() bool // GetUsage returns the usage string for the flag diff --git a/vendor/github.com/urfave/cli/help.go b/vendor/github.com/urfave/cli/help.go index 7a4ef692..2280e338 100644 --- a/vendor/github.com/urfave/cli/help.go +++ b/vendor/github.com/urfave/cli/help.go @@ -47,13 +47,18 @@ type helpPrinter func(w io.Writer, templ string, data interface{}) // Prints help for the App or Command with custom template function. 
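The `reorderArgs`/`argIsFlag` pair above only hoists arguments that match a flag declared on the command, leaves everything after a bare `--` untouched, and treats the token following a `--flag` (when no `=` is present) as that flag's value. A minimal standalone sketch of the same idea, with illustrative names that are not part of the library:

```go
package main

import (
	"fmt"
	"strings"
)

// isFlag reports whether arg names one of the known flags, mirroring
// argIsFlag above; "known" stands in for the command's []Flag.
func isFlag(known []string, arg string) bool {
	if arg == "-" || !strings.HasPrefix(arg, "-") {
		return false
	}
	name := strings.SplitN(strings.TrimLeft(arg, "-"), "=", 2)[0]
	for _, k := range known {
		if k == name {
			return true
		}
	}
	return false
}

// reorder moves known flags (and their pending values) ahead of
// positional arguments, stopping at "--", like reorderArgs above.
func reorder(known, args []string) []string {
	var flags, rest []string
	expectValue := false
	for i, arg := range args {
		switch {
		case arg == "--":
			rest = append(rest, args[i:]...)
			return append(flags, rest...)
		case expectValue && !strings.HasPrefix(arg, "-"):
			expectValue = false
			flags = append(flags, arg)
		case isFlag(known, arg):
			flags = append(flags, arg)
			expectValue = !strings.Contains(arg, "=")
		default:
			rest = append(rest, arg)
		}
	}
	return append(flags, rest...)
}

func main() {
	known := []string{"name", "verbose"}
	fmt.Println(reorder(known, []string{"build", "--name", "demo", "./..."}))
	// [--name demo build ./...]
	fmt.Println(reorder(known, []string{"run", "--", "--name", "x"}))
	// [run -- --name x]
}
```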
type helpPrinterCustom func(w io.Writer, templ string, data interface{}, customFunc map[string]interface{}) -// HelpPrinter is a function that writes the help output. If not set a default -// is used. The function signature is: -// func(w io.Writer, templ string, data interface{}) +// HelpPrinter is a function that writes the help output. If not set explicitly, +// this calls HelpPrinterCustom using only the default template functions. +// +// If custom logic for printing help is required, this function can be +// overridden. If the ExtraInfo field is defined on an App, this function +// should not be modified, as HelpPrinterCustom will be used directly in order +// to capture the extra information. var HelpPrinter helpPrinter = printHelp -// HelpPrinterCustom is same as HelpPrinter but -// takes a custom function for template function map. +// HelpPrinterCustom is a function that writes the help output. It is used as +// the default implementation of HelpPrinter, and may be called directly if +// the ExtraInfo field is set on an App. var HelpPrinterCustom helpPrinterCustom = printHelpCustom // VersionPrinter prints the version for the App @@ -66,20 +71,24 @@ func ShowAppHelpAndExit(c *Context, exitCode int) { } // ShowAppHelp is an action that displays the help. -func ShowAppHelp(c *Context) (err error) { - if c.App.CustomAppHelpTemplate == "" { - HelpPrinter(c.App.Writer, AppHelpTemplate, c.App) - return +func ShowAppHelp(c *Context) error { + template := c.App.CustomAppHelpTemplate + if template == "" { + template = AppHelpTemplate } + + if c.App.ExtraInfo == nil { + HelpPrinter(c.App.Writer, template, c.App) + return nil + } + customAppData := func() map[string]interface{} { - if c.App.ExtraInfo == nil { - return nil - } return map[string]interface{}{ "ExtraInfo": c.App.ExtraInfo, } } - HelpPrinterCustom(c.App.Writer, c.App.CustomAppHelpTemplate, c.App, customAppData()) + HelpPrinterCustom(c.App.Writer, template, c.App, customAppData()) + return nil } @@ -186,11 +195,13 @@ func ShowCommandHelp(ctx *Context, command string) error { for _, c := range ctx.App.Commands { if c.HasName(command) { - if c.CustomHelpTemplate != "" { - HelpPrinterCustom(ctx.App.Writer, c.CustomHelpTemplate, c, nil) - } else { - HelpPrinter(ctx.App.Writer, CommandHelpTemplate, c) + templ := c.CustomHelpTemplate + if templ == "" { + templ = CommandHelpTemplate } + + HelpPrinter(ctx.App.Writer, templ, c) + return nil } } @@ -238,11 +249,15 @@ func ShowCommandCompletions(ctx *Context, command string) { } -func printHelpCustom(out io.Writer, templ string, data interface{}, customFunc map[string]interface{}) { +// printHelpCustom is the default implementation of HelpPrinterCustom. +// +// The customFuncs map will be combined with a default template.FuncMap to +// allow using arbitrary functions in template rendering. 
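As the rewritten ShowAppHelp above spells out, setting ExtraInfo on an App routes help printing through HelpPrinterCustom so the template can see `.ExtraInfo`, while a plain HelpPrinter override covers the common case. A usage sketch against the v1 API; the `ExtraInfo` field signature is assumed from this vendored copy:

```go
package main

import (
	"fmt"
	"io"
	"os"

	"github.com/urfave/cli"
)

func main() {
	app := cli.NewApp()
	app.Name = "demo"

	// With ExtraInfo set, ShowAppHelp calls HelpPrinterCustom directly,
	// and a CustomAppHelpTemplate could reference .ExtraInfo.
	app.ExtraInfo = func() map[string]string {
		return map[string]string{"build": "dev"}
	}

	// Without ExtraInfo, overriding HelpPrinter is enough to take over
	// help rendering entirely (signature as declared above).
	cli.HelpPrinter = func(w io.Writer, templ string, data interface{}) {
		fmt.Fprintln(w, "demo: custom help output")
	}

	_ = app.Run(os.Args)
}
```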
+func printHelpCustom(out io.Writer, templ string, data interface{}, customFuncs map[string]interface{}) { funcMap := template.FuncMap{ "join": strings.Join, } - for key, value := range customFunc { + for key, value := range customFuncs { funcMap[key] = value } @@ -261,7 +276,7 @@ func printHelpCustom(out io.Writer, templ string, data interface{}, customFunc m } func printHelp(out io.Writer, templ string, data interface{}) { - printHelpCustom(out, templ, data, nil) + HelpPrinterCustom(out, templ, data, nil) } func checkVersion(c *Context) bool { diff --git a/vendor/github.com/urfave/cli/parse.go b/vendor/github.com/urfave/cli/parse.go index 865accf1..660f538b 100644 --- a/vendor/github.com/urfave/cli/parse.go +++ b/vendor/github.com/urfave/cli/parse.go @@ -14,42 +14,51 @@ type iterativeParser interface { // iteratively catch parsing errors. This way we achieve LR parsing without // transforming any arguments. Otherwise, there is no way we can discriminate // combined short options from common arguments that should be left untouched. -func parseIter(ip iterativeParser, args []string) (*flag.FlagSet, error) { +func parseIter(set *flag.FlagSet, ip iterativeParser, args []string) error { for { - set, err := ip.newFlagSet() - if err != nil { - return nil, err - } - - err = set.Parse(args) + err := set.Parse(args) if !ip.useShortOptionHandling() || err == nil { - return set, err + return err } errStr := err.Error() - trimmed := strings.TrimPrefix(errStr, "flag provided but not defined: ") + trimmed := strings.TrimPrefix(errStr, "flag provided but not defined: -") if errStr == trimmed { - return nil, err + return err } // regenerate the initial args with the split short opts - newArgs := []string{} + argsWereSplit := false for i, arg := range args { - if arg != trimmed { - newArgs = append(newArgs, arg) + // skip args that are not part of the error message + if name := strings.TrimLeft(arg, "-"); name != trimmed { continue } - shortOpts := splitShortOptions(set, trimmed) + // if we can't split, the error was accurate + shortOpts := splitShortOptions(set, arg) if len(shortOpts) == 1 { - return nil, err + return err } - // add each short option and all remaining arguments - newArgs = append(newArgs, shortOpts...) - newArgs = append(newArgs, args[i+1:]...) - args = newArgs + // swap current argument with the split version + args = append(args[:i], append(shortOpts, args[i+1:]...)...) + argsWereSplit = true + break } + + // This should be an impossible to reach code path, but in case the arg + // splitting failed to happen, this will prevent infinite loops + if !argsWereSplit { + return err + } + + // Since custom parsing failed, replace the flag set before retrying + newSet, err := ip.newFlagSet() + if err != nil { + return err + } + *set = *newSet } } diff --git a/vendor/go.etcd.io/bbolt/README.md b/vendor/go.etcd.io/bbolt/README.md index 2dff3761..c9e64b1a 100644 --- a/vendor/go.etcd.io/bbolt/README.md +++ b/vendor/go.etcd.io/bbolt/README.md @@ -152,11 +152,12 @@ are not thread safe. To work with data in multiple goroutines you must start a transaction for each one or use locking to ensure only one goroutine accesses a transaction at a time. Creating transaction from the `DB` is thread safe. -Read-only transactions and read-write transactions should not depend on one -another and generally shouldn't be opened simultaneously in the same goroutine. 
-This can cause a deadlock as the read-write transaction needs to periodically -re-map the data file but it cannot do so while a read-only transaction is open. - +Transactions should not depend on one another and generally shouldn't be opened +simultaneously in the same goroutine. This can cause a deadlock as the read-write +transaction needs to periodically re-map the data file but it cannot do so while +any read-only transaction is open. Even a nested read-only transaction can cause +a deadlock, as the child transaction can block the parent transaction from releasing +its resources. #### Read-write transactions diff --git a/vendor/go.etcd.io/bbolt/freelist.go b/vendor/go.etcd.io/bbolt/freelist.go index d441b692..697a4696 100644 --- a/vendor/go.etcd.io/bbolt/freelist.go +++ b/vendor/go.etcd.io/bbolt/freelist.go @@ -2,7 +2,6 @@ package bbolt import ( "fmt" - "reflect" "sort" "unsafe" ) @@ -94,24 +93,8 @@ func (f *freelist) pending_count() int { return count } -// copyallunsafe copies a list of all free ids and all pending ids in one sorted list. +// copyall copies a list of all free ids and all pending ids in one sorted list. // f.count returns the minimum length required for dst. -func (f *freelist) copyallunsafe(dstptr unsafe.Pointer) { // dstptr is []pgid data pointer - m := make(pgids, 0, f.pending_count()) - for _, txp := range f.pending { - m = append(m, txp.ids...) - } - sort.Sort(m) - fpgids := f.getFreePageIDs() - sz := len(fpgids) + len(m) - dst := *(*[]pgid)(unsafe.Pointer(&reflect.SliceHeader{ - Data: uintptr(dstptr), - Len: sz, - Cap: sz, - })) - mergepgids(dst, fpgids, m) -} - func (f *freelist) copyall(dst []pgid) { m := make(pgids, 0, f.pending_count()) for _, txp := range f.pending { @@ -284,21 +267,23 @@ func (f *freelist) read(p *page) { } // If the page.count is at the max uint16 value (64k) then it's considered // an overflow and the size of the freelist is stored as the first element. - var idx, count uintptr = 0, uintptr(p.count) + var idx, count = 0, int(p.count) if count == 0xFFFF { idx = 1 - count = uintptr(*(*pgid)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p)))) + c := *(*pgid)(unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))) + count = int(c) + if count < 0 { + panic(fmt.Sprintf("leading element count %d overflows int", c)) + } } // Copy the list of page ids from the freelist. if count == 0 { f.ids = nil } else { - ids := *(*[]pgid)(unsafe.Pointer(&reflect.SliceHeader{ - Data: uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p) + idx*unsafe.Sizeof(pgid(0)), - Len: int(count), - Cap: int(count), - })) + var ids []pgid + data := unsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p), unsafe.Sizeof(ids[0]), idx) + unsafeSlice(unsafe.Pointer(&ids), data, count) // copy the ids, so we don't modify on the freelist page directly idsCopy := make([]pgid, count) @@ -331,16 +316,22 @@ func (f *freelist) write(p *page) error { // The page.count can only hold up to 64k elements so if we overflow that // number then we handle it by putting the size in the first element. 
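The reworded README caveat above pairs naturally with the safe pattern: finish each transaction before opening the next one in a goroutine, rather than holding both. A small sketch using the public bbolt API (file and bucket names are illustrative):

```go
package main

import (
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	db, err := bolt.Open("demo.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Safe: the read-only transaction is closed by the time Update
	// runs, so the remap described in the README can proceed.
	var n int
	err = db.View(func(tx *bolt.Tx) error {
		if b := tx.Bucket([]byte("widgets")); b != nil {
			n = b.Stats().KeyN
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}

	err = db.Update(func(tx *bolt.Tx) error {
		_, err := tx.CreateBucketIfNotExists([]byte("widgets"))
		return err
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("widgets: %d keys", n)
}
```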
- lenids := f.count() - if lenids == 0 { - p.count = uint16(lenids) - } else if lenids < 0xFFFF { - p.count = uint16(lenids) - f.copyallunsafe(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p))) + l := f.count() + if l == 0 { + p.count = uint16(l) + } else if l < 0xFFFF { + p.count = uint16(l) + var ids []pgid + data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) + unsafeSlice(unsafe.Pointer(&ids), data, l) + f.copyall(ids) } else { p.count = 0xFFFF - *(*pgid)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p))) = pgid(lenids) - f.copyallunsafe(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p) + unsafe.Sizeof(pgid(0)))) + var ids []pgid + data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) + unsafeSlice(unsafe.Pointer(&ids), data, l+1) + ids[0] = pgid(l) + f.copyall(ids[1:]) } return nil diff --git a/vendor/go.etcd.io/bbolt/node.go b/vendor/go.etcd.io/bbolt/node.go index 1690eef3..73988b5c 100644 --- a/vendor/go.etcd.io/bbolt/node.go +++ b/vendor/go.etcd.io/bbolt/node.go @@ -3,7 +3,6 @@ package bbolt import ( "bytes" "fmt" - "reflect" "sort" "unsafe" ) @@ -208,36 +207,32 @@ func (n *node) write(p *page) { } // Loop over each item and write it to the page. - bp := uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p) + n.pageElementSize()*uintptr(len(n.inodes)) + // off tracks the offset into p of the start of the next data. + off := unsafe.Sizeof(*p) + n.pageElementSize()*uintptr(len(n.inodes)) for i, item := range n.inodes { _assert(len(item.key) > 0, "write: zero-length inode key") + // Create a slice to write into of needed size and advance + // byte pointer for next iteration. + sz := len(item.key) + len(item.value) + b := unsafeByteSlice(unsafe.Pointer(p), off, 0, sz) + off += uintptr(sz) + // Write the page element. if n.isLeaf { elem := p.leafPageElement(uint16(i)) - elem.pos = uint32(bp - uintptr(unsafe.Pointer(elem))) + elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))) elem.flags = item.flags elem.ksize = uint32(len(item.key)) elem.vsize = uint32(len(item.value)) } else { elem := p.branchPageElement(uint16(i)) - elem.pos = uint32(bp - uintptr(unsafe.Pointer(elem))) + elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))) elem.ksize = uint32(len(item.key)) elem.pgid = item.pgid _assert(elem.pgid != p.id, "write: circular dependency occurred") } - // Create a slice to write into of needed size and advance - // byte pointer for next iteration. - klen, vlen := len(item.key), len(item.value) - sz := klen + vlen - b := *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{ - Data: bp, - Len: sz, - Cap: sz, - })) - bp += uintptr(sz) - // Write data for the element to the end of the page. l := copy(b, item.key) copy(b[l:], item.value) diff --git a/vendor/go.etcd.io/bbolt/page.go b/vendor/go.etcd.io/bbolt/page.go index b5c16997..c9a158fb 100644 --- a/vendor/go.etcd.io/bbolt/page.go +++ b/vendor/go.etcd.io/bbolt/page.go @@ -3,7 +3,6 @@ package bbolt import ( "fmt" "os" - "reflect" "sort" "unsafe" ) @@ -51,13 +50,13 @@ func (p *page) typ() string { // meta returns a pointer to the metadata section of the page. 
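The overflow convention above (page.count is a uint16, so lists of 0xFFFF or more ids stash the true length in the first slot) can be illustrated without any unsafe code. A toy encode/decode pair; the helper names are hypothetical, not bbolt's:

```go
package main

import "fmt"

type pgid uint64

// encode mimics freelist.write above: lists shorter than 0xFFFF use
// page.count directly; longer ones set the 0xFFFF sentinel and store
// the real length in the first slot.
func encode(ids []pgid) (count uint16, payload []pgid) {
	if len(ids) < 0xFFFF {
		return uint16(len(ids)), ids
	}
	payload = append([]pgid{pgid(len(ids))}, ids...)
	return 0xFFFF, payload
}

// decode mimics freelist.read above.
func decode(count uint16, payload []pgid) []pgid {
	if count != 0xFFFF {
		return payload[:count]
	}
	n := int(payload[0])
	return payload[1 : 1+n]
}

func main() {
	ids := make([]pgid, 70000)
	for i := range ids {
		ids[i] = pgid(i)
	}
	count, payload := encode(ids)
	fmt.Println(count == 0xFFFF, len(decode(count, payload))) // true 70000
}
```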
func (p *page) meta() *meta { - return (*meta)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p))) + return (*meta)(unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))) } // leafPageElement retrieves the leaf node by index func (p *page) leafPageElement(index uint16) *leafPageElement { - off := uintptr(index) * unsafe.Sizeof(leafPageElement{}) - return (*leafPageElement)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p) + off)) + return (*leafPageElement)(unsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p), + leafPageElementSize, int(index))) } // leafPageElements retrieves a list of leaf nodes. @@ -65,17 +64,16 @@ func (p *page) leafPageElements() []leafPageElement { if p.count == 0 { return nil } - return *(*[]leafPageElement)(unsafe.Pointer(&reflect.SliceHeader{ - Data: uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p), - Len: int(p.count), - Cap: int(p.count), - })) + var elems []leafPageElement + data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) + unsafeSlice(unsafe.Pointer(&elems), data, int(p.count)) + return elems } // branchPageElement retrieves the branch node by index func (p *page) branchPageElement(index uint16) *branchPageElement { - off := uintptr(index) * unsafe.Sizeof(branchPageElement{}) - return (*branchPageElement)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p) + off)) + return (*branchPageElement)(unsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p), + unsafe.Sizeof(branchPageElement{}), int(index))) } // branchPageElements retrieves a list of branch nodes. @@ -83,20 +81,15 @@ func (p *page) branchPageElements() []branchPageElement { if p.count == 0 { return nil } - return *(*[]branchPageElement)(unsafe.Pointer(&reflect.SliceHeader{ - Data: uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p), - Len: int(p.count), - Cap: int(p.count), - })) + var elems []branchPageElement + data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) + unsafeSlice(unsafe.Pointer(&elems), data, int(p.count)) + return elems } // dump writes n bytes of the page to STDERR as hex output. func (p *page) hexdump(n int) { - buf := *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{ - Data: uintptr(unsafe.Pointer(p)), - Len: n, - Cap: n, - })) + buf := unsafeByteSlice(unsafe.Pointer(p), 0, 0, n) fmt.Fprintf(os.Stderr, "%x\n", buf) } @@ -115,11 +108,7 @@ type branchPageElement struct { // key returns a byte slice of the node key. func (n *branchPageElement) key() []byte { - return *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{ - Data: uintptr(unsafe.Pointer(n)) + uintptr(n.pos), - Len: int(n.ksize), - Cap: int(n.ksize), - })) + return unsafeByteSlice(unsafe.Pointer(n), 0, int(n.pos), int(n.pos)+int(n.ksize)) } // leafPageElement represents a node on a leaf page. @@ -132,20 +121,16 @@ type leafPageElement struct { // key returns a byte slice of the node key. func (n *leafPageElement) key() []byte { - return *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{ - Data: uintptr(unsafe.Pointer(n)) + uintptr(n.pos), - Len: int(n.ksize), - Cap: int(n.ksize), - })) + i := int(n.pos) + j := i + int(n.ksize) + return unsafeByteSlice(unsafe.Pointer(n), 0, i, j) } // value returns a byte slice of the node value. 
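These accessors all lean on the array-pointer conversion defined in unsafe.go further down: cast a base pointer to a large fixed-size array and subslice it, instead of hand-building a reflect.SliceHeader. A scaled-down, self-contained demo of the trick; the struct and sizes here are made up for illustration:

```go
package main

import (
	"fmt"
	"unsafe"
)

// pageHeader stands in for bbolt's page struct; element data begins
// immediately after the header in memory.
type pageHeader struct {
	id    uint64
	count uint16
	_     [6]byte // pad to a round 16 bytes
}

func main() {
	// Back a fake "page" with an ordinary byte buffer.
	buf := make([]byte, 64)
	hdrSize := unsafe.Sizeof(pageHeader{}) // 16
	copy(buf[hdrSize:], "payload")

	base := unsafe.Pointer(&buf[0])
	// unsafeAdd equivalent: step past the header in one expression...
	data := unsafe.Pointer(uintptr(base) + hdrSize)
	// ...then view the next 7 bytes as a []byte via the array trick.
	b := (*[32]byte)(data)[:7:7]
	fmt.Println(string(b)) // payload
}
```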
func (n *leafPageElement) value() []byte { - return *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{ - Data: uintptr(unsafe.Pointer(n)) + uintptr(n.pos) + uintptr(n.ksize), - Len: int(n.vsize), - Cap: int(n.vsize), - })) + i := int(n.pos) + int(n.ksize) + j := i + int(n.vsize) + return unsafeByteSlice(unsafe.Pointer(n), 0, i, j) } // PageInfo represents human readable information about a page. diff --git a/vendor/go.etcd.io/bbolt/tx.go b/vendor/go.etcd.io/bbolt/tx.go index 13937cdb..4b1a64a8 100644 --- a/vendor/go.etcd.io/bbolt/tx.go +++ b/vendor/go.etcd.io/bbolt/tx.go @@ -4,7 +4,6 @@ import ( "fmt" "io" "os" - "reflect" "sort" "strings" "time" @@ -524,24 +523,18 @@ func (tx *Tx) write() error { // Write pages to disk in order. for _, p := range pages { - size := (int(p.overflow) + 1) * tx.db.pageSize + rem := (uint64(p.overflow) + 1) * uint64(tx.db.pageSize) offset := int64(p.id) * int64(tx.db.pageSize) + var written uintptr // Write out page in "max allocation" sized chunks. - ptr := uintptr(unsafe.Pointer(p)) for { - // Limit our write to our max allocation size. - sz := size + sz := rem if sz > maxAllocSize-1 { sz = maxAllocSize - 1 } + buf := unsafeByteSlice(unsafe.Pointer(p), written, 0, int(sz)) - // Write chunk to disk. - buf := *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{ - Data: ptr, - Len: sz, - Cap: sz, - })) if _, err := tx.db.ops.writeAt(buf, offset); err != nil { return err } @@ -550,14 +543,14 @@ func (tx *Tx) write() error { tx.stats.Write++ // Exit inner for loop if we've written all the chunks. - size -= sz - if size == 0 { + rem -= sz + if rem == 0 { break } // Otherwise move offset forward and move pointer to next chunk. offset += int64(sz) - ptr += uintptr(sz) + written += uintptr(sz) } } @@ -576,11 +569,7 @@ func (tx *Tx) write() error { continue } - buf := *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{ - Data: uintptr(unsafe.Pointer(p)), - Len: tx.db.pageSize, - Cap: tx.db.pageSize, - })) + buf := unsafeByteSlice(unsafe.Pointer(p), 0, 0, tx.db.pageSize) // See https://go.googlesource.com/go/+/f03c9202c43e0abb130669852082117ca50aa9b1 for i := range buf { diff --git a/vendor/go.etcd.io/bbolt/unsafe.go b/vendor/go.etcd.io/bbolt/unsafe.go new file mode 100644 index 00000000..c0e50375 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/unsafe.go @@ -0,0 +1,39 @@ +package bbolt + +import ( + "reflect" + "unsafe" +) + +func unsafeAdd(base unsafe.Pointer, offset uintptr) unsafe.Pointer { + return unsafe.Pointer(uintptr(base) + offset) +} + +func unsafeIndex(base unsafe.Pointer, offset uintptr, elemsz uintptr, n int) unsafe.Pointer { + return unsafe.Pointer(uintptr(base) + offset + uintptr(n)*elemsz) +} + +func unsafeByteSlice(base unsafe.Pointer, offset uintptr, i, j int) []byte { + // See: https://github.com/golang/go/wiki/cgo#turning-c-arrays-into-go-slices + // + // This memory is not allocated from C, but it is unmanaged by Go's + // garbage collector and should behave similarly, and the compiler + // should produce similar code. Note that this conversion allows a + // subslice to begin after the base address, with an optional offset, + // while the URL above does not cover this case and only slices from + // index 0. However, the wiki never says that the address must be to + // the beginning of a C allocation (or even that malloc was used at + // all), so this is believed to be correct. + return (*[maxAllocSize]byte)(unsafeAdd(base, offset))[i:j:j] +} + +// unsafeSlice modifies the data, len, and cap of a slice variable pointed to by +// the slice parameter. 
This helper should be used over other direct +// manipulation of reflect.SliceHeader to prevent misuse, namely, converting +// from reflect.SliceHeader to a Go slice type. +func unsafeSlice(slice, data unsafe.Pointer, len int) { + s := (*reflect.SliceHeader)(slice) + s.Data = uintptr(data) + s.Cap = len + s.Len = len +} diff --git a/vendor/golang.org/x/net/internal/timeseries/timeseries.go b/vendor/golang.org/x/net/internal/timeseries/timeseries.go new file mode 100644 index 00000000..dc5225b6 --- /dev/null +++ b/vendor/golang.org/x/net/internal/timeseries/timeseries.go @@ -0,0 +1,525 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package timeseries implements a time series structure for stats collection. +package timeseries // import "golang.org/x/net/internal/timeseries" + +import ( + "fmt" + "log" + "time" +) + +const ( + timeSeriesNumBuckets = 64 + minuteHourSeriesNumBuckets = 60 +) + +var timeSeriesResolutions = []time.Duration{ + 1 * time.Second, + 10 * time.Second, + 1 * time.Minute, + 10 * time.Minute, + 1 * time.Hour, + 6 * time.Hour, + 24 * time.Hour, // 1 day + 7 * 24 * time.Hour, // 1 week + 4 * 7 * 24 * time.Hour, // 4 weeks + 16 * 7 * 24 * time.Hour, // 16 weeks +} + +var minuteHourSeriesResolutions = []time.Duration{ + 1 * time.Second, + 1 * time.Minute, +} + +// An Observable is a kind of data that can be aggregated in a time series. +type Observable interface { + Multiply(ratio float64) // Multiplies the data in self by a given ratio + Add(other Observable) // Adds the data from a different observation to self + Clear() // Clears the observation so it can be reused. + CopyFrom(other Observable) // Copies the contents of a given observation to self +} + +// Float attaches the methods of Observable to a float64. +type Float float64 + +// NewFloat returns a Float. +func NewFloat() Observable { + f := Float(0) + return &f +} + +// String returns the float as a string. +func (f *Float) String() string { return fmt.Sprintf("%g", f.Value()) } + +// Value returns the float's value. +func (f *Float) Value() float64 { return float64(*f) } + +func (f *Float) Multiply(ratio float64) { *f *= Float(ratio) } + +func (f *Float) Add(other Observable) { + o := other.(*Float) + *f += *o +} + +func (f *Float) Clear() { *f = 0 } + +func (f *Float) CopyFrom(other Observable) { + o := other.(*Float) + *f = *o +} + +// A Clock tells the current time. +type Clock interface { + Time() time.Time +} + +type defaultClock int + +var defaultClockInstance defaultClock + +func (defaultClock) Time() time.Time { return time.Now() } + +// Information kept per level. Each level consists of a circular list of +// observations. The start of the level may be derived from end and the +// len(buckets) * sizeInMillis. 
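Float above is the canonical Observable; any aggregate that can be added, scaled, copied, and cleared fits the same shape. A hypothetical averaging Observable, sketched against a local copy of the interface since the real one lives in an internal package and cannot be imported:

```go
package main

import "fmt"

// Observable restates the interface defined above.
type Observable interface {
	Multiply(ratio float64)
	Add(other Observable)
	Clear()
	CopyFrom(other Observable)
}

// Mean is a hypothetical Observable tracking a running sum and count
// so buckets can be averaged after aggregation. Count is a float64 so
// Multiply's fractional scaling (used for partial bucket overlap)
// stays meaningful.
type Mean struct {
	Sum   float64
	Count float64
}

func (m *Mean) Multiply(ratio float64)    { m.Sum *= ratio; m.Count *= ratio }
func (m *Mean) Add(other Observable)      { o := other.(*Mean); m.Sum += o.Sum; m.Count += o.Count }
func (m *Mean) Clear()                    { *m = Mean{} }
func (m *Mean) CopyFrom(other Observable) { *m = *other.(*Mean) }

func (m *Mean) Value() float64 {
	if m.Count == 0 {
		return 0
	}
	return m.Sum / m.Count
}

func main() {
	a := &Mean{Sum: 10, Count: 2}
	b := &Mean{Sum: 5, Count: 1}
	a.Add(b)
	fmt.Println(a.Value()) // 5
}
```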
+type tsLevel struct { + oldest int // index to oldest bucketed Observable + newest int // index to newest bucketed Observable + end time.Time // end timestamp for this level + size time.Duration // duration of the bucketed Observable + buckets []Observable // collections of observations + provider func() Observable // used for creating new Observable +} + +func (l *tsLevel) Clear() { + l.oldest = 0 + l.newest = len(l.buckets) - 1 + l.end = time.Time{} + for i := range l.buckets { + if l.buckets[i] != nil { + l.buckets[i].Clear() + l.buckets[i] = nil + } + } +} + +func (l *tsLevel) InitLevel(size time.Duration, numBuckets int, f func() Observable) { + l.size = size + l.provider = f + l.buckets = make([]Observable, numBuckets) +} + +// Keeps a sequence of levels. Each level is responsible for storing data at +// a given resolution. For example, the first level stores data at a one +// minute resolution while the second level stores data at a one hour +// resolution. + +// Each level is represented by a sequence of buckets. Each bucket spans an +// interval equal to the resolution of the level. New observations are added +// to the last bucket. +type timeSeries struct { + provider func() Observable // make more Observable + numBuckets int // number of buckets in each level + levels []*tsLevel // levels of bucketed Observable + lastAdd time.Time // time of last Observable tracked + total Observable // convenient aggregation of all Observable + clock Clock // Clock for getting current time + pending Observable // observations not yet bucketed + pendingTime time.Time // what time are we keeping in pending + dirty bool // if there are pending observations +} + +// init initializes a level according to the supplied criteria. +func (ts *timeSeries) init(resolutions []time.Duration, f func() Observable, numBuckets int, clock Clock) { + ts.provider = f + ts.numBuckets = numBuckets + ts.clock = clock + ts.levels = make([]*tsLevel, len(resolutions)) + + for i := range resolutions { + if i > 0 && resolutions[i-1] >= resolutions[i] { + log.Print("timeseries: resolutions must be monotonically increasing") + break + } + newLevel := new(tsLevel) + newLevel.InitLevel(resolutions[i], ts.numBuckets, ts.provider) + ts.levels[i] = newLevel + } + + ts.Clear() +} + +// Clear removes all observations from the time series. +func (ts *timeSeries) Clear() { + ts.lastAdd = time.Time{} + ts.total = ts.resetObservation(ts.total) + ts.pending = ts.resetObservation(ts.pending) + ts.pendingTime = time.Time{} + ts.dirty = false + + for i := range ts.levels { + ts.levels[i].Clear() + } +} + +// Add records an observation at the current time. +func (ts *timeSeries) Add(observation Observable) { + ts.AddWithTime(observation, ts.clock.Time()) +} + +// AddWithTime records an observation at the specified time. +func (ts *timeSeries) AddWithTime(observation Observable, t time.Time) { + + smallBucketDuration := ts.levels[0].size + + if t.After(ts.lastAdd) { + ts.lastAdd = t + } + + if t.After(ts.pendingTime) { + ts.advance(t) + ts.mergePendingUpdates() + ts.pendingTime = ts.levels[0].end + ts.pending.CopyFrom(observation) + ts.dirty = true + } else if t.After(ts.pendingTime.Add(-1 * smallBucketDuration)) { + // The observation is close enough to go into the pending bucket. + // This compensates for clock skewing and small scheduling delays + // by letting the update stay in the fast path. 
+ ts.pending.Add(observation) + ts.dirty = true + } else { + ts.mergeValue(observation, t) + } +} + +// mergeValue inserts the observation at the specified time in the past into all levels. +func (ts *timeSeries) mergeValue(observation Observable, t time.Time) { + for _, level := range ts.levels { + index := (ts.numBuckets - 1) - int(level.end.Sub(t)/level.size) + if 0 <= index && index < ts.numBuckets { + bucketNumber := (level.oldest + index) % ts.numBuckets + if level.buckets[bucketNumber] == nil { + level.buckets[bucketNumber] = level.provider() + } + level.buckets[bucketNumber].Add(observation) + } + } + ts.total.Add(observation) +} + +// mergePendingUpdates applies the pending updates into all levels. +func (ts *timeSeries) mergePendingUpdates() { + if ts.dirty { + ts.mergeValue(ts.pending, ts.pendingTime) + ts.pending = ts.resetObservation(ts.pending) + ts.dirty = false + } +} + +// advance cycles the buckets at each level until the latest bucket in +// each level can hold the time specified. +func (ts *timeSeries) advance(t time.Time) { + if !t.After(ts.levels[0].end) { + return + } + for i := 0; i < len(ts.levels); i++ { + level := ts.levels[i] + if !level.end.Before(t) { + break + } + + // If the time is sufficiently far, just clear the level and advance + // directly. + if !t.Before(level.end.Add(level.size * time.Duration(ts.numBuckets))) { + for _, b := range level.buckets { + ts.resetObservation(b) + } + level.end = time.Unix(0, (t.UnixNano()/level.size.Nanoseconds())*level.size.Nanoseconds()) + } + + for t.After(level.end) { + level.end = level.end.Add(level.size) + level.newest = level.oldest + level.oldest = (level.oldest + 1) % ts.numBuckets + ts.resetObservation(level.buckets[level.newest]) + } + + t = level.end + } +} + +// Latest returns the sum of the num latest buckets from the level. +func (ts *timeSeries) Latest(level, num int) Observable { + now := ts.clock.Time() + if ts.levels[0].end.Before(now) { + ts.advance(now) + } + + ts.mergePendingUpdates() + + result := ts.provider() + l := ts.levels[level] + index := l.newest + + for i := 0; i < num; i++ { + if l.buckets[index] != nil { + result.Add(l.buckets[index]) + } + if index == 0 { + index = ts.numBuckets + } + index-- + } + + return result +} + +// LatestBuckets returns a copy of the num latest buckets from level. +func (ts *timeSeries) LatestBuckets(level, num int) []Observable { + if level < 0 || level > len(ts.levels) { + log.Print("timeseries: bad level argument: ", level) + return nil + } + if num < 0 || num >= ts.numBuckets { + log.Print("timeseries: bad num argument: ", num) + return nil + } + + results := make([]Observable, num) + now := ts.clock.Time() + if ts.levels[0].end.Before(now) { + ts.advance(now) + } + + ts.mergePendingUpdates() + + l := ts.levels[level] + index := l.newest + + for i := 0; i < num; i++ { + result := ts.provider() + results[i] = result + if l.buckets[index] != nil { + result.CopyFrom(l.buckets[index]) + } + + if index == 0 { + index = ts.numBuckets + } + index -= 1 + } + return results +} + +// ScaleBy updates observations by scaling by factor. +func (ts *timeSeries) ScaleBy(factor float64) { + for _, l := range ts.levels { + for i := 0; i < ts.numBuckets; i++ { + l.buckets[i].Multiply(factor) + } + } + + ts.total.Multiply(factor) + ts.pending.Multiply(factor) +} + +// Range returns the sum of observations added over the specified time range. +// If start or finish times don't fall on bucket boundaries of the same +// level, then return values are approximate answers. 
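The bucket arithmetic in mergeValue above is easy to sanity-check by hand: the newest bucket sits at logical index numBuckets-1, and each level.size step into the past moves one slot left. A worked instance under assumed values (64 buckets, one-second resolution):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Values mirror the first level of a timeSeries.
	numBuckets := 64
	size := time.Second
	end := time.Now() // level.end

	// An observation 5s old lands five slots before the newest bucket.
	t := end.Add(-5 * time.Second)
	index := (numBuckets - 1) - int(end.Sub(t)/size)
	fmt.Println(index) // 58

	// Anything older than numBuckets*size falls off this level, which
	// the 0 <= index guard in mergeValue rejects.
	tooOld := end.Add(-65 * time.Second)
	index = (numBuckets - 1) - int(end.Sub(tooOld)/size)
	fmt.Println(index, index >= 0) // -2 false
}
```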
+func (ts *timeSeries) Range(start, finish time.Time) Observable { + return ts.ComputeRange(start, finish, 1)[0] +} + +// Recent returns the sum of observations from the last delta. +func (ts *timeSeries) Recent(delta time.Duration) Observable { + now := ts.clock.Time() + return ts.Range(now.Add(-delta), now) +} + +// Total returns the total of all observations. +func (ts *timeSeries) Total() Observable { + ts.mergePendingUpdates() + return ts.total +} + +// ComputeRange computes a specified number of values into a slice using +// the observations recorded over the specified time period. The return +// values are approximate if the start or finish times don't fall on the +// bucket boundaries at the same level or if the number of buckets spanning +// the range is not an integral multiple of num. +func (ts *timeSeries) ComputeRange(start, finish time.Time, num int) []Observable { + if start.After(finish) { + log.Printf("timeseries: start > finish, %v>%v", start, finish) + return nil + } + + if num < 0 { + log.Printf("timeseries: num < 0, %v", num) + return nil + } + + results := make([]Observable, num) + + for _, l := range ts.levels { + if !start.Before(l.end.Add(-l.size * time.Duration(ts.numBuckets))) { + ts.extract(l, start, finish, num, results) + return results + } + } + + // Failed to find a level that covers the desired range. So just + // extract from the last level, even if it doesn't cover the entire + // desired range. + ts.extract(ts.levels[len(ts.levels)-1], start, finish, num, results) + + return results +} + +// RecentList returns the specified number of values in slice over the most +// recent time period of the specified range. +func (ts *timeSeries) RecentList(delta time.Duration, num int) []Observable { + if delta < 0 { + return nil + } + now := ts.clock.Time() + return ts.ComputeRange(now.Add(-delta), now, num) +} + +// extract returns a slice of specified number of observations from a given +// level over a given range. +func (ts *timeSeries) extract(l *tsLevel, start, finish time.Time, num int, results []Observable) { + ts.mergePendingUpdates() + + srcInterval := l.size + dstInterval := finish.Sub(start) / time.Duration(num) + dstStart := start + srcStart := l.end.Add(-srcInterval * time.Duration(ts.numBuckets)) + + srcIndex := 0 + + // Where should scanning start? + if dstStart.After(srcStart) { + advance := int(dstStart.Sub(srcStart) / srcInterval) + srcIndex += advance + srcStart = srcStart.Add(time.Duration(advance) * srcInterval) + } + + // The i'th value is computed as show below. + // interval = (finish/start)/num + // i'th value = sum of observation in range + // [ start + i * interval, + // start + (i + 1) * interval ) + for i := 0; i < num; i++ { + results[i] = ts.resetObservation(results[i]) + dstEnd := dstStart.Add(dstInterval) + for srcIndex < ts.numBuckets && srcStart.Before(dstEnd) { + srcEnd := srcStart.Add(srcInterval) + if srcEnd.After(ts.lastAdd) { + srcEnd = ts.lastAdd + } + + if !srcEnd.Before(dstStart) { + srcValue := l.buckets[(srcIndex+l.oldest)%ts.numBuckets] + if !srcStart.Before(dstStart) && !srcEnd.After(dstEnd) { + // dst completely contains src. + if srcValue != nil { + results[i].Add(srcValue) + } + } else { + // dst partially overlaps src. 
+ overlapStart := maxTime(srcStart, dstStart) + overlapEnd := minTime(srcEnd, dstEnd) + base := srcEnd.Sub(srcStart) + fraction := overlapEnd.Sub(overlapStart).Seconds() / base.Seconds() + + used := ts.provider() + if srcValue != nil { + used.CopyFrom(srcValue) + } + used.Multiply(fraction) + results[i].Add(used) + } + + if srcEnd.After(dstEnd) { + break + } + } + srcIndex++ + srcStart = srcStart.Add(srcInterval) + } + dstStart = dstStart.Add(dstInterval) + } +} + +// resetObservation clears the content so the struct may be reused. +func (ts *timeSeries) resetObservation(observation Observable) Observable { + if observation == nil { + observation = ts.provider() + } else { + observation.Clear() + } + return observation +} + +// TimeSeries tracks data at granularities from 1 second to 16 weeks. +type TimeSeries struct { + timeSeries +} + +// NewTimeSeries creates a new TimeSeries using the function provided for creating new Observable. +func NewTimeSeries(f func() Observable) *TimeSeries { + return NewTimeSeriesWithClock(f, defaultClockInstance) +} + +// NewTimeSeriesWithClock creates a new TimeSeries using the function provided for creating new Observable and the clock for +// assigning timestamps. +func NewTimeSeriesWithClock(f func() Observable, clock Clock) *TimeSeries { + ts := new(TimeSeries) + ts.timeSeries.init(timeSeriesResolutions, f, timeSeriesNumBuckets, clock) + return ts +} + +// MinuteHourSeries tracks data at granularities of 1 minute and 1 hour. +type MinuteHourSeries struct { + timeSeries +} + +// NewMinuteHourSeries creates a new MinuteHourSeries using the function provided for creating new Observable. +func NewMinuteHourSeries(f func() Observable) *MinuteHourSeries { + return NewMinuteHourSeriesWithClock(f, defaultClockInstance) +} + +// NewMinuteHourSeriesWithClock creates a new MinuteHourSeries using the function provided for creating new Observable and the clock for +// assigning timestamps. +func NewMinuteHourSeriesWithClock(f func() Observable, clock Clock) *MinuteHourSeries { + ts := new(MinuteHourSeries) + ts.timeSeries.init(minuteHourSeriesResolutions, f, + minuteHourSeriesNumBuckets, clock) + return ts +} + +func (ts *MinuteHourSeries) Minute() Observable { + return ts.timeSeries.Latest(0, 60) +} + +func (ts *MinuteHourSeries) Hour() Observable { + return ts.timeSeries.Latest(1, 60) +} + +func minTime(a, b time.Time) time.Time { + if a.Before(b) { + return a + } + return b +} + +func maxTime(a, b time.Time) time.Time { + if a.After(b) { + return a + } + return b +} diff --git a/vendor/golang.org/x/net/trace/events.go b/vendor/golang.org/x/net/trace/events.go new file mode 100644 index 00000000..c646a695 --- /dev/null +++ b/vendor/golang.org/x/net/trace/events.go @@ -0,0 +1,532 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +import ( + "bytes" + "fmt" + "html/template" + "io" + "log" + "net/http" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "text/tabwriter" + "time" +) + +const maxEventsPerLog = 100 + +type bucket struct { + MaxErrAge time.Duration + String string +} + +var buckets = []bucket{ + {0, "total"}, + {10 * time.Second, "errs<10s"}, + {1 * time.Minute, "errs<1m"}, + {10 * time.Minute, "errs<10m"}, + {1 * time.Hour, "errs<1h"}, + {10 * time.Hour, "errs<10h"}, + {24000 * time.Hour, "errors"}, +} + +// RenderEvents renders the HTML page typically served at /debug/events. 
+// It does not do any auth checking. The request may be nil. +// +// Most users will use the Events handler. +func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) { + now := time.Now() + data := &struct { + Families []string // family names + Buckets []bucket + Counts [][]int // eventLog count per family/bucket + + // Set when a bucket has been selected. + Family string + Bucket int + EventLogs eventLogs + Expanded bool + }{ + Buckets: buckets, + } + + data.Families = make([]string, 0, len(families)) + famMu.RLock() + for name := range families { + data.Families = append(data.Families, name) + } + famMu.RUnlock() + sort.Strings(data.Families) + + // Count the number of eventLogs in each family for each error age. + data.Counts = make([][]int, len(data.Families)) + for i, name := range data.Families { + // TODO(sameer): move this loop under the family lock. + f := getEventFamily(name) + data.Counts[i] = make([]int, len(data.Buckets)) + for j, b := range data.Buckets { + data.Counts[i][j] = f.Count(now, b.MaxErrAge) + } + } + + if req != nil { + var ok bool + data.Family, data.Bucket, ok = parseEventsArgs(req) + if !ok { + // No-op + } else { + data.EventLogs = getEventFamily(data.Family).Copy(now, buckets[data.Bucket].MaxErrAge) + } + if data.EventLogs != nil { + defer data.EventLogs.Free() + sort.Sort(data.EventLogs) + } + if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { + data.Expanded = exp + } + } + + famMu.RLock() + defer famMu.RUnlock() + if err := eventsTmpl().Execute(w, data); err != nil { + log.Printf("net/trace: Failed executing template: %v", err) + } +} + +func parseEventsArgs(req *http.Request) (fam string, b int, ok bool) { + fam, bStr := req.FormValue("fam"), req.FormValue("b") + if fam == "" || bStr == "" { + return "", 0, false + } + b, err := strconv.Atoi(bStr) + if err != nil || b < 0 || b >= len(buckets) { + return "", 0, false + } + return fam, b, true +} + +// An EventLog provides a log of events associated with a specific object. +type EventLog interface { + // Printf formats its arguments with fmt.Sprintf and adds the + // result to the event log. + Printf(format string, a ...interface{}) + + // Errorf is like Printf, but it marks this event as an error. + Errorf(format string, a ...interface{}) + + // Finish declares that this event log is complete. + // The event log should not be used after calling this method. + Finish() +} + +// NewEventLog returns a new EventLog with the specified family name +// and title. 
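The EventLog interface above is the long-lived counterpart to trace.Trace, and NewEventLog just below constructs one and attaches it to its family. Typical usage, assuming this vendored copy registers its /debug handlers on the default mux at init (port and names are illustrative):

```go
package main

import (
	"log"
	"net/http"

	"golang.org/x/net/trace"
)

func main() {
	el := trace.NewEventLog("example.Worker", "worker-1")
	defer el.Finish()

	el.Printf("connected")
	el.Errorf("retrying after error: %v", "timeout") // counted in the errs<... buckets

	// RenderEvents backs the /debug/events page; with the package's
	// default handler registration, a plain server is enough to browse it.
	log.Fatal(http.ListenAndServe("localhost:6060", nil))
}
```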
+func NewEventLog(family, title string) EventLog { + el := newEventLog() + el.ref() + el.Family, el.Title = family, title + el.Start = time.Now() + el.events = make([]logEntry, 0, maxEventsPerLog) + el.stack = make([]uintptr, 32) + n := runtime.Callers(2, el.stack) + el.stack = el.stack[:n] + + getEventFamily(family).add(el) + return el +} + +func (el *eventLog) Finish() { + getEventFamily(el.Family).remove(el) + el.unref() // matches ref in New +} + +var ( + famMu sync.RWMutex + families = make(map[string]*eventFamily) // family name => family +) + +func getEventFamily(fam string) *eventFamily { + famMu.Lock() + defer famMu.Unlock() + f := families[fam] + if f == nil { + f = &eventFamily{} + families[fam] = f + } + return f +} + +type eventFamily struct { + mu sync.RWMutex + eventLogs eventLogs +} + +func (f *eventFamily) add(el *eventLog) { + f.mu.Lock() + f.eventLogs = append(f.eventLogs, el) + f.mu.Unlock() +} + +func (f *eventFamily) remove(el *eventLog) { + f.mu.Lock() + defer f.mu.Unlock() + for i, el0 := range f.eventLogs { + if el == el0 { + copy(f.eventLogs[i:], f.eventLogs[i+1:]) + f.eventLogs = f.eventLogs[:len(f.eventLogs)-1] + return + } + } +} + +func (f *eventFamily) Count(now time.Time, maxErrAge time.Duration) (n int) { + f.mu.RLock() + defer f.mu.RUnlock() + for _, el := range f.eventLogs { + if el.hasRecentError(now, maxErrAge) { + n++ + } + } + return +} + +func (f *eventFamily) Copy(now time.Time, maxErrAge time.Duration) (els eventLogs) { + f.mu.RLock() + defer f.mu.RUnlock() + els = make(eventLogs, 0, len(f.eventLogs)) + for _, el := range f.eventLogs { + if el.hasRecentError(now, maxErrAge) { + el.ref() + els = append(els, el) + } + } + return +} + +type eventLogs []*eventLog + +// Free calls unref on each element of the list. +func (els eventLogs) Free() { + for _, el := range els { + el.unref() + } +} + +// eventLogs may be sorted in reverse chronological order. +func (els eventLogs) Len() int { return len(els) } +func (els eventLogs) Less(i, j int) bool { return els[i].Start.After(els[j].Start) } +func (els eventLogs) Swap(i, j int) { els[i], els[j] = els[j], els[i] } + +// A logEntry is a timestamped log entry in an event log. +type logEntry struct { + When time.Time + Elapsed time.Duration // since previous event in log + NewDay bool // whether this event is on a different day to the previous event + What string + IsErr bool +} + +// WhenString returns a string representation of the elapsed time of the event. +// It will include the date if midnight was crossed. +func (e logEntry) WhenString() string { + if e.NewDay { + return e.When.Format("2006/01/02 15:04:05.000000") + } + return e.When.Format("15:04:05.000000") +} + +// An eventLog represents an active event log. +type eventLog struct { + // Family is the top-level grouping of event logs to which this belongs. + Family string + + // Title is the title of this event log. + Title string + + // Timing information. + Start time.Time + + // Call stack where this event log was created. + stack []uintptr + + // Append-only sequence of events. + // + // TODO(sameer): change this to a ring buffer to avoid the array copy + // when we hit maxEventsPerLog. + mu sync.RWMutex + events []logEntry + LastErrorTime time.Time + discarded int + + refs int32 // how many buckets this is in +} + +func (el *eventLog) reset() { + // Clear all but the mutex. Mutexes may not be copied, even when unlocked. 
+ el.Family = "" + el.Title = "" + el.Start = time.Time{} + el.stack = nil + el.events = nil + el.LastErrorTime = time.Time{} + el.discarded = 0 + el.refs = 0 +} + +func (el *eventLog) hasRecentError(now time.Time, maxErrAge time.Duration) bool { + if maxErrAge == 0 { + return true + } + el.mu.RLock() + defer el.mu.RUnlock() + return now.Sub(el.LastErrorTime) < maxErrAge +} + +// delta returns the elapsed time since the last event or the log start, +// and whether it spans midnight. +// L >= el.mu +func (el *eventLog) delta(t time.Time) (time.Duration, bool) { + if len(el.events) == 0 { + return t.Sub(el.Start), false + } + prev := el.events[len(el.events)-1].When + return t.Sub(prev), prev.Day() != t.Day() + +} + +func (el *eventLog) Printf(format string, a ...interface{}) { + el.printf(false, format, a...) +} + +func (el *eventLog) Errorf(format string, a ...interface{}) { + el.printf(true, format, a...) +} + +func (el *eventLog) printf(isErr bool, format string, a ...interface{}) { + e := logEntry{When: time.Now(), IsErr: isErr, What: fmt.Sprintf(format, a...)} + el.mu.Lock() + e.Elapsed, e.NewDay = el.delta(e.When) + if len(el.events) < maxEventsPerLog { + el.events = append(el.events, e) + } else { + // Discard the oldest event. + if el.discarded == 0 { + // el.discarded starts at two to count for the event it + // is replacing, plus the next one that we are about to + // drop. + el.discarded = 2 + } else { + el.discarded++ + } + // TODO(sameer): if this causes allocations on a critical path, + // change eventLog.What to be a fmt.Stringer, as in trace.go. + el.events[0].What = fmt.Sprintf("(%d events discarded)", el.discarded) + // The timestamp of the discarded meta-event should be + // the time of the last event it is representing. + el.events[0].When = el.events[1].When + copy(el.events[1:], el.events[2:]) + el.events[maxEventsPerLog-1] = e + } + if e.IsErr { + el.LastErrorTime = e.When + } + el.mu.Unlock() +} + +func (el *eventLog) ref() { + atomic.AddInt32(&el.refs, 1) +} + +func (el *eventLog) unref() { + if atomic.AddInt32(&el.refs, -1) == 0 { + freeEventLog(el) + } +} + +func (el *eventLog) When() string { + return el.Start.Format("2006/01/02 15:04:05.000000") +} + +func (el *eventLog) ElapsedTime() string { + elapsed := time.Since(el.Start) + return fmt.Sprintf("%.6f", elapsed.Seconds()) +} + +func (el *eventLog) Stack() string { + buf := new(bytes.Buffer) + tw := tabwriter.NewWriter(buf, 1, 8, 1, '\t', 0) + printStackRecord(tw, el.stack) + tw.Flush() + return buf.String() +} + +// printStackRecord prints the function + source line information +// for a single stack trace. +// Adapted from runtime/pprof/pprof.go. +func printStackRecord(w io.Writer, stk []uintptr) { + for _, pc := range stk { + f := runtime.FuncForPC(pc) + if f == nil { + continue + } + file, line := f.FileLine(pc) + name := f.Name() + // Hide runtime.goexit and any runtime functions at the beginning. + if strings.HasPrefix(name, "runtime.") { + continue + } + fmt.Fprintf(w, "# %s\t%s:%d\n", name, file, line) + } +} + +func (el *eventLog) Events() []logEntry { + el.mu.RLock() + defer el.mu.RUnlock() + return el.events +} + +// freeEventLogs is a freelist of *eventLog +var freeEventLogs = make(chan *eventLog, 1000) + +// newEventLog returns a event log ready to use. +func newEventLog() *eventLog { + select { + case el := <-freeEventLogs: + return el + default: + return new(eventLog) + } +} + +// freeEventLog adds el to freeEventLogs if there's room. +// This is non-blocking. 
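newEventLog above and freeEventLog just below build a lock-free object pool out of a buffered channel: receive-with-default to reuse, send-with-default to recycle, and let the GC absorb overflow. The same pattern applied to a hypothetical buffer type:

```go
package main

import "fmt"

// buffer is a hypothetical recyclable object.
type buffer struct{ data []byte }

// freeBuffers plays the role of freeEventLogs: a buffered channel
// used as a free list.
var freeBuffers = make(chan *buffer, 100)

// newBuffer reuses a recycled buffer when one is available, like
// newEventLog above.
func newBuffer() *buffer {
	select {
	case b := <-freeBuffers:
		return b
	default:
		return &buffer{data: make([]byte, 0, 4096)}
	}
}

// freeBuffer resets and recycles; the send never blocks, so a full
// list simply drops the buffer for the GC to reclaim.
func freeBuffer(b *buffer) {
	b.data = b.data[:0]
	select {
	case freeBuffers <- b:
	default:
	}
}

func main() {
	b := newBuffer()
	b.data = append(b.data, "hello"...)
	freeBuffer(b)
	fmt.Println(cap(newBuffer().data)) // 4096: the buffer was recycled
}
```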
+func freeEventLog(el *eventLog) { + el.reset() + select { + case freeEventLogs <- el: + default: + } +} + +var eventsTmplCache *template.Template +var eventsTmplOnce sync.Once + +func eventsTmpl() *template.Template { + eventsTmplOnce.Do(func() { + eventsTmplCache = template.Must(template.New("events").Funcs(template.FuncMap{ + "elapsed": elapsed, + "trimSpace": strings.TrimSpace, + }).Parse(eventsHTML)) + }) + return eventsTmplCache +} + +const eventsHTML = ` + + + events + + + + +

[eventsHTML template body: the HTML markup was lost in extraction. What survives shows a "/debug/events" heading; a table ranging over $.Families and $.Buckets with {{$n := index $.Counts $i $j}} rendering each cell as "[{{$n}} {{$bucket.String}}]", linked when non-zero; and, {{if $.EventLogs}}, a "Family: {{$.Family}}" section with [Summary]/[Expanded] views: a When/Elapsed table of {{$el.When}}, {{$el.ElapsedTime}}, {{$el.Title}} rows, the expanded view adding {{$el.Stack|trimSpace}} and per-event "{{.WhenString}} {{elapsed .Elapsed}} {{if .IsErr}}E{{else}}.{{end}} {{.What}}" lines.]
+{{end}} + + +` diff --git a/vendor/golang.org/x/net/trace/histogram.go b/vendor/golang.org/x/net/trace/histogram.go new file mode 100644 index 00000000..9bf4286c --- /dev/null +++ b/vendor/golang.org/x/net/trace/histogram.go @@ -0,0 +1,365 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +// This file implements histogramming for RPC statistics collection. + +import ( + "bytes" + "fmt" + "html/template" + "log" + "math" + "sync" + + "golang.org/x/net/internal/timeseries" +) + +const ( + bucketCount = 38 +) + +// histogram keeps counts of values in buckets that are spaced +// out in powers of 2: 0-1, 2-3, 4-7... +// histogram implements timeseries.Observable +type histogram struct { + sum int64 // running total of measurements + sumOfSquares float64 // square of running total + buckets []int64 // bucketed values for histogram + value int // holds a single value as an optimization + valueCount int64 // number of values recorded for single value +} + +// AddMeasurement records a value measurement observation to the histogram. +func (h *histogram) addMeasurement(value int64) { + // TODO: assert invariant + h.sum += value + h.sumOfSquares += float64(value) * float64(value) + + bucketIndex := getBucket(value) + + if h.valueCount == 0 || (h.valueCount > 0 && h.value == bucketIndex) { + h.value = bucketIndex + h.valueCount++ + } else { + h.allocateBuckets() + h.buckets[bucketIndex]++ + } +} + +func (h *histogram) allocateBuckets() { + if h.buckets == nil { + h.buckets = make([]int64, bucketCount) + h.buckets[h.value] = h.valueCount + h.value = 0 + h.valueCount = -1 + } +} + +func log2(i int64) int { + n := 0 + for ; i >= 0x100; i >>= 8 { + n += 8 + } + for ; i > 0; i >>= 1 { + n += 1 + } + return n +} + +func getBucket(i int64) (index int) { + index = log2(i) - 1 + if index < 0 { + index = 0 + } + if index >= bucketCount { + index = bucketCount - 1 + } + return +} + +// Total returns the number of recorded observations. +func (h *histogram) total() (total int64) { + if h.valueCount >= 0 { + total = h.valueCount + } + for _, val := range h.buckets { + total += int64(val) + } + return +} + +// Average returns the average value of recorded observations. +func (h *histogram) average() float64 { + t := h.total() + if t == 0 { + return 0 + } + return float64(h.sum) / float64(t) +} + +// Variance returns the variance of recorded observations. +func (h *histogram) variance() float64 { + t := float64(h.total()) + if t == 0 { + return 0 + } + s := float64(h.sum) / t + return h.sumOfSquares/t - s*s +} + +// StandardDeviation returns the standard deviation of recorded observations. +func (h *histogram) standardDeviation() float64 { + return math.Sqrt(h.variance()) +} + +// PercentileBoundary estimates the value that the given fraction of recorded +// observations are less than. +func (h *histogram) percentileBoundary(percentile float64) int64 { + total := h.total() + + // Corner cases (make sure result is strictly less than Total()) + if total == 0 { + return 0 + } else if total == 1 { + return int64(h.average()) + } + + percentOfTotal := round(float64(total) * percentile) + var runningTotal int64 + + for i := range h.buckets { + value := h.buckets[i] + runningTotal += value + if runningTotal == percentOfTotal { + // We hit an exact bucket boundary. If the next bucket has data, it is a + // good estimate of the value. 
If the bucket is empty, we interpolate the + // midpoint between the next bucket's boundary and the next non-zero + // bucket. If the remaining buckets are all empty, then we use the + // boundary for the next bucket as the estimate. + j := uint8(i + 1) + min := bucketBoundary(j) + if runningTotal < total { + for h.buckets[j] == 0 { + j++ + } + } + max := bucketBoundary(j) + return min + round(float64(max-min)/2) + } else if runningTotal > percentOfTotal { + // The value is in this bucket. Interpolate the value. + delta := runningTotal - percentOfTotal + percentBucket := float64(value-delta) / float64(value) + bucketMin := bucketBoundary(uint8(i)) + nextBucketMin := bucketBoundary(uint8(i + 1)) + bucketSize := nextBucketMin - bucketMin + return bucketMin + round(percentBucket*float64(bucketSize)) + } + } + return bucketBoundary(bucketCount - 1) +} + +// Median returns the estimated median of the observed values. +func (h *histogram) median() int64 { + return h.percentileBoundary(0.5) +} + +// Add adds other to h. +func (h *histogram) Add(other timeseries.Observable) { + o := other.(*histogram) + if o.valueCount == 0 { + // Other histogram is empty + } else if h.valueCount >= 0 && o.valueCount > 0 && h.value == o.value { + // Both have a single bucketed value, aggregate them + h.valueCount += o.valueCount + } else { + // Two different values necessitate buckets in this histogram + h.allocateBuckets() + if o.valueCount >= 0 { + h.buckets[o.value] += o.valueCount + } else { + for i := range h.buckets { + h.buckets[i] += o.buckets[i] + } + } + } + h.sumOfSquares += o.sumOfSquares + h.sum += o.sum +} + +// Clear resets the histogram to an empty state, removing all observed values. +func (h *histogram) Clear() { + h.buckets = nil + h.value = 0 + h.valueCount = 0 + h.sum = 0 + h.sumOfSquares = 0 +} + +// CopyFrom copies from other, which must be a *histogram, into h. +func (h *histogram) CopyFrom(other timeseries.Observable) { + o := other.(*histogram) + if o.valueCount == -1 { + h.allocateBuckets() + copy(h.buckets, o.buckets) + } + h.sum = o.sum + h.sumOfSquares = o.sumOfSquares + h.value = o.value + h.valueCount = o.valueCount +} + +// Multiply scales the histogram by the specified ratio. +func (h *histogram) Multiply(ratio float64) { + if h.valueCount == -1 { + for i := range h.buckets { + h.buckets[i] = int64(float64(h.buckets[i]) * ratio) + } + } else { + h.valueCount = int64(float64(h.valueCount) * ratio) + } + h.sum = int64(float64(h.sum) * ratio) + h.sumOfSquares = h.sumOfSquares * ratio +} + +// New creates a new histogram. +func (h *histogram) New() timeseries.Observable { + r := new(histogram) + r.Clear() + return r +} + +func (h *histogram) String() string { + return fmt.Sprintf("%d, %f, %d, %d, %v", + h.sum, h.sumOfSquares, h.value, h.valueCount, h.buckets) +} + +// round returns the closest int64 to the argument +func round(in float64) int64 { + return int64(math.Floor(in + 0.5)) +} + +// bucketBoundary returns the first value in the bucket. +func bucketBoundary(bucket uint8) int64 { + if bucket == 0 { + return 0 + } + return 1 << bucket +} + +// bucketData holds data about a specific bucket for use in distTmpl. +type bucketData struct { + Lower, Upper int64 + N int64 + Pct, CumulativePct float64 + GraphWidth int +} + +// data holds data about a Distribution for use in distTmpl. +type data struct { + Buckets []*bucketData + Count, Median int64 + Mean, StandardDeviation float64 +} + +// maxHTMLBarWidth is the maximum width of the HTML bar for visualizing buckets. 
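The power-of-two bucketing above (values land in buckets 0-1, 2-3, 4-7, ..., capped at bucketCount-1) is compact enough to verify standalone. This sketch lifts log2 and getBucket as written and walks a few values through them:

```go
package main

import "fmt"

const bucketCount = 38

// log2 returns the number of significant bits in i, as above.
func log2(i int64) int {
	n := 0
	for ; i >= 0x100; i >>= 8 {
		n += 8
	}
	for ; i > 0; i >>= 1 {
		n++
	}
	return n
}

// getBucket clamps log2(i)-1 into [0, bucketCount), as above.
func getBucket(i int64) int {
	index := log2(i) - 1
	if index < 0 {
		index = 0
	}
	if index >= bucketCount {
		index = bucketCount - 1
	}
	return index
}

func main() {
	for _, v := range []int64{0, 1, 2, 3, 4, 7, 8, 1000} {
		fmt.Printf("%d -> bucket %d\n", v, getBucket(v))
	}
	// 0,1 -> 0; 2,3 -> 1; 4..7 -> 2; 8 -> 3; 1000 -> 9
	// consistent with bucketBoundary: bucket 9 spans [512, 1024).
}
```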
+const maxHTMLBarWidth = 350.0 + +// newData returns data representing h for use in distTmpl. +func (h *histogram) newData() *data { + // Force the allocation of buckets to simplify the rendering implementation + h.allocateBuckets() + // We scale the bars on the right so that the largest bar is + // maxHTMLBarWidth pixels in width. + maxBucket := int64(0) + for _, n := range h.buckets { + if n > maxBucket { + maxBucket = n + } + } + total := h.total() + barsizeMult := maxHTMLBarWidth / float64(maxBucket) + var pctMult float64 + if total == 0 { + pctMult = 1.0 + } else { + pctMult = 100.0 / float64(total) + } + + buckets := make([]*bucketData, len(h.buckets)) + runningTotal := int64(0) + for i, n := range h.buckets { + if n == 0 { + continue + } + runningTotal += n + var upperBound int64 + if i < bucketCount-1 { + upperBound = bucketBoundary(uint8(i + 1)) + } else { + upperBound = math.MaxInt64 + } + buckets[i] = &bucketData{ + Lower: bucketBoundary(uint8(i)), + Upper: upperBound, + N: n, + Pct: float64(n) * pctMult, + CumulativePct: float64(runningTotal) * pctMult, + GraphWidth: int(float64(n) * barsizeMult), + } + } + return &data{ + Buckets: buckets, + Count: total, + Median: h.median(), + Mean: h.average(), + StandardDeviation: h.standardDeviation(), + } +} + +func (h *histogram) html() template.HTML { + buf := new(bytes.Buffer) + if err := distTmpl().Execute(buf, h.newData()); err != nil { + buf.Reset() + log.Printf("net/trace: couldn't execute template: %v", err) + } + return template.HTML(buf.String()) +} + +var distTmplCache *template.Template +var distTmplOnce sync.Once + +func distTmpl() *template.Template { + distTmplOnce.Do(func() { + // Input: data + distTmplCache = template.Must(template.New("distTmpl").Parse(` + + + + + + + +
+Count: {{.Count}}
+Mean: {{printf "%.0f" .Mean}}
+StdDev: {{printf "%.0f" .StandardDeviation}}
+Median: {{.Median}}
+
+ +{{range $b := .Buckets}} +{{if $b}} + + + + + + + + + +{{end}} +{{end}} +
+[{{.Lower}},{{.Upper}})
+{{.N}}
+{{printf "%#.3f" .Pct}}%
+{{printf "%#.3f" .CumulativePct}}%
+`)) + }) + return distTmplCache +} diff --git a/vendor/golang.org/x/net/trace/trace.go b/vendor/golang.org/x/net/trace/trace.go new file mode 100644 index 00000000..3ebf6f2d --- /dev/null +++ b/vendor/golang.org/x/net/trace/trace.go @@ -0,0 +1,1130 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package trace implements tracing of requests and long-lived objects. +It exports HTTP interfaces on /debug/requests and /debug/events. + +A trace.Trace provides tracing for short-lived objects, usually requests. +A request handler might be implemented like this: + + func fooHandler(w http.ResponseWriter, req *http.Request) { + tr := trace.New("mypkg.Foo", req.URL.Path) + defer tr.Finish() + ... + tr.LazyPrintf("some event %q happened", str) + ... + if err := somethingImportant(); err != nil { + tr.LazyPrintf("somethingImportant failed: %v", err) + tr.SetError() + } + } + +The /debug/requests HTTP endpoint organizes the traces by family, +errors, and duration. It also provides histogram of request duration +for each family. + +A trace.EventLog provides tracing for long-lived objects, such as RPC +connections. + + // A Fetcher fetches URL paths for a single domain. + type Fetcher struct { + domain string + events trace.EventLog + } + + func NewFetcher(domain string) *Fetcher { + return &Fetcher{ + domain, + trace.NewEventLog("mypkg.Fetcher", domain), + } + } + + func (f *Fetcher) Fetch(path string) (string, error) { + resp, err := http.Get("http://" + f.domain + "/" + path) + if err != nil { + f.events.Errorf("Get(%q) = %v", path, err) + return "", err + } + f.events.Printf("Get(%q) = %s", path, resp.Status) + ... + } + + func (f *Fetcher) Close() error { + f.events.Finish() + return nil + } + +The /debug/events HTTP endpoint organizes the event logs by family and +by time since the last error. The expanded view displays recent log +entries and the log's call stack. +*/ +package trace // import "golang.org/x/net/trace" + +import ( + "bytes" + "context" + "fmt" + "html/template" + "io" + "log" + "net" + "net/http" + "net/url" + "runtime" + "sort" + "strconv" + "sync" + "sync/atomic" + "time" + + "golang.org/x/net/internal/timeseries" +) + +// DebugUseAfterFinish controls whether to debug uses of Trace values after finishing. +// FOR DEBUGGING ONLY. This will slow down the program. +var DebugUseAfterFinish = false + +// HTTP ServeMux paths. +const ( + debugRequestsPath = "/debug/requests" + debugEventsPath = "/debug/events" +) + +// AuthRequest determines whether a specific request is permitted to load the +// /debug/requests or /debug/events pages. +// +// It returns two bools; the first indicates whether the page may be viewed at all, +// and the second indicates whether sensitive events will be shown. +// +// AuthRequest may be replaced by a program to customize its authorization requirements. +// +// The default AuthRequest function returns (true, true) if and only if the request +// comes from localhost/127.0.0.1/[::1]. +var AuthRequest = func(req *http.Request) (any, sensitive bool) { + // RemoteAddr is commonly in the form "IP" or "IP:port". + // If it is in the form "IP:port", split off the port. 
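Because AuthRequest is a plain package-level variable, programs can substitute their own policy for the loopback-only default being defined here. A minimal sketch, assuming a hypothetical deployment that trusts 10.0.0.0/8 to view the pages but reserves sensitive events for loopback:

```go
package main

import (
	"net"
	"net/http"

	"golang.org/x/net/trace"
)

func main() {
	// Assumption for illustration: 10.0.0.0/8 is a trusted operations network.
	_, trusted, _ := net.ParseCIDR("10.0.0.0/8")
	trace.AuthRequest = func(req *http.Request) (any, sensitive bool) {
		host, _, err := net.SplitHostPort(req.RemoteAddr)
		if err != nil {
			host = req.RemoteAddr // no port present
		}
		ip := net.ParseIP(host)
		switch {
		case ip == nil:
			return false, false
		case ip.IsLoopback():
			return true, true // local operators may see sensitive events
		case trusted.Contains(ip):
			return true, false // remote operators see redacted events
		default:
			return false, false
		}
	}
	http.ListenAndServe("localhost:8080", nil) // serves /debug/requests and /debug/events
}
```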
+ host, _, err := net.SplitHostPort(req.RemoteAddr) + if err != nil { + host = req.RemoteAddr + } + switch host { + case "localhost", "127.0.0.1", "::1": + return true, true + default: + return false, false + } +} + +func init() { + _, pat := http.DefaultServeMux.Handler(&http.Request{URL: &url.URL{Path: debugRequestsPath}}) + if pat == debugRequestsPath { + panic("/debug/requests is already registered. You may have two independent copies of " + + "golang.org/x/net/trace in your binary, trying to maintain separate state. This may " + + "involve a vendored copy of golang.org/x/net/trace.") + } + + // TODO(jbd): Serve Traces from /debug/traces in the future? + // There is no requirement for a request to be present to have traces. + http.HandleFunc(debugRequestsPath, Traces) + http.HandleFunc(debugEventsPath, Events) +} + +// NewContext returns a copy of the parent context +// and associates it with a Trace. +func NewContext(ctx context.Context, tr Trace) context.Context { + return context.WithValue(ctx, contextKey, tr) +} + +// FromContext returns the Trace bound to the context, if any. +func FromContext(ctx context.Context) (tr Trace, ok bool) { + tr, ok = ctx.Value(contextKey).(Trace) + return +} + +// Traces responds with traces from the program. +// The package initialization registers it in http.DefaultServeMux +// at /debug/requests. +// +// It performs authorization by running AuthRequest. +func Traces(w http.ResponseWriter, req *http.Request) { + any, sensitive := AuthRequest(req) + if !any { + http.Error(w, "not allowed", http.StatusUnauthorized) + return + } + w.Header().Set("Content-Type", "text/html; charset=utf-8") + Render(w, req, sensitive) +} + +// Events responds with a page of events collected by EventLogs. +// The package initialization registers it in http.DefaultServeMux +// at /debug/events. +// +// It performs authorization by running AuthRequest. +func Events(w http.ResponseWriter, req *http.Request) { + any, sensitive := AuthRequest(req) + if !any { + http.Error(w, "not allowed", http.StatusUnauthorized) + return + } + w.Header().Set("Content-Type", "text/html; charset=utf-8") + RenderEvents(w, req, sensitive) +} + +// Render renders the HTML page typically served at /debug/requests. +// It does not do any auth checking. The request may be nil. +// +// Most users will use the Traces handler. +func Render(w io.Writer, req *http.Request, sensitive bool) { + data := &struct { + Families []string + ActiveTraceCount map[string]int + CompletedTraces map[string]*family + + // Set when a bucket has been selected. + Traces traceList + Family string + Bucket int + Expanded bool + Traced bool + Active bool + ShowSensitive bool // whether to show sensitive events + + Histogram template.HTML + HistogramWindow string // e.g. "last minute", "last hour", "all time" + + // If non-zero, the set of traces is a partial set, + // and this is the total number. + Total int + }{ + CompletedTraces: completedTraces, + } + + data.ShowSensitive = sensitive + if req != nil { + // Allow show_sensitive=0 to force hiding of sensitive data for testing. + // This only goes one way; you can't use show_sensitive=1 to see things. 
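NewContext and FromContext above are how code without shared plumbing joins an in-flight trace: a handler stores its Trace in the request context, and helpers deep in the call stack annotate it. A minimal sketch (the mypkg.Lookup family and lookup helper are illustrative):

```go
package main

import (
	"context"
	"fmt"
	"net/http"

	"golang.org/x/net/trace"
)

func lookup(ctx context.Context, key string) string {
	// Deep in the call stack, annotate whichever trace is in flight, if any.
	if tr, ok := trace.FromContext(ctx); ok {
		tr.LazyPrintf("lookup(%q)", key)
	}
	return "value-for-" + key
}

func handler(w http.ResponseWriter, req *http.Request) {
	tr := trace.New("mypkg.Lookup", req.URL.Path)
	defer tr.Finish()
	ctx := trace.NewContext(req.Context(), tr)
	fmt.Fprintln(w, lookup(ctx, req.URL.Path))
}

func main() {
	http.HandleFunc("/", handler)
	http.ListenAndServe("localhost:8080", nil)
}
```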
+ if req.FormValue("show_sensitive") == "0" { + data.ShowSensitive = false + } + + if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { + data.Expanded = exp + } + if exp, err := strconv.ParseBool(req.FormValue("rtraced")); err == nil { + data.Traced = exp + } + } + + completedMu.RLock() + data.Families = make([]string, 0, len(completedTraces)) + for fam := range completedTraces { + data.Families = append(data.Families, fam) + } + completedMu.RUnlock() + sort.Strings(data.Families) + + // We are careful here to minimize the time spent locking activeMu, + // since that lock is required every time an RPC starts and finishes. + data.ActiveTraceCount = make(map[string]int, len(data.Families)) + activeMu.RLock() + for fam, s := range activeTraces { + data.ActiveTraceCount[fam] = s.Len() + } + activeMu.RUnlock() + + var ok bool + data.Family, data.Bucket, ok = parseArgs(req) + switch { + case !ok: + // No-op + case data.Bucket == -1: + data.Active = true + n := data.ActiveTraceCount[data.Family] + data.Traces = getActiveTraces(data.Family) + if len(data.Traces) < n { + data.Total = n + } + case data.Bucket < bucketsPerFamily: + if b := lookupBucket(data.Family, data.Bucket); b != nil { + data.Traces = b.Copy(data.Traced) + } + default: + if f := getFamily(data.Family, false); f != nil { + var obs timeseries.Observable + f.LatencyMu.RLock() + switch o := data.Bucket - bucketsPerFamily; o { + case 0: + obs = f.Latency.Minute() + data.HistogramWindow = "last minute" + case 1: + obs = f.Latency.Hour() + data.HistogramWindow = "last hour" + case 2: + obs = f.Latency.Total() + data.HistogramWindow = "all time" + } + f.LatencyMu.RUnlock() + if obs != nil { + data.Histogram = obs.(*histogram).html() + } + } + } + + if data.Traces != nil { + defer data.Traces.Free() + sort.Sort(data.Traces) + } + + completedMu.RLock() + defer completedMu.RUnlock() + if err := pageTmpl().ExecuteTemplate(w, "Page", data); err != nil { + log.Printf("net/trace: Failed executing template: %v", err) + } +} + +func parseArgs(req *http.Request) (fam string, b int, ok bool) { + if req == nil { + return "", 0, false + } + fam, bStr := req.FormValue("fam"), req.FormValue("b") + if fam == "" || bStr == "" { + return "", 0, false + } + b, err := strconv.Atoi(bStr) + if err != nil || b < -1 { + return "", 0, false + } + + return fam, b, true +} + +func lookupBucket(fam string, b int) *traceBucket { + f := getFamily(fam, false) + if f == nil || b < 0 || b >= len(f.Buckets) { + return nil + } + return f.Buckets[b] +} + +type contextKeyT string + +var contextKey = contextKeyT("golang.org/x/net/trace.Trace") + +// Trace represents an active request. +type Trace interface { + // LazyLog adds x to the event log. It will be evaluated each time the + // /debug/requests page is rendered. Any memory referenced by x will be + // pinned until the trace is finished and later discarded. + LazyLog(x fmt.Stringer, sensitive bool) + + // LazyPrintf evaluates its arguments with fmt.Sprintf each time the + // /debug/requests page is rendered. Any memory referenced by a will be + // pinned until the trace is finished and later discarded. + LazyPrintf(format string, a ...interface{}) + + // SetError declares that this trace resulted in an error. + SetError() + + // SetRecycler sets a recycler for the trace. + // f will be called for each event passed to LazyLog at a time when + // it is no longer required, whether while the trace is still active + // and the event is discarded, or when a completed trace is discarded. 
+ SetRecycler(f func(interface{})) + + // SetTraceInfo sets the trace info for the trace. + // This is currently unused. + SetTraceInfo(traceID, spanID uint64) + + // SetMaxEvents sets the maximum number of events that will be stored + // in the trace. This has no effect if any events have already been + // added to the trace. + SetMaxEvents(m int) + + // Finish declares that this trace is complete. + // The trace should not be used after calling this method. + Finish() +} + +type lazySprintf struct { + format string + a []interface{} +} + +func (l *lazySprintf) String() string { + return fmt.Sprintf(l.format, l.a...) +} + +// New returns a new Trace with the specified family and title. +func New(family, title string) Trace { + tr := newTrace() + tr.ref() + tr.Family, tr.Title = family, title + tr.Start = time.Now() + tr.maxEvents = maxEventsPerTrace + tr.events = tr.eventsBuf[:0] + + activeMu.RLock() + s := activeTraces[tr.Family] + activeMu.RUnlock() + if s == nil { + activeMu.Lock() + s = activeTraces[tr.Family] // check again + if s == nil { + s = new(traceSet) + activeTraces[tr.Family] = s + } + activeMu.Unlock() + } + s.Add(tr) + + // Trigger allocation of the completed trace structure for this family. + // This will cause the family to be present in the request page during + // the first trace of this family. We don't care about the return value, + // nor is there any need for this to run inline, so we execute it in its + // own goroutine, but only if the family isn't allocated yet. + completedMu.RLock() + if _, ok := completedTraces[tr.Family]; !ok { + go allocFamily(tr.Family) + } + completedMu.RUnlock() + + return tr +} + +func (tr *trace) Finish() { + elapsed := time.Now().Sub(tr.Start) + tr.mu.Lock() + tr.Elapsed = elapsed + tr.mu.Unlock() + + if DebugUseAfterFinish { + buf := make([]byte, 4<<10) // 4 KB should be enough + n := runtime.Stack(buf, false) + tr.finishStack = buf[:n] + } + + activeMu.RLock() + m := activeTraces[tr.Family] + activeMu.RUnlock() + m.Remove(tr) + + f := getFamily(tr.Family, true) + tr.mu.RLock() // protects tr fields in Cond.match calls + for _, b := range f.Buckets { + if b.Cond.match(tr) { + b.Add(tr) + } + } + tr.mu.RUnlock() + + // Add a sample of elapsed time as microseconds to the family's timeseries + h := new(histogram) + h.addMeasurement(elapsed.Nanoseconds() / 1e3) + f.LatencyMu.Lock() + f.Latency.Add(h) + f.LatencyMu.Unlock() + + tr.unref() // matches ref in New +} + +const ( + bucketsPerFamily = 9 + tracesPerBucket = 10 + maxActiveTraces = 20 // Maximum number of active traces to show. + maxEventsPerTrace = 10 + numHistogramBuckets = 38 +) + +var ( + // The active traces. + activeMu sync.RWMutex + activeTraces = make(map[string]*traceSet) // family -> traces + + // Families of completed traces. + completedMu sync.RWMutex + completedTraces = make(map[string]*family) // family -> traces +) + +type traceSet struct { + mu sync.RWMutex + m map[*trace]bool + + // We could avoid the entire map scan in FirstN by having a slice of all the traces + // ordered by start time, and an index into that from the trace struct, with a periodic + // repack of the slice after enough traces finish; we could also use a skip list or similar. + // However, that would shift some of the expense from /debug/requests time to RPC time, + // which is probably the wrong trade-off. 
+} + +func (ts *traceSet) Len() int { + ts.mu.RLock() + defer ts.mu.RUnlock() + return len(ts.m) +} + +func (ts *traceSet) Add(tr *trace) { + ts.mu.Lock() + if ts.m == nil { + ts.m = make(map[*trace]bool) + } + ts.m[tr] = true + ts.mu.Unlock() +} + +func (ts *traceSet) Remove(tr *trace) { + ts.mu.Lock() + delete(ts.m, tr) + ts.mu.Unlock() +} + +// FirstN returns the first n traces ordered by time. +func (ts *traceSet) FirstN(n int) traceList { + ts.mu.RLock() + defer ts.mu.RUnlock() + + if n > len(ts.m) { + n = len(ts.m) + } + trl := make(traceList, 0, n) + + // Fast path for when no selectivity is needed. + if n == len(ts.m) { + for tr := range ts.m { + tr.ref() + trl = append(trl, tr) + } + sort.Sort(trl) + return trl + } + + // Pick the oldest n traces. + // This is inefficient. See the comment in the traceSet struct. + for tr := range ts.m { + // Put the first n traces into trl in the order they occur. + // When we have n, sort trl, and thereafter maintain its order. + if len(trl) < n { + tr.ref() + trl = append(trl, tr) + if len(trl) == n { + // This is guaranteed to happen exactly once during this loop. + sort.Sort(trl) + } + continue + } + if tr.Start.After(trl[n-1].Start) { + continue + } + + // Find where to insert this one. + tr.ref() + i := sort.Search(n, func(i int) bool { return trl[i].Start.After(tr.Start) }) + trl[n-1].unref() + copy(trl[i+1:], trl[i:]) + trl[i] = tr + } + + return trl +} + +func getActiveTraces(fam string) traceList { + activeMu.RLock() + s := activeTraces[fam] + activeMu.RUnlock() + if s == nil { + return nil + } + return s.FirstN(maxActiveTraces) +} + +func getFamily(fam string, allocNew bool) *family { + completedMu.RLock() + f := completedTraces[fam] + completedMu.RUnlock() + if f == nil && allocNew { + f = allocFamily(fam) + } + return f +} + +func allocFamily(fam string) *family { + completedMu.Lock() + defer completedMu.Unlock() + f := completedTraces[fam] + if f == nil { + f = newFamily() + completedTraces[fam] = f + } + return f +} + +// family represents a set of trace buckets and associated latency information. +type family struct { + // traces may occur in multiple buckets. + Buckets [bucketsPerFamily]*traceBucket + + // latency time series + LatencyMu sync.RWMutex + Latency *timeseries.MinuteHourSeries +} + +func newFamily() *family { + return &family{ + Buckets: [bucketsPerFamily]*traceBucket{ + {Cond: minCond(0)}, + {Cond: minCond(50 * time.Millisecond)}, + {Cond: minCond(100 * time.Millisecond)}, + {Cond: minCond(200 * time.Millisecond)}, + {Cond: minCond(500 * time.Millisecond)}, + {Cond: minCond(1 * time.Second)}, + {Cond: minCond(10 * time.Second)}, + {Cond: minCond(100 * time.Second)}, + {Cond: errorCond{}}, + }, + Latency: timeseries.NewMinuteHourSeries(func() timeseries.Observable { return new(histogram) }), + } +} + +// traceBucket represents a size-capped bucket of historic traces, +// along with a condition for a trace to belong to the bucket. +type traceBucket struct { + Cond cond + + // Ring buffer implementation of a fixed-size FIFO queue. + mu sync.RWMutex + buf [tracesPerBucket]*trace + start int // < tracesPerBucket + length int // <= tracesPerBucket +} + +func (b *traceBucket) Add(tr *trace) { + b.mu.Lock() + defer b.mu.Unlock() + + i := b.start + b.length + if i >= tracesPerBucket { + i -= tracesPerBucket + } + if b.length == tracesPerBucket { + // "Remove" an element from the bucket. 
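+	// The ring is full, so i has wrapped around onto the oldest entry;
+	// release our reference to it and advance start past it.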
+ b.buf[i].unref() + b.start++ + if b.start == tracesPerBucket { + b.start = 0 + } + } + b.buf[i] = tr + if b.length < tracesPerBucket { + b.length++ + } + tr.ref() +} + +// Copy returns a copy of the traces in the bucket. +// If tracedOnly is true, only the traces with trace information will be returned. +// The logs will be ref'd before returning; the caller should call +// the Free method when it is done with them. +// TODO(dsymonds): keep track of traced requests in separate buckets. +func (b *traceBucket) Copy(tracedOnly bool) traceList { + b.mu.RLock() + defer b.mu.RUnlock() + + trl := make(traceList, 0, b.length) + for i, x := 0, b.start; i < b.length; i++ { + tr := b.buf[x] + if !tracedOnly || tr.spanID != 0 { + tr.ref() + trl = append(trl, tr) + } + x++ + if x == b.length { + x = 0 + } + } + return trl +} + +func (b *traceBucket) Empty() bool { + b.mu.RLock() + defer b.mu.RUnlock() + return b.length == 0 +} + +// cond represents a condition on a trace. +type cond interface { + match(t *trace) bool + String() string +} + +type minCond time.Duration + +func (m minCond) match(t *trace) bool { return t.Elapsed >= time.Duration(m) } +func (m minCond) String() string { return fmt.Sprintf("≥%gs", time.Duration(m).Seconds()) } + +type errorCond struct{} + +func (e errorCond) match(t *trace) bool { return t.IsError } +func (e errorCond) String() string { return "errors" } + +type traceList []*trace + +// Free calls unref on each element of the list. +func (trl traceList) Free() { + for _, t := range trl { + t.unref() + } +} + +// traceList may be sorted in reverse chronological order. +func (trl traceList) Len() int { return len(trl) } +func (trl traceList) Less(i, j int) bool { return trl[i].Start.After(trl[j].Start) } +func (trl traceList) Swap(i, j int) { trl[i], trl[j] = trl[j], trl[i] } + +// An event is a timestamped log entry in a trace. +type event struct { + When time.Time + Elapsed time.Duration // since previous event in trace + NewDay bool // whether this event is on a different day to the previous event + Recyclable bool // whether this event was passed via LazyLog + Sensitive bool // whether this event contains sensitive information + What interface{} // string or fmt.Stringer +} + +// WhenString returns a string representation of the elapsed time of the event. +// It will include the date if midnight was crossed. +func (e event) WhenString() string { + if e.NewDay { + return e.When.Format("2006/01/02 15:04:05.000000") + } + return e.When.Format("15:04:05.000000") +} + +// discarded represents a number of discarded events. +// It is stored as *discarded to make it easier to update in-place. +type discarded int + +func (d *discarded) String() string { + return fmt.Sprintf("(%d events discarded)", int(*d)) +} + +// trace represents an active or complete request, +// either sent or received by this program. +type trace struct { + // Family is the top-level grouping of traces to which this belongs. + Family string + + // Title is the title of this trace. + Title string + + // Start time of the this trace. + Start time.Time + + mu sync.RWMutex + events []event // Append-only sequence of events (modulo discards). + maxEvents int + recycler func(interface{}) + IsError bool // Whether this trace resulted in an error. + Elapsed time.Duration // Elapsed time for this trace, zero while active. + traceID uint64 // Trace information if non-zero. 
+ spanID uint64 + + refs int32 // how many buckets this is in + disc discarded // scratch space to avoid allocation + + finishStack []byte // where finish was called, if DebugUseAfterFinish is set + + eventsBuf [4]event // preallocated buffer in case we only log a few events +} + +func (tr *trace) reset() { + // Clear all but the mutex. Mutexes may not be copied, even when unlocked. + tr.Family = "" + tr.Title = "" + tr.Start = time.Time{} + + tr.mu.Lock() + tr.Elapsed = 0 + tr.traceID = 0 + tr.spanID = 0 + tr.IsError = false + tr.maxEvents = 0 + tr.events = nil + tr.recycler = nil + tr.mu.Unlock() + + tr.refs = 0 + tr.disc = 0 + tr.finishStack = nil + for i := range tr.eventsBuf { + tr.eventsBuf[i] = event{} + } +} + +// delta returns the elapsed time since the last event or the trace start, +// and whether it spans midnight. +// L >= tr.mu +func (tr *trace) delta(t time.Time) (time.Duration, bool) { + if len(tr.events) == 0 { + return t.Sub(tr.Start), false + } + prev := tr.events[len(tr.events)-1].When + return t.Sub(prev), prev.Day() != t.Day() +} + +func (tr *trace) addEvent(x interface{}, recyclable, sensitive bool) { + if DebugUseAfterFinish && tr.finishStack != nil { + buf := make([]byte, 4<<10) // 4 KB should be enough + n := runtime.Stack(buf, false) + log.Printf("net/trace: trace used after finish:\nFinished at:\n%s\nUsed at:\n%s", tr.finishStack, buf[:n]) + } + + /* + NOTE TO DEBUGGERS + + If you are here because your program panicked in this code, + it is almost definitely the fault of code using this package, + and very unlikely to be the fault of this code. + + The most likely scenario is that some code elsewhere is using + a trace.Trace after its Finish method is called. + You can temporarily set the DebugUseAfterFinish var + to help discover where that is; do not leave that var set, + since it makes this package much less efficient. + */ + + e := event{When: time.Now(), What: x, Recyclable: recyclable, Sensitive: sensitive} + tr.mu.Lock() + e.Elapsed, e.NewDay = tr.delta(e.When) + if len(tr.events) < tr.maxEvents { + tr.events = append(tr.events, e) + } else { + // Discard the middle events. + di := int((tr.maxEvents - 1) / 2) + if d, ok := tr.events[di].What.(*discarded); ok { + (*d)++ + } else { + // disc starts at two to count for the event it is replacing, + // plus the next one that we are about to drop. + tr.disc = 2 + if tr.recycler != nil && tr.events[di].Recyclable { + go tr.recycler(tr.events[di].What) + } + tr.events[di].What = &tr.disc + } + // The timestamp of the discarded meta-event should be + // the time of the last event it is representing. + tr.events[di].When = tr.events[di+1].When + + if tr.recycler != nil && tr.events[di+1].Recyclable { + go tr.recycler(tr.events[di+1].What) + } + copy(tr.events[di+1:], tr.events[di+2:]) + tr.events[tr.maxEvents-1] = e + } + tr.mu.Unlock() +} + +func (tr *trace) LazyLog(x fmt.Stringer, sensitive bool) { + tr.addEvent(x, true, sensitive) +} + +func (tr *trace) LazyPrintf(format string, a ...interface{}) { + tr.addEvent(&lazySprintf{format, a}, false, false) +} + +func (tr *trace) SetError() { + tr.mu.Lock() + tr.IsError = true + tr.mu.Unlock() +} + +func (tr *trace) SetRecycler(f func(interface{})) { + tr.mu.Lock() + tr.recycler = f + tr.mu.Unlock() +} + +func (tr *trace) SetTraceInfo(traceID, spanID uint64) { + tr.mu.Lock() + tr.traceID, tr.spanID = traceID, spanID + tr.mu.Unlock() +} + +func (tr *trace) SetMaxEvents(m int) { + tr.mu.Lock() + // Always keep at least three events: first, discarded count, last. 
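+	// (The limit only takes effect while no events have been logged, and must exceed 3.)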
+ if len(tr.events) == 0 && m > 3 { + tr.maxEvents = m + } + tr.mu.Unlock() +} + +func (tr *trace) ref() { + atomic.AddInt32(&tr.refs, 1) +} + +func (tr *trace) unref() { + if atomic.AddInt32(&tr.refs, -1) == 0 { + tr.mu.RLock() + if tr.recycler != nil { + // freeTrace clears tr, so we hold tr.recycler and tr.events here. + go func(f func(interface{}), es []event) { + for _, e := range es { + if e.Recyclable { + f(e.What) + } + } + }(tr.recycler, tr.events) + } + tr.mu.RUnlock() + + freeTrace(tr) + } +} + +func (tr *trace) When() string { + return tr.Start.Format("2006/01/02 15:04:05.000000") +} + +func (tr *trace) ElapsedTime() string { + tr.mu.RLock() + t := tr.Elapsed + tr.mu.RUnlock() + + if t == 0 { + // Active trace. + t = time.Since(tr.Start) + } + return fmt.Sprintf("%.6f", t.Seconds()) +} + +func (tr *trace) Events() []event { + tr.mu.RLock() + defer tr.mu.RUnlock() + return tr.events +} + +var traceFreeList = make(chan *trace, 1000) // TODO(dsymonds): Use sync.Pool? + +// newTrace returns a trace ready to use. +func newTrace() *trace { + select { + case tr := <-traceFreeList: + return tr + default: + return new(trace) + } +} + +// freeTrace adds tr to traceFreeList if there's room. +// This is non-blocking. +func freeTrace(tr *trace) { + if DebugUseAfterFinish { + return // never reuse + } + tr.reset() + select { + case traceFreeList <- tr: + default: + } +} + +func elapsed(d time.Duration) string { + b := []byte(fmt.Sprintf("%.6f", d.Seconds())) + + // For subsecond durations, blank all zeros before decimal point, + // and all zeros between the decimal point and the first non-zero digit. + if d < time.Second { + dot := bytes.IndexByte(b, '.') + for i := 0; i < dot; i++ { + b[i] = ' ' + } + for i := dot + 1; i < len(b); i++ { + if b[i] == '0' { + b[i] = ' ' + } else { + break + } + } + } + + return string(b) +} + +var pageTmplCache *template.Template +var pageTmplOnce sync.Once + +func pageTmpl() *template.Template { + pageTmplOnce.Do(func() { + pageTmplCache = template.Must(template.New("Page").Funcs(template.FuncMap{ + "elapsed": elapsed, + "add": func(a, b int) int { return a + b }, + }).Parse(pageHTML)) + }) + return pageTmplCache +} + +const pageHTML = ` +{{template "Prolog" .}} +{{template "StatusTable" .}} +{{template "Epilog" .}} + +{{define "Prolog"}} + + + /debug/requests + + + + +

/debug/requests

+{{end}} {{/* end of Prolog */}} + +{{define "StatusTable"}} + + {{range $fam := .Families}} + + + + {{$n := index $.ActiveTraceCount $fam}} + + + {{$f := index $.CompletedTraces $fam}} + {{range $i, $b := $f.Buckets}} + {{$empty := $b.Empty}} + + {{end}} + + {{$nb := len $f.Buckets}} + + + + + + {{end}} +
{{$fam}} + {{if $n}}{{end}} + [{{$n}} active] + {{if $n}}{{end}} + + {{if not $empty}}{{end}} + [{{.Cond}}] + {{if not $empty}}{{end}} + + [minute] + + [hour] + + [total] +
+{{end}} {{/* end of StatusTable */}} + +{{define "Epilog"}} +{{if $.Traces}} +
+

Family: {{$.Family}}

+ +{{if or $.Expanded $.Traced}} + [Normal/Summary] +{{else}} + [Normal/Summary] +{{end}} + +{{if or (not $.Expanded) $.Traced}} + [Normal/Expanded] +{{else}} + [Normal/Expanded] +{{end}} + +{{if not $.Active}} + {{if or $.Expanded (not $.Traced)}} + [Traced/Summary] + {{else}} + [Traced/Summary] + {{end}} + {{if or (not $.Expanded) (not $.Traced)}} + [Traced/Expanded] + {{else}} + [Traced/Expanded] + {{end}} +{{end}} + +{{if $.Total}} +

Showing {{len $.Traces}} of {{$.Total}} traces.

+{{end}} + + + + + {{range $tr := $.Traces}} + + + + + {{/* TODO: include traceID/spanID */}} + + {{if $.Expanded}} + {{range $tr.Events}} + + + + + + {{end}} + {{end}} + {{end}} +
+ {{if $.Active}}Active{{else}}Completed{{end}} Requests +
+When
+Elapsed (s)
+{{$tr.When}}
+{{$tr.ElapsedTime}}
+{{$tr.Title}}
+{{.WhenString}}
+{{elapsed .Elapsed}}
+{{if or $.ShowSensitive (not .Sensitive)}}... {{.What}}{{else}}[redacted]{{end}}
+{{end}} {{/* if $.Traces */}} + +{{if $.Histogram}} +

Latency (µs) of {{$.Family}} over {{$.HistogramWindow}}

+{{$.Histogram}} +{{end}} {{/* if $.Histogram */}} + + + +{{end}} {{/* end of Epilog */}} +` diff --git a/vendor/golang.org/x/sync/semaphore/semaphore.go b/vendor/golang.org/x/sync/semaphore/semaphore.go new file mode 100644 index 00000000..30f632c5 --- /dev/null +++ b/vendor/golang.org/x/sync/semaphore/semaphore.go @@ -0,0 +1,136 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package semaphore provides a weighted semaphore implementation. +package semaphore // import "golang.org/x/sync/semaphore" + +import ( + "container/list" + "context" + "sync" +) + +type waiter struct { + n int64 + ready chan<- struct{} // Closed when semaphore acquired. +} + +// NewWeighted creates a new weighted semaphore with the given +// maximum combined weight for concurrent access. +func NewWeighted(n int64) *Weighted { + w := &Weighted{size: n} + return w +} + +// Weighted provides a way to bound concurrent access to a resource. +// The callers can request access with a given weight. +type Weighted struct { + size int64 + cur int64 + mu sync.Mutex + waiters list.List +} + +// Acquire acquires the semaphore with a weight of n, blocking until resources +// are available or ctx is done. On success, returns nil. On failure, returns +// ctx.Err() and leaves the semaphore unchanged. +// +// If ctx is already done, Acquire may still succeed without blocking. +func (s *Weighted) Acquire(ctx context.Context, n int64) error { + s.mu.Lock() + if s.size-s.cur >= n && s.waiters.Len() == 0 { + s.cur += n + s.mu.Unlock() + return nil + } + + if n > s.size { + // Don't make other Acquire calls block on one that's doomed to fail. + s.mu.Unlock() + <-ctx.Done() + return ctx.Err() + } + + ready := make(chan struct{}) + w := waiter{n: n, ready: ready} + elem := s.waiters.PushBack(w) + s.mu.Unlock() + + select { + case <-ctx.Done(): + err := ctx.Err() + s.mu.Lock() + select { + case <-ready: + // Acquired the semaphore after we were canceled. Rather than trying to + // fix up the queue, just pretend we didn't notice the cancelation. + err = nil + default: + isFront := s.waiters.Front() == elem + s.waiters.Remove(elem) + // If we're at the front and there're extra tokens left, notify other waiters. + if isFront && s.size > s.cur { + s.notifyWaiters() + } + } + s.mu.Unlock() + return err + + case <-ready: + return nil + } +} + +// TryAcquire acquires the semaphore with a weight of n without blocking. +// On success, returns true. On failure, returns false and leaves the semaphore unchanged. +func (s *Weighted) TryAcquire(n int64) bool { + s.mu.Lock() + success := s.size-s.cur >= n && s.waiters.Len() == 0 + if success { + s.cur += n + } + s.mu.Unlock() + return success +} + +// Release releases the semaphore with a weight of n. +func (s *Weighted) Release(n int64) { + s.mu.Lock() + s.cur -= n + if s.cur < 0 { + s.mu.Unlock() + panic("semaphore: released more than held") + } + s.notifyWaiters() + s.mu.Unlock() +} + +func (s *Weighted) notifyWaiters() { + for { + next := s.waiters.Front() + if next == nil { + break // No more waiters blocked. + } + + w := next.Value.(waiter) + if s.size-s.cur < w.n { + // Not enough tokens for the next waiter. We could keep going (to try to + // find a waiter with a smaller request), but under load that could cause + // starvation for large requests; instead, we leave all remaining waiters + // blocked. 
+ // + // Consider a semaphore used as a read-write lock, with N tokens, N + // readers, and one writer. Each reader can Acquire(1) to obtain a read + // lock. The writer can Acquire(N) to obtain a write lock, excluding all + // of the readers. If we allow the readers to jump ahead in the queue, + // the writer will starve — there is always one token available for every + // reader. + break + } + + s.cur += w.n + s.waiters.Remove(next) + close(w.ready) + } +} diff --git a/vendor/google.golang.org/grpc/.travis.yml b/vendor/google.golang.org/grpc/.travis.yml new file mode 100644 index 00000000..a11e8cbc --- /dev/null +++ b/vendor/google.golang.org/grpc/.travis.yml @@ -0,0 +1,42 @@ +language: go + +matrix: + include: + - go: 1.13.x + env: VET=1 GO111MODULE=on + - go: 1.13.x + env: RACE=1 GO111MODULE=on + - go: 1.13.x + env: RUN386=1 + - go: 1.13.x + env: GRPC_GO_RETRY=on + - go: 1.13.x + env: TESTEXTRAS=1 + - go: 1.12.x + env: GO111MODULE=on + - go: 1.11.x + env: GO111MODULE=on + - go: 1.9.x + env: GAE=1 + +go_import_path: google.golang.org/grpc + +before_install: + - if [[ "${GO111MODULE}" = "on" ]]; then mkdir "${HOME}/go"; export GOPATH="${HOME}/go"; fi + - if [[ -n "${RUN386}" ]]; then export GOARCH=386; fi + - if [[ "${TRAVIS_EVENT_TYPE}" = "cron" && -z "${RUN386}" ]]; then RACE=1; fi + - if [[ "${TRAVIS_EVENT_TYPE}" != "cron" ]]; then export VET_SKIP_PROTO=1; fi + +install: + - try3() { eval "$*" || eval "$*" || eval "$*"; } + - try3 'if [[ "${GO111MODULE}" = "on" ]]; then go mod download; else make testdeps; fi' + - if [[ -n "${GAE}" ]]; then source ./install_gae.sh; make testappenginedeps; fi + - if [[ -n "${VET}" ]]; then ./vet.sh -install; fi + +script: + - set -e + - if [[ -n "${TESTEXTRAS}" ]]; then examples/examples_test.sh; interop/interop_test.sh; make testsubmodule; exit 0; fi + - if [[ -n "${VET}" ]]; then ./vet.sh; fi + - if [[ -n "${GAE}" ]]; then make testappengine; exit 0; fi + - if [[ -n "${RACE}" ]]; then make testrace; exit 0; fi + - make test diff --git a/vendor/google.golang.org/grpc/CODE-OF-CONDUCT.md b/vendor/google.golang.org/grpc/CODE-OF-CONDUCT.md new file mode 100644 index 00000000..9d4213eb --- /dev/null +++ b/vendor/google.golang.org/grpc/CODE-OF-CONDUCT.md @@ -0,0 +1,3 @@ +## Community Code of Conduct + +gRPC follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md). diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md new file mode 100644 index 00000000..4f1567e2 --- /dev/null +++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md @@ -0,0 +1,62 @@ +# How to contribute + +We definitely welcome your patches and contributions to gRPC! Please read the gRPC +organization's [governance rules](https://github.com/grpc/grpc-community/blob/master/governance.md) +and [contribution guidelines](https://github.com/grpc/grpc-community/blob/master/CONTRIBUTING.md) before proceeding. + +If you are new to github, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/) + +## Legal requirements + +In order to protect both you and ourselves, you will need to sign the +[Contributor License Agreement](https://identity.linuxfoundation.org/projects/cncf). + +## Guidelines for Pull Requests +How to get your contributions merged smoothly and quickly. + +- Create **small PRs** that are narrowly focused on **addressing a single + concern**. 
We often receive PRs that try to fix several things at
+  a time; when only one fix is considered acceptable, nothing gets merged, and
+  both the author's and the reviewers' time is wasted. Create separate PRs to
+  address separate concerns and everyone will be happy.
+
+- The grpc package should only depend on standard Go packages and a small number
+  of exceptions. If your contribution introduces new dependencies which are NOT
+  in the [list](https://godoc.org/google.golang.org/grpc?imports), you need a
+  discussion with gRPC-Go authors and consultants.
+
+- For speculative changes, consider opening an issue and discussing it first. If
+  you are suggesting a behavioral or API change, consider starting with a [gRFC
+  proposal](https://github.com/grpc/proposal).
+
+- Provide a good **PR description** as a record of **what** change is being made
+  and **why** it was made. Link to a GitHub issue if it exists.
+
+- Don't fix code style and formatting unless you are already changing that line
+  to address an issue. PRs with irrelevant changes won't be merged. If you do
+  want to fix formatting or style, do that in a separate PR.
+
+- Unless your PR is trivial, you should expect there will be reviewer comments
+  that you'll need to address before merging. We expect you to be reasonably
+  responsive to those comments, otherwise the PR will be closed after 2-3 weeks
+  of inactivity.
+
+- Maintain **clean commit history** and use **meaningful commit messages**. PRs
+  with messy commit history are difficult to review and won't be merged. Use
+  `rebase -i upstream/master` to curate your commit history and/or to bring in
+  latest changes from master (but avoid rebasing in the middle of a code
+  review).
+
+- Keep your PR up to date with upstream/master (if there are merge conflicts, we
+  can't really merge your change).
+
+- **All tests need to be passing** before your change can be merged. We
+  recommend you **run tests locally** before creating your PR to catch breakages
+  early on.
+  - `make all` to test everything, OR
+  - `make vet` to catch vet errors
+  - `make test` to run the tests
+  - `make testrace` to run tests in race mode
+  - optional `make testappengine` to run tests with appengine
+
+- Exceptions to the rules can be made if there's a compelling reason for doing so.
diff --git a/vendor/google.golang.org/grpc/GOVERNANCE.md b/vendor/google.golang.org/grpc/GOVERNANCE.md
new file mode 100644
index 00000000..d6ff2674
--- /dev/null
+++ b/vendor/google.golang.org/grpc/GOVERNANCE.md
@@ -0,0 +1 @@
+This repository is governed by the gRPC organization's [governance rules](https://github.com/grpc/grpc-community/blob/master/governance.md).
diff --git a/vendor/google.golang.org/grpc/MAINTAINERS.md b/vendor/google.golang.org/grpc/MAINTAINERS.md
new file mode 100644
index 00000000..093c82b3
--- /dev/null
+++ b/vendor/google.golang.org/grpc/MAINTAINERS.md
@@ -0,0 +1,27 @@
+This page lists all active maintainers of this repository. If you were a
+maintainer and would like to add your name to the Emeritus list, please send us a
+PR.
+
+See [GOVERNANCE.md](https://github.com/grpc/grpc-community/blob/master/governance.md)
+for governance guidelines and how to become a maintainer.
+See [CONTRIBUTING.md](https://github.com/grpc/grpc-community/blob/master/CONTRIBUTING.md)
+for general contribution guidelines.
+ +## Maintainers (in alphabetical order) +- [canguler](https://github.com/canguler), Google LLC +- [cesarghali](https://github.com/cesarghali), Google LLC +- [dfawley](https://github.com/dfawley), Google LLC +- [easwars](https://github.com/easwars), Google LLC +- [jadekler](https://github.com/jadekler), Google LLC +- [menghanl](https://github.com/menghanl), Google LLC +- [srini100](https://github.com/srini100), Google LLC + +## Emeritus Maintainers (in alphabetical order) +- [adelez](https://github.com/adelez), Google LLC +- [iamqizhao](https://github.com/iamqizhao), Google LLC +- [jtattermusch](https://github.com/jtattermusch), Google LLC +- [lyuxuan](https://github.com/lyuxuan), Google LLC +- [makmukhi](https://github.com/makmukhi), Google LLC +- [matt-kwong](https://github.com/matt-kwong), Google LLC +- [nicolasnoble](https://github.com/nicolasnoble), Google LLC +- [yongni](https://github.com/yongni), Google LLC diff --git a/vendor/google.golang.org/grpc/Makefile b/vendor/google.golang.org/grpc/Makefile new file mode 100644 index 00000000..410f7d56 --- /dev/null +++ b/vendor/google.golang.org/grpc/Makefile @@ -0,0 +1,63 @@ +all: vet test testrace + +build: deps + go build google.golang.org/grpc/... + +clean: + go clean -i google.golang.org/grpc/... + +deps: + go get -d -v google.golang.org/grpc/... + +proto: + @ if ! which protoc > /dev/null; then \ + echo "error: protoc not installed" >&2; \ + exit 1; \ + fi + go generate google.golang.org/grpc/... + +test: testdeps + go test -cpu 1,4 -timeout 7m google.golang.org/grpc/... + +testsubmodule: testdeps + cd security/advancedtls && go test -cpu 1,4 -timeout 7m google.golang.org/grpc/security/advancedtls/... + +testappengine: testappenginedeps + goapp test -cpu 1,4 -timeout 7m google.golang.org/grpc/... + +testappenginedeps: + goapp get -d -v -t -tags 'appengine appenginevm' google.golang.org/grpc/... + +testdeps: + go get -d -v -t google.golang.org/grpc/... + +testrace: testdeps + go test -race -cpu 1,4 -timeout 7m google.golang.org/grpc/... + +updatedeps: + go get -d -v -u -f google.golang.org/grpc/... + +updatetestdeps: + go get -d -v -t -u -f google.golang.org/grpc/... + +vet: vetdeps + ./vet.sh + +vetdeps: + ./vet.sh -install + +.PHONY: \ + all \ + build \ + clean \ + deps \ + proto \ + test \ + testappengine \ + testappenginedeps \ + testdeps \ + testrace \ + updatedeps \ + updatetestdeps \ + vet \ + vetdeps diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md new file mode 100644 index 00000000..800e7bd4 --- /dev/null +++ b/vendor/google.golang.org/grpc/README.md @@ -0,0 +1,141 @@ +# gRPC-Go + +[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) +[![GoDoc](https://godoc.org/google.golang.org/grpc?status.svg)](https://godoc.org/google.golang.org/grpc) +[![GoReportCard](https://goreportcard.com/badge/grpc/grpc-go)](https://goreportcard.com/report/github.com/grpc/grpc-go) + +The Go implementation of [gRPC](https://grpc.io/): A high performance, open +source, general RPC framework that puts mobile and HTTP/2 first. For more +information see the [gRPC Quick Start: +Go](https://grpc.io/docs/quickstart/go.html) guide. + +Installation +------------ + +To install this package, you need to install Go and setup your Go workspace on +your computer. 
The simplest way to install the library is to run: + +``` +$ go get -u google.golang.org/grpc +``` + +With Go module support (Go 1.11+), simply `import "google.golang.org/grpc"` in +your source code and `go [build|run|test]` will automatically download the +necessary dependencies ([Go modules +ref](https://github.com/golang/go/wiki/Modules)). + +If you are trying to access grpc-go from within China, please see the +[FAQ](#FAQ) below. + +Prerequisites +------------- +gRPC-Go requires Go 1.9 or later. + +Documentation +------------- +- See [godoc](https://godoc.org/google.golang.org/grpc) for package and API + descriptions. +- Documentation on specific topics can be found in the [Documentation + directory](Documentation/). +- Examples can be found in the [examples directory](examples/). + +Performance +----------- +Performance benchmark data for grpc-go and other languages is maintained in +[this +dashboard](https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5652536396611584&widget=490377658&container=1286539696). + +Status +------ +General Availability [Google Cloud Platform Launch +Stages](https://cloud.google.com/terms/launch-stages). + +FAQ +--- + +#### I/O Timeout Errors + +The `golang.org` domain may be blocked from some countries. `go get` usually +produces an error like the following when this happens: + +``` +$ go get -u google.golang.org/grpc +package google.golang.org/grpc: unrecognized import path "google.golang.org/grpc" (https fetch: Get https://google.golang.org/grpc?go-get=1: dial tcp 216.239.37.1:443: i/o timeout) +``` + +To build Go code, there are several options: + +- Set up a VPN and access google.golang.org through that. + +- Without Go module support: `git clone` the repo manually: + + ``` + git clone https://github.com/grpc/grpc-go.git $GOPATH/src/google.golang.org/grpc + ``` + + You will need to do the same for all of grpc's dependencies in `golang.org`, + e.g. `golang.org/x/net`. + +- With Go module support: it is possible to use the `replace` feature of `go + mod` to create aliases for golang.org packages. In your project's directory: + + ``` + go mod edit -replace=google.golang.org/grpc=github.com/grpc/grpc-go@latest + go mod tidy + go mod vendor + go build -mod=vendor + ``` + + Again, this will need to be done for all transitive dependencies hosted on + golang.org as well. Please refer to [this + issue](https://github.com/golang/go/issues/28652) in the golang repo regarding + this concern. + +#### Compiling error, undefined: grpc.SupportPackageIsVersion + +##### If you are using Go modules: + +Please ensure your gRPC-Go version is `require`d at the appropriate version in +the same module containing the generated `.pb.go` files. For example, +`SupportPackageIsVersion6` needs `v1.27.0`, so in your `go.mod` file: + +``` +module + +require ( + google.golang.org/grpc v1.27.0 +) +``` + +##### If you are *not* using Go modules: + +Please update proto package, gRPC package and rebuild the proto files: + - `go get -u github.com/golang/protobuf/{proto,protoc-gen-go}` + - `go get -u google.golang.org/grpc` + - `protoc --go_out=plugins=grpc:. *.proto` + +#### How to turn on logging + +The default logger is controlled by the environment variables. Turn everything +on by setting: + +``` +GRPC_GO_LOG_VERBOSITY_LEVEL=99 GRPC_GO_LOG_SEVERITY_LEVEL=info +``` + +#### The RPC failed with error `"code = Unavailable desc = transport is closing"` + +This error means the connection the RPC is using was closed, and there are many +possible reasons, including: + 1. 
mis-configured transport credentials, connection failed on handshaking + 1. bytes disrupted, possibly by a proxy in between + 1. server shutdown + 1. Keepalive parameters caused connection shutdown, for example if you have configured + your server to terminate connections regularly to [trigger DNS lookups](https://github.com/grpc/grpc-go/issues/3170#issuecomment-552517779). + If this is the case, you may want to increase your [MaxConnectionAgeGrace](https://pkg.go.dev/google.golang.org/grpc/keepalive?tab=doc#ServerParameters), + to allow longer RPC calls to finish. + +It can be tricky to debug this because the error happens on the client side but +the root cause of the connection being closed is on the server side. Turn on +logging on __both client and server__, and see if there are any transport +errors. diff --git a/vendor/google.golang.org/grpc/attributes/attributes.go b/vendor/google.golang.org/grpc/attributes/attributes.go new file mode 100644 index 00000000..68ffc620 --- /dev/null +++ b/vendor/google.golang.org/grpc/attributes/attributes.go @@ -0,0 +1,70 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package attributes defines a generic key/value store used in various gRPC +// components. +// +// All APIs in this package are EXPERIMENTAL. +package attributes + +import "fmt" + +// Attributes is an immutable struct for storing and retrieving generic +// key/value pairs. Keys must be hashable, and users should define their own +// types for keys. +type Attributes struct { + m map[interface{}]interface{} +} + +// New returns a new Attributes containing all key/value pairs in kvs. If the +// same key appears multiple times, the last value overwrites all previous +// values for that key. Panics if len(kvs) is not even. +func New(kvs ...interface{}) *Attributes { + if len(kvs)%2 != 0 { + panic(fmt.Sprintf("attributes.New called with unexpected input: len(kvs) = %v", len(kvs))) + } + a := &Attributes{m: make(map[interface{}]interface{}, len(kvs)/2)} + for i := 0; i < len(kvs)/2; i++ { + a.m[kvs[i*2]] = kvs[i*2+1] + } + return a +} + +// WithValues returns a new Attributes containing all key/value pairs in a and +// kvs. Panics if len(kvs) is not even. If the same key appears multiple +// times, the last value overwrites all previous values for that key. To +// remove an existing key, use a nil value. +func (a *Attributes) WithValues(kvs ...interface{}) *Attributes { + if len(kvs)%2 != 0 { + panic(fmt.Sprintf("attributes.New called with unexpected input: len(kvs) = %v", len(kvs))) + } + n := &Attributes{m: make(map[interface{}]interface{}, len(a.m)+len(kvs)/2)} + for k, v := range a.m { + n.m[k] = v + } + for i := 0; i < len(kvs)/2; i++ { + n.m[kvs[i*2]] = kvs[i*2+1] + } + return n +} + +// Value returns the value associated with these attributes for key, or nil if +// no value is associated with key. 
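Since lookups compare keys with plain interface equality, the advice above to define your own key types is what prevents collisions between packages. A minimal sketch with a hypothetical regionKey:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/attributes"
)

// An unexported key type ensures keys from other packages cannot collide.
type regionKey struct{}

func main() {
	a := attributes.New(regionKey{}, "eu-west-1")
	a = a.WithValues(regionKey{}, "us-east-1") // last value for a key wins

	if v := a.Value(regionKey{}); v != nil {
		fmt.Println("region:", v.(string)) // region: us-east-1
	}
}
```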
+func (a *Attributes) Value(key interface{}) interface{} { + return a.m[key] +} diff --git a/vendor/google.golang.org/grpc/backoff.go b/vendor/google.golang.org/grpc/backoff.go new file mode 100644 index 00000000..ff7c3ee6 --- /dev/null +++ b/vendor/google.golang.org/grpc/backoff.go @@ -0,0 +1,58 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// See internal/backoff package for the backoff implementation. This file is +// kept for the exported types and API backward compatibility. + +package grpc + +import ( + "time" + + "google.golang.org/grpc/backoff" +) + +// DefaultBackoffConfig uses values specified for backoff in +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +// +// Deprecated: use ConnectParams instead. Will be supported throughout 1.x. +var DefaultBackoffConfig = BackoffConfig{ + MaxDelay: 120 * time.Second, +} + +// BackoffConfig defines the parameters for the default gRPC backoff strategy. +// +// Deprecated: use ConnectParams instead. Will be supported throughout 1.x. +type BackoffConfig struct { + // MaxDelay is the upper bound of backoff delay. + MaxDelay time.Duration +} + +// ConnectParams defines the parameters for connecting and retrying. Users are +// encouraged to use this instead of the BackoffConfig type defined above. See +// here for more details: +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +// +// This API is EXPERIMENTAL. +type ConnectParams struct { + // Backoff specifies the configuration options for connection backoff. + Backoff backoff.Config + // MinConnectTimeout is the minimum amount of time we are willing to give a + // connection to complete. + MinConnectTimeout time.Duration +} diff --git a/vendor/google.golang.org/grpc/backoff/backoff.go b/vendor/google.golang.org/grpc/backoff/backoff.go new file mode 100644 index 00000000..0787d0b5 --- /dev/null +++ b/vendor/google.golang.org/grpc/backoff/backoff.go @@ -0,0 +1,52 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package backoff provides configuration options for backoff. +// +// More details can be found at: +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +// +// All APIs in this package are experimental. +package backoff + +import "time" + +// Config defines the configuration options for backoff. +type Config struct { + // BaseDelay is the amount of time to backoff after the first failure. 
+ BaseDelay time.Duration + // Multiplier is the factor with which to multiply backoffs after a + // failed retry. Should ideally be greater than 1. + Multiplier float64 + // Jitter is the factor with which backoffs are randomized. + Jitter float64 + // MaxDelay is the upper bound of backoff delay. + MaxDelay time.Duration +} + +// DefaultConfig is a backoff configuration with the default values specfied +// at https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +// +// This should be useful for callers who want to configure backoff with +// non-default values only for a subset of the options. +var DefaultConfig = Config{ + BaseDelay: 1.0 * time.Second, + Multiplier: 1.6, + Jitter: 0.2, + MaxDelay: 120 * time.Second, +} diff --git a/vendor/google.golang.org/grpc/balancer.go b/vendor/google.golang.org/grpc/balancer.go new file mode 100644 index 00000000..a8eb0f47 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer.go @@ -0,0 +1,391 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "net" + "sync" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/naming" + "google.golang.org/grpc/status" +) + +// Address represents a server the client connects to. +// +// Deprecated: please use package balancer. +type Address struct { + // Addr is the server address on which a connection will be established. + Addr string + // Metadata is the information associated with Addr, which may be used + // to make load balancing decision. + Metadata interface{} +} + +// BalancerConfig specifies the configurations for Balancer. +// +// Deprecated: please use package balancer. May be removed in a future 1.x release. +type BalancerConfig struct { + // DialCreds is the transport credential the Balancer implementation can + // use to dial to a remote load balancer server. The Balancer implementations + // can ignore this if it does not need to talk to another party securely. + DialCreds credentials.TransportCredentials + // Dialer is the custom dialer the Balancer implementation can use to dial + // to a remote load balancer server. The Balancer implementations + // can ignore this if it doesn't need to talk to remote balancer. + Dialer func(context.Context, string) (net.Conn, error) +} + +// BalancerGetOptions configures a Get call. +// +// Deprecated: please use package balancer. May be removed in a future 1.x release. +type BalancerGetOptions struct { + // BlockingWait specifies whether Get should block when there is no + // connected address. + BlockingWait bool +} + +// Balancer chooses network addresses for RPCs. +// +// Deprecated: please use package balancer. May be removed in a future 1.x release. +type Balancer interface { + // Start does the initialization work to bootstrap a Balancer. For example, + // this function may start the name resolution and watch the updates. It will + // be called when dialing. 
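ConnectParams above, paired with a backoff.Config from the new backoff package, is the documented replacement for DefaultBackoffConfig. A minimal dialing sketch; the target and delay values are illustrative, and grpc.WithConnectParams is the dial option that accepts it:

```go
package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/backoff"
)

func main() {
	// Start from the documented defaults and only tighten MaxDelay.
	bc := backoff.DefaultConfig
	bc.MaxDelay = 30 * time.Second

	conn, err := grpc.Dial("example.com:443",
		grpc.WithInsecure(), // illustration only; use real transport credentials
		grpc.WithConnectParams(grpc.ConnectParams{
			Backoff:           bc,
			MinConnectTimeout: 10 * time.Second,
		}),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```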
+ Start(target string, config BalancerConfig) error + // Up informs the Balancer that gRPC has a connection to the server at + // addr. It returns down which is called once the connection to addr gets + // lost or closed. + // TODO: It is not clear how to construct and take advantage of the meaningful error + // parameter for down. Need realistic demands to guide. + Up(addr Address) (down func(error)) + // Get gets the address of a server for the RPC corresponding to ctx. + // i) If it returns a connected address, gRPC internals issues the RPC on the + // connection to this address; + // ii) If it returns an address on which the connection is under construction + // (initiated by Notify(...)) but not connected, gRPC internals + // * fails RPC if the RPC is fail-fast and connection is in the TransientFailure or + // Shutdown state; + // or + // * issues RPC on the connection otherwise. + // iii) If it returns an address on which the connection does not exist, gRPC + // internals treats it as an error and will fail the corresponding RPC. + // + // Therefore, the following is the recommended rule when writing a custom Balancer. + // If opts.BlockingWait is true, it should return a connected address or + // block if there is no connected address. It should respect the timeout or + // cancellation of ctx when blocking. If opts.BlockingWait is false (for fail-fast + // RPCs), it should return an address it has notified via Notify(...) immediately + // instead of blocking. + // + // The function returns put which is called once the rpc has completed or failed. + // put can collect and report RPC stats to a remote load balancer. + // + // This function should only return the errors Balancer cannot recover by itself. + // gRPC internals will fail the RPC if an error is returned. + Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) + // Notify returns a channel that is used by gRPC internals to watch the addresses + // gRPC needs to connect. The addresses might be from a name resolver or remote + // load balancer. gRPC internals will compare it with the existing connected + // addresses. If the address Balancer notified is not in the existing connected + // addresses, gRPC starts to connect the address. If an address in the existing + // connected addresses is not in the notification list, the corresponding connection + // is shutdown gracefully. Otherwise, there are no operations to take. Note that + // the Address slice must be the full list of the Addresses which should be connected. + // It is NOT delta. + Notify() <-chan []Address + // Close shuts down the balancer. + Close() error +} + +// RoundRobin returns a Balancer that selects addresses round-robin. It uses r to watch +// the name resolution updates and updates the addresses available correspondingly. +// +// Deprecated: please use package balancer/roundrobin. May be removed in a future 1.x release. +func RoundRobin(r naming.Resolver) Balancer { + return &roundRobin{r: r} +} + +type addrInfo struct { + addr Address + connected bool +} + +type roundRobin struct { + r naming.Resolver + w naming.Watcher + addrs []*addrInfo // all the addresses the client should potentially connect + mu sync.Mutex + addrCh chan []Address // the channel to notify gRPC internals the list of addresses the client should connect to. + next int // index of the next address to return for Get() + waitCh chan struct{} // the channel to block when there is no connected address available + done bool // The Balancer is closed. 
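+
+ // Editor's note (illustrative only, not upstream code): historically this
+ // deprecated v1 Balancer was installed through a dial option, e.g.
+ //
+ //   conn, err := grpc.Dial(target, grpc.WithBalancer(grpc.RoundRobin(r)))
+ //
+ // where r is a naming.Resolver. New code should use package balancer.
+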
+} + +func (rr *roundRobin) watchAddrUpdates() error { + updates, err := rr.w.Next() + if err != nil { + grpclog.Warningf("grpc: the naming watcher stops working due to %v.", err) + return err + } + rr.mu.Lock() + defer rr.mu.Unlock() + for _, update := range updates { + addr := Address{ + Addr: update.Addr, + Metadata: update.Metadata, + } + switch update.Op { + case naming.Add: + var exist bool + for _, v := range rr.addrs { + if addr == v.addr { + exist = true + grpclog.Infoln("grpc: The name resolver wanted to add an existing address: ", addr) + break + } + } + if exist { + continue + } + rr.addrs = append(rr.addrs, &addrInfo{addr: addr}) + case naming.Delete: + for i, v := range rr.addrs { + if addr == v.addr { + copy(rr.addrs[i:], rr.addrs[i+1:]) + rr.addrs = rr.addrs[:len(rr.addrs)-1] + break + } + } + default: + grpclog.Errorln("Unknown update.Op ", update.Op) + } + } + // Make a copy of rr.addrs and write it onto rr.addrCh so that gRPC internals gets notified. + open := make([]Address, len(rr.addrs)) + for i, v := range rr.addrs { + open[i] = v.addr + } + if rr.done { + return ErrClientConnClosing + } + select { + case <-rr.addrCh: + default: + } + rr.addrCh <- open + return nil +} + +func (rr *roundRobin) Start(target string, config BalancerConfig) error { + rr.mu.Lock() + defer rr.mu.Unlock() + if rr.done { + return ErrClientConnClosing + } + if rr.r == nil { + // If there is no name resolver installed, it is not needed to + // do name resolution. In this case, target is added into rr.addrs + // as the only address available and rr.addrCh stays nil. + rr.addrs = append(rr.addrs, &addrInfo{addr: Address{Addr: target}}) + return nil + } + w, err := rr.r.Resolve(target) + if err != nil { + return err + } + rr.w = w + rr.addrCh = make(chan []Address, 1) + go func() { + for { + if err := rr.watchAddrUpdates(); err != nil { + return + } + } + }() + return nil +} + +// Up sets the connected state of addr and sends notification if there are pending +// Get() calls. +func (rr *roundRobin) Up(addr Address) func(error) { + rr.mu.Lock() + defer rr.mu.Unlock() + var cnt int + for _, a := range rr.addrs { + if a.addr == addr { + if a.connected { + return nil + } + a.connected = true + } + if a.connected { + cnt++ + } + } + // addr is only one which is connected. Notify the Get() callers who are blocking. + if cnt == 1 && rr.waitCh != nil { + close(rr.waitCh) + rr.waitCh = nil + } + return func(err error) { + rr.down(addr, err) + } +} + +// down unsets the connected state of addr. +func (rr *roundRobin) down(addr Address, err error) { + rr.mu.Lock() + defer rr.mu.Unlock() + for _, a := range rr.addrs { + if addr == a.addr { + a.connected = false + break + } + } +} + +// Get returns the next addr in the rotation. +func (rr *roundRobin) Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) { + var ch chan struct{} + rr.mu.Lock() + if rr.done { + rr.mu.Unlock() + err = ErrClientConnClosing + return + } + + if len(rr.addrs) > 0 { + if rr.next >= len(rr.addrs) { + rr.next = 0 + } + next := rr.next + for { + a := rr.addrs[next] + next = (next + 1) % len(rr.addrs) + if a.connected { + addr = a.addr + rr.next = next + rr.mu.Unlock() + return + } + if next == rr.next { + // Has iterated all the possible address but none is connected. + break + } + } + } + if !opts.BlockingWait { + if len(rr.addrs) == 0 { + rr.mu.Unlock() + err = status.Errorf(codes.Unavailable, "there is no address available") + return + } + // Returns the next addr on rr.addrs for failfast RPCs. 
+ addr = rr.addrs[rr.next].addr + rr.next++ + rr.mu.Unlock() + return + } + // Wait on rr.waitCh for non-failfast RPCs. + if rr.waitCh == nil { + ch = make(chan struct{}) + rr.waitCh = ch + } else { + ch = rr.waitCh + } + rr.mu.Unlock() + for { + select { + case <-ctx.Done(): + err = ctx.Err() + return + case <-ch: + rr.mu.Lock() + if rr.done { + rr.mu.Unlock() + err = ErrClientConnClosing + return + } + + if len(rr.addrs) > 0 { + if rr.next >= len(rr.addrs) { + rr.next = 0 + } + next := rr.next + for { + a := rr.addrs[next] + next = (next + 1) % len(rr.addrs) + if a.connected { + addr = a.addr + rr.next = next + rr.mu.Unlock() + return + } + if next == rr.next { + // Has iterated all the possible address but none is connected. + break + } + } + } + // The newly added addr got removed by Down() again. + if rr.waitCh == nil { + ch = make(chan struct{}) + rr.waitCh = ch + } else { + ch = rr.waitCh + } + rr.mu.Unlock() + } + } +} + +func (rr *roundRobin) Notify() <-chan []Address { + return rr.addrCh +} + +func (rr *roundRobin) Close() error { + rr.mu.Lock() + defer rr.mu.Unlock() + if rr.done { + return errBalancerClosed + } + rr.done = true + if rr.w != nil { + rr.w.Close() + } + if rr.waitCh != nil { + close(rr.waitCh) + rr.waitCh = nil + } + if rr.addrCh != nil { + close(rr.addrCh) + } + return nil +} + +// pickFirst is used to test multi-addresses in one addrConn in which all addresses share the same addrConn. +// It is a wrapper around roundRobin balancer. The logic of all methods works fine because balancer.Get() +// returns the only address Up by resetTransport(). +type pickFirst struct { + *roundRobin +} diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go new file mode 100644 index 00000000..9258858e --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -0,0 +1,454 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package balancer defines APIs for load balancing in gRPC. +// All APIs in this package are experimental. +package balancer + +import ( + "context" + "encoding/json" + "errors" + "net" + "strings" + + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +var ( + // m is a map from name to balancer builder. + m = make(map[string]Builder) +) + +// Register registers the balancer builder to the balancer map. b.Name +// (lowercased) will be used as the name registered with this builder. If the +// Builder implements ConfigParser, ParseConfig will be called when new service +// configs are received by the resolver, and the result will be provided to the +// Balancer in UpdateClientConnState. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. 
If multiple Balancers are
+// registered with the same name, the one registered last will take effect.
+func Register(b Builder) {
+ m[strings.ToLower(b.Name())] = b
+}
+
+// unregisterForTesting deletes the balancer with the given name from the
+// balancer map.
+//
+// This function is not thread-safe.
+func unregisterForTesting(name string) {
+ delete(m, name)
+}
+
+func init() {
+ internal.BalancerUnregister = unregisterForTesting
+}
+
+// Get returns the balancer builder registered with the given name.
+// Note that the comparison is done in a case-insensitive fashion.
+// If no builder is registered with the name, nil will be returned.
+func Get(name string) Builder {
+ if b, ok := m[strings.ToLower(name)]; ok {
+ return b
+ }
+ return nil
+}
+
+// SubConn represents a gRPC sub connection.
+// Each sub connection contains a list of addresses. gRPC will
+// try to connect to them (in sequence), and stop trying the
+// remainder once one connection is successful.
+//
+// The reconnect backoff will be applied on the list, not a single address.
+// For example, try_on_all_addresses -> backoff -> try_on_all_addresses.
+//
+// All SubConns start in IDLE, and will not try to connect. To trigger
+// the connecting, Balancers must call Connect.
+// When the connection encounters an error, it will reconnect immediately.
+// When the connection becomes IDLE, it will not reconnect unless Connect is
+// called.
+//
+// This interface is to be implemented by gRPC. Users should not need a
+// brand new implementation of this interface. For situations like
+// testing, the new implementation should embed this interface. This allows
+// gRPC to add new methods to this interface.
+type SubConn interface {
+ // UpdateAddresses updates the addresses used in this SubConn.
+ // gRPC checks if the currently-connected address is still in the new list.
+ // If it's in the list, the connection will be kept.
+ // If it's not in the list, the connection will be gracefully closed, and
+ // a new connection will be created.
+ //
+ // This will trigger a state transition for the SubConn.
+ UpdateAddresses([]resolver.Address)
+ // Connect starts the connecting for this SubConn.
+ Connect()
+}
+
+// NewSubConnOptions contains options to create a new SubConn.
+type NewSubConnOptions struct {
+ // CredsBundle is the credentials bundle that will be used in the created
+ // SubConn. If it's nil, the original creds from grpc DialOptions will be
+ // used.
+ CredsBundle credentials.Bundle
+ // HealthCheckEnabled indicates whether the health check service should be
+ // enabled on this SubConn.
+ HealthCheckEnabled bool
+}
+
+// State contains the balancer's state relevant to the gRPC ClientConn.
+type State struct {
+ // State contains the connectivity state of the balancer, which is used to
+ // determine the state of the ClientConn.
+ ConnectivityState connectivity.State
+ // Picker is used to choose connections (SubConns) for RPCs.
+ Picker V2Picker
+}
+
+// ClientConn represents a gRPC ClientConn.
+//
+// This interface is to be implemented by gRPC. Users should not need a
+// brand new implementation of this interface. For situations like
+// testing, the new implementation should embed this interface. This allows
+// gRPC to add new methods to this interface.
+type ClientConn interface {
+ // NewSubConn is called by the balancer to create a new SubConn.
+ // It doesn't block and wait for the connections to be established.
+ // Behaviors of the SubConn can be controlled by options.
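+
+ // Editor's illustrative sketch (hypothetical names, not part of this
+ // file): a Builder such as the ones looked up by Get above is normally
+ // registered from an init function, under its lowercased Name:
+ //
+ //   func init() {
+ //       balancer.Register(&myBuilder{})
+ //   }
+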
+ NewSubConn([]resolver.Address, NewSubConnOptions) (SubConn, error) + // RemoveSubConn removes the SubConn from ClientConn. + // The SubConn will be shutdown. + RemoveSubConn(SubConn) + + // UpdateBalancerState is called by balancer to notify gRPC that some internal + // state in balancer has changed. + // + // gRPC will update the connectivity state of the ClientConn, and will call pick + // on the new picker to pick new SubConn. + // + // Deprecated: use UpdateState instead + UpdateBalancerState(s connectivity.State, p Picker) + + // UpdateState notifies gRPC that the balancer's internal state has + // changed. + // + // gRPC will update the connectivity state of the ClientConn, and will call pick + // on the new picker to pick new SubConns. + UpdateState(State) + + // ResolveNow is called by balancer to notify gRPC to do a name resolving. + ResolveNow(resolver.ResolveNowOptions) + + // Target returns the dial target for this ClientConn. + // + // Deprecated: Use the Target field in the BuildOptions instead. + Target() string +} + +// BuildOptions contains additional information for Build. +type BuildOptions struct { + // DialCreds is the transport credential the Balancer implementation can + // use to dial to a remote load balancer server. The Balancer implementations + // can ignore this if it does not need to talk to another party securely. + DialCreds credentials.TransportCredentials + // CredsBundle is the credentials bundle that the Balancer can use. + CredsBundle credentials.Bundle + // Dialer is the custom dialer the Balancer implementation can use to dial + // to a remote load balancer server. The Balancer implementations + // can ignore this if it doesn't need to talk to remote balancer. + Dialer func(context.Context, string) (net.Conn, error) + // ChannelzParentID is the entity parent's channelz unique identification number. + ChannelzParentID int64 + // Target contains the parsed address info of the dial target. It is the same resolver.Target as + // passed to the resolver. + // See the documentation for the resolver.Target type for details about what it contains. + Target resolver.Target +} + +// Builder creates a balancer. +type Builder interface { + // Build creates a new balancer with the ClientConn. + Build(cc ClientConn, opts BuildOptions) Balancer + // Name returns the name of balancers built by this builder. + // It will be used to pick balancers (for example in service config). + Name() string +} + +// ConfigParser parses load balancer configs. +type ConfigParser interface { + // ParseConfig parses the JSON load balancer config provided into an + // internal form or returns an error if the config is invalid. For future + // compatibility reasons, unknown fields in the config should be ignored. + ParseConfig(LoadBalancingConfigJSON json.RawMessage) (serviceconfig.LoadBalancingConfig, error) +} + +// PickInfo contains additional information for the Pick operation. +type PickInfo struct { + // FullMethodName is the method name that NewClientStream() is called + // with. The canonical format is /service/Method. + FullMethodName string + // Ctx is the RPC's context, and may contain relevant RPC-level information + // like the outgoing header metadata. + Ctx context.Context +} + +// DoneInfo contains additional information for done. +type DoneInfo struct { + // Err is the rpc error the RPC finished with. It could be nil. + Err error + // Trailer contains the metadata from the RPC's trailer, if present. 
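+
+ // Editor's illustrative sketch of the Builder contract documented above
+ // (hypothetical type, not part of upstream gRPC):
+ //
+ //   type myBuilder struct{}
+ //
+ //   func (myBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer {
+ //       return &myBalancer{cc: cc} // myBalancer is assumed to implement Balancer
+ //   }
+ //
+ //   func (myBuilder) Name() string { return "my_balancer" }
+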
+ Trailer metadata.MD
+ // BytesSent indicates if any bytes have been sent to the server.
+ BytesSent bool
+ // BytesReceived indicates if any bytes have been received from the server.
+ BytesReceived bool
+ // ServerLoad is the load received from the server. It's usually sent as part of
+ // trailing metadata.
+ //
+ // The only supported type now is *orca_v1.LoadReport.
+ ServerLoad interface{}
+}
+
+var (
+ // ErrNoSubConnAvailable indicates no SubConn is available for pick().
+ // gRPC will block the RPC until a new picker is available via UpdateBalancerState().
+ ErrNoSubConnAvailable = errors.New("no SubConn is available")
+ // ErrTransientFailure indicates all SubConns are in TransientFailure.
+ // WaitForReady RPCs will block, non-WaitForReady RPCs will fail.
+ ErrTransientFailure = TransientFailureError(errors.New("all SubConns are in TransientFailure"))
+)
+
+// Picker is used by gRPC to pick a SubConn to send an RPC.
+// Balancer is expected to generate a new picker from its snapshot every time its
+// internal state has changed.
+//
+// The pickers used by gRPC can be updated by ClientConn.UpdateBalancerState().
+//
+// Deprecated: use V2Picker instead
+type Picker interface {
+ // Pick returns the SubConn to be used to send the RPC.
+ // The returned SubConn must be one returned by NewSubConn().
+ //
+ // This function is expected to return:
+ // - a SubConn that is known to be READY;
+ // - ErrNoSubConnAvailable if no SubConn is available, but progress is being
+ // made (for example, some SubConn is in CONNECTING mode);
+ // - other errors if no active connecting is happening (for example, all SubConns
+ // are in TRANSIENT_FAILURE mode).
+ //
+ // If a SubConn is returned:
+ // - If it is READY, gRPC will send the RPC on it;
+ // - If it is not ready, or becomes not ready after it's returned, gRPC will
+ // block until UpdateBalancerState() is called and will call pick on the
+ // new picker. The done function returned from Pick(), if not nil, will be
+ // called with nil error, no bytes sent and no bytes received.
+ //
+ // If the returned error is not nil:
+ // - If the error is ErrNoSubConnAvailable, gRPC will block until UpdateBalancerState()
+ // is called to pick again;
+ // - If the error is ErrTransientFailure or implements IsTransientFailure()
+ // bool, returning true:
+ // - If the RPC is wait-for-ready, gRPC will block until UpdateBalancerState()
+ // is called to pick again;
+ // - Otherwise, RPC will fail with unavailable error.
+ // - Else (error is other non-nil error):
+ // - The RPC will fail with the error's status code, or Unknown if it is
+ // not a status error.
+ //
+ // The returned done() function will be called once the RPC has finished,
+ // with the final status of that RPC. If the SubConn returned is not a
+ // valid SubConn type, done may not be called. done may be nil if the balancer
+ // doesn't care about the RPC status.
+ Pick(ctx context.Context, info PickInfo) (conn SubConn, done func(DoneInfo), err error)
+}
+
+// PickResult contains information related to a connection chosen for an RPC.
+type PickResult struct {
+ // SubConn is the connection to use for this pick, if its state is Ready.
+ // If the state is not Ready, gRPC will block the RPC until a new Picker is
+ // provided by the balancer (using ClientConn.UpdateState). The SubConn
+ // must be one returned by ClientConn.NewSubConn.
+ SubConn SubConn
+
+ // Done is called when the RPC is completed. If the SubConn is not ready,
+ // this will be called with a nil parameter.
If the SubConn is not a valid + // type, Done may not be called. May be nil if the balancer does not wish + // to be notified when the RPC completes. + Done func(DoneInfo) +} + +type transientFailureError struct { + error +} + +func (e *transientFailureError) IsTransientFailure() bool { return true } + +// TransientFailureError wraps err in an error implementing +// IsTransientFailure() bool, returning true. +func TransientFailureError(err error) error { + return &transientFailureError{error: err} +} + +// V2Picker is used by gRPC to pick a SubConn to send an RPC. +// Balancer is expected to generate a new picker from its snapshot every time its +// internal state has changed. +// +// The pickers used by gRPC can be updated by ClientConn.UpdateBalancerState(). +type V2Picker interface { + // Pick returns the connection to use for this RPC and related information. + // + // Pick should not block. If the balancer needs to do I/O or any blocking + // or time-consuming work to service this call, it should return + // ErrNoSubConnAvailable, and the Pick call will be repeated by gRPC when + // the Picker is updated (using ClientConn.UpdateState). + // + // If an error is returned: + // + // - If the error is ErrNoSubConnAvailable, gRPC will block until a new + // Picker is provided by the balancer (using ClientConn.UpdateState). + // + // - If the error implements IsTransientFailure() bool, returning true, + // wait for ready RPCs will wait, but non-wait for ready RPCs will be + // terminated with this error's Error() string and status code + // Unavailable. + // + // - Any other errors terminate all RPCs with the code and message + // provided. If the error is not a status error, it will be converted by + // gRPC to a status error with code Unknown. + Pick(info PickInfo) (PickResult, error) +} + +// Balancer takes input from gRPC, manages SubConns, and collects and aggregates +// the connectivity states. +// +// It also generates and updates the Picker used by gRPC to pick SubConns for RPCs. +// +// HandleSubConnectionStateChange, HandleResolvedAddrs and Close are guaranteed +// to be called synchronously from the same goroutine. +// There's no guarantee on picker.Pick, it may be called anytime. +type Balancer interface { + // HandleSubConnStateChange is called by gRPC when the connectivity state + // of sc has changed. + // Balancer is expected to aggregate all the state of SubConn and report + // that back to gRPC. + // Balancer should also generate and update Pickers when its internal state has + // been changed by the new state. + // + // Deprecated: if V2Balancer is implemented by the Balancer, + // UpdateSubConnState will be called instead. + HandleSubConnStateChange(sc SubConn, state connectivity.State) + // HandleResolvedAddrs is called by gRPC to send updated resolved addresses to + // balancers. + // Balancer can create new SubConn or remove SubConn with the addresses. + // An empty address slice and a non-nil error will be passed if the resolver returns + // non-nil error to gRPC. + // + // Deprecated: if V2Balancer is implemented by the Balancer, + // UpdateClientConnState will be called instead. + HandleResolvedAddrs([]resolver.Address, error) + // Close closes the balancer. The balancer is not required to call + // ClientConn.RemoveSubConn for its existing SubConns. + Close() +} + +// SubConnState describes the state of a SubConn. +type SubConnState struct { + // ConnectivityState is the connectivity state of the SubConn. 
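+
+ // Editor's illustrative V2Picker sketch (hypothetical type, not part of
+ // this file), following the Pick contract documented above:
+ //
+ //   type singlePicker struct{ sc balancer.SubConn }
+ //
+ //   func (p singlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
+ //       if p.sc == nil {
+ //           return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
+ //       }
+ //       return balancer.PickResult{SubConn: p.sc}, nil
+ //   }
+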
+ ConnectivityState connectivity.State + // ConnectionError is set if the ConnectivityState is TransientFailure, + // describing the reason the SubConn failed. Otherwise, it is nil. + ConnectionError error +} + +// ClientConnState describes the state of a ClientConn relevant to the +// balancer. +type ClientConnState struct { + ResolverState resolver.State + // The parsed load balancing configuration returned by the builder's + // ParseConfig method, if implemented. + BalancerConfig serviceconfig.LoadBalancingConfig +} + +// ErrBadResolverState may be returned by UpdateClientConnState to indicate a +// problem with the provided name resolver data. +var ErrBadResolverState = errors.New("bad resolver state") + +// V2Balancer is defined for documentation purposes. If a Balancer also +// implements V2Balancer, its UpdateClientConnState method will be called +// instead of HandleResolvedAddrs and its UpdateSubConnState will be called +// instead of HandleSubConnStateChange. +type V2Balancer interface { + // UpdateClientConnState is called by gRPC when the state of the ClientConn + // changes. If the error returned is ErrBadResolverState, the ClientConn + // will begin calling ResolveNow on the active name resolver with + // exponential backoff until a subsequent call to UpdateClientConnState + // returns a nil error. Any other errors are currently ignored. + UpdateClientConnState(ClientConnState) error + // ResolverError is called by gRPC when the name resolver reports an error. + ResolverError(error) + // UpdateSubConnState is called by gRPC when the state of a SubConn + // changes. + UpdateSubConnState(SubConn, SubConnState) + // Close closes the balancer. The balancer is not required to call + // ClientConn.RemoveSubConn for its existing SubConns. + Close() +} + +// ConnectivityStateEvaluator takes the connectivity states of multiple SubConns +// and returns one aggregated connectivity state. +// +// It's not thread safe. +type ConnectivityStateEvaluator struct { + numReady uint64 // Number of addrConns in ready state. + numConnecting uint64 // Number of addrConns in connecting state. +} + +// RecordTransition records state change happening in subConn and based on that +// it evaluates what aggregated state should be. +// +// - If at least one SubConn in Ready, the aggregated state is Ready; +// - Else if at least one SubConn in Connecting, the aggregated state is Connecting; +// - Else the aggregated state is TransientFailure. +// +// Idle and Shutdown are not considered. +func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State { + // Update counters. + for idx, state := range []connectivity.State{oldState, newState} { + updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new. + switch state { + case connectivity.Ready: + cse.numReady += updateVal + case connectivity.Connecting: + cse.numConnecting += updateVal + } + } + + // Evaluate. + if cse.numReady > 0 { + return connectivity.Ready + } + if cse.numConnecting > 0 { + return connectivity.Connecting + } + return connectivity.TransientFailure +} diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go new file mode 100644 index 00000000..80559b80 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -0,0 +1,286 @@ +/* + * + * Copyright 2017 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package base + +import ( + "context" + "errors" + "fmt" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/resolver" +) + +type baseBuilder struct { + name string + pickerBuilder PickerBuilder + v2PickerBuilder V2PickerBuilder + config Config +} + +func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { + bal := &baseBalancer{ + cc: cc, + pickerBuilder: bb.pickerBuilder, + v2PickerBuilder: bb.v2PickerBuilder, + + subConns: make(map[resolver.Address]balancer.SubConn), + scStates: make(map[balancer.SubConn]connectivity.State), + csEvltr: &balancer.ConnectivityStateEvaluator{}, + config: bb.config, + } + // Initialize picker to a picker that always returns + // ErrNoSubConnAvailable, because when state of a SubConn changes, we + // may call UpdateState with this picker. + if bb.pickerBuilder != nil { + bal.picker = NewErrPicker(balancer.ErrNoSubConnAvailable) + } else { + bal.v2Picker = NewErrPickerV2(balancer.ErrNoSubConnAvailable) + } + return bal +} + +func (bb *baseBuilder) Name() string { + return bb.name +} + +var _ balancer.V2Balancer = (*baseBalancer)(nil) // Assert that we implement V2Balancer + +type baseBalancer struct { + cc balancer.ClientConn + pickerBuilder PickerBuilder + v2PickerBuilder V2PickerBuilder + + csEvltr *balancer.ConnectivityStateEvaluator + state connectivity.State + + subConns map[resolver.Address]balancer.SubConn + scStates map[balancer.SubConn]connectivity.State + picker balancer.Picker + v2Picker balancer.V2Picker + config Config + + resolverErr error // the last error reported by the resolver; cleared on successful resolution + connErr error // the last connection error; cleared upon leaving TransientFailure +} + +func (b *baseBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) { + panic("not implemented") +} + +func (b *baseBalancer) ResolverError(err error) { + b.resolverErr = err + if len(b.subConns) == 0 { + b.state = connectivity.TransientFailure + } + if b.state != connectivity.TransientFailure { + // The picker will not change since the balancer does not currently + // report an error. + return + } + b.regeneratePicker() + if b.picker != nil { + b.cc.UpdateBalancerState(b.state, b.picker) + } else { + b.cc.UpdateState(balancer.State{ + ConnectivityState: b.state, + Picker: b.v2Picker, + }) + } +} + +func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { + // TODO: handle s.ResolverState.Err (log if not nil) once implemented. + // TODO: handle s.ResolverState.ServiceConfig? + if grpclog.V(2) { + grpclog.Infoln("base.baseBalancer: got new ClientConn state: ", s) + } + // Successful resolution; clear resolver error and ensure we return nil. + b.resolverErr = nil + // addrsSet is the set converted from addrs, it's used for quick lookup of an address. 
+ addrsSet := make(map[resolver.Address]struct{})
+ for _, a := range s.ResolverState.Addresses {
+ addrsSet[a] = struct{}{}
+ if _, ok := b.subConns[a]; !ok {
+ // a is a new address (not existing in b.subConns).
+ sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{HealthCheckEnabled: b.config.HealthCheck})
+ if err != nil {
+ grpclog.Warningf("base.baseBalancer: failed to create new SubConn: %v", err)
+ continue
+ }
+ b.subConns[a] = sc
+ b.scStates[sc] = connectivity.Idle
+ sc.Connect()
+ }
+ }
+ for a, sc := range b.subConns {
+ // a was removed by the resolver.
+ if _, ok := addrsSet[a]; !ok {
+ b.cc.RemoveSubConn(sc)
+ delete(b.subConns, a)
+ // Keep the state of this sc in b.scStates until sc's state becomes Shutdown.
+ // The entry will be deleted in HandleSubConnStateChange.
+ }
+ }
+ // If the resolver state contains no addresses, return an error so ClientConn
+ // will trigger re-resolve. Also record this as a resolver error, so when
+ // the overall state turns transient failure, the error message will include
+ // the zero-address information.
+ if len(s.ResolverState.Addresses) == 0 {
+ b.ResolverError(errors.New("produced zero addresses"))
+ return balancer.ErrBadResolverState
+ }
+ return nil
+}
+
+// mergeErrors builds an error from the last connection error and the last
+// resolver error. Must only be called if b.state is TransientFailure.
+func (b *baseBalancer) mergeErrors() error {
+ // connErr must always be non-nil unless there are no SubConns, in which
+ // case resolverErr must be non-nil.
+ if b.connErr == nil {
+ return fmt.Errorf("last resolver error: %v", b.resolverErr)
+ }
+ if b.resolverErr == nil {
+ return fmt.Errorf("last connection error: %v", b.connErr)
+ }
+ return fmt.Errorf("last connection error: %v; last resolver error: %v", b.connErr, b.resolverErr)
+}
+
+// regeneratePicker takes a snapshot of the balancer, and generates a picker
+// from it. The picker is
+// - errPicker if the balancer is in TransientFailure,
+// - built by the pickerBuilder with all READY SubConns otherwise.
+func (b *baseBalancer) regeneratePicker() {
+ if b.state == connectivity.TransientFailure {
+ if b.pickerBuilder != nil {
+ b.picker = NewErrPicker(balancer.ErrTransientFailure)
+ } else {
+ b.v2Picker = NewErrPickerV2(balancer.TransientFailureError(b.mergeErrors()))
+ }
+ return
+ }
+ if b.pickerBuilder != nil {
+ readySCs := make(map[resolver.Address]balancer.SubConn)
+
+ // Filter out all ready SCs from the full subConn map.
+ for addr, sc := range b.subConns {
+ if st, ok := b.scStates[sc]; ok && st == connectivity.Ready {
+ readySCs[addr] = sc
+ }
+ }
+ b.picker = b.pickerBuilder.Build(readySCs)
+ } else {
+ readySCs := make(map[balancer.SubConn]SubConnInfo)
+
+ // Filter out all ready SCs from the full subConn map.
+ for addr, sc := range b.subConns { + if st, ok := b.scStates[sc]; ok && st == connectivity.Ready { + readySCs[sc] = SubConnInfo{Address: addr} + } + } + b.v2Picker = b.v2PickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs}) + } +} + +func (b *baseBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { + panic("not implemented") +} + +func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + s := state.ConnectivityState + if grpclog.V(2) { + grpclog.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s) + } + oldS, ok := b.scStates[sc] + if !ok { + if grpclog.V(2) { + grpclog.Infof("base.baseBalancer: got state changes for an unknown SubConn: %p, %v", sc, s) + } + return + } + if oldS == connectivity.TransientFailure && s == connectivity.Connecting { + // Once a subconn enters TRANSIENT_FAILURE, ignore subsequent + // CONNECTING transitions to prevent the aggregated state from being + // always CONNECTING when many backends exist but are all down. + return + } + b.scStates[sc] = s + switch s { + case connectivity.Idle: + sc.Connect() + case connectivity.Shutdown: + // When an address was removed by resolver, b called RemoveSubConn but + // kept the sc's state in scStates. Remove state for this sc here. + delete(b.scStates, sc) + case connectivity.TransientFailure: + // Save error to be reported via picker. + b.connErr = state.ConnectionError + } + + b.state = b.csEvltr.RecordTransition(oldS, s) + + // Regenerate picker when one of the following happens: + // - this sc entered or left ready + // - the aggregated state of balancer is TransientFailure + // (may need to update error message) + if (s == connectivity.Ready) != (oldS == connectivity.Ready) || + b.state == connectivity.TransientFailure { + b.regeneratePicker() + } + + if b.picker != nil { + b.cc.UpdateBalancerState(b.state, b.picker) + } else { + b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.v2Picker}) + } +} + +// Close is a nop because base balancer doesn't have internal state to clean up, +// and it doesn't need to call RemoveSubConn for the SubConns. +func (b *baseBalancer) Close() { +} + +// NewErrPicker returns a picker that always returns err on Pick(). +func NewErrPicker(err error) balancer.Picker { + return &errPicker{err: err} +} + +type errPicker struct { + err error // Pick() always returns this err. +} + +func (p *errPicker) Pick(context.Context, balancer.PickInfo) (balancer.SubConn, func(balancer.DoneInfo), error) { + return nil, nil, p.err +} + +// NewErrPickerV2 returns a V2Picker that always returns err on Pick(). +func NewErrPickerV2(err error) balancer.V2Picker { + return &errPickerV2{err: err} +} + +type errPickerV2 struct { + err error // Pick() always returns this err. +} + +func (p *errPickerV2) Pick(info balancer.PickInfo) (balancer.PickResult, error) { + return balancer.PickResult{}, p.err +} diff --git a/vendor/google.golang.org/grpc/balancer/base/base.go b/vendor/google.golang.org/grpc/balancer/base/base.go new file mode 100644 index 00000000..4192918b --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/base/base.go @@ -0,0 +1,93 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package base defines a balancer base that can be used to build balancers with +// different picking algorithms. +// +// The base balancer creates a new SubConn for each resolved address. The +// provided picker will only be notified about READY SubConns. +// +// This package is the base of round_robin balancer, its purpose is to be used +// to build round_robin like balancers with complex picking algorithms. +// Balancers with more complicated logic should try to implement a balancer +// builder from scratch. +// +// All APIs in this package are experimental. +package base + +import ( + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/resolver" +) + +// PickerBuilder creates balancer.Picker. +type PickerBuilder interface { + // Build takes a slice of ready SubConns, and returns a picker that will be + // used by gRPC to pick a SubConn. + Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker +} + +// V2PickerBuilder creates balancer.V2Picker. +type V2PickerBuilder interface { + // Build returns a picker that will be used by gRPC to pick a SubConn. + Build(info PickerBuildInfo) balancer.V2Picker +} + +// PickerBuildInfo contains information needed by the picker builder to +// construct a picker. +type PickerBuildInfo struct { + // ReadySCs is a map from all ready SubConns to the Addresses used to + // create them. + ReadySCs map[balancer.SubConn]SubConnInfo +} + +// SubConnInfo contains information about a SubConn created by the base +// balancer. +type SubConnInfo struct { + Address resolver.Address // the address used to create this SubConn +} + +// NewBalancerBuilder returns a balancer builder. The balancers +// built by this builder will use the picker builder to build pickers. +func NewBalancerBuilder(name string, pb PickerBuilder) balancer.Builder { + return NewBalancerBuilderWithConfig(name, pb, Config{}) +} + +// Config contains the config info about the base balancer builder. +type Config struct { + // HealthCheck indicates whether health checking should be enabled for this specific balancer. + HealthCheck bool +} + +// NewBalancerBuilderWithConfig returns a base balancer builder configured by the provided config. +func NewBalancerBuilderWithConfig(name string, pb PickerBuilder, config Config) balancer.Builder { + return &baseBuilder{ + name: name, + pickerBuilder: pb, + config: config, + } +} + +// NewBalancerBuilderV2 returns a base balancer builder configured by the provided config. +func NewBalancerBuilderV2(name string, pb V2PickerBuilder, config Config) balancer.Builder { + return &baseBuilder{ + name: name, + v2PickerBuilder: pb, + config: config, + } +} diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go new file mode 100644 index 00000000..d4d64550 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go @@ -0,0 +1,81 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package roundrobin defines a roundrobin balancer. Roundrobin balancer is +// installed as one of the default balancers in gRPC, users don't need to +// explicitly install this balancer. +package roundrobin + +import ( + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/grpcrand" +) + +// Name is the name of round_robin balancer. +const Name = "round_robin" + +// newBuilder creates a new roundrobin balancer builder. +func newBuilder() balancer.Builder { + return base.NewBalancerBuilderV2(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true}) +} + +func init() { + balancer.Register(newBuilder()) +} + +type rrPickerBuilder struct{} + +func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.V2Picker { + grpclog.Infof("roundrobinPicker: newPicker called with info: %v", info) + if len(info.ReadySCs) == 0 { + return base.NewErrPickerV2(balancer.ErrNoSubConnAvailable) + } + var scs []balancer.SubConn + for sc := range info.ReadySCs { + scs = append(scs, sc) + } + return &rrPicker{ + subConns: scs, + // Start at a random index, as the same RR balancer rebuilds a new + // picker when SubConn states change, and we don't want to apply excess + // load to the first server in the list. + next: grpcrand.Intn(len(scs)), + } +} + +type rrPicker struct { + // subConns is the snapshot of the roundrobin balancer when this picker was + // created. The slice is immutable. Each Get() will do a round robin + // selection from it and return the selected SubConn. + subConns []balancer.SubConn + + mu sync.Mutex + next int +} + +func (p *rrPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { + p.mu.Lock() + sc := p.subConns[p.next] + p.next = (p.next + 1) % len(p.subConns) + p.mu.Unlock() + return balancer.PickResult{SubConn: sc}, nil +} diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go new file mode 100644 index 00000000..f8667a23 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go @@ -0,0 +1,271 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ *
+ */
+
+package grpc
+
+import (
+ "fmt"
+ "sync"
+
+ "google.golang.org/grpc/balancer"
+ "google.golang.org/grpc/connectivity"
+ "google.golang.org/grpc/internal/buffer"
+ "google.golang.org/grpc/internal/channelz"
+ "google.golang.org/grpc/internal/grpcsync"
+ "google.golang.org/grpc/resolver"
+)
+
+// scStateUpdate contains the subConn and the new state it changed to.
+type scStateUpdate struct {
+ sc balancer.SubConn
+ state connectivity.State
+ err error
+}
+
+// ccBalancerWrapper is a wrapper on top of cc for balancers.
+// It implements the balancer.ClientConn interface.
+type ccBalancerWrapper struct {
+ cc *ClientConn
+ balancerMu sync.Mutex // synchronizes calls to the balancer
+ balancer balancer.Balancer
+ scBuffer *buffer.Unbounded
+ done *grpcsync.Event
+
+ mu sync.Mutex
+ subConns map[*acBalancerWrapper]struct{}
+}
+
+func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.BuildOptions) *ccBalancerWrapper {
+ ccb := &ccBalancerWrapper{
+ cc: cc,
+ scBuffer: buffer.NewUnbounded(),
+ done: grpcsync.NewEvent(),
+ subConns: make(map[*acBalancerWrapper]struct{}),
+ }
+ go ccb.watcher()
+ ccb.balancer = b.Build(ccb, bopts)
+ return ccb
+}
+
+// watcher runs balancer functions sequentially, so the balancer can be
+// implemented lock-free.
+func (ccb *ccBalancerWrapper) watcher() {
+ for {
+ select {
+ case t := <-ccb.scBuffer.Get():
+ ccb.scBuffer.Load()
+ if ccb.done.HasFired() {
+ break
+ }
+ ccb.balancerMu.Lock()
+ su := t.(*scStateUpdate)
+ if ub, ok := ccb.balancer.(balancer.V2Balancer); ok {
+ ub.UpdateSubConnState(su.sc, balancer.SubConnState{ConnectivityState: su.state, ConnectionError: su.err})
+ } else {
+ ccb.balancer.HandleSubConnStateChange(su.sc, su.state)
+ }
+ ccb.balancerMu.Unlock()
+ case <-ccb.done.Done():
+ }
+
+ if ccb.done.HasFired() {
+ ccb.balancer.Close()
+ ccb.mu.Lock()
+ scs := ccb.subConns
+ ccb.subConns = nil
+ ccb.mu.Unlock()
+ for acbw := range scs {
+ ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain)
+ }
+ ccb.UpdateState(balancer.State{ConnectivityState: connectivity.Connecting, Picker: nil})
+ return
+ }
+ }
+}
+
+func (ccb *ccBalancerWrapper) close() {
+ ccb.done.Fire()
+}
+
+func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) {
+ // When updating addresses for a SubConn, if the address in use is not in
+ // the new addresses, the old ac will be torn down and a new ac will be
+ // created. tearDown() generates a state change with Shutdown state, and we
+ // don't want the balancer to receive this state change. So before calling
+ // tearDown() on the old ac, ac.acbw (acWrapper) will be set to nil, and
+ // this function will be called with (nil, Shutdown). We don't need to
+ // call the balancer in this case.
+ if sc == nil { + return + } + ccb.scBuffer.Put(&scStateUpdate{ + sc: sc, + state: s, + err: err, + }) +} + +func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { + ccb.balancerMu.Lock() + defer ccb.balancerMu.Unlock() + if ub, ok := ccb.balancer.(balancer.V2Balancer); ok { + return ub.UpdateClientConnState(*ccs) + } + ccb.balancer.HandleResolvedAddrs(ccs.ResolverState.Addresses, nil) + return nil +} + +func (ccb *ccBalancerWrapper) resolverError(err error) { + if ub, ok := ccb.balancer.(balancer.V2Balancer); ok { + ccb.balancerMu.Lock() + ub.ResolverError(err) + ccb.balancerMu.Unlock() + } +} + +func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { + if len(addrs) <= 0 { + return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list") + } + ccb.mu.Lock() + defer ccb.mu.Unlock() + if ccb.subConns == nil { + return nil, fmt.Errorf("grpc: ClientConn balancer wrapper was closed") + } + ac, err := ccb.cc.newAddrConn(addrs, opts) + if err != nil { + return nil, err + } + acbw := &acBalancerWrapper{ac: ac} + acbw.ac.mu.Lock() + ac.acbw = acbw + acbw.ac.mu.Unlock() + ccb.subConns[acbw] = struct{}{} + return acbw, nil +} + +func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { + acbw, ok := sc.(*acBalancerWrapper) + if !ok { + return + } + ccb.mu.Lock() + defer ccb.mu.Unlock() + if ccb.subConns == nil { + return + } + delete(ccb.subConns, acbw) + ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) +} + +func (ccb *ccBalancerWrapper) UpdateBalancerState(s connectivity.State, p balancer.Picker) { + ccb.mu.Lock() + defer ccb.mu.Unlock() + if ccb.subConns == nil { + return + } + // Update picker before updating state. Even though the ordering here does + // not matter, it can lead to multiple calls of Pick in the common start-up + // case where we wait for ready and then perform an RPC. If the picker is + // updated later, we could call the "connecting" picker when the state is + // updated, and then call the "ready" picker after the picker gets updated. + ccb.cc.blockingpicker.updatePicker(p) + ccb.cc.csMgr.updateState(s) +} + +func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { + ccb.mu.Lock() + defer ccb.mu.Unlock() + if ccb.subConns == nil { + return + } + // Update picker before updating state. Even though the ordering here does + // not matter, it can lead to multiple calls of Pick in the common start-up + // case where we wait for ready and then perform an RPC. If the picker is + // updated later, we could call the "connecting" picker when the state is + // updated, and then call the "ready" picker after the picker gets updated. + ccb.cc.blockingpicker.updatePickerV2(s.Picker) + ccb.cc.csMgr.updateState(s.ConnectivityState) +} + +func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOptions) { + ccb.cc.resolveNow(o) +} + +func (ccb *ccBalancerWrapper) Target() string { + return ccb.cc.target +} + +// acBalancerWrapper is a wrapper on top of ac for balancers. +// It implements balancer.SubConn interface. 
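+
+// Editor's illustrative sketch (not part of this file): from a balancer's
+// perspective, these ClientConn wrappers are driven roughly as
+//
+//   sc, err := cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{})
+//   if err == nil {
+//       sc.Connect() // SubConns start IDLE; Connect triggers dialing
+//   }
+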
+type acBalancerWrapper struct { + mu sync.Mutex + ac *addrConn +} + +func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { + acbw.mu.Lock() + defer acbw.mu.Unlock() + if len(addrs) <= 0 { + acbw.ac.tearDown(errConnDrain) + return + } + if !acbw.ac.tryUpdateAddrs(addrs) { + cc := acbw.ac.cc + opts := acbw.ac.scopts + acbw.ac.mu.Lock() + // Set old ac.acbw to nil so the Shutdown state update will be ignored + // by balancer. + // + // TODO(bar) the state transition could be wrong when tearDown() old ac + // and creating new ac, fix the transition. + acbw.ac.acbw = nil + acbw.ac.mu.Unlock() + acState := acbw.ac.getState() + acbw.ac.tearDown(errConnDrain) + + if acState == connectivity.Shutdown { + return + } + + ac, err := cc.newAddrConn(addrs, opts) + if err != nil { + channelz.Warningf(acbw.ac.channelzID, "acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err) + return + } + acbw.ac = ac + ac.mu.Lock() + ac.acbw = acbw + ac.mu.Unlock() + if acState != connectivity.Idle { + ac.connect() + } + } +} + +func (acbw *acBalancerWrapper) Connect() { + acbw.mu.Lock() + defer acbw.mu.Unlock() + acbw.ac.connect() +} + +func (acbw *acBalancerWrapper) getAddrConn() *addrConn { + acbw.mu.Lock() + defer acbw.mu.Unlock() + return acbw.ac +} diff --git a/vendor/google.golang.org/grpc/balancer_v1_wrapper.go b/vendor/google.golang.org/grpc/balancer_v1_wrapper.go new file mode 100644 index 00000000..db04b08b --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer_v1_wrapper.go @@ -0,0 +1,334 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/resolver" +) + +type balancerWrapperBuilder struct { + b Balancer // The v1 balancer. +} + +func (bwb *balancerWrapperBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + bwb.b.Start(opts.Target.Endpoint, BalancerConfig{ + DialCreds: opts.DialCreds, + Dialer: opts.Dialer, + }) + _, pickfirst := bwb.b.(*pickFirst) + bw := &balancerWrapper{ + balancer: bwb.b, + pickfirst: pickfirst, + cc: cc, + targetAddr: opts.Target.Endpoint, + startCh: make(chan struct{}), + conns: make(map[resolver.Address]balancer.SubConn), + connSt: make(map[balancer.SubConn]*scState), + csEvltr: &balancer.ConnectivityStateEvaluator{}, + state: connectivity.Idle, + } + cc.UpdateState(balancer.State{ConnectivityState: connectivity.Idle, Picker: bw}) + go bw.lbWatcher() + return bw +} + +func (bwb *balancerWrapperBuilder) Name() string { + return "wrapper" +} + +type scState struct { + addr Address // The v1 address type. + s connectivity.State + down func(error) +} + +type balancerWrapper struct { + balancer Balancer // The v1 balancer. + pickfirst bool + + cc balancer.ClientConn + targetAddr string // Target without the scheme. 
+ + mu sync.Mutex + conns map[resolver.Address]balancer.SubConn + connSt map[balancer.SubConn]*scState + // This channel is closed when handling the first resolver result. + // lbWatcher blocks until this is closed, to avoid race between + // - NewSubConn is created, cc wants to notify balancer of state changes; + // - Build hasn't return, cc doesn't have access to balancer. + startCh chan struct{} + + // To aggregate the connectivity state. + csEvltr *balancer.ConnectivityStateEvaluator + state connectivity.State +} + +// lbWatcher watches the Notify channel of the balancer and manages +// connections accordingly. +func (bw *balancerWrapper) lbWatcher() { + <-bw.startCh + notifyCh := bw.balancer.Notify() + if notifyCh == nil { + // There's no resolver in the balancer. Connect directly. + a := resolver.Address{ + Addr: bw.targetAddr, + Type: resolver.Backend, + } + sc, err := bw.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{}) + if err != nil { + grpclog.Warningf("Error creating connection to %v. Err: %v", a, err) + } else { + bw.mu.Lock() + bw.conns[a] = sc + bw.connSt[sc] = &scState{ + addr: Address{Addr: bw.targetAddr}, + s: connectivity.Idle, + } + bw.mu.Unlock() + sc.Connect() + } + return + } + + for addrs := range notifyCh { + grpclog.Infof("balancerWrapper: got update addr from Notify: %v", addrs) + if bw.pickfirst { + var ( + oldA resolver.Address + oldSC balancer.SubConn + ) + bw.mu.Lock() + for oldA, oldSC = range bw.conns { + break + } + bw.mu.Unlock() + if len(addrs) <= 0 { + if oldSC != nil { + // Teardown old sc. + bw.mu.Lock() + delete(bw.conns, oldA) + delete(bw.connSt, oldSC) + bw.mu.Unlock() + bw.cc.RemoveSubConn(oldSC) + } + continue + } + + var newAddrs []resolver.Address + for _, a := range addrs { + newAddr := resolver.Address{ + Addr: a.Addr, + Type: resolver.Backend, // All addresses from balancer are all backends. + ServerName: "", + Metadata: a.Metadata, + } + newAddrs = append(newAddrs, newAddr) + } + if oldSC == nil { + // Create new sc. + sc, err := bw.cc.NewSubConn(newAddrs, balancer.NewSubConnOptions{}) + if err != nil { + grpclog.Warningf("Error creating connection to %v. Err: %v", newAddrs, err) + } else { + bw.mu.Lock() + // For pickfirst, there should be only one SubConn, so the + // address doesn't matter. All states updating (up and down) + // and picking should all happen on that only SubConn. + bw.conns[resolver.Address{}] = sc + bw.connSt[sc] = &scState{ + addr: addrs[0], // Use the first address. + s: connectivity.Idle, + } + bw.mu.Unlock() + sc.Connect() + } + } else { + bw.mu.Lock() + bw.connSt[oldSC].addr = addrs[0] + bw.mu.Unlock() + oldSC.UpdateAddresses(newAddrs) + } + } else { + var ( + add []resolver.Address // Addresses need to setup connections. + del []balancer.SubConn // Connections need to tear down. + ) + resAddrs := make(map[resolver.Address]Address) + for _, a := range addrs { + resAddrs[resolver.Address{ + Addr: a.Addr, + Type: resolver.Backend, // All addresses from balancer are all backends. + ServerName: "", + Metadata: a.Metadata, + }] = a + } + bw.mu.Lock() + for a := range resAddrs { + if _, ok := bw.conns[a]; !ok { + add = append(add, a) + } + } + for a, c := range bw.conns { + if _, ok := resAddrs[a]; !ok { + del = append(del, c) + delete(bw.conns, a) + // Keep the state of this sc in bw.connSt until its state becomes Shutdown. 
+ } + } + bw.mu.Unlock() + for _, a := range add { + sc, err := bw.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{}) + if err != nil { + grpclog.Warningf("Error creating connection to %v. Err: %v", a, err) + } else { + bw.mu.Lock() + bw.conns[a] = sc + bw.connSt[sc] = &scState{ + addr: resAddrs[a], + s: connectivity.Idle, + } + bw.mu.Unlock() + sc.Connect() + } + } + for _, c := range del { + bw.cc.RemoveSubConn(c) + } + } + } +} + +func (bw *balancerWrapper) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { + bw.mu.Lock() + defer bw.mu.Unlock() + scSt, ok := bw.connSt[sc] + if !ok { + return + } + if s == connectivity.Idle { + sc.Connect() + } + oldS := scSt.s + scSt.s = s + if oldS != connectivity.Ready && s == connectivity.Ready { + scSt.down = bw.balancer.Up(scSt.addr) + } else if oldS == connectivity.Ready && s != connectivity.Ready { + if scSt.down != nil { + scSt.down(errConnClosing) + } + } + sa := bw.csEvltr.RecordTransition(oldS, s) + if bw.state != sa { + bw.state = sa + } + bw.cc.UpdateState(balancer.State{ConnectivityState: bw.state, Picker: bw}) + if s == connectivity.Shutdown { + // Remove state for this sc. + delete(bw.connSt, sc) + } +} + +func (bw *balancerWrapper) HandleResolvedAddrs([]resolver.Address, error) { + bw.mu.Lock() + defer bw.mu.Unlock() + select { + case <-bw.startCh: + default: + close(bw.startCh) + } + // There should be a resolver inside the balancer. + // All updates here, if any, are ignored. +} + +func (bw *balancerWrapper) Close() { + bw.mu.Lock() + defer bw.mu.Unlock() + select { + case <-bw.startCh: + default: + close(bw.startCh) + } + bw.balancer.Close() +} + +// The picker is the balancerWrapper itself. +// It either blocks or returns error, consistent with v1 balancer Get(). +func (bw *balancerWrapper) Pick(info balancer.PickInfo) (result balancer.PickResult, err error) { + failfast := true // Default failfast is true. + if ss, ok := rpcInfoFromContext(info.Ctx); ok { + failfast = ss.failfast + } + a, p, err := bw.balancer.Get(info.Ctx, BalancerGetOptions{BlockingWait: !failfast}) + if err != nil { + return balancer.PickResult{}, toRPCErr(err) + } + if p != nil { + result.Done = func(balancer.DoneInfo) { p() } + defer func() { + if err != nil { + p() + } + }() + } + + bw.mu.Lock() + defer bw.mu.Unlock() + if bw.pickfirst { + // Get the first sc in conns. + for _, result.SubConn = range bw.conns { + return result, nil + } + return balancer.PickResult{}, balancer.ErrNoSubConnAvailable + } + var ok1 bool + result.SubConn, ok1 = bw.conns[resolver.Address{ + Addr: a.Addr, + Type: resolver.Backend, + ServerName: "", + Metadata: a.Metadata, + }] + s, ok2 := bw.connSt[result.SubConn] + if !ok1 || !ok2 { + // This can only happen due to a race where Get() returned an address + // that was subsequently removed by Notify. In this case we should + // retry always. + return balancer.PickResult{}, balancer.ErrNoSubConnAvailable + } + switch s.s { + case connectivity.Ready, connectivity.Idle: + return result, nil + case connectivity.Shutdown, connectivity.TransientFailure: + // If the returned sc has been shut down or is in transient failure, + // return error, and this RPC will fail or wait for another picker (if + // non-failfast). + return balancer.PickResult{}, balancer.ErrTransientFailure + default: + // For other states (connecting or unknown), the v1 balancer would + // traditionally wait until ready and then issue the RPC. 
Returning + // ErrNoSubConnAvailable will be a slight improvement in that it will + // allow the balancer to choose another address in case others are + // connected. + return balancer.PickResult{}, balancer.ErrNoSubConnAvailable + } +} diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go new file mode 100644 index 00000000..f393bb66 --- /dev/null +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -0,0 +1,900 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: grpc/binarylog/grpc_binarylog_v1/binarylog.proto + +package grpc_binarylog_v1 // import "google.golang.org/grpc/binarylog/grpc_binarylog_v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import duration "github.com/golang/protobuf/ptypes/duration" +import timestamp "github.com/golang/protobuf/ptypes/timestamp" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Enumerates the type of event +// Note the terminology is different from the RPC semantics +// definition, but the same meaning is expressed here. +type GrpcLogEntry_EventType int32 + +const ( + GrpcLogEntry_EVENT_TYPE_UNKNOWN GrpcLogEntry_EventType = 0 + // Header sent from client to server + GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER GrpcLogEntry_EventType = 1 + // Header sent from server to client + GrpcLogEntry_EVENT_TYPE_SERVER_HEADER GrpcLogEntry_EventType = 2 + // Message sent from client to server + GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE GrpcLogEntry_EventType = 3 + // Message sent from server to client + GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE GrpcLogEntry_EventType = 4 + // A signal that client is done sending + GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE GrpcLogEntry_EventType = 5 + // Trailer indicates the end of the RPC. + // On client side, this event means a trailer was either received + // from the network or the gRPC library locally generated a status + // to inform the application about a failure. + // On server side, this event means the server application requested + // to send a trailer. Note: EVENT_TYPE_CANCEL may still arrive after + // this due to races on server side. + GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER GrpcLogEntry_EventType = 6 + // A signal that the RPC is cancelled. On client side, this + // indicates the client application requests a cancellation. + // On server side, this indicates that cancellation was detected. + // Note: This marks the end of the RPC. Events may arrive after + // this due to races. For example, on client side a trailer + // may arrive even though the application requested to cancel the RPC. 
+ GrpcLogEntry_EVENT_TYPE_CANCEL GrpcLogEntry_EventType = 7 +) + +var GrpcLogEntry_EventType_name = map[int32]string{ + 0: "EVENT_TYPE_UNKNOWN", + 1: "EVENT_TYPE_CLIENT_HEADER", + 2: "EVENT_TYPE_SERVER_HEADER", + 3: "EVENT_TYPE_CLIENT_MESSAGE", + 4: "EVENT_TYPE_SERVER_MESSAGE", + 5: "EVENT_TYPE_CLIENT_HALF_CLOSE", + 6: "EVENT_TYPE_SERVER_TRAILER", + 7: "EVENT_TYPE_CANCEL", +} +var GrpcLogEntry_EventType_value = map[string]int32{ + "EVENT_TYPE_UNKNOWN": 0, + "EVENT_TYPE_CLIENT_HEADER": 1, + "EVENT_TYPE_SERVER_HEADER": 2, + "EVENT_TYPE_CLIENT_MESSAGE": 3, + "EVENT_TYPE_SERVER_MESSAGE": 4, + "EVENT_TYPE_CLIENT_HALF_CLOSE": 5, + "EVENT_TYPE_SERVER_TRAILER": 6, + "EVENT_TYPE_CANCEL": 7, +} + +func (x GrpcLogEntry_EventType) String() string { + return proto.EnumName(GrpcLogEntry_EventType_name, int32(x)) +} +func (GrpcLogEntry_EventType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_binarylog_264c8c9c551ce911, []int{0, 0} +} + +// Enumerates the entity that generates the log entry +type GrpcLogEntry_Logger int32 + +const ( + GrpcLogEntry_LOGGER_UNKNOWN GrpcLogEntry_Logger = 0 + GrpcLogEntry_LOGGER_CLIENT GrpcLogEntry_Logger = 1 + GrpcLogEntry_LOGGER_SERVER GrpcLogEntry_Logger = 2 +) + +var GrpcLogEntry_Logger_name = map[int32]string{ + 0: "LOGGER_UNKNOWN", + 1: "LOGGER_CLIENT", + 2: "LOGGER_SERVER", +} +var GrpcLogEntry_Logger_value = map[string]int32{ + "LOGGER_UNKNOWN": 0, + "LOGGER_CLIENT": 1, + "LOGGER_SERVER": 2, +} + +func (x GrpcLogEntry_Logger) String() string { + return proto.EnumName(GrpcLogEntry_Logger_name, int32(x)) +} +func (GrpcLogEntry_Logger) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_binarylog_264c8c9c551ce911, []int{0, 1} +} + +type Address_Type int32 + +const ( + Address_TYPE_UNKNOWN Address_Type = 0 + // address is in 1.2.3.4 form + Address_TYPE_IPV4 Address_Type = 1 + // address is in IPv6 canonical form (RFC5952 section 4) + // The scope is NOT included in the address string. + Address_TYPE_IPV6 Address_Type = 2 + // address is UDS string + Address_TYPE_UNIX Address_Type = 3 +) + +var Address_Type_name = map[int32]string{ + 0: "TYPE_UNKNOWN", + 1: "TYPE_IPV4", + 2: "TYPE_IPV6", + 3: "TYPE_UNIX", +} +var Address_Type_value = map[string]int32{ + "TYPE_UNKNOWN": 0, + "TYPE_IPV4": 1, + "TYPE_IPV6": 2, + "TYPE_UNIX": 3, +} + +func (x Address_Type) String() string { + return proto.EnumName(Address_Type_name, int32(x)) +} +func (Address_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_binarylog_264c8c9c551ce911, []int{7, 0} +} + +// Log entry we store in binary logs +type GrpcLogEntry struct { + // The timestamp of the binary log message + Timestamp *timestamp.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // Uniquely identifies a call. The value must not be 0 in order to disambiguate + // from an unset value. + // Each call may have several log entries, they will all have the same call_id. + // Nothing is guaranteed about their value other than they are unique across + // different RPCs in the same gRPC process. + CallId uint64 `protobuf:"varint,2,opt,name=call_id,json=callId,proto3" json:"call_id,omitempty"` + // The entry sequence id for this call. The first GrpcLogEntry has a + // value of 1, to disambiguate from an unset value. The purpose of + // this field is to detect missing entries in environments where + // durability or ordering is not guaranteed. 
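+	// For example, the entries of one call share a single call_id and carry
+	// sequence_id_within_call values 1, 2, 3, ...; a gap in that sequence
+	// indicates a lost entry. (Illustrative note, not upstream text.)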
+ SequenceIdWithinCall uint64 `protobuf:"varint,3,opt,name=sequence_id_within_call,json=sequenceIdWithinCall,proto3" json:"sequence_id_within_call,omitempty"` + Type GrpcLogEntry_EventType `protobuf:"varint,4,opt,name=type,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_EventType" json:"type,omitempty"` + Logger GrpcLogEntry_Logger `protobuf:"varint,5,opt,name=logger,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_Logger" json:"logger,omitempty"` + // The logger uses one of the following fields to record the payload, + // according to the type of the log entry. + // + // Types that are valid to be assigned to Payload: + // *GrpcLogEntry_ClientHeader + // *GrpcLogEntry_ServerHeader + // *GrpcLogEntry_Message + // *GrpcLogEntry_Trailer + Payload isGrpcLogEntry_Payload `protobuf_oneof:"payload"` + // true if payload does not represent the full message or metadata. + PayloadTruncated bool `protobuf:"varint,10,opt,name=payload_truncated,json=payloadTruncated,proto3" json:"payload_truncated,omitempty"` + // Peer address information, will only be recorded on the first + // incoming event. On client side, peer is logged on + // EVENT_TYPE_SERVER_HEADER normally or EVENT_TYPE_SERVER_TRAILER in + // the case of trailers-only. On server side, peer is always + // logged on EVENT_TYPE_CLIENT_HEADER. + Peer *Address `protobuf:"bytes,11,opt,name=peer,proto3" json:"peer,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GrpcLogEntry) Reset() { *m = GrpcLogEntry{} } +func (m *GrpcLogEntry) String() string { return proto.CompactTextString(m) } +func (*GrpcLogEntry) ProtoMessage() {} +func (*GrpcLogEntry) Descriptor() ([]byte, []int) { + return fileDescriptor_binarylog_264c8c9c551ce911, []int{0} +} +func (m *GrpcLogEntry) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GrpcLogEntry.Unmarshal(m, b) +} +func (m *GrpcLogEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GrpcLogEntry.Marshal(b, m, deterministic) +} +func (dst *GrpcLogEntry) XXX_Merge(src proto.Message) { + xxx_messageInfo_GrpcLogEntry.Merge(dst, src) +} +func (m *GrpcLogEntry) XXX_Size() int { + return xxx_messageInfo_GrpcLogEntry.Size(m) +} +func (m *GrpcLogEntry) XXX_DiscardUnknown() { + xxx_messageInfo_GrpcLogEntry.DiscardUnknown(m) +} + +var xxx_messageInfo_GrpcLogEntry proto.InternalMessageInfo + +func (m *GrpcLogEntry) GetTimestamp() *timestamp.Timestamp { + if m != nil { + return m.Timestamp + } + return nil +} + +func (m *GrpcLogEntry) GetCallId() uint64 { + if m != nil { + return m.CallId + } + return 0 +} + +func (m *GrpcLogEntry) GetSequenceIdWithinCall() uint64 { + if m != nil { + return m.SequenceIdWithinCall + } + return 0 +} + +func (m *GrpcLogEntry) GetType() GrpcLogEntry_EventType { + if m != nil { + return m.Type + } + return GrpcLogEntry_EVENT_TYPE_UNKNOWN +} + +func (m *GrpcLogEntry) GetLogger() GrpcLogEntry_Logger { + if m != nil { + return m.Logger + } + return GrpcLogEntry_LOGGER_UNKNOWN +} + +type isGrpcLogEntry_Payload interface { + isGrpcLogEntry_Payload() +} + +type GrpcLogEntry_ClientHeader struct { + ClientHeader *ClientHeader `protobuf:"bytes,6,opt,name=client_header,json=clientHeader,proto3,oneof"` +} + +type GrpcLogEntry_ServerHeader struct { + ServerHeader *ServerHeader `protobuf:"bytes,7,opt,name=server_header,json=serverHeader,proto3,oneof"` +} + +type GrpcLogEntry_Message struct { + Message *Message `protobuf:"bytes,8,opt,name=message,proto3,oneof"` +} + +type GrpcLogEntry_Trailer 
struct { + Trailer *Trailer `protobuf:"bytes,9,opt,name=trailer,proto3,oneof"` +} + +func (*GrpcLogEntry_ClientHeader) isGrpcLogEntry_Payload() {} + +func (*GrpcLogEntry_ServerHeader) isGrpcLogEntry_Payload() {} + +func (*GrpcLogEntry_Message) isGrpcLogEntry_Payload() {} + +func (*GrpcLogEntry_Trailer) isGrpcLogEntry_Payload() {} + +func (m *GrpcLogEntry) GetPayload() isGrpcLogEntry_Payload { + if m != nil { + return m.Payload + } + return nil +} + +func (m *GrpcLogEntry) GetClientHeader() *ClientHeader { + if x, ok := m.GetPayload().(*GrpcLogEntry_ClientHeader); ok { + return x.ClientHeader + } + return nil +} + +func (m *GrpcLogEntry) GetServerHeader() *ServerHeader { + if x, ok := m.GetPayload().(*GrpcLogEntry_ServerHeader); ok { + return x.ServerHeader + } + return nil +} + +func (m *GrpcLogEntry) GetMessage() *Message { + if x, ok := m.GetPayload().(*GrpcLogEntry_Message); ok { + return x.Message + } + return nil +} + +func (m *GrpcLogEntry) GetTrailer() *Trailer { + if x, ok := m.GetPayload().(*GrpcLogEntry_Trailer); ok { + return x.Trailer + } + return nil +} + +func (m *GrpcLogEntry) GetPayloadTruncated() bool { + if m != nil { + return m.PayloadTruncated + } + return false +} + +func (m *GrpcLogEntry) GetPeer() *Address { + if m != nil { + return m.Peer + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*GrpcLogEntry) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _GrpcLogEntry_OneofMarshaler, _GrpcLogEntry_OneofUnmarshaler, _GrpcLogEntry_OneofSizer, []interface{}{ + (*GrpcLogEntry_ClientHeader)(nil), + (*GrpcLogEntry_ServerHeader)(nil), + (*GrpcLogEntry_Message)(nil), + (*GrpcLogEntry_Trailer)(nil), + } +} + +func _GrpcLogEntry_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*GrpcLogEntry) + // payload + switch x := m.Payload.(type) { + case *GrpcLogEntry_ClientHeader: + b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ClientHeader); err != nil { + return err + } + case *GrpcLogEntry_ServerHeader: + b.EncodeVarint(7<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ServerHeader); err != nil { + return err + } + case *GrpcLogEntry_Message: + b.EncodeVarint(8<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Message); err != nil { + return err + } + case *GrpcLogEntry_Trailer: + b.EncodeVarint(9<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Trailer); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("GrpcLogEntry.Payload has unexpected type %T", x) + } + return nil +} + +func _GrpcLogEntry_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*GrpcLogEntry) + switch tag { + case 6: // payload.client_header + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ClientHeader) + err := b.DecodeMessage(msg) + m.Payload = &GrpcLogEntry_ClientHeader{msg} + return true, err + case 7: // payload.server_header + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ServerHeader) + err := b.DecodeMessage(msg) + m.Payload = &GrpcLogEntry_ServerHeader{msg} + return true, err + case 8: // payload.message + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Message) + err := b.DecodeMessage(msg) + m.Payload = &GrpcLogEntry_Message{msg} + return true, err + case 9: // 
payload.trailer
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		msg := new(Trailer)
+		err := b.DecodeMessage(msg)
+		m.Payload = &GrpcLogEntry_Trailer{msg}
+		return true, err
+	default:
+		return false, nil
+	}
+}
+
+func _GrpcLogEntry_OneofSizer(msg proto.Message) (n int) {
+	m := msg.(*GrpcLogEntry)
+	// payload
+	switch x := m.Payload.(type) {
+	case *GrpcLogEntry_ClientHeader:
+		s := proto.Size(x.ClientHeader)
+		n += 1 // tag and wire
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case *GrpcLogEntry_ServerHeader:
+		s := proto.Size(x.ServerHeader)
+		n += 1 // tag and wire
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case *GrpcLogEntry_Message:
+		s := proto.Size(x.Message)
+		n += 1 // tag and wire
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case *GrpcLogEntry_Trailer:
+		s := proto.Size(x.Trailer)
+		n += 1 // tag and wire
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case nil:
+	default:
+		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+	}
+	return n
+}
+
+type ClientHeader struct {
+	// This contains only the metadata from the application.
+	Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
+	// The name of the RPC method, which looks something like:
+	// /<service_name>/<method_name>
+	// Note the leading "/" character.
+	MethodName string `protobuf:"bytes,2,opt,name=method_name,json=methodName,proto3" json:"method_name,omitempty"`
+	// A single process may be used to run multiple virtual
+	// servers with different identities.
+	// The authority is the name of such a server identity.
+	// It is typically a portion of the URI in the form of
+	// <host> or <host>:<port> .
+	Authority string `protobuf:"bytes,3,opt,name=authority,proto3" json:"authority,omitempty"`
+	// the RPC timeout
+	Timeout *duration.Duration `protobuf:"bytes,4,opt,name=timeout,proto3" json:"timeout,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *ClientHeader) Reset()         { *m = ClientHeader{} }
+func (m *ClientHeader) String() string { return proto.CompactTextString(m) }
+func (*ClientHeader) ProtoMessage()    {}
+func (*ClientHeader) Descriptor() ([]byte, []int) {
+	return fileDescriptor_binarylog_264c8c9c551ce911, []int{1}
+}
+func (m *ClientHeader) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ClientHeader.Unmarshal(m, b)
+}
+func (m *ClientHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ClientHeader.Marshal(b, m, deterministic)
+}
+func (dst *ClientHeader) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ClientHeader.Merge(dst, src)
+}
+func (m *ClientHeader) XXX_Size() int {
+	return xxx_messageInfo_ClientHeader.Size(m)
+}
+func (m *ClientHeader) XXX_DiscardUnknown() {
+	xxx_messageInfo_ClientHeader.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ClientHeader proto.InternalMessageInfo
+
+func (m *ClientHeader) GetMetadata() *Metadata {
+	if m != nil {
+		return m.Metadata
+	}
+	return nil
+}
+
+func (m *ClientHeader) GetMethodName() string {
+	if m != nil {
+		return m.MethodName
+	}
+	return ""
+}
+
+func (m *ClientHeader) GetAuthority() string {
+	if m != nil {
+		return m.Authority
+	}
+	return ""
+}
+
+func (m *ClientHeader) GetTimeout() *duration.Duration {
+	if m != nil {
+		return m.Timeout
+	}
+	return nil
+}
+
+type ServerHeader struct {
+	// This contains only the metadata from the application.
+ Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServerHeader) Reset() { *m = ServerHeader{} } +func (m *ServerHeader) String() string { return proto.CompactTextString(m) } +func (*ServerHeader) ProtoMessage() {} +func (*ServerHeader) Descriptor() ([]byte, []int) { + return fileDescriptor_binarylog_264c8c9c551ce911, []int{2} +} +func (m *ServerHeader) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServerHeader.Unmarshal(m, b) +} +func (m *ServerHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServerHeader.Marshal(b, m, deterministic) +} +func (dst *ServerHeader) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServerHeader.Merge(dst, src) +} +func (m *ServerHeader) XXX_Size() int { + return xxx_messageInfo_ServerHeader.Size(m) +} +func (m *ServerHeader) XXX_DiscardUnknown() { + xxx_messageInfo_ServerHeader.DiscardUnknown(m) +} + +var xxx_messageInfo_ServerHeader proto.InternalMessageInfo + +func (m *ServerHeader) GetMetadata() *Metadata { + if m != nil { + return m.Metadata + } + return nil +} + +type Trailer struct { + // This contains only the metadata from the application. + Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` + // The gRPC status code. + StatusCode uint32 `protobuf:"varint,2,opt,name=status_code,json=statusCode,proto3" json:"status_code,omitempty"` + // An original status message before any transport specific + // encoding. + StatusMessage string `protobuf:"bytes,3,opt,name=status_message,json=statusMessage,proto3" json:"status_message,omitempty"` + // The value of the 'grpc-status-details-bin' metadata key. If + // present, this is always an encoded 'google.rpc.Status' message. + StatusDetails []byte `protobuf:"bytes,4,opt,name=status_details,json=statusDetails,proto3" json:"status_details,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Trailer) Reset() { *m = Trailer{} } +func (m *Trailer) String() string { return proto.CompactTextString(m) } +func (*Trailer) ProtoMessage() {} +func (*Trailer) Descriptor() ([]byte, []int) { + return fileDescriptor_binarylog_264c8c9c551ce911, []int{3} +} +func (m *Trailer) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Trailer.Unmarshal(m, b) +} +func (m *Trailer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Trailer.Marshal(b, m, deterministic) +} +func (dst *Trailer) XXX_Merge(src proto.Message) { + xxx_messageInfo_Trailer.Merge(dst, src) +} +func (m *Trailer) XXX_Size() int { + return xxx_messageInfo_Trailer.Size(m) +} +func (m *Trailer) XXX_DiscardUnknown() { + xxx_messageInfo_Trailer.DiscardUnknown(m) +} + +var xxx_messageInfo_Trailer proto.InternalMessageInfo + +func (m *Trailer) GetMetadata() *Metadata { + if m != nil { + return m.Metadata + } + return nil +} + +func (m *Trailer) GetStatusCode() uint32 { + if m != nil { + return m.StatusCode + } + return 0 +} + +func (m *Trailer) GetStatusMessage() string { + if m != nil { + return m.StatusMessage + } + return "" +} + +func (m *Trailer) GetStatusDetails() []byte { + if m != nil { + return m.StatusDetails + } + return nil +} + +// Message payload, used by CLIENT_MESSAGE and SERVER_MESSAGE +type Message struct { + // Length of the message. 
It may not be the same as the length of the + // data field, as the logging payload can be truncated or omitted. + Length uint32 `protobuf:"varint,1,opt,name=length,proto3" json:"length,omitempty"` + // May be truncated or omitted. + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Message) Reset() { *m = Message{} } +func (m *Message) String() string { return proto.CompactTextString(m) } +func (*Message) ProtoMessage() {} +func (*Message) Descriptor() ([]byte, []int) { + return fileDescriptor_binarylog_264c8c9c551ce911, []int{4} +} +func (m *Message) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Message.Unmarshal(m, b) +} +func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Message.Marshal(b, m, deterministic) +} +func (dst *Message) XXX_Merge(src proto.Message) { + xxx_messageInfo_Message.Merge(dst, src) +} +func (m *Message) XXX_Size() int { + return xxx_messageInfo_Message.Size(m) +} +func (m *Message) XXX_DiscardUnknown() { + xxx_messageInfo_Message.DiscardUnknown(m) +} + +var xxx_messageInfo_Message proto.InternalMessageInfo + +func (m *Message) GetLength() uint32 { + if m != nil { + return m.Length + } + return 0 +} + +func (m *Message) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +// A list of metadata pairs, used in the payload of client header, +// server header, and server trailer. +// Implementations may omit some entries to honor the header limits +// of GRPC_BINARY_LOG_CONFIG. +// +// Header keys added by gRPC are omitted. To be more specific, +// implementations will not log the following entries, and this is +// not to be treated as a truncation: +// - entries handled by grpc that are not user visible, such as those +// that begin with 'grpc-' (with exception of grpc-trace-bin) +// or keys like 'lb-token' +// - transport specific entries, including but not limited to: +// ':path', ':authority', 'content-encoding', 'user-agent', 'te', etc +// - entries added for call credentials +// +// Implementations must always log grpc-trace-bin if it is present. +// Practically speaking it will only be visible on server side because +// grpc-trace-bin is managed by low level client side mechanisms +// inaccessible from the application level. On server side, the +// header is just a normal metadata key. +// The pair will not count towards the size limit. 
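+//
+// For example, a user-set pair such as "x-my-header" would be logged, while
+// ':authority' and 'grpc-timeout' fall under the omitted transport-specific
+// and 'grpc-'-prefixed entries described above. (Illustrative note, not
+// upstream text.)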
+type Metadata struct { + Entry []*MetadataEntry `protobuf:"bytes,1,rep,name=entry,proto3" json:"entry,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Metadata) Reset() { *m = Metadata{} } +func (m *Metadata) String() string { return proto.CompactTextString(m) } +func (*Metadata) ProtoMessage() {} +func (*Metadata) Descriptor() ([]byte, []int) { + return fileDescriptor_binarylog_264c8c9c551ce911, []int{5} +} +func (m *Metadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Metadata.Unmarshal(m, b) +} +func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Metadata.Marshal(b, m, deterministic) +} +func (dst *Metadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_Metadata.Merge(dst, src) +} +func (m *Metadata) XXX_Size() int { + return xxx_messageInfo_Metadata.Size(m) +} +func (m *Metadata) XXX_DiscardUnknown() { + xxx_messageInfo_Metadata.DiscardUnknown(m) +} + +var xxx_messageInfo_Metadata proto.InternalMessageInfo + +func (m *Metadata) GetEntry() []*MetadataEntry { + if m != nil { + return m.Entry + } + return nil +} + +// A metadata key value pair +type MetadataEntry struct { + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MetadataEntry) Reset() { *m = MetadataEntry{} } +func (m *MetadataEntry) String() string { return proto.CompactTextString(m) } +func (*MetadataEntry) ProtoMessage() {} +func (*MetadataEntry) Descriptor() ([]byte, []int) { + return fileDescriptor_binarylog_264c8c9c551ce911, []int{6} +} +func (m *MetadataEntry) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MetadataEntry.Unmarshal(m, b) +} +func (m *MetadataEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MetadataEntry.Marshal(b, m, deterministic) +} +func (dst *MetadataEntry) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetadataEntry.Merge(dst, src) +} +func (m *MetadataEntry) XXX_Size() int { + return xxx_messageInfo_MetadataEntry.Size(m) +} +func (m *MetadataEntry) XXX_DiscardUnknown() { + xxx_messageInfo_MetadataEntry.DiscardUnknown(m) +} + +var xxx_messageInfo_MetadataEntry proto.InternalMessageInfo + +func (m *MetadataEntry) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *MetadataEntry) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +// Address information +type Address struct { + Type Address_Type `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.binarylog.v1.Address_Type" json:"type,omitempty"` + Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` + // only for TYPE_IPV4 and TYPE_IPV6 + IpPort uint32 `protobuf:"varint,3,opt,name=ip_port,json=ipPort,proto3" json:"ip_port,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Address) Reset() { *m = Address{} } +func (m *Address) String() string { return proto.CompactTextString(m) } +func (*Address) ProtoMessage() {} +func (*Address) Descriptor() ([]byte, []int) { + return fileDescriptor_binarylog_264c8c9c551ce911, []int{7} +} +func (m *Address) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Address.Unmarshal(m, b) +} +func (m *Address) 
XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Address.Marshal(b, m, deterministic) +} +func (dst *Address) XXX_Merge(src proto.Message) { + xxx_messageInfo_Address.Merge(dst, src) +} +func (m *Address) XXX_Size() int { + return xxx_messageInfo_Address.Size(m) +} +func (m *Address) XXX_DiscardUnknown() { + xxx_messageInfo_Address.DiscardUnknown(m) +} + +var xxx_messageInfo_Address proto.InternalMessageInfo + +func (m *Address) GetType() Address_Type { + if m != nil { + return m.Type + } + return Address_TYPE_UNKNOWN +} + +func (m *Address) GetAddress() string { + if m != nil { + return m.Address + } + return "" +} + +func (m *Address) GetIpPort() uint32 { + if m != nil { + return m.IpPort + } + return 0 +} + +func init() { + proto.RegisterType((*GrpcLogEntry)(nil), "grpc.binarylog.v1.GrpcLogEntry") + proto.RegisterType((*ClientHeader)(nil), "grpc.binarylog.v1.ClientHeader") + proto.RegisterType((*ServerHeader)(nil), "grpc.binarylog.v1.ServerHeader") + proto.RegisterType((*Trailer)(nil), "grpc.binarylog.v1.Trailer") + proto.RegisterType((*Message)(nil), "grpc.binarylog.v1.Message") + proto.RegisterType((*Metadata)(nil), "grpc.binarylog.v1.Metadata") + proto.RegisterType((*MetadataEntry)(nil), "grpc.binarylog.v1.MetadataEntry") + proto.RegisterType((*Address)(nil), "grpc.binarylog.v1.Address") + proto.RegisterEnum("grpc.binarylog.v1.GrpcLogEntry_EventType", GrpcLogEntry_EventType_name, GrpcLogEntry_EventType_value) + proto.RegisterEnum("grpc.binarylog.v1.GrpcLogEntry_Logger", GrpcLogEntry_Logger_name, GrpcLogEntry_Logger_value) + proto.RegisterEnum("grpc.binarylog.v1.Address_Type", Address_Type_name, Address_Type_value) +} + +func init() { + proto.RegisterFile("grpc/binarylog/grpc_binarylog_v1/binarylog.proto", fileDescriptor_binarylog_264c8c9c551ce911) +} + +var fileDescriptor_binarylog_264c8c9c551ce911 = []byte{ + // 900 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x51, 0x6f, 0xe3, 0x44, + 0x10, 0x3e, 0x37, 0x69, 0xdc, 0x4c, 0x92, 0xca, 0x5d, 0x95, 0x3b, 0x5f, 0x29, 0x34, 0xb2, 0x04, + 0x0a, 0x42, 0x72, 0xb9, 0x94, 0xeb, 0xf1, 0x02, 0x52, 0x92, 0xfa, 0xd2, 0x88, 0x5c, 0x1a, 0x6d, + 0x72, 0x3d, 0x40, 0x48, 0xd6, 0x36, 0x5e, 0x1c, 0x0b, 0xc7, 0x6b, 0xd6, 0x9b, 0xa0, 0xfc, 0x2c, + 0xde, 0x90, 0xee, 0x77, 0xf1, 0x8e, 0xbc, 0x6b, 0x27, 0xa6, 0x69, 0x0f, 0x09, 0xde, 0x3c, 0xdf, + 0x7c, 0xf3, 0xcd, 0xee, 0x78, 0x66, 0x16, 0xbe, 0xf2, 0x79, 0x3c, 0x3b, 0xbf, 0x0b, 0x22, 0xc2, + 0xd7, 0x21, 0xf3, 0xcf, 0x53, 0xd3, 0xdd, 0x98, 0xee, 0xea, 0xc5, 0xd6, 0x67, 0xc7, 0x9c, 0x09, + 0x86, 0x8e, 0x52, 0x8a, 0xbd, 0x45, 0x57, 0x2f, 0x4e, 0x3e, 0xf5, 0x19, 0xf3, 0x43, 0x7a, 0x2e, + 0x09, 0x77, 0xcb, 0x5f, 0xce, 0xbd, 0x25, 0x27, 0x22, 0x60, 0x91, 0x0a, 0x39, 0x39, 0xbb, 0xef, + 0x17, 0xc1, 0x82, 0x26, 0x82, 0x2c, 0x62, 0x45, 0xb0, 0xde, 0xeb, 0x50, 0xef, 0xf3, 0x78, 0x36, + 0x64, 0xbe, 0x13, 0x09, 0xbe, 0x46, 0xdf, 0x40, 0x75, 0xc3, 0x31, 0xb5, 0xa6, 0xd6, 0xaa, 0xb5, + 0x4f, 0x6c, 0xa5, 0x62, 0xe7, 0x2a, 0xf6, 0x34, 0x67, 0xe0, 0x2d, 0x19, 0x3d, 0x03, 0x7d, 0x46, + 0xc2, 0xd0, 0x0d, 0x3c, 0x73, 0xaf, 0xa9, 0xb5, 0xca, 0xb8, 0x92, 0x9a, 0x03, 0x0f, 0xbd, 0x84, + 0x67, 0x09, 0xfd, 0x6d, 0x49, 0xa3, 0x19, 0x75, 0x03, 0xcf, 0xfd, 0x3d, 0x10, 0xf3, 0x20, 0x72, + 0x53, 0xa7, 0x59, 0x92, 0xc4, 0xe3, 0xdc, 0x3d, 0xf0, 0xde, 0x49, 0x67, 0x8f, 0x84, 0x21, 0xfa, + 0x16, 0xca, 0x62, 0x1d, 0x53, 0xb3, 0xdc, 0xd4, 0x5a, 0x87, 0xed, 0x2f, 0xec, 0x9d, 0xdb, 0xdb, + 0xc5, 0x83, 0xdb, 0xce, 0x8a, 0x46, 0x62, 0xba, 0x8e, 
0x29, 0x96, 0x61, 0xe8, 0x3b, 0xa8, 0x84, + 0xcc, 0xf7, 0x29, 0x37, 0xf7, 0xa5, 0xc0, 0xe7, 0xff, 0x26, 0x30, 0x94, 0x6c, 0x9c, 0x45, 0xa1, + 0xd7, 0xd0, 0x98, 0x85, 0x01, 0x8d, 0x84, 0x3b, 0xa7, 0xc4, 0xa3, 0xdc, 0xac, 0xc8, 0x62, 0x9c, + 0x3d, 0x20, 0xd3, 0x93, 0xbc, 0x6b, 0x49, 0xbb, 0x7e, 0x82, 0xeb, 0xb3, 0x82, 0x9d, 0xea, 0x24, + 0x94, 0xaf, 0x28, 0xcf, 0x75, 0xf4, 0x47, 0x75, 0x26, 0x92, 0xb7, 0xd5, 0x49, 0x0a, 0x36, 0xba, + 0x04, 0x7d, 0x41, 0x93, 0x84, 0xf8, 0xd4, 0x3c, 0xc8, 0x7f, 0xcb, 0x8e, 0xc2, 0x1b, 0xc5, 0xb8, + 0x7e, 0x82, 0x73, 0x72, 0x1a, 0x27, 0x38, 0x09, 0x42, 0xca, 0xcd, 0xea, 0xa3, 0x71, 0x53, 0xc5, + 0x48, 0xe3, 0x32, 0x32, 0xfa, 0x12, 0x8e, 0x62, 0xb2, 0x0e, 0x19, 0xf1, 0x5c, 0xc1, 0x97, 0xd1, + 0x8c, 0x08, 0xea, 0x99, 0xd0, 0xd4, 0x5a, 0x07, 0xd8, 0xc8, 0x1c, 0xd3, 0x1c, 0x47, 0x36, 0x94, + 0x63, 0x4a, 0xb9, 0x59, 0x7b, 0x34, 0x43, 0xc7, 0xf3, 0x38, 0x4d, 0x12, 0x2c, 0x79, 0xd6, 0x5f, + 0x1a, 0x54, 0x37, 0x3f, 0x0c, 0x3d, 0x05, 0xe4, 0xdc, 0x3a, 0xa3, 0xa9, 0x3b, 0xfd, 0x71, 0xec, + 0xb8, 0x6f, 0x47, 0xdf, 0x8f, 0x6e, 0xde, 0x8d, 0x8c, 0x27, 0xe8, 0x14, 0xcc, 0x02, 0xde, 0x1b, + 0x0e, 0xd2, 0xef, 0x6b, 0xa7, 0x73, 0xe5, 0x60, 0x43, 0xbb, 0xe7, 0x9d, 0x38, 0xf8, 0xd6, 0xc1, + 0xb9, 0x77, 0x0f, 0x7d, 0x02, 0xcf, 0x77, 0x63, 0xdf, 0x38, 0x93, 0x49, 0xa7, 0xef, 0x18, 0xa5, + 0x7b, 0xee, 0x2c, 0x38, 0x77, 0x97, 0x51, 0x13, 0x4e, 0x1f, 0xc8, 0xdc, 0x19, 0xbe, 0x76, 0x7b, + 0xc3, 0x9b, 0x89, 0x63, 0xec, 0x3f, 0x2c, 0x30, 0xc5, 0x9d, 0xc1, 0xd0, 0xc1, 0x46, 0x05, 0x7d, + 0x04, 0x47, 0x45, 0x81, 0xce, 0xa8, 0xe7, 0x0c, 0x0d, 0xdd, 0xea, 0x42, 0x45, 0xb5, 0x19, 0x42, + 0x70, 0x38, 0xbc, 0xe9, 0xf7, 0x1d, 0x5c, 0xb8, 0xef, 0x11, 0x34, 0x32, 0x4c, 0x65, 0x34, 0xb4, + 0x02, 0xa4, 0x52, 0x18, 0x7b, 0xdd, 0x2a, 0xe8, 0x59, 0xfd, 0xad, 0xf7, 0x1a, 0xd4, 0x8b, 0xcd, + 0x87, 0x5e, 0xc1, 0xc1, 0x82, 0x0a, 0xe2, 0x11, 0x41, 0xb2, 0xe1, 0xfd, 0xf8, 0xc1, 0x2e, 0x51, + 0x14, 0xbc, 0x21, 0xa3, 0x33, 0xa8, 0x2d, 0xa8, 0x98, 0x33, 0xcf, 0x8d, 0xc8, 0x82, 0xca, 0x01, + 0xae, 0x62, 0x50, 0xd0, 0x88, 0x2c, 0x28, 0x3a, 0x85, 0x2a, 0x59, 0x8a, 0x39, 0xe3, 0x81, 0x58, + 0xcb, 0xb1, 0xad, 0xe2, 0x2d, 0x80, 0x2e, 0x40, 0x4f, 0x17, 0x01, 0x5b, 0x0a, 0x39, 0xae, 0xb5, + 0xf6, 0xf3, 0x9d, 0x9d, 0x71, 0x95, 0x6d, 0x26, 0x9c, 0x33, 0xad, 0x3e, 0xd4, 0x8b, 0x1d, 0xff, + 0x9f, 0x0f, 0x6f, 0xfd, 0xa1, 0x81, 0x9e, 0x75, 0xf0, 0xff, 0xaa, 0x40, 0x22, 0x88, 0x58, 0x26, + 0xee, 0x8c, 0x79, 0xaa, 0x02, 0x0d, 0x0c, 0x0a, 0xea, 0x31, 0x8f, 0xa2, 0xcf, 0xe0, 0x30, 0x23, + 0xe4, 0x73, 0xa8, 0xca, 0xd0, 0x50, 0x68, 0x36, 0x7a, 0x05, 0x9a, 0x47, 0x05, 0x09, 0xc2, 0x44, + 0x56, 0xa4, 0x9e, 0xd3, 0xae, 0x14, 0x68, 0xbd, 0x04, 0x3d, 0x8f, 0x78, 0x0a, 0x95, 0x90, 0x46, + 0xbe, 0x98, 0xcb, 0x03, 0x37, 0x70, 0x66, 0x21, 0x04, 0x65, 0x79, 0x8d, 0x3d, 0x19, 0x2f, 0xbf, + 0xad, 0x2e, 0x1c, 0xe4, 0x67, 0x47, 0x97, 0xb0, 0x4f, 0xd3, 0xcd, 0x65, 0x6a, 0xcd, 0x52, 0xab, + 0xd6, 0x6e, 0x7e, 0xe0, 0x9e, 0x72, 0xc3, 0x61, 0x45, 0xb7, 0x5e, 0x41, 0xe3, 0x1f, 0x38, 0x32, + 0xa0, 0xf4, 0x2b, 0x5d, 0xcb, 0xec, 0x55, 0x9c, 0x7e, 0xa2, 0x63, 0xd8, 0x5f, 0x91, 0x70, 0x49, + 0xb3, 0xdc, 0xca, 0xb0, 0xfe, 0xd4, 0x40, 0xcf, 0xe6, 0x18, 0x5d, 0x64, 0xdb, 0x59, 0x93, 0xcb, + 0xf5, 0xec, 0xf1, 0x89, 0xb7, 0x0b, 0x3b, 0xd9, 0x04, 0x9d, 0x28, 0x34, 0xeb, 0xb0, 0xdc, 0x4c, + 0x1f, 0x8f, 0x20, 0x76, 0x63, 0xc6, 0x85, 0xac, 0x6a, 0x03, 0x57, 0x82, 0x78, 0xcc, 0xb8, 0xb0, + 0x1c, 0x28, 0xcb, 0x1d, 0x61, 0x40, 0xfd, 0xde, 0x76, 0x68, 0x40, 0x55, 0x22, 0x83, 0xf1, 0xed, + 0xd7, 0x86, 0x56, 0x34, 0x2f, 0x8d, 0xbd, 0x8d, 0xf9, 0x76, 0x34, 0xf8, 0xc1, 
0x28, 0x75, 0x7f, + 0x86, 0xe3, 0x80, 0xed, 0x1e, 0xb2, 0x7b, 0xd8, 0x95, 0xd6, 0x90, 0xf9, 0xe3, 0xb4, 0x51, 0xc7, + 0xda, 0x4f, 0xed, 0xac, 0x71, 0x7d, 0x16, 0x92, 0xc8, 0xb7, 0x19, 0x57, 0x4f, 0xf3, 0x87, 0x5e, + 0xea, 0xbb, 0x8a, 0xec, 0xf2, 0x8b, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0xe7, 0xf6, 0x4b, 0x50, + 0xd4, 0x07, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/grpc/call.go b/vendor/google.golang.org/grpc/call.go new file mode 100644 index 00000000..9e20e4d3 --- /dev/null +++ b/vendor/google.golang.org/grpc/call.go @@ -0,0 +1,74 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" +) + +// Invoke sends the RPC request on the wire and returns after response is +// received. This is typically called by generated code. +// +// All errors returned by Invoke are compatible with the status package. +func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply interface{}, opts ...CallOption) error { + // allow interceptor to see all applicable call options, which means those + // configured as defaults from dial option as well as per-call options + opts = combine(cc.dopts.callOptions, opts) + + if cc.dopts.unaryInt != nil { + return cc.dopts.unaryInt(ctx, method, args, reply, cc, invoke, opts...) + } + return invoke(ctx, method, args, reply, cc, opts...) +} + +func combine(o1 []CallOption, o2 []CallOption) []CallOption { + // we don't use append because o1 could have extra capacity whose + // elements would be overwritten, which could cause inadvertent + // sharing (and race conditions) between concurrent calls + if len(o1) == 0 { + return o2 + } else if len(o2) == 0 { + return o1 + } + ret := make([]CallOption, len(o1)+len(o2)) + copy(ret, o1) + copy(ret[len(o1):], o2) + return ret +} + +// Invoke sends the RPC request on the wire and returns after response is +// received. This is typically called by generated code. +// +// DEPRECATED: Use ClientConn.Invoke instead. +func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) error { + return cc.Invoke(ctx, method, args, reply, opts...) +} + +var unaryStreamDesc = &StreamDesc{ServerStreams: false, ClientStreams: false} + +func invoke(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error { + cs, err := newClientStream(ctx, unaryStreamDesc, cc, method, opts...) + if err != nil { + return err + } + if err := cs.SendMsg(req); err != nil { + return err + } + return cs.RecvMsg(reply) +} diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go new file mode 100644 index 00000000..0740693b --- /dev/null +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -0,0 +1,1534 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "errors" + "fmt" + "math" + "net" + "reflect" + "strings" + "sync" + "sync/atomic" + "time" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" + "google.golang.org/grpc/status" + + _ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin. + _ "google.golang.org/grpc/internal/resolver/dns" // To register dns resolver. + _ "google.golang.org/grpc/internal/resolver/passthrough" // To register passthrough resolver. +) + +const ( + // minimum time to give a connection to complete + minConnectTimeout = 20 * time.Second + // must match grpclbName in grpclb/grpclb.go + grpclbName = "grpclb" +) + +var ( + // ErrClientConnClosing indicates that the operation is illegal because + // the ClientConn is closing. + // + // Deprecated: this error should not be relied upon by users; use the status + // code of Canceled instead. + ErrClientConnClosing = status.Error(codes.Canceled, "grpc: the client connection is closing") + // errConnDrain indicates that the connection starts to be drained and does not accept any new RPCs. + errConnDrain = errors.New("grpc: the connection is drained") + // errConnClosing indicates that the connection is closing. + errConnClosing = errors.New("grpc: the connection is closing") + // errBalancerClosed indicates that the balancer is closed. + errBalancerClosed = errors.New("grpc: balancer is closed") + // invalidDefaultServiceConfigErrPrefix is used to prefix the json parsing error for the default + // service config. + invalidDefaultServiceConfigErrPrefix = "grpc: the provided default service config is invalid" +) + +// The following errors are returned from Dial and DialContext +var ( + // errNoTransportSecurity indicates that there is no transport security + // being set for ClientConn. Users should either set one or explicitly + // call WithInsecure DialOption to disable security. + errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithInsecure() explicitly or set credentials)") + // errTransportCredsAndBundle indicates that creds bundle is used together + // with other individual Transport Credentials. + errTransportCredsAndBundle = errors.New("grpc: credentials.Bundle may not be used with individual TransportCredentials") + // errTransportCredentialsMissing indicates that users want to transmit security + // information (e.g., OAuth2 token) which requires secure connection on an insecure + // connection. 
+ errTransportCredentialsMissing = errors.New("grpc: the credentials require transport level security (use grpc.WithTransportCredentials() to set)") + // errCredentialsConflict indicates that grpc.WithTransportCredentials() + // and grpc.WithInsecure() are both called for a connection. + errCredentialsConflict = errors.New("grpc: transport credentials are set for an insecure connection (grpc.WithTransportCredentials() and grpc.WithInsecure() are both called)") +) + +const ( + defaultClientMaxReceiveMessageSize = 1024 * 1024 * 4 + defaultClientMaxSendMessageSize = math.MaxInt32 + // http2IOBufSize specifies the buffer size for sending frames. + defaultWriteBufSize = 32 * 1024 + defaultReadBufSize = 32 * 1024 +) + +// Dial creates a client connection to the given target. +func Dial(target string, opts ...DialOption) (*ClientConn, error) { + return DialContext(context.Background(), target, opts...) +} + +// DialContext creates a client connection to the given target. By default, it's +// a non-blocking dial (the function won't wait for connections to be +// established, and connecting happens in the background). To make it a blocking +// dial, use WithBlock() dial option. +// +// In the non-blocking case, the ctx does not act against the connection. It +// only controls the setup steps. +// +// In the blocking case, ctx can be used to cancel or expire the pending +// connection. Once this function returns, the cancellation and expiration of +// ctx will be noop. Users should call ClientConn.Close to terminate all the +// pending operations after this function returns. +// +// The target name syntax is defined in +// https://github.com/grpc/grpc/blob/master/doc/naming.md. +// e.g. to use dns resolver, a "dns:///" prefix should be applied to the target. 
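+//
+// A minimal usage sketch (the endpoint and the blocking/insecure options
+// below are illustrative only, not requirements of this API):
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+//	defer cancel()
+//	conn, err := grpc.DialContext(ctx, "dns:///localhost:50051",
+//		grpc.WithBlock(), grpc.WithInsecure())
+//	if err != nil {
+//		// handle dial error
+//	}
+//	defer conn.Close()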
+func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { + cc := &ClientConn{ + target: target, + csMgr: &connectivityStateManager{}, + conns: make(map[*addrConn]struct{}), + dopts: defaultDialOptions(), + blockingpicker: newPickerWrapper(), + czData: new(channelzData), + firstResolveEvent: grpcsync.NewEvent(), + } + cc.retryThrottler.Store((*retryThrottler)(nil)) + cc.ctx, cc.cancel = context.WithCancel(context.Background()) + + for _, opt := range opts { + opt.apply(&cc.dopts) + } + + chainUnaryClientInterceptors(cc) + chainStreamClientInterceptors(cc) + + defer func() { + if err != nil { + cc.Close() + } + }() + + if channelz.IsOn() { + if cc.dopts.channelzParentID != 0 { + cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target) + channelz.AddTraceEvent(cc.channelzID, 0, &channelz.TraceEventDesc{ + Desc: "Channel Created", + Severity: channelz.CtINFO, + Parent: &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID), + Severity: channelz.CtINFO, + }, + }) + } else { + cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, 0, target) + channelz.Info(cc.channelzID, "Channel Created") + } + cc.csMgr.channelzID = cc.channelzID + } + + if !cc.dopts.insecure { + if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil { + return nil, errNoTransportSecurity + } + if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil { + return nil, errTransportCredsAndBundle + } + } else { + if cc.dopts.copts.TransportCredentials != nil || cc.dopts.copts.CredsBundle != nil { + return nil, errCredentialsConflict + } + for _, cd := range cc.dopts.copts.PerRPCCredentials { + if cd.RequireTransportSecurity() { + return nil, errTransportCredentialsMissing + } + } + } + + if cc.dopts.defaultServiceConfigRawJSON != nil { + scpr := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON) + if scpr.Err != nil { + return nil, fmt.Errorf("%s: %v", invalidDefaultServiceConfigErrPrefix, scpr.Err) + } + cc.dopts.defaultServiceConfig, _ = scpr.Config.(*ServiceConfig) + } + cc.mkp = cc.dopts.copts.KeepaliveParams + + if cc.dopts.copts.Dialer == nil { + cc.dopts.copts.Dialer = func(ctx context.Context, addr string) (net.Conn, error) { + network, addr := parseDialTarget(addr) + return (&net.Dialer{}).DialContext(ctx, network, addr) + } + if cc.dopts.withProxy { + cc.dopts.copts.Dialer = newProxyDialer(cc.dopts.copts.Dialer) + } + } + + if cc.dopts.copts.UserAgent != "" { + cc.dopts.copts.UserAgent += " " + grpcUA + } else { + cc.dopts.copts.UserAgent = grpcUA + } + + if cc.dopts.timeout > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, cc.dopts.timeout) + defer cancel() + } + defer func() { + select { + case <-ctx.Done(): + conn, err = nil, ctx.Err() + default: + } + }() + + scSet := false + if cc.dopts.scChan != nil { + // Try to get an initial service config. + select { + case sc, ok := <-cc.dopts.scChan: + if ok { + cc.sc = &sc + scSet = true + } + default: + } + } + if cc.dopts.bs == nil { + cc.dopts.bs = backoff.DefaultExponential + } + + // Determine the resolver to use. + cc.parsedTarget = grpcutil.ParseTarget(cc.target) + channelz.Infof(cc.channelzID, "parsed scheme: %q", cc.parsedTarget.Scheme) + resolverBuilder := cc.getResolver(cc.parsedTarget.Scheme) + if resolverBuilder == nil { + // If resolver builder is still nil, the parsed target's scheme is + // not registered. 
Fallback to default resolver and set Endpoint to + // the original target. + channelz.Infof(cc.channelzID, "scheme %q not registered, fallback to default scheme", cc.parsedTarget.Scheme) + cc.parsedTarget = resolver.Target{ + Scheme: resolver.GetDefaultScheme(), + Endpoint: target, + } + resolverBuilder = cc.getResolver(cc.parsedTarget.Scheme) + if resolverBuilder == nil { + return nil, fmt.Errorf("could not get resolver for default scheme: %q", cc.parsedTarget.Scheme) + } + } + + creds := cc.dopts.copts.TransportCredentials + if creds != nil && creds.Info().ServerName != "" { + cc.authority = creds.Info().ServerName + } else if cc.dopts.insecure && cc.dopts.authority != "" { + cc.authority = cc.dopts.authority + } else { + // Use endpoint from "scheme://authority/endpoint" as the default + // authority for ClientConn. + cc.authority = cc.parsedTarget.Endpoint + } + + if cc.dopts.scChan != nil && !scSet { + // Blocking wait for the initial service config. + select { + case sc, ok := <-cc.dopts.scChan: + if ok { + cc.sc = &sc + } + case <-ctx.Done(): + return nil, ctx.Err() + } + } + if cc.dopts.scChan != nil { + go cc.scWatcher() + } + + var credsClone credentials.TransportCredentials + if creds := cc.dopts.copts.TransportCredentials; creds != nil { + credsClone = creds.Clone() + } + cc.balancerBuildOpts = balancer.BuildOptions{ + DialCreds: credsClone, + CredsBundle: cc.dopts.copts.CredsBundle, + Dialer: cc.dopts.copts.Dialer, + ChannelzParentID: cc.channelzID, + Target: cc.parsedTarget, + } + + // Build the resolver. + rWrapper, err := newCCResolverWrapper(cc, resolverBuilder) + if err != nil { + return nil, fmt.Errorf("failed to build resolver: %v", err) + } + cc.mu.Lock() + cc.resolverWrapper = rWrapper + cc.mu.Unlock() + + // A blocking dial blocks until the clientConn is ready. + if cc.dopts.block { + for { + s := cc.GetState() + if s == connectivity.Ready { + break + } else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure { + if err = cc.blockingpicker.connectionError(); err != nil { + terr, ok := err.(interface { + Temporary() bool + }) + if ok && !terr.Temporary() { + return nil, err + } + } + } + if !cc.WaitForStateChange(ctx, s) { + // ctx got timeout or canceled. + return nil, ctx.Err() + } + } + } + + return cc, nil +} + +// chainUnaryClientInterceptors chains all unary client interceptors into one. +func chainUnaryClientInterceptors(cc *ClientConn) { + interceptors := cc.dopts.chainUnaryInts + // Prepend dopts.unaryInt to the chaining interceptors if it exists, since unaryInt will + // be executed before any other chained interceptors. + if cc.dopts.unaryInt != nil { + interceptors = append([]UnaryClientInterceptor{cc.dopts.unaryInt}, interceptors...) + } + var chainedInt UnaryClientInterceptor + if len(interceptors) == 0 { + chainedInt = nil + } else if len(interceptors) == 1 { + chainedInt = interceptors[0] + } else { + chainedInt = func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error { + return interceptors[0](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, 0, invoker), opts...) + } + } + cc.dopts.unaryInt = chainedInt +} + +// getChainUnaryInvoker recursively generate the chained unary invoker. 
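+// For example, with interceptors [i0, i1, i2], i0 runs first and receives an
+// invoker that calls i1, i1 receives one that calls i2, and i2 receives
+// finalInvoker, which issues the actual RPC.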
+func getChainUnaryInvoker(interceptors []UnaryClientInterceptor, curr int, finalInvoker UnaryInvoker) UnaryInvoker { + if curr == len(interceptors)-1 { + return finalInvoker + } + return func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error { + return interceptors[curr+1](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, curr+1, finalInvoker), opts...) + } +} + +// chainStreamClientInterceptors chains all stream client interceptors into one. +func chainStreamClientInterceptors(cc *ClientConn) { + interceptors := cc.dopts.chainStreamInts + // Prepend dopts.streamInt to the chaining interceptors if it exists, since streamInt will + // be executed before any other chained interceptors. + if cc.dopts.streamInt != nil { + interceptors = append([]StreamClientInterceptor{cc.dopts.streamInt}, interceptors...) + } + var chainedInt StreamClientInterceptor + if len(interceptors) == 0 { + chainedInt = nil + } else if len(interceptors) == 1 { + chainedInt = interceptors[0] + } else { + chainedInt = func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error) { + return interceptors[0](ctx, desc, cc, method, getChainStreamer(interceptors, 0, streamer), opts...) + } + } + cc.dopts.streamInt = chainedInt +} + +// getChainStreamer recursively generate the chained client stream constructor. +func getChainStreamer(interceptors []StreamClientInterceptor, curr int, finalStreamer Streamer) Streamer { + if curr == len(interceptors)-1 { + return finalStreamer + } + return func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) { + return interceptors[curr+1](ctx, desc, cc, method, getChainStreamer(interceptors, curr+1, finalStreamer), opts...) + } +} + +// connectivityStateManager keeps the connectivity.State of ClientConn. +// This struct will eventually be exported so the balancers can access it. +type connectivityStateManager struct { + mu sync.Mutex + state connectivity.State + notifyChan chan struct{} + channelzID int64 +} + +// updateState updates the connectivity.State of ClientConn. +// If there's a change it notifies goroutines waiting on state change to +// happen. +func (csm *connectivityStateManager) updateState(state connectivity.State) { + csm.mu.Lock() + defer csm.mu.Unlock() + if csm.state == connectivity.Shutdown { + return + } + if csm.state == state { + return + } + csm.state = state + channelz.Infof(csm.channelzID, "Channel Connectivity change to %v", state) + if csm.notifyChan != nil { + // There are other goroutines waiting on this channel. + close(csm.notifyChan) + csm.notifyChan = nil + } +} + +func (csm *connectivityStateManager) getState() connectivity.State { + csm.mu.Lock() + defer csm.mu.Unlock() + return csm.state +} + +func (csm *connectivityStateManager) getNotifyChan() <-chan struct{} { + csm.mu.Lock() + defer csm.mu.Unlock() + if csm.notifyChan == nil { + csm.notifyChan = make(chan struct{}) + } + return csm.notifyChan +} + +// ClientConnInterface defines the functions clients need to perform unary and +// streaming RPCs. It is implemented by *ClientConn, and is only intended to +// be referenced by generated code. +type ClientConnInterface interface { + // Invoke performs a unary RPC and returns after the response is received + // into reply. 
+ Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...CallOption) error + // NewStream begins a streaming RPC. + NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) +} + +// Assert *ClientConn implements ClientConnInterface. +var _ ClientConnInterface = (*ClientConn)(nil) + +// ClientConn represents a virtual connection to a conceptual endpoint, to +// perform RPCs. +// +// A ClientConn is free to have zero or more actual connections to the endpoint +// based on configuration, load, etc. It is also free to determine which actual +// endpoints to use and may change it every RPC, permitting client-side load +// balancing. +// +// A ClientConn encapsulates a range of functionality including name +// resolution, TCP connection establishment (with retries and backoff) and TLS +// handshakes. It also handles errors on established connections by +// re-resolving the name and reconnecting. +type ClientConn struct { + ctx context.Context + cancel context.CancelFunc + + target string + parsedTarget resolver.Target + authority string + dopts dialOptions + csMgr *connectivityStateManager + + balancerBuildOpts balancer.BuildOptions + blockingpicker *pickerWrapper + + mu sync.RWMutex + resolverWrapper *ccResolverWrapper + sc *ServiceConfig + conns map[*addrConn]struct{} + // Keepalive parameter can be updated if a GoAway is received. + mkp keepalive.ClientParameters + curBalancerName string + balancerWrapper *ccBalancerWrapper + retryThrottler atomic.Value + + firstResolveEvent *grpcsync.Event + + channelzID int64 // channelz unique identification number + czData *channelzData +} + +// WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or +// ctx expires. A true value is returned in former case and false in latter. +// This is an EXPERIMENTAL API. +func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connectivity.State) bool { + ch := cc.csMgr.getNotifyChan() + if cc.csMgr.getState() != sourceState { + return true + } + select { + case <-ctx.Done(): + return false + case <-ch: + return true + } +} + +// GetState returns the connectivity.State of ClientConn. +// This is an EXPERIMENTAL API. +func (cc *ClientConn) GetState() connectivity.State { + return cc.csMgr.getState() +} + +func (cc *ClientConn) scWatcher() { + for { + select { + case sc, ok := <-cc.dopts.scChan: + if !ok { + return + } + cc.mu.Lock() + // TODO: load balance policy runtime change is ignored. + // We may revisit this decision in the future. + cc.sc = &sc + cc.mu.Unlock() + case <-cc.ctx.Done(): + return + } + } +} + +// waitForResolvedAddrs blocks until the resolver has provided addresses or the +// context expires. Returns nil unless the context expires first; otherwise +// returns a status error based on the context. +func (cc *ClientConn) waitForResolvedAddrs(ctx context.Context) error { + // This is on the RPC path, so we use a fast path to avoid the + // more-expensive "select" below after the resolver has returned once. 
+ if cc.firstResolveEvent.HasFired() {
+ return nil
+ }
+ select {
+ case <-cc.firstResolveEvent.Done():
+ return nil
+ case <-ctx.Done():
+ return status.FromContextError(ctx.Err()).Err()
+ case <-cc.ctx.Done():
+ return ErrClientConnClosing
+ }
+}
+
+var emptyServiceConfig *ServiceConfig
+
+func init() {
+ cfg := parseServiceConfig("{}")
+ if cfg.Err != nil {
+ panic(fmt.Sprintf("impossible error parsing empty service config: %v", cfg.Err))
+ }
+ emptyServiceConfig = cfg.Config.(*ServiceConfig)
+}
+
+func (cc *ClientConn) maybeApplyDefaultServiceConfig(addrs []resolver.Address) {
+ if cc.sc != nil {
+ cc.applyServiceConfigAndBalancer(cc.sc, addrs)
+ return
+ }
+ if cc.dopts.defaultServiceConfig != nil {
+ cc.applyServiceConfigAndBalancer(cc.dopts.defaultServiceConfig, addrs)
+ } else {
+ cc.applyServiceConfigAndBalancer(emptyServiceConfig, addrs)
+ }
+}
+
+func (cc *ClientConn) updateResolverState(s resolver.State, err error) error {
+ defer cc.firstResolveEvent.Fire()
+ cc.mu.Lock()
+ // Check if the ClientConn is already closed. Some fields (e.g.
+ // balancerWrapper) are set to nil when closing the ClientConn, and could
+ // cause nil pointer panic if we don't have this check.
+ if cc.conns == nil {
+ cc.mu.Unlock()
+ return nil
+ }
+
+ if err != nil {
+ // May need to apply the initial service config in case the resolver
+ // doesn't support service configs, or doesn't provide a service config
+ // with the new addresses.
+ cc.maybeApplyDefaultServiceConfig(nil)
+
+ if cc.balancerWrapper != nil {
+ cc.balancerWrapper.resolverError(err)
+ }
+
+ // No addresses are valid with err set; return early.
+ cc.mu.Unlock()
+ return balancer.ErrBadResolverState
+ }
+
+ var ret error
+ if cc.dopts.disableServiceConfig || s.ServiceConfig == nil {
+ cc.maybeApplyDefaultServiceConfig(s.Addresses)
+ // TODO: do we need to apply a failing LB policy if there is no
+ // default, per the error handling design?
+ } else {
+ if sc, ok := s.ServiceConfig.Config.(*ServiceConfig); s.ServiceConfig.Err == nil && ok {
+ cc.applyServiceConfigAndBalancer(sc, s.Addresses)
+ } else {
+ ret = balancer.ErrBadResolverState
+ if cc.balancerWrapper == nil {
+ var err error
+ if s.ServiceConfig.Err != nil {
+ err = status.Errorf(codes.Unavailable, "error parsing service config: %v", s.ServiceConfig.Err)
+ } else {
+ err = status.Errorf(codes.Unavailable, "illegal service config type: %T", s.ServiceConfig.Config)
+ }
+ cc.blockingpicker.updatePicker(base.NewErrPicker(err))
+ cc.csMgr.updateState(connectivity.TransientFailure)
+ cc.mu.Unlock()
+ return ret
+ }
+ }
+ }
+
+ var balCfg serviceconfig.LoadBalancingConfig
+ if cc.dopts.balancerBuilder == nil && cc.sc != nil && cc.sc.lbConfig != nil {
+ balCfg = cc.sc.lbConfig.cfg
+ }
+
+ cbn := cc.curBalancerName
+ bw := cc.balancerWrapper
+ cc.mu.Unlock()
+ if cbn != grpclbName {
+ // Filter any grpclb addresses since we don't have the grpclb balancer.
+ for i := 0; i < len(s.Addresses); {
+ if s.Addresses[i].Type == resolver.GRPCLB {
+ copy(s.Addresses[i:], s.Addresses[i+1:])
+ s.Addresses = s.Addresses[:len(s.Addresses)-1]
+ continue
+ }
+ i++
+ }
+ }
+ uccsErr := bw.updateClientConnState(&balancer.ClientConnState{ResolverState: s, BalancerConfig: balCfg})
+ if ret == nil {
+ ret = uccsErr // prefer ErrBadResolverState since any other error is
+ // currently meaningless to the caller.
+ }
+ return ret
+}
+
+// switchBalancer starts the switching from current balancer to the balancer
+// with the given name.
+// +// It will NOT send the current address list to the new balancer. If needed, +// caller of this function should send address list to the new balancer after +// this function returns. +// +// Caller must hold cc.mu. +func (cc *ClientConn) switchBalancer(name string) { + if strings.EqualFold(cc.curBalancerName, name) { + return + } + + channelz.Infof(cc.channelzID, "ClientConn switching balancer to %q", name) + if cc.dopts.balancerBuilder != nil { + channelz.Info(cc.channelzID, "ignoring balancer switching: Balancer DialOption used instead") + return + } + if cc.balancerWrapper != nil { + cc.balancerWrapper.close() + } + + builder := balancer.Get(name) + if builder == nil { + channelz.Warningf(cc.channelzID, "Channel switches to new LB policy %q due to fallback from invalid balancer name", PickFirstBalancerName) + channelz.Infof(cc.channelzID, "failed to get balancer builder for: %v, using pick_first instead", name) + builder = newPickfirstBuilder() + } else { + channelz.Infof(cc.channelzID, "Channel switches to new LB policy %q", name) + } + + cc.curBalancerName = builder.Name() + cc.balancerWrapper = newCCBalancerWrapper(cc, builder, cc.balancerBuildOpts) +} + +func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) { + cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return + } + // TODO(bar switching) send updates to all balancer wrappers when balancer + // gracefully switching is supported. + cc.balancerWrapper.handleSubConnStateChange(sc, s, err) + cc.mu.Unlock() +} + +// newAddrConn creates an addrConn for addrs and adds it to cc.conns. +// +// Caller needs to make sure len(addrs) > 0. +func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (*addrConn, error) { + ac := &addrConn{ + state: connectivity.Idle, + cc: cc, + addrs: addrs, + scopts: opts, + dopts: cc.dopts, + czData: new(channelzData), + resetBackoff: make(chan struct{}), + } + ac.ctx, ac.cancel = context.WithCancel(cc.ctx) + // Track ac in cc. This needs to be done before any getTransport(...) is called. + cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return nil, ErrClientConnClosing + } + if channelz.IsOn() { + ac.channelzID = channelz.RegisterSubChannel(ac, cc.channelzID, "") + channelz.AddTraceEvent(ac.channelzID, 0, &channelz.TraceEventDesc{ + Desc: "Subchannel Created", + Severity: channelz.CtINFO, + Parent: &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID), + Severity: channelz.CtINFO, + }, + }) + } + cc.conns[ac] = struct{}{} + cc.mu.Unlock() + return ac, nil +} + +// removeAddrConn removes the addrConn in the subConn from clientConn. +// It also tears down the ac with the given error. +func (cc *ClientConn) removeAddrConn(ac *addrConn, err error) { + cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return + } + delete(cc.conns, ac) + cc.mu.Unlock() + ac.tearDown(err) +} + +func (cc *ClientConn) channelzMetric() *channelz.ChannelInternalMetric { + return &channelz.ChannelInternalMetric{ + State: cc.GetState(), + Target: cc.target, + CallsStarted: atomic.LoadInt64(&cc.czData.callsStarted), + CallsSucceeded: atomic.LoadInt64(&cc.czData.callsSucceeded), + CallsFailed: atomic.LoadInt64(&cc.czData.callsFailed), + LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&cc.czData.lastCallStartedTime)), + } +} + +// Target returns the target string of the ClientConn. +// This is an EXPERIMENTAL API. 
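+//
+// For example (illustrative; the exact string is whatever was passed to Dial):
+//
+//	cc, err := grpc.Dial("dns:///example.com:443", grpc.WithInsecure())
+//	// cc.Target() == "dns:///example.com:443"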
+func (cc *ClientConn) Target() string { + return cc.target +} + +func (cc *ClientConn) incrCallsStarted() { + atomic.AddInt64(&cc.czData.callsStarted, 1) + atomic.StoreInt64(&cc.czData.lastCallStartedTime, time.Now().UnixNano()) +} + +func (cc *ClientConn) incrCallsSucceeded() { + atomic.AddInt64(&cc.czData.callsSucceeded, 1) +} + +func (cc *ClientConn) incrCallsFailed() { + atomic.AddInt64(&cc.czData.callsFailed, 1) +} + +// connect starts creating a transport. +// It does nothing if the ac is not IDLE. +// TODO(bar) Move this to the addrConn section. +func (ac *addrConn) connect() error { + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + return errConnClosing + } + if ac.state != connectivity.Idle { + ac.mu.Unlock() + return nil + } + // Update connectivity state within the lock to prevent subsequent or + // concurrent calls from resetting the transport more than once. + ac.updateConnectivityState(connectivity.Connecting, nil) + ac.mu.Unlock() + + // Start a goroutine connecting to the server asynchronously. + go ac.resetTransport() + return nil +} + +// tryUpdateAddrs tries to update ac.addrs with the new addresses list. +// +// If ac is Connecting, it returns false. The caller should tear down the ac and +// create a new one. Note that the backoff will be reset when this happens. +// +// If ac is TransientFailure, it updates ac.addrs and returns true. The updated +// addresses will be picked up by retry in the next iteration after backoff. +// +// If ac is Shutdown or Idle, it updates ac.addrs and returns true. +// +// If ac is Ready, it checks whether current connected address of ac is in the +// new addrs list. +// - If true, it updates ac.addrs and returns true. The ac will keep using +// the existing connection. +// - If false, it does nothing and returns false. +func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { + ac.mu.Lock() + defer ac.mu.Unlock() + channelz.Infof(ac.channelzID, "addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs) + if ac.state == connectivity.Shutdown || + ac.state == connectivity.TransientFailure || + ac.state == connectivity.Idle { + ac.addrs = addrs + return true + } + + if ac.state == connectivity.Connecting { + return false + } + + // ac.state is Ready, try to find the connected address. + var curAddrFound bool + for _, a := range addrs { + if reflect.DeepEqual(ac.curAddr, a) { + curAddrFound = true + break + } + } + channelz.Infof(ac.channelzID, "addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound) + if curAddrFound { + ac.addrs = addrs + } + + return curAddrFound +} + +// GetMethodConfig gets the method config of the input method. +// If there's an exact match for input method (i.e. /service/method), we return +// the corresponding MethodConfig. +// If there isn't an exact match for the input method, we look for the default config +// under the service (i.e /service/). If there is a default MethodConfig for +// the service, we return it. +// Otherwise, we return an empty MethodConfig. +func (cc *ClientConn) GetMethodConfig(method string) MethodConfig { + // TODO: Avoid the locking here. 
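+ // Illustrative lookups (hypothetical service name):
+ //
+ //	GetMethodConfig("/helloworld.Greeter/SayHello") // exact method entry, if configured
+ //	GetMethodConfig("/helloworld.Greeter/Other")    // falls back to the "/helloworld.Greeter/" default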
+ cc.mu.RLock() + defer cc.mu.RUnlock() + if cc.sc == nil { + return MethodConfig{} + } + m, ok := cc.sc.Methods[method] + if !ok { + i := strings.LastIndex(method, "/") + m = cc.sc.Methods[method[:i+1]] + } + return m +} + +func (cc *ClientConn) healthCheckConfig() *healthCheckConfig { + cc.mu.RLock() + defer cc.mu.RUnlock() + if cc.sc == nil { + return nil + } + return cc.sc.healthCheckConfig +} + +func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, func(balancer.DoneInfo), error) { + t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{ + Ctx: ctx, + FullMethodName: method, + }) + if err != nil { + return nil, nil, toRPCErr(err) + } + return t, done, nil +} + +func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, addrs []resolver.Address) { + if sc == nil { + // should never reach here. + return + } + cc.sc = sc + + if cc.sc.retryThrottling != nil { + newThrottler := &retryThrottler{ + tokens: cc.sc.retryThrottling.MaxTokens, + max: cc.sc.retryThrottling.MaxTokens, + thresh: cc.sc.retryThrottling.MaxTokens / 2, + ratio: cc.sc.retryThrottling.TokenRatio, + } + cc.retryThrottler.Store(newThrottler) + } else { + cc.retryThrottler.Store((*retryThrottler)(nil)) + } + + if cc.dopts.balancerBuilder == nil { + // Only look at balancer types and switch balancer if balancer dial + // option is not set. + var newBalancerName string + if cc.sc != nil && cc.sc.lbConfig != nil { + newBalancerName = cc.sc.lbConfig.name + } else { + var isGRPCLB bool + for _, a := range addrs { + if a.Type == resolver.GRPCLB { + isGRPCLB = true + break + } + } + if isGRPCLB { + newBalancerName = grpclbName + } else if cc.sc != nil && cc.sc.LB != nil { + newBalancerName = *cc.sc.LB + } else { + newBalancerName = PickFirstBalancerName + } + } + cc.switchBalancer(newBalancerName) + } else if cc.balancerWrapper == nil { + // Balancer dial option was set, and this is the first time handling + // resolved addresses. Build a balancer with dopts.balancerBuilder. + cc.curBalancerName = cc.dopts.balancerBuilder.Name() + cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, cc.balancerBuildOpts) + } +} + +func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) { + cc.mu.RLock() + r := cc.resolverWrapper + cc.mu.RUnlock() + if r == nil { + return + } + go r.resolveNow(o) +} + +// ResetConnectBackoff wakes up all subchannels in transient failure and causes +// them to attempt another connection immediately. It also resets the backoff +// times used for subsequent attempts regardless of the current state. +// +// In general, this function should not be used. Typical service or network +// outages result in a reasonable client reconnection strategy by default. +// However, if a previously unavailable network becomes available, this may be +// used to trigger an immediate reconnect. +// +// This API is EXPERIMENTAL. +func (cc *ClientConn) ResetConnectBackoff() { + cc.mu.Lock() + conns := cc.conns + cc.mu.Unlock() + for ac := range conns { + ac.resetConnectBackoff() + } +} + +// Close tears down the ClientConn and all underlying connections. 
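+//
+// A common (illustrative) pattern is to defer it right after a successful Dial:
+//
+//	cc, err := grpc.Dial(target, opts...)
+//	if err != nil {
+//		return err
+//	}
+//	defer cc.Close()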
+func (cc *ClientConn) Close() error { + defer cc.cancel() + + cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return ErrClientConnClosing + } + conns := cc.conns + cc.conns = nil + cc.csMgr.updateState(connectivity.Shutdown) + + rWrapper := cc.resolverWrapper + cc.resolverWrapper = nil + bWrapper := cc.balancerWrapper + cc.balancerWrapper = nil + cc.mu.Unlock() + + cc.blockingpicker.close() + + if rWrapper != nil { + rWrapper.close() + } + if bWrapper != nil { + bWrapper.close() + } + + for ac := range conns { + ac.tearDown(ErrClientConnClosing) + } + if channelz.IsOn() { + ted := &channelz.TraceEventDesc{ + Desc: "Channel Deleted", + Severity: channelz.CtINFO, + } + if cc.dopts.channelzParentID != 0 { + ted.Parent = &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Nested channel(id:%d) deleted", cc.channelzID), + Severity: channelz.CtINFO, + } + } + channelz.AddTraceEvent(cc.channelzID, 0, ted) + // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to + // the entity being deleted, and thus prevent it from being deleted right away. + channelz.RemoveEntry(cc.channelzID) + } + return nil +} + +// addrConn is a network connection to a given address. +type addrConn struct { + ctx context.Context + cancel context.CancelFunc + + cc *ClientConn + dopts dialOptions + acbw balancer.SubConn + scopts balancer.NewSubConnOptions + + // transport is set when there's a viable transport (note: ac state may not be READY as LB channel + // health checking may require server to report healthy to set ac to READY), and is reset + // to nil when the current transport should no longer be used to create a stream (e.g. after GoAway + // is received, transport is closed, ac has been torn down). + transport transport.ClientTransport // The current transport. + + mu sync.Mutex + curAddr resolver.Address // The current address. + addrs []resolver.Address // All addresses that the resolver resolved to. + + // Use updateConnectivityState for updating addrConn's connectivity state. + state connectivity.State + + backoffIdx int // Needs to be stateful for resetConnectBackoff. + resetBackoff chan struct{} + + channelzID int64 // channelz unique identification number. + czData *channelzData +} + +// Note: this requires a lock on ac.mu. +func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) { + if ac.state == s { + return + } + ac.state = s + channelz.Infof(ac.channelzID, "Subchannel Connectivity change to %v", s) + ac.cc.handleSubConnStateChange(ac.acbw, s, lastErr) +} + +// adjustParams updates parameters used to create transports upon +// receiving a GoAway. +func (ac *addrConn) adjustParams(r transport.GoAwayReason) { + switch r { + case transport.GoAwayTooManyPings: + v := 2 * ac.dopts.copts.KeepaliveParams.Time + ac.cc.mu.Lock() + if v > ac.cc.mkp.Time { + ac.cc.mkp.Time = v + } + ac.cc.mu.Unlock() + } +} + +func (ac *addrConn) resetTransport() { + for i := 0; ; i++ { + if i > 0 { + ac.cc.resolveNow(resolver.ResolveNowOptions{}) + } + + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + return + } + + addrs := ac.addrs + backoffFor := ac.dopts.bs.Backoff(ac.backoffIdx) + // This will be the duration that dial gets to finish. + dialDuration := minConnectTimeout + if ac.dopts.minConnectTimeout != nil { + dialDuration = ac.dopts.minConnectTimeout() + } + + if dialDuration < backoffFor { + // Give dial more time as we keep failing to connect. 
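+ // For example (illustrative numbers): with the default minConnectTimeout
+ // of 20s and a computed backoff of 45s, the dial deadline is stretched to
+ // 45s so the whole backoff interval can be spent attempting to connect.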
+ dialDuration = backoffFor
+ }
+ // We can potentially spend all the time trying the first address, and
+ // if the server accepts the connection and then hangs, the following
+ // addresses will never be tried.
+ //
+ // The spec doesn't mention what should be done for multiple addresses.
+ // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md#proposed-backoff-algorithm
+ connectDeadline := time.Now().Add(dialDuration)
+
+ ac.updateConnectivityState(connectivity.Connecting, nil)
+ ac.transport = nil
+ ac.mu.Unlock()
+
+ newTr, addr, reconnect, err := ac.tryAllAddrs(addrs, connectDeadline)
+ if err != nil {
+ // After exhausting all addresses, the addrConn enters
+ // TRANSIENT_FAILURE.
+ ac.mu.Lock()
+ if ac.state == connectivity.Shutdown {
+ ac.mu.Unlock()
+ return
+ }
+ ac.updateConnectivityState(connectivity.TransientFailure, err)
+
+ // Backoff.
+ b := ac.resetBackoff
+ ac.mu.Unlock()
+
+ timer := time.NewTimer(backoffFor)
+ select {
+ case <-timer.C:
+ ac.mu.Lock()
+ ac.backoffIdx++
+ ac.mu.Unlock()
+ case <-b:
+ timer.Stop()
+ case <-ac.ctx.Done():
+ timer.Stop()
+ return
+ }
+ continue
+ }
+
+ ac.mu.Lock()
+ if ac.state == connectivity.Shutdown {
+ ac.mu.Unlock()
+ newTr.Close()
+ return
+ }
+ ac.curAddr = addr
+ ac.transport = newTr
+ ac.backoffIdx = 0
+
+ hctx, hcancel := context.WithCancel(ac.ctx)
+ ac.startHealthCheck(hctx)
+ ac.mu.Unlock()
+
+ // Block until the created transport is down. And when this happens,
+ // we restart from the top of the addr list.
+ <-reconnect.Done()
+ hcancel()
+ // restart connecting - the top of the loop will set state to
+ // CONNECTING. This is against the current connectivity semantics doc,
+ // however it allows for graceful behavior for RPCs not yet dispatched
+ // - unfortunate timing would otherwise lead to the RPC failing even
+ // though the TRANSIENT_FAILURE state (called for by the doc) would be
+ // instantaneous.
+ //
+ // Ideally we should transition to Idle here and block until there is
+ // RPC activity that leads to the balancer requesting a reconnect of
+ // the associated SubConn.
+ }
+}
+
+// tryAllAddrs tries to create a connection to each of the addresses, stopping
+// at the first successful one. It returns the transport, the address and an
+// Event in the successful case. The Event fires when the returned transport
+// disconnects.
+func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.Time) (transport.ClientTransport, resolver.Address, *grpcsync.Event, error) {
+ var firstConnErr error
+ for _, addr := range addrs {
+ ac.mu.Lock()
+ if ac.state == connectivity.Shutdown {
+ ac.mu.Unlock()
+ return nil, resolver.Address{}, nil, errConnClosing
+ }
+
+ ac.cc.mu.RLock()
+ ac.dopts.copts.KeepaliveParams = ac.cc.mkp
+ ac.cc.mu.RUnlock()
+
+ copts := ac.dopts.copts
+ if ac.scopts.CredsBundle != nil {
+ copts.CredsBundle = ac.scopts.CredsBundle
+ }
+ ac.mu.Unlock()
+
+ channelz.Infof(ac.channelzID, "Subchannel picks a new address %q to connect", addr.Addr)
+
+ newTr, reconnect, err := ac.createTransport(addr, copts, connectDeadline)
+ if err == nil {
+ return newTr, addr, reconnect, nil
+ }
+ if firstConnErr == nil {
+ firstConnErr = err
+ }
+ ac.cc.blockingpicker.updateConnectionError(err)
+ }
+
+ // Couldn't connect to any address.
+ return nil, resolver.Address{}, nil, firstConnErr
+}
+
+// createTransport creates a connection to addr. It returns the transport and
+// an Event in the successful case. The Event fires when the returned transport
+// disconnects.
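+// The handshake is bounded by connectDeadline: both the raw connection and
+// the server's HTTP/2 preface must arrive before the deadline, otherwise the
+// transport is closed and an error is returned (see the select at the end).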
+func (ac *addrConn) createTransport(addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) (transport.ClientTransport, *grpcsync.Event, error) {
+ prefaceReceived := make(chan struct{})
+ onCloseCalled := make(chan struct{})
+ reconnect := grpcsync.NewEvent()
+
+ authority := ac.cc.authority
+ // addr.ServerName takes precedence over ClientConn authority, if present.
+ if addr.ServerName != "" {
+ authority = addr.ServerName
+ }
+
+ target := transport.TargetInfo{
+ Addr: addr.Addr,
+ Metadata: addr.Metadata,
+ Authority: authority,
+ }
+
+ once := sync.Once{}
+ onGoAway := func(r transport.GoAwayReason) {
+ ac.mu.Lock()
+ ac.adjustParams(r)
+ once.Do(func() {
+ if ac.state == connectivity.Ready {
+ // Prevent this SubConn from being used for new RPCs by setting its
+ // state to Connecting.
+ //
+ // TODO: this should be Idle when grpc-go properly supports it.
+ ac.updateConnectivityState(connectivity.Connecting, nil)
+ }
+ })
+ ac.mu.Unlock()
+ reconnect.Fire()
+ }
+
+ onClose := func() {
+ ac.mu.Lock()
+ once.Do(func() {
+ if ac.state == connectivity.Ready {
+ // Prevent this SubConn from being used for new RPCs by setting its
+ // state to Connecting.
+ //
+ // TODO: this should be Idle when grpc-go properly supports it.
+ ac.updateConnectivityState(connectivity.Connecting, nil)
+ }
+ })
+ ac.mu.Unlock()
+ close(onCloseCalled)
+ reconnect.Fire()
+ }
+
+ onPrefaceReceipt := func() {
+ close(prefaceReceived)
+ }
+
+ connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline)
+ defer cancel()
+ if channelz.IsOn() {
+ copts.ChannelzParentID = ac.channelzID
+ }
+
+ newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, target, copts, onPrefaceReceipt, onGoAway, onClose)
+ if err != nil {
+ // newTr is either nil, or closed.
+ channelz.Warningf(ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v. Err: %v. Reconnecting...", addr, err)
+ return nil, nil, err
+ }
+
+ select {
+ case <-time.After(time.Until(connectDeadline)):
+ // We didn't get the preface in time.
+ newTr.Close()
+ channelz.Warningf(ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v: didn't receive server preface in time. Reconnecting...", addr)
+ return nil, nil, errors.New("timed out waiting for server handshake")
+ case <-prefaceReceived:
+ // We got the preface - huzzah! Things are good.
+ case <-onCloseCalled:
+ // The transport has already closed - noop.
+ return nil, nil, errors.New("connection closed")
+ // TODO(deklerk) this should bail on ac.ctx.Done(). Add a test and fix.
+ }
+ return newTr, reconnect, nil
+}
+
+// startHealthCheck starts the health checking stream (RPC) to watch the health
+// stats of this connection if health checking is requested and configured.
+//
+// LB channel health checking is enabled when all requirements below are met:
+// 1. it is not disabled by the user with the WithDisableHealthCheck DialOption
+// 2. internal.HealthCheckFunc is set by importing the grpc/healthcheck package
+// 3. a service config with non-empty healthCheckConfig field is provided
+// 4. the load balancer requests it
+//
+// It sets addrConn to READY if the health checking stream is not started.
+//
+// Caller must hold ac.mu.
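+//
+// As an illustration, requirement 3 corresponds to a service config such as
+// (the service name is hypothetical):
+//
+//	{
+//	  "healthCheckConfig": { "serviceName": "foo.bar.v1.FooService" }
+//	}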
+func (ac *addrConn) startHealthCheck(ctx context.Context) {
+ var healthcheckManagingState bool
+ defer func() {
+ if !healthcheckManagingState {
+ ac.updateConnectivityState(connectivity.Ready, nil)
+ }
+ }()
+
+ if ac.cc.dopts.disableHealthCheck {
+ return
+ }
+ healthCheckConfig := ac.cc.healthCheckConfig()
+ if healthCheckConfig == nil {
+ return
+ }
+ if !ac.scopts.HealthCheckEnabled {
+ return
+ }
+ healthCheckFunc := ac.cc.dopts.healthCheckFunc
+ if healthCheckFunc == nil {
+ // The health package is not imported, so the health check function is
+ // not set.
+ //
+ // TODO: add a link to the health check doc in the error message.
+ channelz.Error(ac.channelzID, "Health check is requested but health check function is not set.")
+ return
+ }
+
+ healthcheckManagingState = true
+
+ // Set up the health check helper functions.
+ currentTr := ac.transport
+ newStream := func(method string) (interface{}, error) {
+ ac.mu.Lock()
+ if ac.transport != currentTr {
+ ac.mu.Unlock()
+ return nil, status.Error(codes.Canceled, "the provided transport is no longer valid to use")
+ }
+ ac.mu.Unlock()
+ return newNonRetryClientStream(ctx, &StreamDesc{ServerStreams: true}, method, currentTr, ac)
+ }
+ setConnectivityState := func(s connectivity.State, lastErr error) {
+ ac.mu.Lock()
+ defer ac.mu.Unlock()
+ if ac.transport != currentTr {
+ return
+ }
+ ac.updateConnectivityState(s, lastErr)
+ }
+ // Start the health checking stream.
+ go func() {
+ err := ac.cc.dopts.healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName)
+ if err != nil {
+ if status.Code(err) == codes.Unimplemented {
+ channelz.Error(ac.channelzID, "Subchannel health check is unimplemented at server side, thus health check is disabled")
+ } else {
+ channelz.Errorf(ac.channelzID, "HealthCheckFunc exits with unexpected error %v", err)
+ }
+ }
+ }()
+}
+
+func (ac *addrConn) resetConnectBackoff() {
+ ac.mu.Lock()
+ close(ac.resetBackoff)
+ ac.backoffIdx = 0
+ ac.resetBackoff = make(chan struct{})
+ ac.mu.Unlock()
+}
+
+// getReadyTransport returns the transport if ac's state is READY.
+// Otherwise it returns nil, false.
+// If ac's state is IDLE, it will trigger ac to connect.
+func (ac *addrConn) getReadyTransport() (transport.ClientTransport, bool) {
+ ac.mu.Lock()
+ if ac.state == connectivity.Ready && ac.transport != nil {
+ t := ac.transport
+ ac.mu.Unlock()
+ return t, true
+ }
+ var idle bool
+ if ac.state == connectivity.Idle {
+ idle = true
+ }
+ ac.mu.Unlock()
+ // Trigger idle ac to connect.
+ if idle {
+ ac.connect()
+ }
+ return nil, false
+}
+
+// tearDown starts to tear down the addrConn.
+// TODO(zhaoq): Make this synchronous to avoid unbounded memory consumption in
+// some edge cases (e.g., the caller opens and closes many addrConn's in a
+// tight loop).
+// tearDown doesn't remove ac from ac.cc.conns.
+func (ac *addrConn) tearDown(err error) {
+ ac.mu.Lock()
+ if ac.state == connectivity.Shutdown {
+ ac.mu.Unlock()
+ return
+ }
+ curTr := ac.transport
+ ac.transport = nil
+ // We have to set the state to Shutdown before anything else to prevent races
+ // between setting the state and logic that waits on context cancellation / etc.
+ ac.updateConnectivityState(connectivity.Shutdown, nil)
+ ac.cancel()
+ ac.curAddr = resolver.Address{}
+ if err == errConnDrain && curTr != nil {
+ // GracefulClose(...) may be executed multiple times when
+ // i) receiving multiple GoAway frames from the server; or
+ // ii) there are concurrent name resolver/Balancer triggered
+ // address removal and GoAway.
+ // We have to unlock and re-lock here because GracefulClose => Close => onClose, which requires locking ac.mu.
+ ac.mu.Unlock()
+ curTr.GracefulClose()
+ ac.mu.Lock()
+ }
+ if channelz.IsOn() {
+ channelz.AddTraceEvent(ac.channelzID, 0, &channelz.TraceEventDesc{
+ Desc: "Subchannel Deleted",
+ Severity: channelz.CtINFO,
+ Parent: &channelz.TraceEventDesc{
+ Desc: fmt.Sprintf("Subchannel(id:%d) deleted", ac.channelzID),
+ Severity: channelz.CtINFO,
+ },
+ })
+ // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to
+ // the entity being deleted, and thus prevent it from being deleted right away.
+ channelz.RemoveEntry(ac.channelzID)
+ }
+ ac.mu.Unlock()
+}
+
+func (ac *addrConn) getState() connectivity.State {
+ ac.mu.Lock()
+ defer ac.mu.Unlock()
+ return ac.state
+}
+
+func (ac *addrConn) ChannelzMetric() *channelz.ChannelInternalMetric {
+ ac.mu.Lock()
+ addr := ac.curAddr.Addr
+ ac.mu.Unlock()
+ return &channelz.ChannelInternalMetric{
+ State: ac.getState(),
+ Target: addr,
+ CallsStarted: atomic.LoadInt64(&ac.czData.callsStarted),
+ CallsSucceeded: atomic.LoadInt64(&ac.czData.callsSucceeded),
+ CallsFailed: atomic.LoadInt64(&ac.czData.callsFailed),
+ LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&ac.czData.lastCallStartedTime)),
+ }
+}
+
+func (ac *addrConn) incrCallsStarted() {
+ atomic.AddInt64(&ac.czData.callsStarted, 1)
+ atomic.StoreInt64(&ac.czData.lastCallStartedTime, time.Now().UnixNano())
+}
+
+func (ac *addrConn) incrCallsSucceeded() {
+ atomic.AddInt64(&ac.czData.callsSucceeded, 1)
+}
+
+func (ac *addrConn) incrCallsFailed() {
+ atomic.AddInt64(&ac.czData.callsFailed, 1)
+}
+
+type retryThrottler struct {
+ max float64
+ thresh float64
+ ratio float64
+
+ mu sync.Mutex
+ tokens float64 // TODO(dfawley): replace with atomic and remove lock.
+}
+
+// throttle subtracts a retry token from the pool and returns whether a retry
+// should be throttled (disallowed) based upon the retry throttling policy in
+// the service config.
+func (rt *retryThrottler) throttle() bool {
+ if rt == nil {
+ return false
+ }
+ rt.mu.Lock()
+ defer rt.mu.Unlock()
+ rt.tokens--
+ if rt.tokens < 0 {
+ rt.tokens = 0
+ }
+ return rt.tokens <= rt.thresh
+}
+
+func (rt *retryThrottler) successfulRPC() {
+ if rt == nil {
+ return
+ }
+ rt.mu.Lock()
+ defer rt.mu.Unlock()
+ rt.tokens += rt.ratio
+ if rt.tokens > rt.max {
+ rt.tokens = rt.max
+ }
+}
+
+type channelzChannel struct {
+ cc *ClientConn
+}
+
+func (c *channelzChannel) ChannelzMetric() *channelz.ChannelInternalMetric {
+ return c.cc.channelzMetric()
+}
+
+// ErrClientConnTimeout indicates that the ClientConn cannot establish the
+// underlying connections within the specified timeout.
+//
+// Deprecated: This error is never returned by grpc and should not be
+// referenced by users.
+var ErrClientConnTimeout = errors.New("grpc: timed out when dialing")
+
+func (cc *ClientConn) getResolver(scheme string) resolver.Builder {
+ for _, rb := range cc.dopts.resolvers {
+ if scheme == rb.Scheme() {
+ return rb
+ }
+ }
+ return resolver.Get(scheme)
+}
diff --git a/vendor/google.golang.org/grpc/codec.go b/vendor/google.golang.org/grpc/codec.go
new file mode 100644
index 00000000..12977654
--- /dev/null
+++ b/vendor/google.golang.org/grpc/codec.go
@@ -0,0 +1,50 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "google.golang.org/grpc/encoding" + _ "google.golang.org/grpc/encoding/proto" // to register the Codec for "proto" +) + +// baseCodec contains the functionality of both Codec and encoding.Codec, but +// omits the name/string, which vary between the two and are not needed for +// anything besides the registry in the encoding package. +type baseCodec interface { + Marshal(v interface{}) ([]byte, error) + Unmarshal(data []byte, v interface{}) error +} + +var _ baseCodec = Codec(nil) +var _ baseCodec = encoding.Codec(nil) + +// Codec defines the interface gRPC uses to encode and decode messages. +// Note that implementations of this interface must be thread safe; +// a Codec's methods can be called from concurrent goroutines. +// +// Deprecated: use encoding.Codec instead. +type Codec interface { + // Marshal returns the wire format of v. + Marshal(v interface{}) ([]byte, error) + // Unmarshal parses the wire format into v. + Unmarshal(data []byte, v interface{}) error + // String returns the name of the Codec implementation. This is unused by + // gRPC. + String() string +} diff --git a/vendor/google.golang.org/grpc/codegen.sh b/vendor/google.golang.org/grpc/codegen.sh new file mode 100644 index 00000000..4cdc6ba7 --- /dev/null +++ b/vendor/google.golang.org/grpc/codegen.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash + +# This script serves as an example to demonstrate how to generate the gRPC-Go +# interface and the related messages from .proto file. +# +# It assumes the installation of i) Google proto buffer compiler at +# https://github.com/google/protobuf (after v2.6.1) and ii) the Go codegen +# plugin at https://github.com/golang/protobuf (after 2015-02-20). If you have +# not, please install them first. +# +# We recommend running this script at $GOPATH/src. +# +# If this is not what you need, feel free to make your own scripts. Again, this +# script is for demonstration purpose. +# +proto=$1 +protoc --go_out=plugins=grpc:. $proto diff --git a/vendor/google.golang.org/grpc/connectivity/connectivity.go b/vendor/google.golang.org/grpc/connectivity/connectivity.go new file mode 100644 index 00000000..34ec36fb --- /dev/null +++ b/vendor/google.golang.org/grpc/connectivity/connectivity.go @@ -0,0 +1,73 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package connectivity defines connectivity semantics. +// For details, see https://github.com/grpc/grpc/blob/master/doc/connectivity-semantics-and-api.md. +// All APIs in this package are experimental. 
+package connectivity + +import ( + "context" + + "google.golang.org/grpc/grpclog" +) + +// State indicates the state of connectivity. +// It can be the state of a ClientConn or SubConn. +type State int + +func (s State) String() string { + switch s { + case Idle: + return "IDLE" + case Connecting: + return "CONNECTING" + case Ready: + return "READY" + case TransientFailure: + return "TRANSIENT_FAILURE" + case Shutdown: + return "SHUTDOWN" + default: + grpclog.Errorf("unknown connectivity state: %d", s) + return "Invalid-State" + } +} + +const ( + // Idle indicates the ClientConn is idle. + Idle State = iota + // Connecting indicates the ClientConn is connecting. + Connecting + // Ready indicates the ClientConn is ready for work. + Ready + // TransientFailure indicates the ClientConn has seen a failure but expects to recover. + TransientFailure + // Shutdown indicates the ClientConn has started shutting down. + Shutdown +) + +// Reporter reports the connectivity states. +type Reporter interface { + // CurrentState returns the current state of the reporter. + CurrentState() State + // WaitForStateChange blocks until the reporter's state is different from the given state, + // and returns true. + // It returns false if <-ctx.Done() can proceed (ctx got timeout or got canceled). + WaitForStateChange(context.Context, State) bool +} diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go new file mode 100644 index 00000000..e438fda2 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/credentials.go @@ -0,0 +1,255 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package credentials implements various credentials supported by gRPC library, +// which encapsulate all the state needed by a client to authenticate with a +// server and make various assertions, e.g., about the client's identity, role, +// or whether it is authorized to make a particular call. +package credentials // import "google.golang.org/grpc/credentials" + +import ( + "context" + "errors" + "fmt" + "net" + + "github.com/golang/protobuf/proto" + "google.golang.org/grpc/internal" +) + +// PerRPCCredentials defines the common interface for the credentials which need to +// attach security information to every RPC (e.g., oauth2). +type PerRPCCredentials interface { + // GetRequestMetadata gets the current request metadata, refreshing + // tokens if required. This should be called by the transport layer on + // each request, and the data should be populated in headers or other + // context. If a status code is returned, it will be used as the status + // for the RPC. uri is the URI of the entry point for the request. + // When supported by the underlying implementation, ctx can be used for + // timeout and cancellation. Additionally, RequestInfo data will be + // available via ctx to this call. 
+ // TODO(zhaoq): Define the set of the qualified keys instead of leaving + // it as an arbitrary string. + GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) + // RequireTransportSecurity indicates whether the credentials requires + // transport security. + RequireTransportSecurity() bool +} + +// SecurityLevel defines the protection level on an established connection. +// +// This API is experimental. +type SecurityLevel int + +const ( + // NoSecurity indicates a connection is insecure. + // The zero SecurityLevel value is invalid for backward compatibility. + NoSecurity SecurityLevel = iota + 1 + // IntegrityOnly indicates a connection only provides integrity protection. + IntegrityOnly + // PrivacyAndIntegrity indicates a connection provides both privacy and integrity protection. + PrivacyAndIntegrity +) + +// String returns SecurityLevel in a string format. +func (s SecurityLevel) String() string { + switch s { + case NoSecurity: + return "NoSecurity" + case IntegrityOnly: + return "IntegrityOnly" + case PrivacyAndIntegrity: + return "PrivacyAndIntegrity" + } + return fmt.Sprintf("invalid SecurityLevel: %v", int(s)) +} + +// CommonAuthInfo contains authenticated information common to AuthInfo implementations. +// It should be embedded in a struct implementing AuthInfo to provide additional information +// about the credentials. +// +// This API is experimental. +type CommonAuthInfo struct { + SecurityLevel SecurityLevel +} + +// GetCommonAuthInfo returns the pointer to CommonAuthInfo struct. +func (c *CommonAuthInfo) GetCommonAuthInfo() *CommonAuthInfo { + return c +} + +// ProtocolInfo provides information regarding the gRPC wire protocol version, +// security protocol, security protocol version in use, server name, etc. +type ProtocolInfo struct { + // ProtocolVersion is the gRPC wire protocol version. + ProtocolVersion string + // SecurityProtocol is the security protocol in use. + SecurityProtocol string + // SecurityVersion is the security protocol version. It is a static version string from the + // credentials, not a value that reflects per-connection protocol negotiation. To retrieve + // details about the credentials used for a connection, use the Peer's AuthInfo field instead. + // + // Deprecated: please use Peer.AuthInfo. + SecurityVersion string + // ServerName is the user-configured server name. + ServerName string +} + +// AuthInfo defines the common interface for the auth information the users are interested in. +// A struct that implements AuthInfo should embed CommonAuthInfo by including additional +// information about the credentials in it. +type AuthInfo interface { + AuthType() string +} + +// ErrConnDispatched indicates that rawConn has been dispatched out of gRPC +// and the caller should not close rawConn. +var ErrConnDispatched = errors.New("credentials: rawConn is dispatched out of gRPC") + +// TransportCredentials defines the common interface for all the live gRPC wire +// protocols and supported transport security protocols (e.g., TLS, SSL). +type TransportCredentials interface { + // ClientHandshake does the authentication handshake specified by the corresponding + // authentication protocol on rawConn for clients. It returns the authenticated + // connection and the corresponding auth information about the connection. + // The auth information should embed CommonAuthInfo to return additional information about + // the credentials. Implementations must use the provided context to implement timely cancellation. 
+ // gRPC will try to reconnect if the error returned is a temporary error
+ // (io.EOF, context.DeadlineExceeded or err.Temporary() == true).
+ // If the returned error is a wrapper error, implementations should make sure that
+ // the error implements Temporary() to have the correct retry behaviors.
+ //
+ // If the returned net.Conn is closed, it MUST close the net.Conn provided.
+ ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error)
+ // ServerHandshake does the authentication handshake for servers. It returns
+ // the authenticated connection and the corresponding auth information about
+ // the connection. The auth information should embed CommonAuthInfo to return additional information
+ // about the credentials.
+ //
+ // If the returned net.Conn is closed, it MUST close the net.Conn provided.
+ ServerHandshake(net.Conn) (net.Conn, AuthInfo, error)
+ // Info provides the ProtocolInfo of this TransportCredentials.
+ Info() ProtocolInfo
+ // Clone makes a copy of this TransportCredentials.
+ Clone() TransportCredentials
+ // OverrideServerName overrides the server name used to verify the hostname on the returned certificates from the server.
+ // gRPC internals also use it to override the virtual hosting name if it is set.
+ // It must be called before dialing. Currently, this is only used by grpclb.
+ OverrideServerName(string) error
+}
+
+// Bundle is a combination of TransportCredentials and PerRPCCredentials.
+//
+// It also contains a mode switching method, so it can be used as a combination
+// of different credential policies.
+//
+// Bundle cannot be used together with individual TransportCredentials.
+// PerRPCCredentials from Bundle will be appended to other PerRPCCredentials.
+//
+// This API is experimental.
+type Bundle interface {
+ TransportCredentials() TransportCredentials
+ PerRPCCredentials() PerRPCCredentials
+ // NewWithMode should make a copy of Bundle, and switch mode. Modifying the
+ // existing Bundle may cause races.
+ //
+ // NewWithMode returns nil if the requested mode is not supported.
+ NewWithMode(mode string) (Bundle, error)
+}
+
+// RequestInfo contains request data attached to the context passed to GetRequestMetadata calls.
+//
+// This API is experimental.
+type RequestInfo struct {
+ // The method passed to Invoke or NewStream for this RPC. (For proto methods, this has the format "/some.Service/Method")
+ Method string
+ // AuthInfo contains the information from a security handshake (TransportCredentials.ClientHandshake, TransportCredentials.ServerHandshake)
+ AuthInfo AuthInfo
+}
+
+// requestInfoKey is a struct to be used as the key when attaching a RequestInfo to a context object.
+type requestInfoKey struct{}
+
+// RequestInfoFromContext extracts the RequestInfo from the context if it exists.
+//
+// This API is experimental.
+func RequestInfoFromContext(ctx context.Context) (ri RequestInfo, ok bool) {
+ ri, ok = ctx.Value(requestInfoKey{}).(RequestInfo)
+ return
+}
+
+// CheckSecurityLevel checks if a connection's security level is greater than or equal to the specified one.
+// It returns success if 1) the condition is satisfied or 2) AuthInfo struct does not implement GetCommonAuthInfo() method
+// or 3) CommonAuthInfo.SecurityLevel has an invalid zero value. For 2) and 3), it is for the purpose of backward-compatibility.
+//
+// This API is experimental.
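+//
+// Illustrative use inside a PerRPCCredentials implementation (myCreds and the
+// token value are hypothetical):
+//
+//	func (c myCreds) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
+//		if err := credentials.CheckSecurityLevel(ctx, credentials.PrivacyAndIntegrity); err != nil {
+//			return nil, fmt.Errorf("cannot transfer token: %v", err)
+//		}
+//		return map[string]string{"authorization": "Bearer <token>"}, nil
+//	}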
+func CheckSecurityLevel(ctx context.Context, level SecurityLevel) error { + type internalInfo interface { + GetCommonAuthInfo() *CommonAuthInfo + } + ri, _ := RequestInfoFromContext(ctx) + if ri.AuthInfo == nil { + return errors.New("unable to obtain SecurityLevel from context") + } + if ci, ok := ri.AuthInfo.(internalInfo); ok { + // CommonAuthInfo.SecurityLevel has an invalid value. + if ci.GetCommonAuthInfo().SecurityLevel == 0 { + return nil + } + if ci.GetCommonAuthInfo().SecurityLevel < level { + return fmt.Errorf("requires SecurityLevel %v; connection has %v", level, ci.GetCommonAuthInfo().SecurityLevel) + } + } + // The condition is satisfied or AuthInfo struct does not implement GetCommonAuthInfo() method. + return nil +} + +func init() { + internal.NewRequestInfoContext = func(ctx context.Context, ri RequestInfo) context.Context { + return context.WithValue(ctx, requestInfoKey{}, ri) + } +} + +// ChannelzSecurityInfo defines the interface that security protocols should implement +// in order to provide security info to channelz. +// +// This API is experimental. +type ChannelzSecurityInfo interface { + GetSecurityValue() ChannelzSecurityValue +} + +// ChannelzSecurityValue defines the interface that GetSecurityValue() return value +// should satisfy. This interface should only be satisfied by *TLSChannelzSecurityValue +// and *OtherChannelzSecurityValue. +// +// This API is experimental. +type ChannelzSecurityValue interface { + isChannelzSecurityValue() +} + +// OtherChannelzSecurityValue defines the struct that non-TLS protocol should return +// from GetSecurityValue(), which contains protocol specific security info. Note +// the Value field will be sent to users of channelz requesting channel info, and +// thus sensitive info should better be avoided. +// +// This API is experimental. +type OtherChannelzSecurityValue struct { + ChannelzSecurityValue + Name string + Value proto.Message +} diff --git a/vendor/google.golang.org/grpc/credentials/go12.go b/vendor/google.golang.org/grpc/credentials/go12.go new file mode 100644 index 00000000..ccbf35b3 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/go12.go @@ -0,0 +1,30 @@ +// +build go1.12 + +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package credentials + +import "crypto/tls" + +// This init function adds cipher suite constants only defined in Go 1.12. 
+func init() { + cipherSuiteLookup[tls.TLS_AES_128_GCM_SHA256] = "TLS_AES_128_GCM_SHA256" + cipherSuiteLookup[tls.TLS_AES_256_GCM_SHA384] = "TLS_AES_256_GCM_SHA384" + cipherSuiteLookup[tls.TLS_CHACHA20_POLY1305_SHA256] = "TLS_CHACHA20_POLY1305_SHA256" +} diff --git a/vendor/google.golang.org/grpc/credentials/internal/syscallconn.go b/vendor/google.golang.org/grpc/credentials/internal/syscallconn.go new file mode 100644 index 00000000..2f4472be --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/internal/syscallconn.go @@ -0,0 +1,61 @@ +// +build !appengine + +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package internal contains credentials-internal code. +package internal + +import ( + "net" + "syscall" +) + +type sysConn = syscall.Conn + +// syscallConn keeps reference of rawConn to support syscall.Conn for channelz. +// SyscallConn() (the method in interface syscall.Conn) is explicitly +// implemented on this type, +// +// Interface syscall.Conn is implemented by most net.Conn implementations (e.g. +// TCPConn, UnixConn), but is not part of net.Conn interface. So wrapper conns +// that embed net.Conn don't implement syscall.Conn. (Side note: tls.Conn +// doesn't embed net.Conn, so even if syscall.Conn is part of net.Conn, it won't +// help here). +type syscallConn struct { + net.Conn + // sysConn is a type alias of syscall.Conn. It's necessary because the name + // `Conn` collides with `net.Conn`. + sysConn +} + +// WrapSyscallConn tries to wrap rawConn and newConn into a net.Conn that +// implements syscall.Conn. rawConn will be used to support syscall, and newConn +// will be used for read/write. +// +// This function returns newConn if rawConn doesn't implement syscall.Conn. +func WrapSyscallConn(rawConn, newConn net.Conn) net.Conn { + sysConn, ok := rawConn.(syscall.Conn) + if !ok { + return newConn + } + return &syscallConn{ + Conn: newConn, + sysConn: sysConn, + } +} diff --git a/vendor/google.golang.org/grpc/credentials/internal/syscallconn_appengine.go b/vendor/google.golang.org/grpc/credentials/internal/syscallconn_appengine.go new file mode 100644 index 00000000..d4346e9e --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/internal/syscallconn_appengine.go @@ -0,0 +1,30 @@ +// +build appengine + +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package internal + +import ( + "net" +) + +// WrapSyscallConn returns newConn on appengine. 
+func WrapSyscallConn(rawConn, newConn net.Conn) net.Conn { + return newConn +} diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go new file mode 100644 index 00000000..86e956bc --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/tls.go @@ -0,0 +1,235 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package credentials + +import ( + "context" + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" + "net" + + "google.golang.org/grpc/credentials/internal" +) + +// TLSInfo contains the auth information for a TLS authenticated connection. +// It implements the AuthInfo interface. +type TLSInfo struct { + State tls.ConnectionState + CommonAuthInfo +} + +// AuthType returns the type of TLSInfo as a string. +func (t TLSInfo) AuthType() string { + return "tls" +} + +// GetSecurityValue returns security info requested by channelz. +func (t TLSInfo) GetSecurityValue() ChannelzSecurityValue { + v := &TLSChannelzSecurityValue{ + StandardName: cipherSuiteLookup[t.State.CipherSuite], + } + // Currently there's no way to get LocalCertificate info from tls package. + if len(t.State.PeerCertificates) > 0 { + v.RemoteCertificate = t.State.PeerCertificates[0].Raw + } + return v +} + +// tlsCreds is the credentials required for authenticating a connection using TLS. +type tlsCreds struct { + // TLS configuration + config *tls.Config +} + +func (c tlsCreds) Info() ProtocolInfo { + return ProtocolInfo{ + SecurityProtocol: "tls", + SecurityVersion: "1.2", + ServerName: c.config.ServerName, + } +} + +func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) { + // use local cfg to avoid clobbering ServerName if using multiple endpoints + cfg := cloneTLSConfig(c.config) + if cfg.ServerName == "" { + serverName, _, err := net.SplitHostPort(authority) + if err != nil { + // If the authority had no host port or if the authority cannot be parsed, use it as-is. 
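+ // e.g. an authority of "example.com:443" yields ServerName "example.com",
+ // while a bare "example.com" (no port) fails SplitHostPort and is used
+ // unchanged as the ServerName.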
+ serverName = authority + } + cfg.ServerName = serverName + } + conn := tls.Client(rawConn, cfg) + errChannel := make(chan error, 1) + go func() { + errChannel <- conn.Handshake() + close(errChannel) + }() + select { + case err := <-errChannel: + if err != nil { + conn.Close() + return nil, nil, err + } + case <-ctx.Done(): + conn.Close() + return nil, nil, ctx.Err() + } + return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState(), CommonAuthInfo{PrivacyAndIntegrity}}, nil +} + +func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) { + conn := tls.Server(rawConn, c.config) + if err := conn.Handshake(); err != nil { + conn.Close() + return nil, nil, err + } + return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState(), CommonAuthInfo{PrivacyAndIntegrity}}, nil +} + +func (c *tlsCreds) Clone() TransportCredentials { + return NewTLS(c.config) +} + +func (c *tlsCreds) OverrideServerName(serverNameOverride string) error { + c.config.ServerName = serverNameOverride + return nil +} + +const alpnProtoStrH2 = "h2" + +func appendH2ToNextProtos(ps []string) []string { + for _, p := range ps { + if p == alpnProtoStrH2 { + return ps + } + } + ret := make([]string, 0, len(ps)+1) + ret = append(ret, ps...) + return append(ret, alpnProtoStrH2) +} + +// NewTLS uses c to construct a TransportCredentials based on TLS. +func NewTLS(c *tls.Config) TransportCredentials { + tc := &tlsCreds{cloneTLSConfig(c)} + tc.config.NextProtos = appendH2ToNextProtos(tc.config.NextProtos) + return tc +} + +// NewClientTLSFromCert constructs TLS credentials from the provided root +// certificate authority certificate(s) to validate server connections. If +// certificates to establish the identity of the client need to be included in +// the credentials (eg: for mTLS), use NewTLS instead, where a complete +// tls.Config can be specified. +// serverNameOverride is for testing only. If set to a non empty string, +// it will override the virtual host name of authority (e.g. :authority header +// field) in requests. +func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials { + return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}) +} + +// NewClientTLSFromFile constructs TLS credentials from the provided root +// certificate authority certificate file(s) to validate server connections. If +// certificates to establish the identity of the client need to be included in +// the credentials (eg: for mTLS), use NewTLS instead, where a complete +// tls.Config can be specified. +// serverNameOverride is for testing only. If set to a non empty string, +// it will override the virtual host name of authority (e.g. :authority header +// field) in requests. +func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) { + b, err := ioutil.ReadFile(certFile) + if err != nil { + return nil, err + } + cp := x509.NewCertPool() + if !cp.AppendCertsFromPEM(b) { + return nil, fmt.Errorf("credentials: failed to append certificates") + } + return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}), nil +} + +// NewServerTLSFromCert constructs TLS credentials from the input certificate for server. +func NewServerTLSFromCert(cert *tls.Certificate) TransportCredentials { + return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}}) +} + +// NewServerTLSFromFile constructs TLS credentials from the input certificate file and key +// file for server. 
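+//
+// For example (illustrative file names):
+//
+//	creds, err := credentials.NewServerTLSFromFile("server.crt", "server.key")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	s := grpc.NewServer(grpc.Creds(creds))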
+func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error) { + cert, err := tls.LoadX509KeyPair(certFile, keyFile) + if err != nil { + return nil, err + } + return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil +} + +// TLSChannelzSecurityValue defines the struct that TLS protocol should return +// from GetSecurityValue(), containing security info like cipher and certificate used. +// +// This API is EXPERIMENTAL. +type TLSChannelzSecurityValue struct { + ChannelzSecurityValue + StandardName string + LocalCertificate []byte + RemoteCertificate []byte +} + +var cipherSuiteLookup = map[uint16]string{ + tls.TLS_RSA_WITH_RC4_128_SHA: "TLS_RSA_WITH_RC4_128_SHA", + tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_RSA_WITH_3DES_EDE_CBC_SHA", + tls.TLS_RSA_WITH_AES_128_CBC_SHA: "TLS_RSA_WITH_AES_128_CBC_SHA", + tls.TLS_RSA_WITH_AES_256_CBC_SHA: "TLS_RSA_WITH_AES_256_CBC_SHA", + tls.TLS_RSA_WITH_AES_128_GCM_SHA256: "TLS_RSA_WITH_AES_128_GCM_SHA256", + tls.TLS_RSA_WITH_AES_256_GCM_SHA384: "TLS_RSA_WITH_AES_256_GCM_SHA384", + tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA: "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA", + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", + tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", + tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA: "TLS_ECDHE_RSA_WITH_RC4_128_SHA", + tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", + tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", + tls.TLS_FALLBACK_SCSV: "TLS_FALLBACK_SCSV", + tls.TLS_RSA_WITH_AES_128_CBC_SHA256: "TLS_RSA_WITH_AES_128_CBC_SHA256", + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", + tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", + tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", +} + +// cloneTLSConfig returns a shallow clone of the exported +// fields of cfg, ignoring the unexported sync.Once, which +// contains a mutex and must not be copied. +// +// If cfg is nil, a new zero tls.Config is returned. +// +// TODO: inline this function if possible. +func cloneTLSConfig(cfg *tls.Config) *tls.Config { + if cfg == nil { + return &tls.Config{} + } + + return cfg.Clone() +} diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go new file mode 100644 index 00000000..35bde103 --- /dev/null +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -0,0 +1,606 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "fmt" + "net" + "time" + + "google.golang.org/grpc/backoff" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" + internalbackoff "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/stats" +) + +// dialOptions configure a Dial call. dialOptions are set by the DialOption +// values passed to Dial. +type dialOptions struct { + unaryInt UnaryClientInterceptor + streamInt StreamClientInterceptor + + chainUnaryInts []UnaryClientInterceptor + chainStreamInts []StreamClientInterceptor + + cp Compressor + dc Decompressor + bs internalbackoff.Strategy + block bool + insecure bool + timeout time.Duration + scChan <-chan ServiceConfig + authority string + copts transport.ConnectOptions + callOptions []CallOption + // This is used by v1 balancer dial option WithBalancer to support v1 + // balancer, and also by WithBalancerName dial option. + balancerBuilder balancer.Builder + channelzParentID int64 + disableServiceConfig bool + disableRetry bool + disableHealthCheck bool + healthCheckFunc internal.HealthChecker + minConnectTimeout func() time.Duration + defaultServiceConfig *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON. + defaultServiceConfigRawJSON *string + // This is used by ccResolverWrapper to backoff between successive calls to + // resolver.ResolveNow(). The user will have no need to configure this, but + // we need to be able to configure this in tests. + resolveNowBackoff func(int) time.Duration + resolvers []resolver.Builder + withProxy bool +} + +// DialOption configures how we set up the connection. +type DialOption interface { + apply(*dialOptions) +} + +// EmptyDialOption does not alter the dial configuration. It can be embedded in +// another structure to build custom dial options. +// +// This API is EXPERIMENTAL. +type EmptyDialOption struct{} + +func (EmptyDialOption) apply(*dialOptions) {} + +// funcDialOption wraps a function that modifies dialOptions into an +// implementation of the DialOption interface. +type funcDialOption struct { + f func(*dialOptions) +} + +func (fdo *funcDialOption) apply(do *dialOptions) { + fdo.f(do) +} + +func newFuncDialOption(f func(*dialOptions)) *funcDialOption { + return &funcDialOption{ + f: f, + } +} + +// WithWriteBufferSize determines how much data can be batched before doing a +// write on the wire. The corresponding memory allocation for this buffer will +// be twice the size to keep syscalls low. The default value for this buffer is +// 32KB. +// +// Zero will disable the write buffer such that each write will be on underlying +// connection. Note: A Send call may not directly translate to a write. 
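Every With* constructor below is built from the funcDialOption closure pattern above: a public option is just a function mutating the private dialOptions struct. For code outside this package, EmptyDialOption is the intended hook; embedding it yields a no-op DialOption that a wrapper library can use as a marker. A sketch, with the package, type, and field names invented for illustration:

    package tracingwrap // hypothetical wrapper package

    import "google.golang.org/grpc"

    // withSampleRate embeds grpc.EmptyDialOption, so it satisfies DialOption
    // without touching gRPC's private dialOptions; a wrapper can detect it
    // among the options before forwarding the rest to grpc.Dial.
    type withSampleRate struct {
        grpc.EmptyDialOption
        rate float64
    }

    // WithSampleRate returns the marker option.
    func WithSampleRate(rate float64) grpc.DialOption {
        return withSampleRate{rate: rate}
    }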
+func WithWriteBufferSize(s int) DialOption {
+    return newFuncDialOption(func(o *dialOptions) {
+        o.copts.WriteBufferSize = s
+    })
+}
+
+// WithReadBufferSize lets you set the size of the read buffer; this determines
+// how much data can be read at most for each read syscall.
+//
+// The default value for this buffer is 32KB. Zero will disable the read buffer
+// for a connection so the data framer can access the underlying conn directly.
+func WithReadBufferSize(s int) DialOption {
+    return newFuncDialOption(func(o *dialOptions) {
+        o.copts.ReadBufferSize = s
+    })
+}
+
+// WithInitialWindowSize returns a DialOption which sets the value for initial
+// window size on a stream. The lower bound for window size is 64K and any value
+// smaller than that will be ignored.
+func WithInitialWindowSize(s int32) DialOption {
+    return newFuncDialOption(func(o *dialOptions) {
+        o.copts.InitialWindowSize = s
+    })
+}
+
+// WithInitialConnWindowSize returns a DialOption which sets the value for
+// initial window size on a connection. The lower bound for window size is 64K
+// and any value smaller than that will be ignored.
+func WithInitialConnWindowSize(s int32) DialOption {
+    return newFuncDialOption(func(o *dialOptions) {
+        o.copts.InitialConnWindowSize = s
+    })
+}
+
+// WithMaxMsgSize returns a DialOption which sets the maximum message size the
+// client can receive.
+//
+// Deprecated: use WithDefaultCallOptions(MaxCallRecvMsgSize(s)) instead. Will
+// be supported throughout 1.x.
+func WithMaxMsgSize(s int) DialOption {
+    return WithDefaultCallOptions(MaxCallRecvMsgSize(s))
+}
+
+// WithDefaultCallOptions returns a DialOption which sets the default
+// CallOptions for calls over the connection.
+func WithDefaultCallOptions(cos ...CallOption) DialOption {
+    return newFuncDialOption(func(o *dialOptions) {
+        o.callOptions = append(o.callOptions, cos...)
+    })
+}
+
+// WithCodec returns a DialOption which sets a codec for message marshaling and
+// unmarshaling.
+//
+// Deprecated: use WithDefaultCallOptions(ForceCodec(_)) instead. Will be
+// supported throughout 1.x.
+func WithCodec(c Codec) DialOption {
+    return WithDefaultCallOptions(CallCustomCodec(c))
+}
+
+// WithCompressor returns a DialOption which sets a Compressor to use for
+// message compression. It has lower priority than the compressor set by the
+// UseCompressor CallOption.
+//
+// Deprecated: use UseCompressor instead. Will be supported throughout 1.x.
+func WithCompressor(cp Compressor) DialOption {
+    return newFuncDialOption(func(o *dialOptions) {
+        o.cp = cp
+    })
+}
+
+// WithDecompressor returns a DialOption which sets a Decompressor to use for
+// incoming message decompression. If incoming response messages are encoded
+// using the decompressor's Type(), it will be used. Otherwise, the message
+// encoding will be used to look up the compressor registered via
+// encoding.RegisterCompressor, which will then be used to decompress the
+// message. If no compressor is registered for the encoding, an Unimplemented
+// status error will be returned.
+//
+// Deprecated: use encoding.RegisterCompressor instead. Will be supported
+// throughout 1.x.
+func WithDecompressor(dc Decompressor) DialOption {
+    return newFuncDialOption(func(o *dialOptions) {
+        o.dc = dc
+    })
+}
+
+// WithBalancer returns a DialOption which sets a load balancer with the v1 API.
+// Name resolver will be ignored if this DialOption is specified.
+//
+// Deprecated: use the new balancer APIs in balancer package and
+// WithBalancerName. Will be removed in a future 1.x release.
+func WithBalancer(b Balancer) DialOption {
+    return newFuncDialOption(func(o *dialOptions) {
+        o.balancerBuilder = &balancerWrapperBuilder{
+            b: b,
+        }
+    })
+}
+
+// WithBalancerName sets the balancer that the ClientConn will be initialized
+// with. The balancer registered with balancerName will be used. This function
+// panics if no balancer was registered by balancerName.
+//
+// The balancer cannot be overridden by a balancer option specified by the
+// service config.
+//
+// Deprecated: use WithDefaultServiceConfig and WithDisableServiceConfig
+// instead. Will be removed in a future 1.x release.
+func WithBalancerName(balancerName string) DialOption {
+    builder := balancer.Get(balancerName)
+    if builder == nil {
+        panic(fmt.Sprintf("grpc.WithBalancerName: no balancer is registered for name %v", balancerName))
+    }
+    return newFuncDialOption(func(o *dialOptions) {
+        o.balancerBuilder = builder
+    })
+}
+
+// WithServiceConfig returns a DialOption which has a channel to read the
+// service configuration.
+//
+// Deprecated: service config should be received through the name resolver or
+// via WithDefaultServiceConfig, as specified at
+// https://github.com/grpc/grpc/blob/master/doc/service_config.md. Will be
+// removed in a future 1.x release.
+func WithServiceConfig(c <-chan ServiceConfig) DialOption {
+    return newFuncDialOption(func(o *dialOptions) {
+        o.scChan = c
+    })
+}
+
+// WithConnectParams configures the dialer to use the provided ConnectParams.
+//
+// The backoff configuration specified as part of the ConnectParams overrides
+// all defaults specified in
+// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. Consider
+// using the backoff.DefaultConfig as a base, in cases where you want to
+// override only a subset of the backoff configuration.
+//
+// This API is EXPERIMENTAL.
+func WithConnectParams(p ConnectParams) DialOption {
+    return newFuncDialOption(func(o *dialOptions) {
+        o.bs = internalbackoff.Exponential{Config: p.Backoff}
+        o.minConnectTimeout = func() time.Duration {
+            return p.MinConnectTimeout
+        }
+    })
+}
+
+// WithBackoffMaxDelay configures the dialer to use the provided maximum delay
+// when backing off after failed connection attempts.
+//
+// Deprecated: use WithConnectParams instead. Will be supported throughout 1.x.
+func WithBackoffMaxDelay(md time.Duration) DialOption {
+    return WithBackoffConfig(BackoffConfig{MaxDelay: md})
+}
+
+// WithBackoffConfig configures the dialer to use the provided backoff
+// parameters after connection failures.
+//
+// Deprecated: use WithConnectParams instead. Will be supported throughout 1.x.
+func WithBackoffConfig(b BackoffConfig) DialOption {
+    bc := backoff.DefaultConfig
+    bc.MaxDelay = b.MaxDelay
+    return withBackoff(internalbackoff.Exponential{Config: bc})
+}
+
+// withBackoff sets the backoff strategy used for connectRetryNum after a failed
+// connection attempt.
+//
+// This can be exported if arbitrary backoff strategies are allowed by gRPC.
+func withBackoff(bs internalbackoff.Strategy) DialOption {
+    return newFuncDialOption(func(o *dialOptions) {
+        o.bs = bs
+    })
+}
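A short sketch of the recommended WithConnectParams usage, starting from backoff.DefaultConfig and overriding a single field, as the comment above suggests; the durations and the package name are arbitrary:

    package dialcfg // hypothetical helper package

    import (
        "time"

        "google.golang.org/grpc"
        "google.golang.org/grpc/backoff"
    )

    // dialBackoff keeps the documented backoff defaults and overrides only
    // the maximum delay between reconnect attempts.
    func dialBackoff() grpc.DialOption {
        cfg := backoff.DefaultConfig
        cfg.MaxDelay = 2 * time.Minute
        return grpc.WithConnectParams(grpc.ConnectParams{
            Backoff:           cfg,
            MinConnectTimeout: 20 * time.Second,
        })
    }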
+// WithBlock returns a DialOption which makes callers of Dial block until the
+// underlying connection is up. Without this, Dial returns immediately and
+// connecting to the server happens in the background.
+func WithBlock() DialOption {
+    return newFuncDialOption(func(o *dialOptions) {
+        o.block = true
+    })
+}
+
+// WithInsecure returns a DialOption which disables transport security for this
+// ClientConn. Note that transport security is required unless WithInsecure is
+// set.
+func WithInsecure() DialOption {
+    return newFuncDialOption(func(o *dialOptions) {
+        o.insecure = true
+    })
+}
+
+// WithNoProxy returns a DialOption which disables the use of proxies for this
+// ClientConn. This is ignored if WithDialer or WithContextDialer are used.
+//
+// This API is EXPERIMENTAL.
+func WithNoProxy() DialOption {
+    return newFuncDialOption(func(o *dialOptions) {
+        o.withProxy = false
+    })
+}
+
+// WithTransportCredentials returns a DialOption which configures
+// connection-level security credentials (e.g., TLS/SSL). This should not be
+// used together with WithCredentialsBundle.
+func WithTransportCredentials(creds credentials.TransportCredentials) DialOption {
+    return newFuncDialOption(func(o *dialOptions) {
+        o.copts.TransportCredentials = creds
+    })
+}
+
+// WithPerRPCCredentials returns a DialOption which sets credentials and places
+// auth state on each outbound RPC.
+func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption {
+    return newFuncDialOption(func(o *dialOptions) {
+        o.copts.PerRPCCredentials = append(o.copts.PerRPCCredentials, creds)
+    })
+}
+
+// WithCredentialsBundle returns a DialOption to set a credentials bundle for
+// the ClientConn. This should not be used together with
+// WithTransportCredentials.
+//
+// This API is experimental.
+func WithCredentialsBundle(b credentials.Bundle) DialOption {
+    return newFuncDialOption(func(o *dialOptions) {
+        o.copts.CredsBundle = b
+    })
+}
+
+// WithTimeout returns a DialOption that configures a timeout for dialing a
+// ClientConn initially. This is valid if and only if WithBlock() is present.
+//
+// Deprecated: use DialContext and context.WithTimeout instead of Dial. Will
+// be supported throughout 1.x.
+func WithTimeout(d time.Duration) DialOption {
+    return newFuncDialOption(func(o *dialOptions) {
+        o.timeout = d
+    })
+}
+
+// WithContextDialer returns a DialOption that sets a dialer to create
+// connections. If FailOnNonTempDialError() is set to true, and an error is
+// returned by f, gRPC checks the error's Temporary() method to decide if it
+// should try to reconnect to the network address.
+func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption {
+    return newFuncDialOption(func(o *dialOptions) {
+        o.copts.Dialer = f
+    })
+}
+
+func init() {
+    internal.WithHealthCheckFunc = withHealthCheckFunc
+}
+
+// WithDialer returns a DialOption that specifies a function to use for dialing
+// network addresses. If FailOnNonTempDialError() is set to true, and an error
+// is returned by f, gRPC checks the error's Temporary() method to decide if it
+// should try to reconnect to the network address.
+//
+// Deprecated: use WithContextDialer instead. Will be supported throughout
+// 1.x.
+func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption {
+    return WithContextDialer(
+        func(ctx context.Context, addr string) (net.Conn, error) {
+            if deadline, ok := ctx.Deadline(); ok {
+                return f(addr, time.Until(deadline))
+            }
+            return f(addr, 0)
+        })
+}
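Taken together, these options compose at the Dial call site. A sketch of a blocking dial through a custom dialer; the Unix socket path and target string are placeholders, and the dialer here ignores the resolved address:

    package main

    import (
        "context"
        "log"
        "net"
        "time"

        "google.golang.org/grpc"
    )

    func main() {
        ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
        defer cancel()
        conn, err := grpc.DialContext(ctx, "unix:///run/app.sock",
            grpc.WithInsecure(),
            grpc.WithBlock(), // fail the dial itself if the socket is unreachable
            grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) {
                // addr is derived from the target string; this dialer
                // connects to a fixed socket path instead.
                var d net.Dialer
                return d.DialContext(ctx, "unix", "/run/app.sock")
            }),
        )
        if err != nil {
            log.Fatalf("dialing: %v", err)
        }
        defer conn.Close()
    }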
+// WithStatsHandler returns a DialOption that specifies the stats handler for
+// all the RPCs and underlying network connections in this ClientConn.
+func WithStatsHandler(h stats.Handler) DialOption {
+    return newFuncDialOption(func(o *dialOptions) {
+        o.copts.StatsHandler = h
+    })
+}
+
+// FailOnNonTempDialError returns a DialOption that specifies if gRPC fails on
+// non-temporary dial errors. If f is true, and the dialer returns a
+// non-temporary error, gRPC will fail the connection to the network address
+// and won't try to reconnect. The default value of FailOnNonTempDialError is
+// false.
+//
+// FailOnNonTempDialError only affects the initial dial, and does not do
+// anything useful unless you are also using WithBlock().
+//
+// This is an EXPERIMENTAL API.
+func FailOnNonTempDialError(f bool) DialOption {
+    return newFuncDialOption(func(o *dialOptions) {
+        o.copts.FailOnNonTempDialError = f
+    })
+}
+
+// WithUserAgent returns a DialOption that specifies a user agent string for
+// all the RPCs.
+func WithUserAgent(s string) DialOption {
+    return newFuncDialOption(func(o *dialOptions) {
+        o.copts.UserAgent = s
+    })
+}
+
+// WithKeepaliveParams returns a DialOption that specifies keepalive parameters
+// for the client transport.
+func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption {
+    if kp.Time < internal.KeepaliveMinPingTime {
+        grpclog.Warningf("Adjusting keepalive ping interval to minimum period of %v", internal.KeepaliveMinPingTime)
+        kp.Time = internal.KeepaliveMinPingTime
+    }
+    return newFuncDialOption(func(o *dialOptions) {
+        o.copts.KeepaliveParams = kp
+    })
+}
+
+// WithUnaryInterceptor returns a DialOption that specifies the interceptor for
+// unary RPCs.
+func WithUnaryInterceptor(f UnaryClientInterceptor) DialOption {
+    return newFuncDialOption(func(o *dialOptions) {
+        o.unaryInt = f
+    })
+}
+
+// WithChainUnaryInterceptor returns a DialOption that specifies the chained
+// interceptor for unary RPCs. The first interceptor will be the outermost,
+// while the last interceptor will be the innermost wrapper around the real
+// call. All interceptors added by this method will be chained, and the
+// interceptor defined by WithUnaryInterceptor will always be prepended to the
+// chain.
+func WithChainUnaryInterceptor(interceptors ...UnaryClientInterceptor) DialOption {
+    return newFuncDialOption(func(o *dialOptions) {
+        o.chainUnaryInts = append(o.chainUnaryInts, interceptors...)
+    })
+}
+
+// WithStreamInterceptor returns a DialOption that specifies the interceptor
+// for streaming RPCs.
+func WithStreamInterceptor(f StreamClientInterceptor) DialOption {
+    return newFuncDialOption(func(o *dialOptions) {
+        o.streamInt = f
+    })
+}
+
+// WithChainStreamInterceptor returns a DialOption that specifies the chained
+// interceptor for streaming RPCs. The first interceptor will be the outermost,
+// while the last interceptor will be the innermost wrapper around the real
+// call. All interceptors added by this method will be chained, and the
+// interceptor defined by WithStreamInterceptor will always be prepended to the
+// chain.
+func WithChainStreamInterceptor(interceptors ...StreamClientInterceptor) DialOption {
+    return newFuncDialOption(func(o *dialOptions) {
+        o.chainStreamInts = append(o.chainStreamInts, interceptors...)
+    })
+}
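A sketch of a chained unary interceptor matching the UnaryClientInterceptor signature; the package name and helper are placeholders:

    package rpcclient // hypothetical client package

    import (
        "context"
        "log"

        "google.golang.org/grpc"
    )

    // logCalls matches grpc.UnaryClientInterceptor and logs each method name
    // before delegating to the real invoker.
    func logCalls(ctx context.Context, method string, req, reply interface{},
        cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
        log.Printf("grpc call: %s", method)
        return invoker(ctx, method, req, reply, cc, opts...)
    }

    // dial chains logCalls; further interceptors listed after it would wrap
    // the call from outermost to innermost in argument order.
    func dial(target string) (*grpc.ClientConn, error) {
        return grpc.Dial(target, grpc.WithInsecure(),
            grpc.WithChainUnaryInterceptor(logCalls))
    }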
+// WithAuthority returns a DialOption that specifies the value to be used as
+// the :authority pseudo-header. This value only works with WithInsecure and
+// has no effect if TransportCredentials are present.
+func WithAuthority(a string) DialOption {
+    return newFuncDialOption(func(o *dialOptions) {
+        o.authority = a
+    })
+}
+
+// WithChannelzParentID returns a DialOption that specifies the channelz ID of
+// the current ClientConn's parent. This function is used in nested channel
+// creation (e.g. grpclb dial).
+//
+// This API is EXPERIMENTAL.
+func WithChannelzParentID(id int64) DialOption {
+    return newFuncDialOption(func(o *dialOptions) {
+        o.channelzParentID = id
+    })
+}
+
+// WithDisableServiceConfig returns a DialOption that causes gRPC to ignore any
+// service config provided by the resolver and provides a hint to the resolver
+// to not fetch service configs.
+//
+// Note that this dial option only disables service config from the resolver.
+// If a default service config is provided, gRPC will use the default service
+// config.
+func WithDisableServiceConfig() DialOption {
+    return newFuncDialOption(func(o *dialOptions) {
+        o.disableServiceConfig = true
+    })
+}
+
+// WithDefaultServiceConfig returns a DialOption that configures the default
+// service config, which will be used in cases where:
+//
+// 1. WithDisableServiceConfig is also used.
+// 2. The resolver does not return a service config or returns an invalid
+//    service config.
+//
+// This API is EXPERIMENTAL.
+func WithDefaultServiceConfig(s string) DialOption {
+    return newFuncDialOption(func(o *dialOptions) {
+        o.defaultServiceConfigRawJSON = &s
+    })
+}
+
+// WithDisableRetry returns a DialOption that disables retries, even if the
+// service config enables them. This does not impact transparent retries, which
+// will happen automatically if no data is written to the wire or if the RPC is
+// unprocessed by the remote server.
+//
+// Retry support is currently disabled by default, but will be enabled by
+// default in the future. Until then, it may be enabled by setting the
+// environment variable "GRPC_GO_RETRY" to "on".
+//
+// This API is EXPERIMENTAL.
+func WithDisableRetry() DialOption {
+    return newFuncDialOption(func(o *dialOptions) {
+        o.disableRetry = true
+    })
+}
+
+// WithMaxHeaderListSize returns a DialOption that specifies the maximum
+// (uncompressed) size of header list that the client is prepared to accept.
+func WithMaxHeaderListSize(s uint32) DialOption {
+    return newFuncDialOption(func(o *dialOptions) {
+        o.copts.MaxHeaderListSize = &s
+    })
+}
+
+// WithDisableHealthCheck disables the LB channel health checking for all
+// SubConns of this ClientConn.
+//
+// This API is EXPERIMENTAL.
+func WithDisableHealthCheck() DialOption {
+    return newFuncDialOption(func(o *dialOptions) {
+        o.disableHealthCheck = true
+    })
+}
+
+// withHealthCheckFunc replaces the default health check function with the
+// provided one. It makes it easier for tests to change the health check
+// function.
+//
+// For testing purpose only.
+func withHealthCheckFunc(f internal.HealthChecker) DialOption {
+    return newFuncDialOption(func(o *dialOptions) {
+        o.healthCheckFunc = f
+    })
+}
+
+func defaultDialOptions() dialOptions {
+    return dialOptions{
+        disableRetry:    !envconfig.Retry,
+        healthCheckFunc: internal.HealthCheckFunc,
+        copts: transport.ConnectOptions{
+            WriteBufferSize: defaultWriteBufSize,
+            ReadBufferSize:  defaultReadBufSize,
+        },
+        resolveNowBackoff: internalbackoff.DefaultExponential.Backoff,
+        withProxy:         true,
+    }
+}
+
+// withMinConnectDeadline specifies the function that clientconn uses to
+// get minConnectDeadline. This can be used to make connection attempts happen
+// faster/slower.
+//
+// For testing purpose only.
+func withMinConnectDeadline(f func() time.Duration) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.minConnectTimeout = f + }) +} + +// withResolveNowBackoff specifies the function that clientconn uses to backoff +// between successive calls to resolver.ResolveNow(). +// +// For testing purpose only. +func withResolveNowBackoff(f func(int) time.Duration) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.resolveNowBackoff = f + }) +} + +// WithResolvers allows a list of resolver implementations to be registered +// locally with the ClientConn without needing to be globally registered via +// resolver.Register. They will be matched against the scheme used for the +// current Dial only, and will take precedence over the global registry. +// +// This API is EXPERIMENTAL. +func WithResolvers(rs ...resolver.Builder) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.resolvers = append(o.resolvers, rs...) + }) +} diff --git a/vendor/google.golang.org/grpc/doc.go b/vendor/google.golang.org/grpc/doc.go new file mode 100644 index 00000000..187adbb1 --- /dev/null +++ b/vendor/google.golang.org/grpc/doc.go @@ -0,0 +1,24 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +/* +Package grpc implements an RPC system called gRPC. + +See grpc.io for more information about gRPC. +*/ +package grpc // import "google.golang.org/grpc" diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go new file mode 100644 index 00000000..195e8448 --- /dev/null +++ b/vendor/google.golang.org/grpc/encoding/encoding.go @@ -0,0 +1,122 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package encoding defines the interface for the compressor and codec, and +// functions to register and retrieve compressors and codecs. +// +// This package is EXPERIMENTAL. +package encoding + +import ( + "io" + "strings" +) + +// Identity specifies the optional encoding for uncompressed streams. +// It is intended for grpc internal use only. +const Identity = "identity" + +// Compressor is used for compressing and decompressing when sending or +// receiving messages. +type Compressor interface { + // Compress writes the data written to wc to w after compressing it. If an + // error occurs while initializing the compressor, that error is returned + // instead. 
+    Compress(w io.Writer) (io.WriteCloser, error)
+    // Decompress reads data from r, decompresses it, and provides the
+    // uncompressed data via the returned io.Reader. If an error occurs while
+    // initializing the decompressor, that error is returned instead.
+    Decompress(r io.Reader) (io.Reader, error)
+    // Name is the name of the compression codec and is used to set the content
+    // coding header. The result must be static; the result cannot change
+    // between calls.
+    Name() string
+    // EXPERIMENTAL: if a Compressor implements
+    // DecompressedSize(compressedBytes []byte) int, gRPC will call it
+    // to determine the size of the buffer allocated for the result of decompression.
+    // Return -1 to indicate unknown size.
+}
+
+var registeredCompressor = make(map[string]Compressor)
+
+// RegisterCompressor registers the compressor with gRPC by its name. It can
+// be activated when sending an RPC via grpc.UseCompressor(). It will be
+// automatically accessed when receiving a message based on the content coding
+// header. Servers also use it to send a response with the same encoding as
+// the request.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple Compressors are
+// registered with the same name, the one registered last will take effect.
+func RegisterCompressor(c Compressor) {
+    registeredCompressor[c.Name()] = c
+}
+
+// GetCompressor returns the Compressor for the given compressor name.
+func GetCompressor(name string) Compressor {
+    return registeredCompressor[name]
+}
+
+// Codec defines the interface gRPC uses to encode and decode messages. Note
+// that implementations of this interface must be thread safe; a Codec's
+// methods can be called from concurrent goroutines.
+type Codec interface {
+    // Marshal returns the wire format of v.
+    Marshal(v interface{}) ([]byte, error)
+    // Unmarshal parses the wire format into v.
+    Unmarshal(data []byte, v interface{}) error
+    // Name returns the name of the Codec implementation. The returned string
+    // will be used as part of content type in transmission. The result must be
+    // static; the result cannot change between calls.
+    Name() string
+}
+
+var registeredCodecs = make(map[string]Codec)
+
+// RegisterCodec registers the provided Codec for use with all gRPC clients and
+// servers.
+//
+// The Codec will be stored and looked up by result of its Name() method, which
+// should match the content-subtype of the encoding handled by the Codec. This
+// is case-insensitive, and is stored and looked up as lowercase. If the
+// result of calling Name() is an empty string, RegisterCodec will panic. See
+// Content-Type on
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
+// more details.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple Codecs are
+// registered with the same name, the one registered last will take effect.
+func RegisterCodec(codec Codec) {
+    if codec == nil {
+        panic("cannot register a nil Codec")
+    }
+    if codec.Name() == "" {
+        panic("cannot register Codec with empty string result for Name()")
+    }
+    contentSubtype := strings.ToLower(codec.Name())
+    registeredCodecs[contentSubtype] = codec
+}
+
+// GetCodec gets a registered Codec by content-subtype, or nil if no Codec is
+// registered for the content-subtype.
+//
+// The content-subtype is expected to be lowercase.
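A sketch of a Codec that satisfies this interface and registers itself in init(), as the NOTE above requires; the package name and the "raw" content-subtype are invented for illustration:

    package rawcodec // hypothetical package

    import (
        "fmt"

        "google.golang.org/grpc/encoding"
    )

    // rawCodec passes []byte payloads through untouched.
    type rawCodec struct{}

    func (rawCodec) Marshal(v interface{}) ([]byte, error) {
        b, ok := v.([]byte)
        if !ok {
            return nil, fmt.Errorf("rawCodec: expected []byte, got %T", v)
        }
        return b, nil
    }

    func (rawCodec) Unmarshal(data []byte, v interface{}) error {
        p, ok := v.(*[]byte)
        if !ok {
            return fmt.Errorf("rawCodec: expected *[]byte, got %T", v)
        }
        *p = data
        return nil
    }

    func (rawCodec) Name() string { return "raw" }

    // Registration happens at initialization time, per the NOTE above.
    func init() { encoding.RegisterCodec(rawCodec{}) }

A caller would then select it per call with grpc.CallContentSubtype("raw"), which looks the codec up via GetCodec.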
+func GetCodec(contentSubtype string) Codec { + return registeredCodecs[contentSubtype] +} diff --git a/vendor/google.golang.org/grpc/encoding/proto/proto.go b/vendor/google.golang.org/grpc/encoding/proto/proto.go new file mode 100644 index 00000000..66b97a6f --- /dev/null +++ b/vendor/google.golang.org/grpc/encoding/proto/proto.go @@ -0,0 +1,110 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package proto defines the protobuf codec. Importing this package will +// register the codec. +package proto + +import ( + "math" + "sync" + + "github.com/golang/protobuf/proto" + "google.golang.org/grpc/encoding" +) + +// Name is the name registered for the proto compressor. +const Name = "proto" + +func init() { + encoding.RegisterCodec(codec{}) +} + +// codec is a Codec implementation with protobuf. It is the default codec for gRPC. +type codec struct{} + +type cachedProtoBuffer struct { + lastMarshaledSize uint32 + proto.Buffer +} + +func capToMaxInt32(val int) uint32 { + if val > math.MaxInt32 { + return uint32(math.MaxInt32) + } + return uint32(val) +} + +func marshal(v interface{}, cb *cachedProtoBuffer) ([]byte, error) { + protoMsg := v.(proto.Message) + newSlice := make([]byte, 0, cb.lastMarshaledSize) + + cb.SetBuf(newSlice) + cb.Reset() + if err := cb.Marshal(protoMsg); err != nil { + return nil, err + } + out := cb.Bytes() + cb.lastMarshaledSize = capToMaxInt32(len(out)) + return out, nil +} + +func (codec) Marshal(v interface{}) ([]byte, error) { + if pm, ok := v.(proto.Marshaler); ok { + // object can marshal itself, no need for buffer + return pm.Marshal() + } + + cb := protoBufferPool.Get().(*cachedProtoBuffer) + out, err := marshal(v, cb) + + // put back buffer and lose the ref to the slice + cb.SetBuf(nil) + protoBufferPool.Put(cb) + return out, err +} + +func (codec) Unmarshal(data []byte, v interface{}) error { + protoMsg := v.(proto.Message) + protoMsg.Reset() + + if pu, ok := protoMsg.(proto.Unmarshaler); ok { + // object can unmarshal itself, no need for buffer + return pu.Unmarshal(data) + } + + cb := protoBufferPool.Get().(*cachedProtoBuffer) + cb.SetBuf(data) + err := cb.Unmarshal(protoMsg) + cb.SetBuf(nil) + protoBufferPool.Put(cb) + return err +} + +func (codec) Name() string { + return Name +} + +var protoBufferPool = &sync.Pool{ + New: func() interface{} { + return &cachedProtoBuffer{ + Buffer: proto.Buffer{}, + lastMarshaledSize: 16, + } + }, +} diff --git a/vendor/google.golang.org/grpc/go.mod b/vendor/google.golang.org/grpc/go.mod new file mode 100644 index 00000000..ecef1ab0 --- /dev/null +++ b/vendor/google.golang.org/grpc/go.mod @@ -0,0 +1,16 @@ +module google.golang.org/grpc + +go 1.11 + +require ( + github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f + github.com/envoyproxy/go-control-plane v0.9.4 + github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b + github.com/golang/mock v1.1.1 + github.com/golang/protobuf v1.3.3 + github.com/google/go-cmp v0.2.0 + golang.org/x/net 
v0.0.0-20190311183353-d8887717615a + golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be + golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a + google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 +) diff --git a/vendor/google.golang.org/grpc/go.sum b/vendor/google.golang.org/grpc/go.sum new file mode 100644 index 00000000..0bf9f074 --- /dev/null +++ b/vendor/google.golang.org/grpc/go.sum @@ -0,0 +1,64 @@ +cloud.google.com/go v0.26.0 h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f h1:WBZRG4aNOuI15bLRrCgN8fCq8E5Xuty6jGbmSNEvSsU= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4 h1:rEvIZUSZ3fx39WIi3JkQqQBitGwpELBIYWeBVh6wn+E= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1 h1:G5FRp8JnTd7RQH5kemVNlMeyXQAztQ3mOWV95KxsXH8= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3 h1:gyjaxf+svBWX08ZjK86iN9geUJF0H6gp2IRKX6Nf6/I= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3 h1:XQyxROzUlZH+WIQwySDgnISgOivlhjIEwaQaJEJrrN0= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/net 
v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135 h1:5Beo0mZN8dRzgrMMkDp0jc8YXQKx9DiJ2k1dkvGsn5A= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc h1:/hemPrYIhOhy8zYrNj+069zDB68us2sMGsfkFJO0iZs= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/google.golang.org/grpc/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/grpclog.go new file mode 100644 index 00000000..c8bb2be3 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/grpclog.go @@ -0,0 +1,132 @@ +/* + * + * Copyright 2017 gRPC 
authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package grpclog defines logging for grpc.
+//
+// All logs in transport and grpclb packages only go to verbose level 2.
+// All logs in other packages in grpc are logged regardless of the verbosity
+// level.
+//
+// In the default logger, the severity level can be set by the environment
+// variable GRPC_GO_LOG_SEVERITY_LEVEL and the verbosity level by
+// GRPC_GO_LOG_VERBOSITY_LEVEL.
+package grpclog // import "google.golang.org/grpc/grpclog"
+
+import (
+    "os"
+
+    "google.golang.org/grpc/internal/grpclog"
+)
+
+func init() {
+    SetLoggerV2(newLoggerV2())
+}
+
+// V reports whether verbosity level l is at least the requested verbose level.
+func V(l int) bool {
+    return grpclog.Logger.V(l)
+}
+
+// Info logs to the INFO log.
+func Info(args ...interface{}) {
+    grpclog.Logger.Info(args...)
+}
+
+// Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf.
+func Infof(format string, args ...interface{}) {
+    grpclog.Logger.Infof(format, args...)
+}
+
+// Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println.
+func Infoln(args ...interface{}) {
+    grpclog.Logger.Infoln(args...)
+}
+
+// Warning logs to the WARNING log.
+func Warning(args ...interface{}) {
+    grpclog.Logger.Warning(args...)
+}
+
+// Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf.
+func Warningf(format string, args ...interface{}) {
+    grpclog.Logger.Warningf(format, args...)
+}
+
+// Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println.
+func Warningln(args ...interface{}) {
+    grpclog.Logger.Warningln(args...)
+}
+
+// Error logs to the ERROR log.
+func Error(args ...interface{}) {
+    grpclog.Logger.Error(args...)
+}
+
+// Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf.
+func Errorf(format string, args ...interface{}) {
+    grpclog.Logger.Errorf(format, args...)
+}
+
+// Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println.
+func Errorln(args ...interface{}) {
+    grpclog.Logger.Errorln(args...)
+}
+
+// Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print.
+// It calls os.Exit() with exit code 1.
+func Fatal(args ...interface{}) {
+    grpclog.Logger.Fatal(args...)
+    // Make sure fatal logs will exit.
+    os.Exit(1)
+}
+
+// Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf.
+// It calls os.Exit() with exit code 1.
+func Fatalf(format string, args ...interface{}) {
+    grpclog.Logger.Fatalf(format, args...)
+    // Make sure fatal logs will exit.
+    os.Exit(1)
+}
+
+// Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println.
+// It calls os.Exit() with exit code 1.
+func Fatalln(args ...interface{}) {
+    grpclog.Logger.Fatalln(args...)
+    // Make sure fatal logs will exit.
+    os.Exit(1)
+}
+
+// Print prints to the logger. Arguments are handled in the manner of fmt.Print.
+//
+// Deprecated: use Info.
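A sketch of pointing these package-level functions at a custom LoggerV2, using the NewLoggerV2WithVerbosity constructor defined later in this package; routing everything to stderr at verbosity 2 here is roughly the programmatic analogue of GRPC_GO_LOG_SEVERITY_LEVEL=info with GRPC_GO_LOG_VERBOSITY_LEVEL=2:

    package main

    import (
        "os"

        "google.golang.org/grpc/grpclog"
    )

    func main() {
        // Must run before any other gRPC call, since SetLoggerV2 is not
        // mutex-protected.
        grpclog.SetLoggerV2(grpclog.NewLoggerV2WithVerbosity(os.Stderr, os.Stderr, os.Stderr, 2))
    }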
+func Print(args ...interface{}) { + grpclog.Logger.Info(args...) +} + +// Printf prints to the logger. Arguments are handled in the manner of fmt.Printf. +// +// Deprecated: use Infof. +func Printf(format string, args ...interface{}) { + grpclog.Logger.Infof(format, args...) +} + +// Println prints to the logger. Arguments are handled in the manner of fmt.Println. +// +// Deprecated: use Infoln. +func Println(args ...interface{}) { + grpclog.Logger.Infoln(args...) +} diff --git a/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/google.golang.org/grpc/grpclog/logger.go new file mode 100644 index 00000000..ef06a482 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/logger.go @@ -0,0 +1,87 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpclog + +import "google.golang.org/grpc/internal/grpclog" + +// Logger mimics golang's standard Logger as an interface. +// +// Deprecated: use LoggerV2. +type Logger interface { + Fatal(args ...interface{}) + Fatalf(format string, args ...interface{}) + Fatalln(args ...interface{}) + Print(args ...interface{}) + Printf(format string, args ...interface{}) + Println(args ...interface{}) +} + +// SetLogger sets the logger that is used in grpc. Call only from +// init() functions. +// +// Deprecated: use SetLoggerV2. +func SetLogger(l Logger) { + grpclog.Logger = &loggerWrapper{Logger: l} +} + +// loggerWrapper wraps Logger into a LoggerV2. +type loggerWrapper struct { + Logger +} + +func (g *loggerWrapper) Info(args ...interface{}) { + g.Logger.Print(args...) +} + +func (g *loggerWrapper) Infoln(args ...interface{}) { + g.Logger.Println(args...) +} + +func (g *loggerWrapper) Infof(format string, args ...interface{}) { + g.Logger.Printf(format, args...) +} + +func (g *loggerWrapper) Warning(args ...interface{}) { + g.Logger.Print(args...) +} + +func (g *loggerWrapper) Warningln(args ...interface{}) { + g.Logger.Println(args...) +} + +func (g *loggerWrapper) Warningf(format string, args ...interface{}) { + g.Logger.Printf(format, args...) +} + +func (g *loggerWrapper) Error(args ...interface{}) { + g.Logger.Print(args...) +} + +func (g *loggerWrapper) Errorln(args ...interface{}) { + g.Logger.Println(args...) +} + +func (g *loggerWrapper) Errorf(format string, args ...interface{}) { + g.Logger.Printf(format, args...) +} + +func (g *loggerWrapper) V(l int) bool { + // Returns true for all verbose level. + return true +} diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go new file mode 100644 index 00000000..23612b7c --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go @@ -0,0 +1,214 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpclog + +import ( + "io" + "io/ioutil" + "log" + "os" + "strconv" + + "google.golang.org/grpc/internal/grpclog" +) + +// LoggerV2 does underlying logging work for grpclog. +type LoggerV2 interface { + // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. + Info(args ...interface{}) + // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. + Infoln(args ...interface{}) + // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. + Infof(format string, args ...interface{}) + // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. + Warning(args ...interface{}) + // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. + Warningln(args ...interface{}) + // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. + Warningf(format string, args ...interface{}) + // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. + Error(args ...interface{}) + // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. + Errorln(args ...interface{}) + // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. + Errorf(format string, args ...interface{}) + // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatal(args ...interface{}) + // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatalln(args ...interface{}) + // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatalf(format string, args ...interface{}) + // V reports whether verbosity level l is at least the requested verbose level. + V(l int) bool +} + +// SetLoggerV2 sets logger that is used in grpc to a V2 logger. +// Not mutex-protected, should be called before any gRPC functions. +func SetLoggerV2(l LoggerV2) { + grpclog.Logger = l + grpclog.DepthLogger, _ = l.(grpclog.DepthLoggerV2) +} + +const ( + // infoLog indicates Info severity. + infoLog int = iota + // warningLog indicates Warning severity. + warningLog + // errorLog indicates Error severity. + errorLog + // fatalLog indicates Fatal severity. + fatalLog +) + +// severityName contains the string representation of each severity. +var severityName = []string{ + infoLog: "INFO", + warningLog: "WARNING", + errorLog: "ERROR", + fatalLog: "FATAL", +} + +// loggerT is the default logger used by grpclog. +type loggerT struct { + m []*log.Logger + v int +} + +// NewLoggerV2 creates a loggerV2 with the provided writers. +// Fatal logs will be written to errorW, warningW, infoW, followed by exit(1). +// Error logs will be written to errorW, warningW and infoW. 
+// Warning logs will be written to warningW and infoW. +// Info logs will be written to infoW. +func NewLoggerV2(infoW, warningW, errorW io.Writer) LoggerV2 { + return NewLoggerV2WithVerbosity(infoW, warningW, errorW, 0) +} + +// NewLoggerV2WithVerbosity creates a loggerV2 with the provided writers and +// verbosity level. +func NewLoggerV2WithVerbosity(infoW, warningW, errorW io.Writer, v int) LoggerV2 { + var m []*log.Logger + m = append(m, log.New(infoW, severityName[infoLog]+": ", log.LstdFlags)) + m = append(m, log.New(io.MultiWriter(infoW, warningW), severityName[warningLog]+": ", log.LstdFlags)) + ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal. + m = append(m, log.New(ew, severityName[errorLog]+": ", log.LstdFlags)) + m = append(m, log.New(ew, severityName[fatalLog]+": ", log.LstdFlags)) + return &loggerT{m: m, v: v} +} + +// newLoggerV2 creates a loggerV2 to be used as default logger. +// All logs are written to stderr. +func newLoggerV2() LoggerV2 { + errorW := ioutil.Discard + warningW := ioutil.Discard + infoW := ioutil.Discard + + logLevel := os.Getenv("GRPC_GO_LOG_SEVERITY_LEVEL") + switch logLevel { + case "", "ERROR", "error": // If env is unset, set level to ERROR. + errorW = os.Stderr + case "WARNING", "warning": + warningW = os.Stderr + case "INFO", "info": + infoW = os.Stderr + } + + var v int + vLevel := os.Getenv("GRPC_GO_LOG_VERBOSITY_LEVEL") + if vl, err := strconv.Atoi(vLevel); err == nil { + v = vl + } + return NewLoggerV2WithVerbosity(infoW, warningW, errorW, v) +} + +func (g *loggerT) Info(args ...interface{}) { + g.m[infoLog].Print(args...) +} + +func (g *loggerT) Infoln(args ...interface{}) { + g.m[infoLog].Println(args...) +} + +func (g *loggerT) Infof(format string, args ...interface{}) { + g.m[infoLog].Printf(format, args...) +} + +func (g *loggerT) Warning(args ...interface{}) { + g.m[warningLog].Print(args...) +} + +func (g *loggerT) Warningln(args ...interface{}) { + g.m[warningLog].Println(args...) +} + +func (g *loggerT) Warningf(format string, args ...interface{}) { + g.m[warningLog].Printf(format, args...) +} + +func (g *loggerT) Error(args ...interface{}) { + g.m[errorLog].Print(args...) +} + +func (g *loggerT) Errorln(args ...interface{}) { + g.m[errorLog].Println(args...) +} + +func (g *loggerT) Errorf(format string, args ...interface{}) { + g.m[errorLog].Printf(format, args...) +} + +func (g *loggerT) Fatal(args ...interface{}) { + g.m[fatalLog].Fatal(args...) + // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). +} + +func (g *loggerT) Fatalln(args ...interface{}) { + g.m[fatalLog].Fatalln(args...) + // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). +} + +func (g *loggerT) Fatalf(format string, args ...interface{}) { + g.m[fatalLog].Fatalf(format, args...) + // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). +} + +func (g *loggerT) V(l int) bool { + return l <= g.v +} + +// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements +// DepthLoggerV2, the below functions will be called with the appropriate stack +// depth set for trivial functions the logger may ignore. +// +// This API is EXPERIMENTAL. +type DepthLoggerV2 interface { + // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Print. + InfoDepth(depth int, args ...interface{}) + // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Print. 
+    WarningDepth(depth int, args ...interface{})
+    // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Print.
+    ErrorDepth(depth int, args ...interface{})
+    // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Print.
+    FatalDepth(depth int, args ...interface{})
+}
diff --git a/vendor/google.golang.org/grpc/health/client.go b/vendor/google.golang.org/grpc/health/client.go
new file mode 100644
index 00000000..b5bee483
--- /dev/null
+++ b/vendor/google.golang.org/grpc/health/client.go
@@ -0,0 +1,117 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package health
+
+import (
+    "context"
+    "fmt"
+    "io"
+    "time"
+
+    "google.golang.org/grpc"
+    "google.golang.org/grpc/codes"
+    "google.golang.org/grpc/connectivity"
+    healthpb "google.golang.org/grpc/health/grpc_health_v1"
+    "google.golang.org/grpc/internal"
+    "google.golang.org/grpc/internal/backoff"
+    "google.golang.org/grpc/status"
+)
+
+var (
+    backoffStrategy = backoff.DefaultExponential
+    backoffFunc     = func(ctx context.Context, retries int) bool {
+        d := backoffStrategy.Backoff(retries)
+        timer := time.NewTimer(d)
+        select {
+        case <-timer.C:
+            return true
+        case <-ctx.Done():
+            timer.Stop()
+            return false
+        }
+    }
+)
+
+func init() {
+    internal.HealthCheckFunc = clientHealthCheck
+}
+
+const healthCheckMethod = "/grpc.health.v1.Health/Watch"
+
+// This function implements the protocol defined at:
+// https://github.com/grpc/grpc/blob/master/doc/health-checking.md
+func clientHealthCheck(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State, error), service string) error {
+    tryCnt := 0
+
+retryConnection:
+    for {
+        // Backs off if the connection has failed in some way without receiving a message in the previous retry.
+        if tryCnt > 0 && !backoffFunc(ctx, tryCnt-1) {
+            return nil
+        }
+        tryCnt++
+
+        if ctx.Err() != nil {
+            return nil
+        }
+        setConnectivityState(connectivity.Connecting, nil)
+        rawS, err := newStream(healthCheckMethod)
+        if err != nil {
+            continue retryConnection
+        }
+
+        s, ok := rawS.(grpc.ClientStream)
+        // Ideally, this should never happen. But if it happens, the server is marked as healthy for LBing purposes.
+        if !ok {
+            setConnectivityState(connectivity.Ready, nil)
+            return fmt.Errorf("newStream returned %v (type %T); want grpc.ClientStream", rawS, rawS)
+        }
+
+        if err = s.SendMsg(&healthpb.HealthCheckRequest{Service: service}); err != nil && err != io.EOF {
+            // Stream should have been closed, so we can safely continue to create a new stream.
+            continue retryConnection
+        }
+        s.CloseSend()
+
+        resp := new(healthpb.HealthCheckResponse)
+        for {
+            err = s.RecvMsg(resp)
+
+            // Reports healthy for LBing purposes if the health check is not implemented in the server.
+ if status.Code(err) == codes.Unimplemented { + setConnectivityState(connectivity.Ready, nil) + return err + } + + // Reports unhealthy if server's Watch method gives an error other than UNIMPLEMENTED. + if err != nil { + setConnectivityState(connectivity.TransientFailure, fmt.Errorf("connection active but received health check RPC error: %v", err)) + continue retryConnection + } + + // As a message has been received, removes the need for backoff for the next retry by resetting the try count. + tryCnt = 0 + if resp.Status == healthpb.HealthCheckResponse_SERVING { + setConnectivityState(connectivity.Ready, nil) + } else { + setConnectivityState(connectivity.TransientFailure, fmt.Errorf("connection active but health check failed. status=%s", resp.Status)) + } + } + } +} diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go new file mode 100644 index 00000000..4c2a527e --- /dev/null +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go @@ -0,0 +1,343 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: grpc/health/v1/health.proto + +package grpc_health_v1 + +import ( + context "context" + fmt "fmt" + proto "github.com/golang/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type HealthCheckResponse_ServingStatus int32 + +const ( + HealthCheckResponse_UNKNOWN HealthCheckResponse_ServingStatus = 0 + HealthCheckResponse_SERVING HealthCheckResponse_ServingStatus = 1 + HealthCheckResponse_NOT_SERVING HealthCheckResponse_ServingStatus = 2 + HealthCheckResponse_SERVICE_UNKNOWN HealthCheckResponse_ServingStatus = 3 +) + +var HealthCheckResponse_ServingStatus_name = map[int32]string{ + 0: "UNKNOWN", + 1: "SERVING", + 2: "NOT_SERVING", + 3: "SERVICE_UNKNOWN", +} + +var HealthCheckResponse_ServingStatus_value = map[string]int32{ + "UNKNOWN": 0, + "SERVING": 1, + "NOT_SERVING": 2, + "SERVICE_UNKNOWN": 3, +} + +func (x HealthCheckResponse_ServingStatus) String() string { + return proto.EnumName(HealthCheckResponse_ServingStatus_name, int32(x)) +} + +func (HealthCheckResponse_ServingStatus) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_e265fd9d4e077217, []int{1, 0} +} + +type HealthCheckRequest struct { + Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HealthCheckRequest) Reset() { *m = HealthCheckRequest{} } +func (m *HealthCheckRequest) String() string { return proto.CompactTextString(m) } +func (*HealthCheckRequest) ProtoMessage() {} +func (*HealthCheckRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_e265fd9d4e077217, []int{0} +} + +func (m *HealthCheckRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_HealthCheckRequest.Unmarshal(m, b) +} +func (m *HealthCheckRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_HealthCheckRequest.Marshal(b, m, deterministic) +} +func (m *HealthCheckRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_HealthCheckRequest.Merge(m, src) +} +func (m *HealthCheckRequest) XXX_Size() int { + return xxx_messageInfo_HealthCheckRequest.Size(m) +} +func (m *HealthCheckRequest) XXX_DiscardUnknown() { + xxx_messageInfo_HealthCheckRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_HealthCheckRequest proto.InternalMessageInfo + +func (m *HealthCheckRequest) GetService() string { + if m != nil { + return m.Service + } + return "" +} + +type HealthCheckResponse struct { + Status HealthCheckResponse_ServingStatus `protobuf:"varint,1,opt,name=status,proto3,enum=grpc.health.v1.HealthCheckResponse_ServingStatus" json:"status,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HealthCheckResponse) Reset() { *m = HealthCheckResponse{} } +func (m *HealthCheckResponse) String() string { return proto.CompactTextString(m) } +func (*HealthCheckResponse) ProtoMessage() {} +func (*HealthCheckResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_e265fd9d4e077217, []int{1} +} + +func (m *HealthCheckResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_HealthCheckResponse.Unmarshal(m, b) +} +func (m *HealthCheckResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_HealthCheckResponse.Marshal(b, m, deterministic) +} +func (m *HealthCheckResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_HealthCheckResponse.Merge(m, src) +} +func (m *HealthCheckResponse) XXX_Size() int { + return xxx_messageInfo_HealthCheckResponse.Size(m) +} +func (m *HealthCheckResponse) XXX_DiscardUnknown() { + 
xxx_messageInfo_HealthCheckResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_HealthCheckResponse proto.InternalMessageInfo + +func (m *HealthCheckResponse) GetStatus() HealthCheckResponse_ServingStatus { + if m != nil { + return m.Status + } + return HealthCheckResponse_UNKNOWN +} + +func init() { + proto.RegisterEnum("grpc.health.v1.HealthCheckResponse_ServingStatus", HealthCheckResponse_ServingStatus_name, HealthCheckResponse_ServingStatus_value) + proto.RegisterType((*HealthCheckRequest)(nil), "grpc.health.v1.HealthCheckRequest") + proto.RegisterType((*HealthCheckResponse)(nil), "grpc.health.v1.HealthCheckResponse") +} + +func init() { proto.RegisterFile("grpc/health/v1/health.proto", fileDescriptor_e265fd9d4e077217) } + +var fileDescriptor_e265fd9d4e077217 = []byte{ + // 297 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0x2f, 0x2a, 0x48, + 0xd6, 0xcf, 0x48, 0x4d, 0xcc, 0x29, 0xc9, 0xd0, 0x2f, 0x33, 0x84, 0xb2, 0xf4, 0x0a, 0x8a, 0xf2, + 0x4b, 0xf2, 0x85, 0xf8, 0x40, 0x92, 0x7a, 0x50, 0xa1, 0x32, 0x43, 0x25, 0x3d, 0x2e, 0x21, 0x0f, + 0x30, 0xc7, 0x39, 0x23, 0x35, 0x39, 0x3b, 0x28, 0xb5, 0xb0, 0x34, 0xb5, 0xb8, 0x44, 0x48, 0x82, + 0x8b, 0xbd, 0x38, 0xb5, 0xa8, 0x2c, 0x33, 0x39, 0x55, 0x82, 0x51, 0x81, 0x51, 0x83, 0x33, 0x08, + 0xc6, 0x55, 0xda, 0xc8, 0xc8, 0x25, 0x8c, 0xa2, 0xa1, 0xb8, 0x20, 0x3f, 0xaf, 0x38, 0x55, 0xc8, + 0x93, 0x8b, 0xad, 0xb8, 0x24, 0xb1, 0xa4, 0xb4, 0x18, 0xac, 0x81, 0xcf, 0xc8, 0x50, 0x0f, 0xd5, + 0x22, 0x3d, 0x2c, 0x9a, 0xf4, 0x82, 0x41, 0x86, 0xe6, 0xa5, 0x07, 0x83, 0x35, 0x06, 0x41, 0x0d, + 0x50, 0xf2, 0xe7, 0xe2, 0x45, 0x91, 0x10, 0xe2, 0xe6, 0x62, 0x0f, 0xf5, 0xf3, 0xf6, 0xf3, 0x0f, + 0xf7, 0x13, 0x60, 0x00, 0x71, 0x82, 0x5d, 0x83, 0xc2, 0x3c, 0xfd, 0xdc, 0x05, 0x18, 0x85, 0xf8, + 0xb9, 0xb8, 0xfd, 0xfc, 0x43, 0xe2, 0x61, 0x02, 0x4c, 0x42, 0xc2, 0x5c, 0xfc, 0x60, 0x8e, 0xb3, + 0x6b, 0x3c, 0x4c, 0x0b, 0xb3, 0xd1, 0x3a, 0x46, 0x2e, 0x36, 0x88, 0xf5, 0x42, 0x01, 0x5c, 0xac, + 0x60, 0x27, 0x08, 0x29, 0xe1, 0x75, 0x1f, 0x38, 0x14, 0xa4, 0x94, 0x89, 0xf0, 0x83, 0x50, 0x10, + 0x17, 0x6b, 0x78, 0x62, 0x49, 0x72, 0x06, 0xd5, 0x4c, 0x34, 0x60, 0x74, 0x4a, 0xe4, 0x12, 0xcc, + 0xcc, 0x47, 0x53, 0xea, 0xc4, 0x0d, 0x51, 0x1b, 0x00, 0x8a, 0xc6, 0x00, 0xc6, 0x28, 0x9d, 0xf4, + 0xfc, 0xfc, 0xf4, 0x9c, 0x54, 0xbd, 0xf4, 0xfc, 0x9c, 0xc4, 0xbc, 0x74, 0xbd, 0xfc, 0xa2, 0x74, + 0x7d, 0xe4, 0x78, 0x07, 0xb1, 0xe3, 0x21, 0xec, 0xf8, 0x32, 0xc3, 0x55, 0x4c, 0x7c, 0xee, 0x20, + 0xd3, 0x20, 0x46, 0xe8, 0x85, 0x19, 0x26, 0xb1, 0x81, 0x93, 0x83, 0x31, 0x20, 0x00, 0x00, 0xff, + 0xff, 0x12, 0x7d, 0x96, 0xcb, 0x2d, 0x02, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// HealthClient is the client API for Health service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type HealthClient interface { + // If the requested service is unknown, the call will fail with status + // NOT_FOUND. + Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) + // Performs a watch for the serving status of the requested service. 
+ // The server will immediately send back a message indicating the current + // serving status. It will then subsequently send a new message whenever + // the service's serving status changes. + // + // If the requested service is unknown when the call is received, the + // server will send a message setting the serving status to + // SERVICE_UNKNOWN but will *not* terminate the call. If at some + // future point, the serving status of the service becomes known, the + // server will send a new message with the service's serving status. + // + // If the call terminates with status UNIMPLEMENTED, then clients + // should assume this method is not supported and should not retry the + // call. If the call terminates with any other status (including OK), + // clients should retry the call with appropriate exponential backoff. + Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error) +} + +type healthClient struct { + cc grpc.ClientConnInterface +} + +func NewHealthClient(cc grpc.ClientConnInterface) HealthClient { + return &healthClient{cc} +} + +func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) { + out := new(HealthCheckResponse) + err := c.cc.Invoke(ctx, "/grpc.health.v1.Health/Check", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error) { + stream, err := c.cc.NewStream(ctx, &_Health_serviceDesc.Streams[0], "/grpc.health.v1.Health/Watch", opts...) + if err != nil { + return nil, err + } + x := &healthWatchClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Health_WatchClient interface { + Recv() (*HealthCheckResponse, error) + grpc.ClientStream +} + +type healthWatchClient struct { + grpc.ClientStream +} + +func (x *healthWatchClient) Recv() (*HealthCheckResponse, error) { + m := new(HealthCheckResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// HealthServer is the server API for Health service. +type HealthServer interface { + // If the requested service is unknown, the call will fail with status + // NOT_FOUND. + Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) + // Performs a watch for the serving status of the requested service. + // The server will immediately send back a message indicating the current + // serving status. It will then subsequently send a new message whenever + // the service's serving status changes. + // + // If the requested service is unknown when the call is received, the + // server will send a message setting the serving status to + // SERVICE_UNKNOWN but will *not* terminate the call. If at some + // future point, the serving status of the service becomes known, the + // server will send a new message with the service's serving status. + // + // If the call terminates with status UNIMPLEMENTED, then clients + // should assume this method is not supported and should not retry the + // call. If the call terminates with any other status (including OK), + // clients should retry the call with appropriate exponential backoff. 
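+	// Editor's sketch (illustrative, not upstream code): a client consuming
+	// this stream might look like the following, where conn is an assumed
+	// established *grpc.ClientConn:
+	//
+	//	cli := grpc_health_v1.NewHealthClient(conn)
+	//	stream, err := cli.Watch(ctx, &grpc_health_v1.HealthCheckRequest{Service: "my.Service"})
+	//	for err == nil {
+	//		var resp *grpc_health_v1.HealthCheckResponse
+	//		if resp, err = stream.Recv(); err == nil {
+	//			log.Printf("serving status: %v", resp.Status)
+	//		}
+	//	}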
+	Watch(*HealthCheckRequest, Health_WatchServer) error
+}
+
+// UnimplementedHealthServer can be embedded to have forward compatible implementations.
+type UnimplementedHealthServer struct {
+}
+
+func (*UnimplementedHealthServer) Check(ctx context.Context, req *HealthCheckRequest) (*HealthCheckResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method Check not implemented")
+}
+func (*UnimplementedHealthServer) Watch(req *HealthCheckRequest, srv Health_WatchServer) error {
+	return status.Errorf(codes.Unimplemented, "method Watch not implemented")
+}
+
+func RegisterHealthServer(s *grpc.Server, srv HealthServer) {
+	s.RegisterService(&_Health_serviceDesc, srv)
+}
+
+func _Health_Check_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(HealthCheckRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(HealthServer).Check(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/grpc.health.v1.Health/Check",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(HealthServer).Check(ctx, req.(*HealthCheckRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Health_Watch_Handler(srv interface{}, stream grpc.ServerStream) error {
+	m := new(HealthCheckRequest)
+	if err := stream.RecvMsg(m); err != nil {
+		return err
+	}
+	return srv.(HealthServer).Watch(m, &healthWatchServer{stream})
+}
+
+type Health_WatchServer interface {
+	Send(*HealthCheckResponse) error
+	grpc.ServerStream
+}
+
+type healthWatchServer struct {
+	grpc.ServerStream
+}
+
+func (x *healthWatchServer) Send(m *HealthCheckResponse) error {
+	return x.ServerStream.SendMsg(m)
+}
+
+var _Health_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "grpc.health.v1.Health",
+	HandlerType: (*HealthServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "Check",
+			Handler:    _Health_Check_Handler,
+		},
+	},
+	Streams: []grpc.StreamDesc{
+		{
+			StreamName:    "Watch",
+			Handler:       _Health_Watch_Handler,
+			ServerStreams: true,
+		},
+	},
+	Metadata: "grpc/health/v1/health.proto",
+}
diff --git a/vendor/google.golang.org/grpc/health/regenerate.sh b/vendor/google.golang.org/grpc/health/regenerate.sh
new file mode 100644
index 00000000..b11eccb2
--- /dev/null
+++ b/vendor/google.golang.org/grpc/health/regenerate.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+# Copyright 2018 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -eux -o pipefail
+
+TMP=$(mktemp -d)
+
+function finish {
+  rm -rf "$TMP"
+}
+trap finish EXIT
+
+pushd "$TMP"
+mkdir -p grpc/health/v1
+curl https://raw.githubusercontent.com/grpc/grpc-proto/master/grpc/health/v1/health.proto > grpc/health/v1/health.proto
+
+protoc --go_out=plugins=grpc,paths=source_relative:. -I. grpc/health/v1/*.proto
+popd
+rm -f grpc_health_v1/*.pb.go
+cp "$TMP"/grpc/health/v1/*.pb.go grpc_health_v1/
+
diff --git a/vendor/google.golang.org/grpc/health/server.go b/vendor/google.golang.org/grpc/health/server.go
new file mode 100644
index 00000000..2262607f
--- /dev/null
+++ b/vendor/google.golang.org/grpc/health/server.go
@@ -0,0 +1,165 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+//go:generate ./regenerate.sh
+
+// Package health provides a service that exposes the server's health, and it
+// must be imported to enable support for client-side health checks.
+package health
+
+import (
+	"context"
+	"sync"
+
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/grpclog"
+	healthgrpc "google.golang.org/grpc/health/grpc_health_v1"
+	healthpb "google.golang.org/grpc/health/grpc_health_v1"
+	"google.golang.org/grpc/status"
+)
+
+// Server implements `service Health`.
+type Server struct {
+	mu sync.RWMutex
+	// If shutdown is true, it's expected all serving status is NOT_SERVING, and
+	// will stay in NOT_SERVING.
+	shutdown bool
+	// statusMap stores the serving status of the services this Server monitors.
+	statusMap map[string]healthpb.HealthCheckResponse_ServingStatus
+	updates   map[string]map[healthgrpc.Health_WatchServer]chan healthpb.HealthCheckResponse_ServingStatus
+}
+
+// NewServer returns a new Server.
+func NewServer() *Server {
+	return &Server{
+		statusMap: map[string]healthpb.HealthCheckResponse_ServingStatus{"": healthpb.HealthCheckResponse_SERVING},
+		updates:   make(map[string]map[healthgrpc.Health_WatchServer]chan healthpb.HealthCheckResponse_ServingStatus),
+	}
+}
+
+// Check implements `service Health`.
+func (s *Server) Check(ctx context.Context, in *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) {
+	s.mu.RLock()
+	defer s.mu.RUnlock()
+	if servingStatus, ok := s.statusMap[in.Service]; ok {
+		return &healthpb.HealthCheckResponse{
+			Status: servingStatus,
+		}, nil
+	}
+	return nil, status.Error(codes.NotFound, "unknown service")
+}
+
+// Watch implements `service Health`.
+func (s *Server) Watch(in *healthpb.HealthCheckRequest, stream healthgrpc.Health_WatchServer) error {
+	service := in.Service
+	// update channel is used for getting service status updates.
+	update := make(chan healthpb.HealthCheckResponse_ServingStatus, 1)
+	s.mu.Lock()
+	// Puts the initial status to the channel.
+	if servingStatus, ok := s.statusMap[service]; ok {
+		update <- servingStatus
+	} else {
+		update <- healthpb.HealthCheckResponse_SERVICE_UNKNOWN
+	}
+
+	// Registers the update channel to the correct place in the updates map.
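+	// Editor's sketch (illustrative, not upstream code): wiring this server
+	// into a gRPC server typically looks like:
+	//
+	//	hs := health.NewServer()
+	//	healthgrpc.RegisterHealthServer(grpcServer, hs)
+	//	hs.SetServingStatus("my.Service", healthpb.HealthCheckResponse_SERVING)
+	//
+	// where grpcServer is an assumed *grpc.Server.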
+ if _, ok := s.updates[service]; !ok { + s.updates[service] = make(map[healthgrpc.Health_WatchServer]chan healthpb.HealthCheckResponse_ServingStatus) + } + s.updates[service][stream] = update + defer func() { + s.mu.Lock() + delete(s.updates[service], stream) + s.mu.Unlock() + }() + s.mu.Unlock() + + var lastSentStatus healthpb.HealthCheckResponse_ServingStatus = -1 + for { + select { + // Status updated. Sends the up-to-date status to the client. + case servingStatus := <-update: + if lastSentStatus == servingStatus { + continue + } + lastSentStatus = servingStatus + err := stream.Send(&healthpb.HealthCheckResponse{Status: servingStatus}) + if err != nil { + return status.Error(codes.Canceled, "Stream has ended.") + } + // Context done. Removes the update channel from the updates map. + case <-stream.Context().Done(): + return status.Error(codes.Canceled, "Stream has ended.") + } + } +} + +// SetServingStatus is called when need to reset the serving status of a service +// or insert a new service entry into the statusMap. +func (s *Server) SetServingStatus(service string, servingStatus healthpb.HealthCheckResponse_ServingStatus) { + s.mu.Lock() + defer s.mu.Unlock() + if s.shutdown { + grpclog.Infof("health: status changing for %s to %v is ignored because health service is shutdown", service, servingStatus) + return + } + + s.setServingStatusLocked(service, servingStatus) +} + +func (s *Server) setServingStatusLocked(service string, servingStatus healthpb.HealthCheckResponse_ServingStatus) { + s.statusMap[service] = servingStatus + for _, update := range s.updates[service] { + // Clears previous updates, that are not sent to the client, from the channel. + // This can happen if the client is not reading and the server gets flow control limited. + select { + case <-update: + default: + } + // Puts the most recent update to the channel. + update <- servingStatus + } +} + +// Shutdown sets all serving status to NOT_SERVING, and configures the server to +// ignore all future status changes. +// +// This changes serving status for all services. To set status for a particular +// services, call SetServingStatus(). +func (s *Server) Shutdown() { + s.mu.Lock() + defer s.mu.Unlock() + s.shutdown = true + for service := range s.statusMap { + s.setServingStatusLocked(service, healthpb.HealthCheckResponse_NOT_SERVING) + } +} + +// Resume sets all serving status to SERVING, and configures the server to +// accept all future status changes. +// +// This changes serving status for all services. To set status for a particular +// services, call SetServingStatus(). +func (s *Server) Resume() { + s.mu.Lock() + defer s.mu.Unlock() + s.shutdown = false + for service := range s.statusMap { + s.setServingStatusLocked(service, healthpb.HealthCheckResponse_SERVING) + } +} diff --git a/vendor/google.golang.org/grpc/install_gae.sh b/vendor/google.golang.org/grpc/install_gae.sh new file mode 100644 index 00000000..7c7bcada --- /dev/null +++ b/vendor/google.golang.org/grpc/install_gae.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +TMP=$(mktemp -d /tmp/sdk.XXX) \ +&& curl -o $TMP.zip "https://storage.googleapis.com/appengine-sdks/featured/go_appengine_sdk_linux_amd64-1.9.68.zip" \ +&& unzip -q $TMP.zip -d $TMP \ +&& export PATH="$PATH:$TMP/go_appengine" diff --git a/vendor/google.golang.org/grpc/interceptor.go b/vendor/google.golang.org/grpc/interceptor.go new file mode 100644 index 00000000..8b735002 --- /dev/null +++ b/vendor/google.golang.org/grpc/interceptor.go @@ -0,0 +1,77 @@ +/* + * + * Copyright 2016 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" +) + +// UnaryInvoker is called by UnaryClientInterceptor to complete RPCs. +type UnaryInvoker func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error + +// UnaryClientInterceptor intercepts the execution of a unary RPC on the client. invoker is the handler to complete the RPC +// and it is the responsibility of the interceptor to call it. +// This is an EXPERIMENTAL API. +type UnaryClientInterceptor func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error + +// Streamer is called by StreamClientInterceptor to create a ClientStream. +type Streamer func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) + +// StreamClientInterceptor intercepts the creation of ClientStream. It may return a custom ClientStream to intercept all I/O +// operations. streamer is the handler to create a ClientStream and it is the responsibility of the interceptor to call it. +// This is an EXPERIMENTAL API. +type StreamClientInterceptor func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error) + +// UnaryServerInfo consists of various information about a unary RPC on +// server side. All per-rpc information may be mutated by the interceptor. +type UnaryServerInfo struct { + // Server is the service implementation the user provides. This is read-only. + Server interface{} + // FullMethod is the full RPC method string, i.e., /package.service/method. + FullMethod string +} + +// UnaryHandler defines the handler invoked by UnaryServerInterceptor to complete the normal +// execution of a unary RPC. If a UnaryHandler returns an error, it should be produced by the +// status package, or else gRPC will use codes.Unknown as the status code and err.Error() as +// the status message of the RPC. +type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error) + +// UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info +// contains all the information of this RPC the interceptor can operate on. And handler is the wrapper +// of the service method implementation. It is the responsibility of the interceptor to invoke handler +// to complete the RPC. +type UnaryServerInterceptor func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (resp interface{}, err error) + +// StreamServerInfo consists of various information about a streaming RPC on +// server side. All per-rpc information may be mutated by the interceptor. +type StreamServerInfo struct { + // FullMethod is the full RPC method string, i.e., /package.service/method. + FullMethod string + // IsClientStream indicates whether the RPC is a client streaming RPC. 
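+	// Editor's sketch (illustrative, not upstream code): a minimal logging
+	// UnaryServerInterceptor built on the hooks above:
+	//
+	//	func logUnary(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo,
+	//		handler grpc.UnaryHandler) (interface{}, error) {
+	//		log.Printf("call %s", info.FullMethod)
+	//		return handler(ctx, req) // the interceptor must invoke handler itself
+	//	}
+	//
+	//	s := grpc.NewServer(grpc.UnaryInterceptor(logUnary))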
+	IsClientStream bool
+	// IsServerStream indicates whether the RPC is a server streaming RPC.
+	IsServerStream bool
+}
+
+// StreamServerInterceptor provides a hook to intercept the execution of a streaming RPC on the server.
+// info contains all the information of this RPC the interceptor can operate on. And handler is the
+// service method implementation. It is the responsibility of the interceptor to invoke handler to
+// complete the RPC.
+type StreamServerInterceptor func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error
diff --git a/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/vendor/google.golang.org/grpc/internal/backoff/backoff.go
new file mode 100644
index 00000000..5fc0ee3d
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/backoff/backoff.go
@@ -0,0 +1,73 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package backoff implements the backoff strategy for gRPC.
+//
+// This is kept in internal until the gRPC project decides whether or not to
+// allow alternative backoff strategies.
+package backoff
+
+import (
+	"time"
+
+	grpcbackoff "google.golang.org/grpc/backoff"
+	"google.golang.org/grpc/internal/grpcrand"
+)
+
+// Strategy defines the methodology for backing off after a grpc connection
+// failure.
+type Strategy interface {
+	// Backoff returns the amount of time to wait before the next retry given
+	// the number of consecutive failures.
+	Backoff(retries int) time.Duration
+}
+
+// DefaultExponential is an exponential backoff implementation using the
+// default values for all the configurable knobs defined in
+// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
+var DefaultExponential = Exponential{Config: grpcbackoff.DefaultConfig}
+
+// Exponential implements exponential backoff algorithm as defined in
+// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
+type Exponential struct {
+	// Config contains all options to configure the backoff algorithm.
+	Config grpcbackoff.Config
+}
+
+// Backoff returns the amount of time to wait before the next retry given the
+// number of retries.
+func (bc Exponential) Backoff(retries int) time.Duration {
+	if retries == 0 {
+		return bc.Config.BaseDelay
+	}
+	backoff, max := float64(bc.Config.BaseDelay), float64(bc.Config.MaxDelay)
+	for backoff < max && retries > 0 {
+		backoff *= bc.Config.Multiplier
+		retries--
+	}
+	if backoff > max {
+		backoff = max
+	}
+	// Randomize backoff delays so that if a cluster of requests start at
+	// the same time, they won't operate in lockstep.
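+	// Worked example (editor's note, not upstream): with the default config
+	// (BaseDelay=1s, Multiplier=1.6, Jitter=0.2), retries=3 yields
+	// 1s * 1.6^3 = 4.096s before jitter; the line below then draws a value
+	// from roughly [3.28s, 4.92s].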
+	backoff *= 1 + bc.Config.Jitter*(grpcrand.Float64()*2-1)
+	if backoff < 0 {
+		return 0
+	}
+	return time.Duration(backoff)
+}
diff --git a/vendor/google.golang.org/grpc/internal/balancerload/load.go b/vendor/google.golang.org/grpc/internal/balancerload/load.go
new file mode 100644
index 00000000..3a905d96
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/balancerload/load.go
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2019 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Package balancerload defines APIs to parse server loads in trailers. The
+// parsed loads are sent to balancers in DoneInfo.
+package balancerload
+
+import (
+	"google.golang.org/grpc/metadata"
+)
+
+// Parser converts loads from metadata into a concrete type.
+type Parser interface {
+	// Parse parses loads from metadata.
+	Parse(md metadata.MD) interface{}
+}
+
+var parser Parser
+
+// SetParser sets the load parser.
+//
+// Not mutex-protected, should be called before any gRPC functions.
+func SetParser(lr Parser) {
+	parser = lr
+}
+
+// Parse calls parser.Parse().
+func Parse(md metadata.MD) interface{} {
+	if parser == nil {
+		return nil
+	}
+	return parser.Parse(md)
+}
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go
new file mode 100644
index 00000000..8b105167
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go
@@ -0,0 +1,167 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package binarylog implements binary logging as defined in
+// https://github.com/grpc/proposal/blob/master/A16-binary-logging.md.
package binarylog
+
+import (
+	"fmt"
+	"os"
+
+	"google.golang.org/grpc/grpclog"
+)
+
+// Logger is the global binary logger. It can be used to get binary logger for
+// each method.
+type Logger interface {
+	getMethodLogger(methodName string) *MethodLogger
+}
+
+// binLogger is the global binary logger for the binary. One of these should be
+// built at init time from the configuration (environment variable or flags).
+//
+// It is used to get a methodLogger for each individual method.
+var binLogger Logger
+
+// SetLogger sets the binary logger.
+//
+// Only call this at init time.
+func SetLogger(l Logger) {
+	binLogger = l
+}
+
+// GetMethodLogger returns the methodLogger for the given methodName.
+//
+// methodName should be in the format of "/service/method".
+// +// Each methodLogger returned by this method is a new instance. This is to +// generate sequence id within the call. +func GetMethodLogger(methodName string) *MethodLogger { + if binLogger == nil { + return nil + } + return binLogger.getMethodLogger(methodName) +} + +func init() { + const envStr = "GRPC_BINARY_LOG_FILTER" + configStr := os.Getenv(envStr) + binLogger = NewLoggerFromConfigString(configStr) +} + +type methodLoggerConfig struct { + // Max length of header and message. + hdr, msg uint64 +} + +type logger struct { + all *methodLoggerConfig + services map[string]*methodLoggerConfig + methods map[string]*methodLoggerConfig + + blacklist map[string]struct{} +} + +// newEmptyLogger creates an empty logger. The map fields need to be filled in +// using the set* functions. +func newEmptyLogger() *logger { + return &logger{} +} + +// Set method logger for "*". +func (l *logger) setDefaultMethodLogger(ml *methodLoggerConfig) error { + if l.all != nil { + return fmt.Errorf("conflicting global rules found") + } + l.all = ml + return nil +} + +// Set method logger for "service/*". +// +// New methodLogger with same service overrides the old one. +func (l *logger) setServiceMethodLogger(service string, ml *methodLoggerConfig) error { + if _, ok := l.services[service]; ok { + return fmt.Errorf("conflicting service rules for service %v found", service) + } + if l.services == nil { + l.services = make(map[string]*methodLoggerConfig) + } + l.services[service] = ml + return nil +} + +// Set method logger for "service/method". +// +// New methodLogger with same method overrides the old one. +func (l *logger) setMethodMethodLogger(method string, ml *methodLoggerConfig) error { + if _, ok := l.blacklist[method]; ok { + return fmt.Errorf("conflicting blacklist rules for method %v found", method) + } + if _, ok := l.methods[method]; ok { + return fmt.Errorf("conflicting method rules for method %v found", method) + } + if l.methods == nil { + l.methods = make(map[string]*methodLoggerConfig) + } + l.methods[method] = ml + return nil +} + +// Set blacklist method for "-service/method". +func (l *logger) setBlacklist(method string) error { + if _, ok := l.blacklist[method]; ok { + return fmt.Errorf("conflicting blacklist rules for method %v found", method) + } + if _, ok := l.methods[method]; ok { + return fmt.Errorf("conflicting method rules for method %v found", method) + } + if l.blacklist == nil { + l.blacklist = make(map[string]struct{}) + } + l.blacklist[method] = struct{}{} + return nil +} + +// getMethodLogger returns the methodLogger for the given methodName. +// +// methodName should be in the format of "/service/method". +// +// Each methodLogger returned by this method is a new instance. This is to +// generate sequence id within the call. 
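+// Editor's sketch (illustrative, not upstream code): with the config string
+// "Foo/*,-Foo/Bar", the lookup below behaves like:
+//
+//	l := NewLoggerFromConfigString("Foo/*,-Foo/Bar")
+//	l.getMethodLogger("/Foo/Baz") // non-nil: matched by the service rule
+//	l.getMethodLogger("/Foo/Bar") // nil: blacklisted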
+func (l *logger) getMethodLogger(methodName string) *MethodLogger {
+	s, m, err := parseMethodName(methodName)
+	if err != nil {
+		grpclog.Infof("binarylogging: failed to parse %q: %v", methodName, err)
+		return nil
+	}
+	if ml, ok := l.methods[s+"/"+m]; ok {
+		return newMethodLogger(ml.hdr, ml.msg)
+	}
+	if _, ok := l.blacklist[s+"/"+m]; ok {
+		return nil
+	}
+	if ml, ok := l.services[s]; ok {
+		return newMethodLogger(ml.hdr, ml.msg)
+	}
+	if l.all == nil {
+		return nil
+	}
+	return newMethodLogger(l.all.hdr, l.all.msg)
+}
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go
new file mode 100644
index 00000000..1ee00a39
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go
@@ -0,0 +1,42 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// This file contains exported variables/functions that are exported for testing
+// only.
+//
+// An ideal way for this would be to put those in a *_test.go file that is
+// still in the binarylog package. But this doesn't work with staticcheck with
+// go module. Error was:
+// "MdToMetadataProto not declared by package binarylog". This could be caused
+// by the way staticcheck looks for files for a certain package, which doesn't
+// support *_test.go files.
+//
+// Move those to binary_test.go when staticcheck is fixed.
+
+package binarylog
+
+var (
+	// AllLogger is a logger that logs all headers/messages for all RPCs. It's
+	// for testing only.
+	AllLogger = NewLoggerFromConfigString("*")
+	// MdToMetadataProto converts metadata to a binary logging proto message.
+	// It's for testing only.
+	MdToMetadataProto = mdToMetadataProto
+	// AddrToProto converts an address to a binary logging proto message. It's
+	// for testing only.
+	AddrToProto = addrToProto
+)
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go
new file mode 100644
index 00000000..be30d0e6
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go
@@ -0,0 +1,210 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package binarylog
+
+import (
+	"errors"
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+
+	"google.golang.org/grpc/grpclog"
+)
+
+// NewLoggerFromConfigString reads the string and builds a logger. It can be used
+// to build a new logger and assign it to binarylog.Logger.
+//
+// Example filter config strings:
+//  - "" Nothing will be logged
+//  - "*" All headers and messages will be fully logged.
+//  - "*{h}" Only headers will be logged.
+//  - "*{m:256}" Only the first 256 bytes of each message will be logged.
+//  - "Foo/*" Logs every method in service Foo
+//  - "Foo/*,-Foo/Bar" Logs every method in service Foo except method /Foo/Bar
+//  - "Foo/*,Foo/Bar{m:256}" Logs the first 256 bytes of each message in method
+//    /Foo/Bar, logs all headers and messages in every other method in service
+//    Foo.
+//
+// If two configs exist for one certain method or service, the one specified
+// later overrides the previous config.
+func NewLoggerFromConfigString(s string) Logger {
+	if s == "" {
+		return nil
+	}
+	l := newEmptyLogger()
+	methods := strings.Split(s, ",")
+	for _, method := range methods {
+		if err := l.fillMethodLoggerWithConfigString(method); err != nil {
+			grpclog.Warningf("failed to parse binary log config: %v", err)
+			return nil
+		}
+	}
+	return l
+}
+
+// fillMethodLoggerWithConfigString parses config, creates methodLogger and adds
+// it to the right map in the logger.
+func (l *logger) fillMethodLoggerWithConfigString(config string) error {
+	// "" is invalid.
+	if config == "" {
+		return errors.New("empty string is not a valid method binary logging config")
+	}
+
+	// "-service/method", blacklist, no * or {} allowed.
+	if config[0] == '-' {
+		s, m, suffix, err := parseMethodConfigAndSuffix(config[1:])
+		if err != nil {
+			return fmt.Errorf("invalid config: %q, %v", config, err)
+		}
+		if m == "*" {
+			return fmt.Errorf("invalid config: %q, %v", config, "* not allowed in blacklist config")
+		}
+		if suffix != "" {
+			return fmt.Errorf("invalid config: %q, %v", config, "header/message limit not allowed in blacklist config")
+		}
+		if err := l.setBlacklist(s + "/" + m); err != nil {
+			return fmt.Errorf("invalid config: %v", err)
+		}
+		return nil
+	}
+
+	// "*{h:256;m:256}"
+	if config[0] == '*' {
+		hdr, msg, err := parseHeaderMessageLengthConfig(config[1:])
+		if err != nil {
+			return fmt.Errorf("invalid config: %q, %v", config, err)
+		}
+		if err := l.setDefaultMethodLogger(&methodLoggerConfig{hdr: hdr, msg: msg}); err != nil {
+			return fmt.Errorf("invalid config: %v", err)
+		}
+		return nil
+	}
+
+	s, m, suffix, err := parseMethodConfigAndSuffix(config)
+	if err != nil {
+		return fmt.Errorf("invalid config: %q, %v", config, err)
+	}
+	hdr, msg, err := parseHeaderMessageLengthConfig(suffix)
+	if err != nil {
+		return fmt.Errorf("invalid header/message length config: %q, %v", suffix, err)
+	}
+	if m == "*" {
+		if err := l.setServiceMethodLogger(s, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil {
+			return fmt.Errorf("invalid config: %v", err)
+		}
+	} else {
+		if err := l.setMethodMethodLogger(s+"/"+m, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil {
+			return fmt.Errorf("invalid config: %v", err)
+		}
+	}
+	return nil
+}
+
+const (
+	// TODO: this const is only used by env_config now. But could be useful for
+	// other config. Move to binarylog.go if necessary.
+	maxUInt = ^uint64(0)
+
+	// For "p.s/m" plus any suffix. Suffix will be parsed again. See test for
+	// expected output.
+	longMethodConfigRegexpStr = `^([\w./]+)/((?:\w+)|[*])(.+)?$`
+
+	// For suffix from above, "{h:123,m:123}". See test for expected output.
+	optionalLengthRegexpStr = `(?::(\d+))?` // Optional ":123".
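+	// Worked example (editor's note, not upstream): parseMethodConfigAndSuffix
+	// splits "p.s/m{h:123;m:456}" into ("p.s", "m", "{h:123;m:456}"), and
+	// parseHeaderMessageLengthConfig turns that suffix into limits 123 and
+	// 456; a bare "{h}" means headers with no length cap (maxUInt) and no
+	// messages at all.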
+ headerConfigRegexpStr = `^{h` + optionalLengthRegexpStr + `}$` + messageConfigRegexpStr = `^{m` + optionalLengthRegexpStr + `}$` + headerMessageConfigRegexpStr = `^{h` + optionalLengthRegexpStr + `;m` + optionalLengthRegexpStr + `}$` +) + +var ( + longMethodConfigRegexp = regexp.MustCompile(longMethodConfigRegexpStr) + headerConfigRegexp = regexp.MustCompile(headerConfigRegexpStr) + messageConfigRegexp = regexp.MustCompile(messageConfigRegexpStr) + headerMessageConfigRegexp = regexp.MustCompile(headerMessageConfigRegexpStr) +) + +// Turn "service/method{h;m}" into "service", "method", "{h;m}". +func parseMethodConfigAndSuffix(c string) (service, method, suffix string, _ error) { + // Regexp result: + // + // in: "p.s/m{h:123,m:123}", + // out: []string{"p.s/m{h:123,m:123}", "p.s", "m", "{h:123,m:123}"}, + match := longMethodConfigRegexp.FindStringSubmatch(c) + if match == nil { + return "", "", "", fmt.Errorf("%q contains invalid substring", c) + } + service = match[1] + method = match[2] + suffix = match[3] + return +} + +// Turn "{h:123;m:345}" into 123, 345. +// +// Return maxUInt if length is unspecified. +func parseHeaderMessageLengthConfig(c string) (hdrLenStr, msgLenStr uint64, err error) { + if c == "" { + return maxUInt, maxUInt, nil + } + // Header config only. + if match := headerConfigRegexp.FindStringSubmatch(c); match != nil { + if s := match[1]; s != "" { + hdrLenStr, err = strconv.ParseUint(s, 10, 64) + if err != nil { + return 0, 0, fmt.Errorf("failed to convert %q to uint", s) + } + return hdrLenStr, 0, nil + } + return maxUInt, 0, nil + } + + // Message config only. + if match := messageConfigRegexp.FindStringSubmatch(c); match != nil { + if s := match[1]; s != "" { + msgLenStr, err = strconv.ParseUint(s, 10, 64) + if err != nil { + return 0, 0, fmt.Errorf("failed to convert %q to uint", s) + } + return 0, msgLenStr, nil + } + return 0, maxUInt, nil + } + + // Header and message config both. + if match := headerMessageConfigRegexp.FindStringSubmatch(c); match != nil { + // Both hdr and msg are specified, but one or two of them might be empty. + hdrLenStr = maxUInt + msgLenStr = maxUInt + if s := match[1]; s != "" { + hdrLenStr, err = strconv.ParseUint(s, 10, 64) + if err != nil { + return 0, 0, fmt.Errorf("failed to convert %q to uint", s) + } + } + if s := match[2]; s != "" { + msgLenStr, err = strconv.ParseUint(s, 10, 64) + if err != nil { + return 0, 0, fmt.Errorf("failed to convert %q to uint", s) + } + } + return hdrLenStr, msgLenStr, nil + } + return 0, 0, fmt.Errorf("%q contains invalid substring", c) +} diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go new file mode 100644 index 00000000..160f6e86 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go @@ -0,0 +1,423 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ *
+ */
+
+package binarylog
+
+import (
+	"net"
+	"strings"
+	"sync/atomic"
+	"time"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/golang/protobuf/ptypes"
+	pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/status"
+)
+
+type callIDGenerator struct {
+	id uint64
+}
+
+func (g *callIDGenerator) next() uint64 {
+	id := atomic.AddUint64(&g.id, 1)
+	return id
+}
+
+// reset is for testing only, and doesn't need to be thread safe.
+func (g *callIDGenerator) reset() {
+	g.id = 0
+}
+
+var idGen callIDGenerator
+
+// MethodLogger is the sub-logger for each method.
+type MethodLogger struct {
+	headerMaxLen, messageMaxLen uint64
+
+	callID          uint64
+	idWithinCallGen *callIDGenerator
+
+	sink Sink // TODO(blog): make this pluggable.
+}
+
+func newMethodLogger(h, m uint64) *MethodLogger {
+	return &MethodLogger{
+		headerMaxLen:  h,
+		messageMaxLen: m,
+
+		callID:          idGen.next(),
+		idWithinCallGen: &callIDGenerator{},
+
+		sink: defaultSink, // TODO(blog): make it pluggable.
+	}
+}
+
+// Log creates a proto binary log entry, and logs it to the sink.
+func (ml *MethodLogger) Log(c LogEntryConfig) {
+	m := c.toProto()
+	timestamp, _ := ptypes.TimestampProto(time.Now())
+	m.Timestamp = timestamp
+	m.CallId = ml.callID
+	m.SequenceIdWithinCall = ml.idWithinCallGen.next()
+
+	switch pay := m.Payload.(type) {
+	case *pb.GrpcLogEntry_ClientHeader:
+		m.PayloadTruncated = ml.truncateMetadata(pay.ClientHeader.GetMetadata())
+	case *pb.GrpcLogEntry_ServerHeader:
+		m.PayloadTruncated = ml.truncateMetadata(pay.ServerHeader.GetMetadata())
+	case *pb.GrpcLogEntry_Message:
+		m.PayloadTruncated = ml.truncateMessage(pay.Message)
+	}
+
+	ml.sink.Write(m)
+}
+
+func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) {
+	if ml.headerMaxLen == maxUInt {
+		return false
+	}
+	var (
+		bytesLimit = ml.headerMaxLen
+		index      int
+	)
+	// At the end of the loop, index will be the first entry where the total
+	// size is greater than the limit:
+	//
+	// len(entry[:index]) <= ml.hdr && len(entry[:index+1]) > ml.hdr.
+	for ; index < len(mdPb.Entry); index++ {
+		entry := mdPb.Entry[index]
+		if entry.Key == "grpc-trace-bin" {
+			// "grpc-trace-bin" is a special key. It's kept in the log entry,
+			// but not counted towards the size limit.
+			continue
+		}
+		currentEntryLen := uint64(len(entry.Value))
+		if currentEntryLen > bytesLimit {
+			break
+		}
+		bytesLimit -= currentEntryLen
+	}
+	truncated = index < len(mdPb.Entry)
+	mdPb.Entry = mdPb.Entry[:index]
+	return truncated
+}
+
+func (ml *MethodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) {
+	if ml.messageMaxLen == maxUInt {
+		return false
+	}
+	if ml.messageMaxLen >= uint64(len(msgPb.Data)) {
+		return false
+	}
+	msgPb.Data = msgPb.Data[:ml.messageMaxLen]
+	return true
+}
+
+// LogEntryConfig represents the configuration for binary log entry.
+type LogEntryConfig interface {
+	toProto() *pb.GrpcLogEntry
+}
+
+// ClientHeader configs the binary log entry to be a ClientHeader entry.
+type ClientHeader struct {
+	OnClientSide bool
+	Header       metadata.MD
+	MethodName   string
+	Authority    string
+	Timeout      time.Duration
+	// PeerAddr is required only when it's on server side.
+	PeerAddr net.Addr
+}
+
+func (c *ClientHeader) toProto() *pb.GrpcLogEntry {
+	// This function doesn't need to set all the fields (e.g. seq ID). The Log
+	// function will set the fields when necessary.
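+	// Editor's sketch (illustrative, not upstream code): a caller typically
+	// builds a config like this one and hands it to MethodLogger.Log:
+	//
+	//	if ml := GetMethodLogger("/grpc.health.v1.Health/Check"); ml != nil {
+	//		ml.Log(&ClientHeader{OnClientSide: true, MethodName: "/grpc.health.v1.Health/Check"})
+	//	}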
+	clientHeader := &pb.ClientHeader{
+		Metadata:   mdToMetadataProto(c.Header),
+		MethodName: c.MethodName,
+		Authority:  c.Authority,
+	}
+	if c.Timeout > 0 {
+		clientHeader.Timeout = ptypes.DurationProto(c.Timeout)
+	}
+	ret := &pb.GrpcLogEntry{
+		Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER,
+		Payload: &pb.GrpcLogEntry_ClientHeader{
+			ClientHeader: clientHeader,
+		},
+	}
+	if c.OnClientSide {
+		ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
+	} else {
+		ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
+	}
+	if c.PeerAddr != nil {
+		ret.Peer = addrToProto(c.PeerAddr)
+	}
+	return ret
+}
+
+// ServerHeader configs the binary log entry to be a ServerHeader entry.
+type ServerHeader struct {
+	OnClientSide bool
+	Header       metadata.MD
+	// PeerAddr is required only when it's on client side.
+	PeerAddr net.Addr
+}
+
+func (c *ServerHeader) toProto() *pb.GrpcLogEntry {
+	ret := &pb.GrpcLogEntry{
+		Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER,
+		Payload: &pb.GrpcLogEntry_ServerHeader{
+			ServerHeader: &pb.ServerHeader{
+				Metadata: mdToMetadataProto(c.Header),
+			},
+		},
+	}
+	if c.OnClientSide {
+		ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
+	} else {
+		ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
+	}
+	if c.PeerAddr != nil {
+		ret.Peer = addrToProto(c.PeerAddr)
+	}
+	return ret
+}
+
+// ClientMessage configs the binary log entry to be a ClientMessage entry.
+type ClientMessage struct {
+	OnClientSide bool
+	// Message can be a proto.Message or []byte. Other message formats are not
+	// supported.
+	Message interface{}
+}
+
+func (c *ClientMessage) toProto() *pb.GrpcLogEntry {
+	var (
+		data []byte
+		err  error
+	)
+	if m, ok := c.Message.(proto.Message); ok {
+		data, err = proto.Marshal(m)
+		if err != nil {
+			grpclog.Infof("binarylogging: failed to marshal proto message: %v", err)
+		}
+	} else if b, ok := c.Message.([]byte); ok {
+		data = b
+	} else {
+		grpclog.Infof("binarylogging: message to log is neither proto.message nor []byte")
+	}
+	ret := &pb.GrpcLogEntry{
+		Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE,
+		Payload: &pb.GrpcLogEntry_Message{
+			Message: &pb.Message{
+				Length: uint32(len(data)),
+				Data:   data,
+			},
+		},
+	}
+	if c.OnClientSide {
+		ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
+	} else {
+		ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
+	}
+	return ret
+}
+
+// ServerMessage configs the binary log entry to be a ServerMessage entry.
+type ServerMessage struct {
+	OnClientSide bool
+	// Message can be a proto.Message or []byte. Other message formats are not
+	// supported.
+	Message interface{}
+}
+
+func (c *ServerMessage) toProto() *pb.GrpcLogEntry {
+	var (
+		data []byte
+		err  error
+	)
+	if m, ok := c.Message.(proto.Message); ok {
+		data, err = proto.Marshal(m)
+		if err != nil {
+			grpclog.Infof("binarylogging: failed to marshal proto message: %v", err)
+		}
+	} else if b, ok := c.Message.([]byte); ok {
+		data = b
+	} else {
+		grpclog.Infof("binarylogging: message to log is neither proto.message nor []byte")
+	}
+	ret := &pb.GrpcLogEntry{
+		Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE,
+		Payload: &pb.GrpcLogEntry_Message{
+			Message: &pb.Message{
+				Length: uint32(len(data)),
+				Data:   data,
+			},
+		},
+	}
+	if c.OnClientSide {
+		ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
+	} else {
+		ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
+	}
+	return ret
+}
+
+// ClientHalfClose configs the binary log entry to be a ClientHalfClose entry.
+type ClientHalfClose struct {
+	OnClientSide bool
+}
+
+func (c *ClientHalfClose) toProto() *pb.GrpcLogEntry {
+	ret := &pb.GrpcLogEntry{
+		Type:    pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE,
+		Payload: nil, // No payload here.
+	}
+	if c.OnClientSide {
+		ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
+	} else {
+		ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
+	}
+	return ret
+}
+
+// ServerTrailer configs the binary log entry to be a ServerTrailer entry.
+type ServerTrailer struct {
+	OnClientSide bool
+	Trailer      metadata.MD
+	// Err is the status error.
+	Err error
+	// PeerAddr is required only when it's on client side and the RPC is trailer
+	// only.
+	PeerAddr net.Addr
+}
+
+func (c *ServerTrailer) toProto() *pb.GrpcLogEntry {
+	st, ok := status.FromError(c.Err)
+	if !ok {
+		grpclog.Info("binarylogging: error in trailer is not a status error")
+	}
+	var (
+		detailsBytes []byte
+		err          error
+	)
+	stProto := st.Proto()
+	if stProto != nil && len(stProto.Details) != 0 {
+		detailsBytes, err = proto.Marshal(stProto)
+		if err != nil {
+			grpclog.Infof("binarylogging: failed to marshal status proto: %v", err)
+		}
+	}
+	ret := &pb.GrpcLogEntry{
+		Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER,
+		Payload: &pb.GrpcLogEntry_Trailer{
+			Trailer: &pb.Trailer{
+				Metadata:      mdToMetadataProto(c.Trailer),
+				StatusCode:    uint32(st.Code()),
+				StatusMessage: st.Message(),
+				StatusDetails: detailsBytes,
+			},
+		},
+	}
+	if c.OnClientSide {
+		ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
+	} else {
+		ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
+	}
+	if c.PeerAddr != nil {
+		ret.Peer = addrToProto(c.PeerAddr)
+	}
+	return ret
+}
+
+// Cancel configs the binary log entry to be a Cancel entry.
+type Cancel struct {
+	OnClientSide bool
+}
+
+func (c *Cancel) toProto() *pb.GrpcLogEntry {
+	ret := &pb.GrpcLogEntry{
+		Type:    pb.GrpcLogEntry_EVENT_TYPE_CANCEL,
+		Payload: nil,
+	}
+	if c.OnClientSide {
+		ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
+	} else {
+		ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
+	}
+	return ret
+}
+
+// metadataKeyOmit returns whether the metadata entry with this key should be
+// omitted.
+func metadataKeyOmit(key string) bool {
+	switch key {
+	case "lb-token", ":path", ":authority", "content-encoding", "content-type", "user-agent", "te":
+		return true
+	case "grpc-trace-bin": // grpc-trace-bin is special because it's visible to users.
+		return false
+	}
+	return strings.HasPrefix(key, "grpc-")
+}
+
+func mdToMetadataProto(md metadata.MD) *pb.Metadata {
+	ret := &pb.Metadata{}
+	for k, vv := range md {
+		if metadataKeyOmit(k) {
+			continue
+		}
+		for _, v := range vv {
+			ret.Entry = append(ret.Entry,
+				&pb.MetadataEntry{
+					Key:   k,
+					Value: []byte(v),
+				},
+			)
+		}
+	}
+	return ret
+}
+
+func addrToProto(addr net.Addr) *pb.Address {
+	ret := &pb.Address{}
+	switch a := addr.(type) {
+	case *net.TCPAddr:
+		if a.IP.To4() != nil {
+			ret.Type = pb.Address_TYPE_IPV4
+		} else if a.IP.To16() != nil {
+			ret.Type = pb.Address_TYPE_IPV6
+		} else {
+			ret.Type = pb.Address_TYPE_UNKNOWN
+			// Do not set address and port fields.
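+			// Editor's note (illustrative, not upstream): for comparison, a
+			// *net.TCPAddr of 127.0.0.1:8080 maps to TYPE_IPV4 with Address
+			// "127.0.0.1" and IpPort 8080, and a *net.UnixAddr maps to
+			// TYPE_UNIX with its String() value.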
+ break + } + ret.Address = a.IP.String() + ret.IpPort = uint32(a.Port) + case *net.UnixAddr: + ret.Type = pb.Address_TYPE_UNIX + ret.Address = a.String() + default: + ret.Type = pb.Address_TYPE_UNKNOWN + } + return ret +} diff --git a/vendor/google.golang.org/grpc/internal/binarylog/regenerate.sh b/vendor/google.golang.org/grpc/internal/binarylog/regenerate.sh new file mode 100644 index 00000000..113d40cb --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/binarylog/regenerate.sh @@ -0,0 +1,33 @@ +#!/bin/bash +# Copyright 2018 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -eux -o pipefail + +TMP=$(mktemp -d) + +function finish { + rm -rf "$TMP" +} +trap finish EXIT + +pushd "$TMP" +mkdir -p grpc/binarylog/grpc_binarylog_v1 +curl https://raw.githubusercontent.com/grpc/grpc-proto/master/grpc/binlog/v1/binarylog.proto > grpc/binarylog/grpc_binarylog_v1/binarylog.proto + +protoc --go_out=plugins=grpc,paths=source_relative:. -I. grpc/binarylog/grpc_binarylog_v1/*.proto +popd +rm -f ./grpc_binarylog_v1/*.pb.go +cp "$TMP"/grpc/binarylog/grpc_binarylog_v1/*.pb.go ../../binarylog/grpc_binarylog_v1/ + diff --git a/vendor/google.golang.org/grpc/internal/binarylog/sink.go b/vendor/google.golang.org/grpc/internal/binarylog/sink.go new file mode 100644 index 00000000..a2e7c346 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/binarylog/sink.go @@ -0,0 +1,162 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package binarylog + +import ( + "bufio" + "encoding/binary" + "fmt" + "io" + "io/ioutil" + "sync" + "time" + + "github.com/golang/protobuf/proto" + pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" + "google.golang.org/grpc/grpclog" +) + +var ( + defaultSink Sink = &noopSink{} // TODO(blog): change this default (file in /tmp). +) + +// SetDefaultSink sets the sink where binary logs will be written to. +// +// Not thread safe. Only set during initialization. +func SetDefaultSink(s Sink) { + if defaultSink != nil { + defaultSink.Close() + } + defaultSink = s +} + +// Sink writes log entry into the binary log sink. +type Sink interface { + // Write will be called to write the log entry into the sink. + // + // It should be thread-safe so it can be called in parallel. + Write(*pb.GrpcLogEntry) error + // Close will be called when the Sink is replaced by a new Sink. 
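+	// Editor's sketch (illustrative, not upstream code): swapping in the
+	// file-backed sink at startup:
+	//
+	//	if s, err := NewTempFileSink(); err == nil {
+	//		SetDefaultSink(s) // must happen during initialization
+	//	}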
+	Close() error
+}
+
+type noopSink struct{}
+
+func (ns *noopSink) Write(*pb.GrpcLogEntry) error { return nil }
+func (ns *noopSink) Close() error                 { return nil }
+
+// newWriterSink creates a binary log sink with the given writer.
+//
+// Write() marshals the proto message and writes it to the given writer. Each
+// message is prefixed with a 4 byte big endian unsigned integer as the length.
+//
+// No buffering is done; Close() doesn't try to close the writer.
+func newWriterSink(w io.Writer) *writerSink {
+	return &writerSink{out: w}
+}
+
+type writerSink struct {
+	out io.Writer
+}
+
+func (ws *writerSink) Write(e *pb.GrpcLogEntry) error {
+	b, err := proto.Marshal(e)
+	if err != nil {
+		grpclog.Infof("binary logging: failed to marshal proto message: %v", err)
+	}
+	hdr := make([]byte, 4)
+	binary.BigEndian.PutUint32(hdr, uint32(len(b)))
+	if _, err := ws.out.Write(hdr); err != nil {
+		return err
+	}
+	if _, err := ws.out.Write(b); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (ws *writerSink) Close() error { return nil }
+
+type bufWriteCloserSink struct {
+	mu     sync.Mutex
+	closer io.Closer
+	out    *writerSink   // out is built on buf.
+	buf    *bufio.Writer // buf is kept for flush.
+
+	writeStartOnce sync.Once
+	writeTicker    *time.Ticker
+}
+
+func (fs *bufWriteCloserSink) Write(e *pb.GrpcLogEntry) error {
+	// Start the write loop when Write is called.
+	fs.writeStartOnce.Do(fs.startFlushGoroutine)
+	fs.mu.Lock()
+	if err := fs.out.Write(e); err != nil {
+		fs.mu.Unlock()
+		return err
+	}
+	fs.mu.Unlock()
+	return nil
+}
+
+const (
+	bufFlushDuration = 60 * time.Second
+)
+
+func (fs *bufWriteCloserSink) startFlushGoroutine() {
+	fs.writeTicker = time.NewTicker(bufFlushDuration)
+	go func() {
+		for range fs.writeTicker.C {
+			fs.mu.Lock()
+			fs.buf.Flush()
+			fs.mu.Unlock()
+		}
+	}()
+}
+
+func (fs *bufWriteCloserSink) Close() error {
+	if fs.writeTicker != nil {
+		fs.writeTicker.Stop()
+	}
+	fs.mu.Lock()
+	fs.buf.Flush()
+	fs.closer.Close()
+	fs.out.Close()
+	fs.mu.Unlock()
+	return nil
+}
+
+func newBufWriteCloserSink(o io.WriteCloser) Sink {
+	bufW := bufio.NewWriter(o)
+	return &bufWriteCloserSink{
+		closer: o,
+		out:    newWriterSink(bufW),
+		buf:    bufW,
+	}
+}
+
+// NewTempFileSink creates a temp file and returns a Sink that writes to this
+// file.
+func NewTempFileSink() (Sink, error) {
+	tempFile, err := ioutil.TempFile("/tmp", "grpcgo_binarylog_*.txt")
+	if err != nil {
+		return nil, fmt.Errorf("failed to create temp file: %v", err)
+	}
+	return newBufWriteCloserSink(tempFile), nil
+}
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/util.go b/vendor/google.golang.org/grpc/internal/binarylog/util.go
new file mode 100644
index 00000000..15dc7803
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/binarylog/util.go
@@ -0,0 +1,41 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package binarylog
+
+import (
+	"errors"
+	"strings"
+)
+
+// parseMethodName splits service and method from the input. It expects format
+// "/service/method".
+//
+// TODO: move to internal/grpcutil.
+func parseMethodName(methodName string) (service, method string, _ error) {
+	if !strings.HasPrefix(methodName, "/") {
+		return "", "", errors.New("invalid method name: should start with /")
+	}
+	methodName = methodName[1:]
+
+	pos := strings.LastIndex(methodName, "/")
+	if pos < 0 {
+		return "", "", errors.New("invalid method name: suffix /method is missing")
+	}
+	return methodName[:pos], methodName[pos+1:], nil
+}
diff --git a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go
new file mode 100644
index 00000000..9f6a0c12
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2019 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package buffer provides an implementation of an unbounded buffer.
+package buffer
+
+import "sync"
+
+// Unbounded is an implementation of an unbounded buffer which does not use
+// extra goroutines. This is typically used for passing updates from one entity
+// to another within gRPC.
+//
+// All methods on this type are thread-safe and don't block on anything except
+// the underlying mutex used for synchronization.
+//
+// Unbounded supports values of any type to be stored in it by using a channel
+// of `interface{}`. This means that a call to Put() incurs an extra memory
+// allocation, and also that users need a type assertion while reading. For
+// performance critical code paths, using Unbounded is strongly discouraged and
+// defining a new type specific implementation of this buffer is preferred. See
+// internal/transport/transport.go for an example of this.
+type Unbounded struct {
+	c       chan interface{}
+	mu      sync.Mutex
+	backlog []interface{}
+}
+
+// NewUnbounded returns a new instance of Unbounded.
+func NewUnbounded() *Unbounded {
+	return &Unbounded{c: make(chan interface{}, 1)}
+}
+
+// Put adds t to the unbounded buffer.
+func (b *Unbounded) Put(t interface{}) {
+	b.mu.Lock()
+	if len(b.backlog) == 0 {
+		select {
+		case b.c <- t:
+			b.mu.Unlock()
+			return
+		default:
+		}
+	}
+	b.backlog = append(b.backlog, t)
+	b.mu.Unlock()
+}
+
+// Load sends the earliest buffered data, if any, onto the read channel
+// returned by Get(). Users are expected to call this every time they read a
+// value from the read channel.
+func (b *Unbounded) Load() {
+	b.mu.Lock()
+	if len(b.backlog) > 0 {
+		select {
+		case b.c <- b.backlog[0]:
+			b.backlog[0] = nil
+			b.backlog = b.backlog[1:]
+		default:
+		}
+	}
+	b.mu.Unlock()
+}
+
+// Get returns a read channel on which values added to the buffer, via Put(),
+// are sent.
+//
+// Upon reading a value from this channel, users are expected to call Load() to
+// send the next buffered value onto the channel if there is any.
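+//
+// A minimal usage sketch (illustrative only, not part of the vendored
+// source): a consumer alternates reads from Get() with calls to Load():
+//
+//	b := buffer.NewUnbounded()
+//	b.Put("update-1")
+//	b.Put("update-2")
+//	for i := 0; i < 2; i++ {
+//		v := <-b.Get() // read one buffered value
+//		b.Load()       // ask for the next value to be sent, if any
+//		fmt.Println(v.(string))
+//	}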
+func (b *Unbounded) Get() <-chan interface{} {
+	return b.c
+}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go
new file mode 100644
index 00000000..e4252e5b
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go
@@ -0,0 +1,739 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package channelz defines APIs for enabling channelz service, entry
+// registration/deletion, and accessing channelz data. It also defines channelz
+// metric struct formats.
+//
+// All APIs in this package are experimental.
+package channelz
+
+import (
+	"fmt"
+	"sort"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"google.golang.org/grpc/internal/grpclog"
+)
+
+const (
+	defaultMaxTraceEntry int32 = 30
+)
+
+var (
+	db    dbWrapper
+	idGen idGenerator
+	// EntryPerPage defines the number of channelz entries to be shown on a web page.
+	EntryPerPage  = int64(50)
+	curState      int32
+	maxTraceEntry = defaultMaxTraceEntry
+)
+
+// TurnOn turns on channelz data collection.
+func TurnOn() {
+	if !IsOn() {
+		NewChannelzStorage()
+		atomic.StoreInt32(&curState, 1)
+	}
+}
+
+// IsOn returns whether channelz data collection is on.
+func IsOn() bool {
+	return atomic.CompareAndSwapInt32(&curState, 1, 1)
+}
+
+// SetMaxTraceEntry sets the maximum number of trace entries per entity (i.e. channel/subchannel).
+// Setting it to 0 will disable channel tracing.
+func SetMaxTraceEntry(i int32) {
+	atomic.StoreInt32(&maxTraceEntry, i)
+}
+
+// ResetMaxTraceEntryToDefault resets the maximum number of trace entries per entity to default.
+func ResetMaxTraceEntryToDefault() {
+	atomic.StoreInt32(&maxTraceEntry, defaultMaxTraceEntry)
+}
+
+func getMaxTraceEntry() int {
+	i := atomic.LoadInt32(&maxTraceEntry)
+	return int(i)
+}
+
+// dbWrapper wraps around a reference to internal channelz data storage, and
+// provides synchronized functionality to set and get the reference.
+type dbWrapper struct {
+	mu sync.RWMutex
+	DB *channelMap
+}
+
+func (d *dbWrapper) set(db *channelMap) {
+	d.mu.Lock()
+	d.DB = db
+	d.mu.Unlock()
+}
+
+func (d *dbWrapper) get() *channelMap {
+	d.mu.RLock()
+	defer d.mu.RUnlock()
+	return d.DB
+}
+
+// NewChannelzStorage initializes channelz data storage and id generator.
+//
+// This function returns a cleanup function to wait for all channelz state to be reset by the
+// grpc goroutines when those entities get closed. By using this cleanup function, we make sure tests
+// don't mess up each other, i.e. a lingering goroutine from a previous test doing entity removal won't
+// happen to remove an entity just registered by the new test, since the id space is the same.
+//
+// Note: This function is exported for testing purposes only. Users should not call
+// it in most cases.
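+//
+// A test would typically use the returned cleanup function like this
+// (illustrative sketch; t is the test's *testing.T):
+//
+//	cleanup := channelz.NewChannelzStorage()
+//	defer func() {
+//		if err := cleanup(); err != nil {
+//			t.Error(err)
+//		}
+//	}()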
+func NewChannelzStorage() (cleanup func() error) {
+	db.set(&channelMap{
+		topLevelChannels: make(map[int64]struct{}),
+		channels:         make(map[int64]*channel),
+		listenSockets:    make(map[int64]*listenSocket),
+		normalSockets:    make(map[int64]*normalSocket),
+		servers:          make(map[int64]*server),
+		subChannels:      make(map[int64]*subChannel),
+	})
+	idGen.reset()
+	return func() error {
+		var err error
+		cm := db.get()
+		if cm == nil {
+			return nil
+		}
+		for i := 0; i < 1000; i++ {
+			cm.mu.Lock()
+			if len(cm.topLevelChannels) == 0 && len(cm.servers) == 0 && len(cm.channels) == 0 && len(cm.subChannels) == 0 && len(cm.listenSockets) == 0 && len(cm.normalSockets) == 0 {
+				cm.mu.Unlock()
+				// all things stored in the channelz map have been cleared.
+				return nil
+			}
+			cm.mu.Unlock()
+			time.Sleep(10 * time.Millisecond)
+		}
+
+		cm.mu.Lock()
+		err = fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets))
+		cm.mu.Unlock()
+		return err
+	}
+}
+
+// GetTopChannels returns a slice of top channels' ChannelMetric, along with a
+// boolean indicating whether there are more top channels to be queried for.
+//
+// The arg id specifies that only top channels with id at or above it will be included
+// in the result. The returned slice is up to a length of the arg maxResults or
+// EntryPerPage if maxResults is zero, and is sorted in ascending id order.
+func GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) {
+	return db.get().GetTopChannels(id, maxResults)
+}
+
+// GetServers returns a slice of servers' ServerMetric, along with a
+// boolean indicating whether there are more servers to be queried for.
+//
+// The arg id specifies that only servers with id at or above it will be included
+// in the result. The returned slice is up to a length of the arg maxResults or
+// EntryPerPage if maxResults is zero, and is sorted in ascending id order.
+func GetServers(id int64, maxResults int64) ([]*ServerMetric, bool) {
+	return db.get().GetServers(id, maxResults)
+}
+
+// GetServerSockets returns a slice of the server's (identified by id) normal socket
+// SocketMetric, along with a boolean indicating whether there are more sockets to
+// be queried for.
+//
+// The arg startID specifies that only sockets with id at or above it will be
+// included in the result. The returned slice is up to a length of the arg maxResults
+// or EntryPerPage if maxResults is zero, and is sorted in ascending id order.
+func GetServerSockets(id int64, startID int64, maxResults int64) ([]*SocketMetric, bool) {
+	return db.get().GetServerSockets(id, startID, maxResults)
+}
+
+// GetChannel returns the ChannelMetric for the channel (identified by id).
+func GetChannel(id int64) *ChannelMetric {
+	return db.get().GetChannel(id)
+}
+
+// GetSubChannel returns the SubChannelMetric for the subchannel (identified by id).
+func GetSubChannel(id int64) *SubChannelMetric {
+	return db.get().GetSubChannel(id)
+}
+
+// GetSocket returns the SocketMetric for the socket (identified by id).
+func GetSocket(id int64) *SocketMetric {
+	return db.get().GetSocket(id)
+}
+
+// GetServer returns the ServerMetric for the server (identified by id).
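+// It returns nil if there is no tracked server with the given id.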
+func GetServer(id int64) *ServerMetric {
+	return db.get().GetServer(id)
+}
+
+// RegisterChannel registers the given channel c in the channelz database with ref
+// as its reference name, and adds it to the child list of its parent (identified
+// by pid). pid = 0 means no parent. It returns the unique channelz tracking id
+// assigned to this channel.
+func RegisterChannel(c Channel, pid int64, ref string) int64 {
+	id := idGen.genID()
+	cn := &channel{
+		refName:     ref,
+		c:           c,
+		subChans:    make(map[int64]string),
+		nestedChans: make(map[int64]string),
+		id:          id,
+		pid:         pid,
+		trace:       &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())},
+	}
+	if pid == 0 {
+		db.get().addChannel(id, cn, true, pid, ref)
+	} else {
+		db.get().addChannel(id, cn, false, pid, ref)
+	}
+	return id
+}
+
+// RegisterSubChannel registers the given channel c in the channelz database with ref
+// as its reference name, and adds it to the child list of its parent (identified
+// by pid). It returns the unique channelz tracking id assigned to this subchannel.
+func RegisterSubChannel(c Channel, pid int64, ref string) int64 {
+	if pid == 0 {
+		grpclog.ErrorDepth(0, "a SubChannel's parent id cannot be 0")
+		return 0
+	}
+	id := idGen.genID()
+	sc := &subChannel{
+		refName: ref,
+		c:       c,
+		sockets: make(map[int64]string),
+		id:      id,
+		pid:     pid,
+		trace:   &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())},
+	}
+	db.get().addSubChannel(id, sc, pid, ref)
+	return id
+}
+
+// RegisterServer registers the given server s in the channelz database. It returns
+// the unique channelz tracking id assigned to this server.
+func RegisterServer(s Server, ref string) int64 {
+	id := idGen.genID()
+	svr := &server{
+		refName:       ref,
+		s:             s,
+		sockets:       make(map[int64]string),
+		listenSockets: make(map[int64]string),
+		id:            id,
+	}
+	db.get().addServer(id, svr)
+	return id
+}
+
+// RegisterListenSocket registers the given listen socket s in the channelz database
+// with ref as its reference name, and adds it to the child list of its parent
+// (identified by pid). It returns the unique channelz tracking id assigned to
+// this listen socket.
+func RegisterListenSocket(s Socket, pid int64, ref string) int64 {
+	if pid == 0 {
+		grpclog.ErrorDepth(0, "a ListenSocket's parent id cannot be 0")
+		return 0
+	}
+	id := idGen.genID()
+	ls := &listenSocket{refName: ref, s: s, id: id, pid: pid}
+	db.get().addListenSocket(id, ls, pid, ref)
+	return id
+}
+
+// RegisterNormalSocket registers the given normal socket s in the channelz database
+// with ref as its reference name, and adds it to the child list of its parent
+// (identified by pid). It returns the unique channelz tracking id assigned to
+// this normal socket.
+func RegisterNormalSocket(s Socket, pid int64, ref string) int64 {
+	if pid == 0 {
+		grpclog.ErrorDepth(0, "a NormalSocket's parent id cannot be 0")
+		return 0
+	}
+	id := idGen.genID()
+	ns := &normalSocket{refName: ref, s: s, id: id, pid: pid}
+	db.get().addNormalSocket(id, ns, pid, ref)
+	return id
+}
+
+// RemoveEntry removes the entry with the given unique channelz tracking id from
+// the channelz database.
+func RemoveEntry(id int64) {
+	db.get().removeEntry(id)
+}
+
+// TraceEventDesc is what the caller of AddTraceEvent should provide to describe the event to be added
+// to the channel trace.
+// The Parent field is optional. It is used for an event that will also be recorded in the entity's
+// parent trace.
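+//
+// For example (an illustrative sketch, not a call made in this file; subChanID
+// is a hypothetical subchannel's tracking id), an event that should also show
+// up in the parent channel's trace could be described as:
+//
+//	AddTraceEvent(subChanID, 0, &TraceEventDesc{
+//		Desc:     "Subchannel Created",
+//		Severity: CtINFO,
+//		Parent: &TraceEventDesc{
+//			Desc:     fmt.Sprintf("Subchannel(id:%d) created", subChanID),
+//			Severity: CtINFO,
+//		},
+//	})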
+type TraceEventDesc struct {
+	Desc     string
+	Severity Severity
+	Parent   *TraceEventDesc
+}
+
+// AddTraceEvent adds a trace event related to the entity with the specified id, using the
+// provided TraceEventDesc.
+func AddTraceEvent(id int64, depth int, desc *TraceEventDesc) {
+	for d := desc; d != nil; d = d.Parent {
+		switch d.Severity {
+		case CtUNKNOWN:
+			grpclog.InfoDepth(depth+1, d.Desc)
+		case CtINFO:
+			grpclog.InfoDepth(depth+1, d.Desc)
+		case CtWarning:
+			grpclog.WarningDepth(depth+1, d.Desc)
+		case CtError:
+			grpclog.ErrorDepth(depth+1, d.Desc)
+		}
+	}
+	if getMaxTraceEntry() == 0 {
+		return
+	}
+	db.get().traceEvent(id, desc)
+}
+
+// channelMap is the storage data structure for channelz.
+// Methods of channelMap can be divided into two categories with respect to locking.
+// 1. Methods that acquire the global lock.
+// 2. Methods that can only be called when the global lock is held.
+// A method of the second type must always be called from inside a method of the first type.
+type channelMap struct {
+	mu               sync.RWMutex
+	topLevelChannels map[int64]struct{}
+	servers          map[int64]*server
+	channels         map[int64]*channel
+	subChannels      map[int64]*subChannel
+	listenSockets    map[int64]*listenSocket
+	normalSockets    map[int64]*normalSocket
+}
+
+func (c *channelMap) addServer(id int64, s *server) {
+	c.mu.Lock()
+	s.cm = c
+	c.servers[id] = s
+	c.mu.Unlock()
+}
+
+func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64, ref string) {
+	c.mu.Lock()
+	cn.cm = c
+	cn.trace.cm = c
+	c.channels[id] = cn
+	if isTopChannel {
+		c.topLevelChannels[id] = struct{}{}
+	} else {
+		c.findEntry(pid).addChild(id, cn)
+	}
+	c.mu.Unlock()
+}
+
+func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64, ref string) {
+	c.mu.Lock()
+	sc.cm = c
+	sc.trace.cm = c
+	c.subChannels[id] = sc
+	c.findEntry(pid).addChild(id, sc)
+	c.mu.Unlock()
+}
+
+func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64, ref string) {
+	c.mu.Lock()
+	ls.cm = c
+	c.listenSockets[id] = ls
+	c.findEntry(pid).addChild(id, ls)
+	c.mu.Unlock()
+}
+
+func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64, ref string) {
+	c.mu.Lock()
+	ns.cm = c
+	c.normalSockets[id] = ns
+	c.findEntry(pid).addChild(id, ns)
+	c.mu.Unlock()
+}
+
+// removeEntry triggers the removal of an entry, which may not actually delete the entry if it
+// has to wait on the deletion of its children, or until no other entity's channel trace references it.
+// It may lead to a chain of entry deletions. For example, deleting the last socket of a gracefully
+// shutting down server will lead to the server also being deleted.
+func (c *channelMap) removeEntry(id int64) {
+	c.mu.Lock()
+	c.findEntry(id).triggerDelete()
+	c.mu.Unlock()
+}
+
+// c.mu must be held by the caller.
+func (c *channelMap) decrTraceRefCount(id int64) {
+	e := c.findEntry(id)
+	if v, ok := e.(tracedChannel); ok {
+		v.decrTraceRefCount()
+		e.deleteSelfIfReady()
+	}
+}
+
+// c.mu must be held by the caller.
+func (c *channelMap) findEntry(id int64) entry {
+	var v entry
+	var ok bool
+	if v, ok = c.channels[id]; ok {
+		return v
+	}
+	if v, ok = c.subChannels[id]; ok {
+		return v
+	}
+	if v, ok = c.servers[id]; ok {
+		return v
+	}
+	if v, ok = c.listenSockets[id]; ok {
+		return v
+	}
+	if v, ok = c.normalSockets[id]; ok {
+		return v
+	}
+	return &dummyEntry{idNotFound: id}
+}
+
+// c.mu must be held by the caller.
+// deleteEntry simply deletes an entry from the channelMap. Before calling this
+// method, the caller must check that this entry is ready to be deleted, i.e. removeEntry()
+// has been called on it, and no children still exist.
+// Conditionals are ordered by the expected frequency of deletion of each entity
+// type, in order to optimize performance.
+func (c *channelMap) deleteEntry(id int64) {
+	var ok bool
+	if _, ok = c.normalSockets[id]; ok {
+		delete(c.normalSockets, id)
+		return
+	}
+	if _, ok = c.subChannels[id]; ok {
+		delete(c.subChannels, id)
+		return
+	}
+	if _, ok = c.channels[id]; ok {
+		delete(c.channels, id)
+		delete(c.topLevelChannels, id)
+		return
+	}
+	if _, ok = c.listenSockets[id]; ok {
+		delete(c.listenSockets, id)
+		return
+	}
+	if _, ok = c.servers[id]; ok {
+		delete(c.servers, id)
+		return
+	}
+}
+
+func (c *channelMap) traceEvent(id int64, desc *TraceEventDesc) {
+	c.mu.Lock()
+	child := c.findEntry(id)
+	childTC, ok := child.(tracedChannel)
+	if !ok {
+		c.mu.Unlock()
+		return
+	}
+	childTC.getChannelTrace().append(&TraceEvent{Desc: desc.Desc, Severity: desc.Severity, Timestamp: time.Now()})
+	if desc.Parent != nil {
+		parent := c.findEntry(child.getParentID())
+		var chanType RefChannelType
+		switch child.(type) {
+		case *channel:
+			chanType = RefChannel
+		case *subChannel:
+			chanType = RefSubChannel
+		}
+		if parentTC, ok := parent.(tracedChannel); ok {
+			parentTC.getChannelTrace().append(&TraceEvent{
+				Desc:      desc.Parent.Desc,
+				Severity:  desc.Parent.Severity,
+				Timestamp: time.Now(),
+				RefID:     id,
+				RefName:   childTC.getRefName(),
+				RefType:   chanType,
+			})
+			childTC.incrTraceRefCount()
+		}
+	}
+	c.mu.Unlock()
+}
+
+type int64Slice []int64
+
+func (s int64Slice) Len() int           { return len(s) }
+func (s int64Slice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+func (s int64Slice) Less(i, j int) bool { return s[i] < s[j] }
+
+func copyMap(m map[int64]string) map[int64]string {
+	n := make(map[int64]string)
+	for k, v := range m {
+		n[k] = v
+	}
+	return n
+}
+
+func min(a, b int64) int64 {
+	if a < b {
+		return a
+	}
+	return b
+}
+
+func (c *channelMap) GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) {
+	if maxResults <= 0 {
+		maxResults = EntryPerPage
+	}
+	c.mu.RLock()
+	l := int64(len(c.topLevelChannels))
+	ids := make([]int64, 0, l)
+	cns := make([]*channel, 0, min(l, maxResults))
+
+	for k := range c.topLevelChannels {
+		ids = append(ids, k)
+	}
+	sort.Sort(int64Slice(ids))
+	idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id })
+	count := int64(0)
+	var end bool
+	var t []*ChannelMetric
+	for i, v := range ids[idx:] {
+		if count == maxResults {
+			break
+		}
+		if cn, ok := c.channels[v]; ok {
+			cns = append(cns, cn)
+			t = append(t, &ChannelMetric{
+				NestedChans: copyMap(cn.nestedChans),
+				SubChans:    copyMap(cn.subChans),
+			})
+			count++
+		}
+		if i == len(ids[idx:])-1 {
+			end = true
+			break
+		}
+	}
+	c.mu.RUnlock()
+	if count == 0 {
+		end = true
+	}
+
+	for i, cn := range cns {
+		t[i].ChannelData = cn.c.ChannelzMetric()
+		t[i].ID = cn.id
+		t[i].RefName = cn.refName
+		t[i].Trace = cn.trace.dumpData()
+	}
+	return t, end
+}
+
+func (c *channelMap) GetServers(id, maxResults int64) ([]*ServerMetric, bool) {
+	if maxResults <= 0 {
+		maxResults = EntryPerPage
+	}
+	c.mu.RLock()
+	l := int64(len(c.servers))
+	ids := make([]int64, 0, l)
+	ss := make([]*server, 0, min(l, maxResults))
+	for k := range c.servers {
+		ids = append(ids, k)
+	}
+	sort.Sort(int64Slice(ids))
+	idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id })
+	count := int64(0)
+	var end bool
+	var s []*ServerMetric
+	for i, v := range ids[idx:] {
+		if count == maxResults {
+			break
+		}
+		if svr, ok := c.servers[v]; ok {
+			ss = append(ss, svr)
+			s = append(s, &ServerMetric{
+				ListenSockets: copyMap(svr.listenSockets),
+			})
+			count++
+		}
+		if i == len(ids[idx:])-1 {
+			end = true
+			break
+		}
+	}
+	c.mu.RUnlock()
+	if count == 0 {
+		end = true
+	}
+
+	for i, svr := range ss {
+		s[i].ServerData = svr.s.ChannelzMetric()
+		s[i].ID = svr.id
+		s[i].RefName = svr.refName
+	}
+	return s, end
+}
+
+func (c *channelMap) GetServerSockets(id int64, startID int64, maxResults int64) ([]*SocketMetric, bool) {
+	if maxResults <= 0 {
+		maxResults = EntryPerPage
+	}
+	var svr *server
+	var ok bool
+	c.mu.RLock()
+	if svr, ok = c.servers[id]; !ok {
+		// server with id doesn't exist.
+		c.mu.RUnlock()
+		return nil, true
+	}
+	svrskts := svr.sockets
+	l := int64(len(svrskts))
+	ids := make([]int64, 0, l)
+	sks := make([]*normalSocket, 0, min(l, maxResults))
+	for k := range svrskts {
+		ids = append(ids, k)
+	}
+	sort.Sort(int64Slice(ids))
+	idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= startID })
+	count := int64(0)
+	var end bool
+	for i, v := range ids[idx:] {
+		if count == maxResults {
+			break
+		}
+		if ns, ok := c.normalSockets[v]; ok {
+			sks = append(sks, ns)
+			count++
+		}
+		if i == len(ids[idx:])-1 {
+			end = true
+			break
+		}
+	}
+	c.mu.RUnlock()
+	if count == 0 {
+		end = true
+	}
+	var s []*SocketMetric
+	for _, ns := range sks {
+		sm := &SocketMetric{}
+		sm.SocketData = ns.s.ChannelzMetric()
+		sm.ID = ns.id
+		sm.RefName = ns.refName
+		s = append(s, sm)
+	}
+	return s, end
+}
+
+func (c *channelMap) GetChannel(id int64) *ChannelMetric {
+	cm := &ChannelMetric{}
+	var cn *channel
+	var ok bool
+	c.mu.RLock()
+	if cn, ok = c.channels[id]; !ok {
+		// channel with id doesn't exist.
+		c.mu.RUnlock()
+		return nil
+	}
+	cm.NestedChans = copyMap(cn.nestedChans)
+	cm.SubChans = copyMap(cn.subChans)
+	// cn.c can be set to &dummyChannel{} when deleteSelfFromMap is called. Save a copy of cn.c when
+	// holding the lock to prevent potential data race.
+	chanCopy := cn.c
+	c.mu.RUnlock()
+	cm.ChannelData = chanCopy.ChannelzMetric()
+	cm.ID = cn.id
+	cm.RefName = cn.refName
+	cm.Trace = cn.trace.dumpData()
+	return cm
+}
+
+func (c *channelMap) GetSubChannel(id int64) *SubChannelMetric {
+	cm := &SubChannelMetric{}
+	var sc *subChannel
+	var ok bool
+	c.mu.RLock()
+	if sc, ok = c.subChannels[id]; !ok {
+		// subchannel with id doesn't exist.
+		c.mu.RUnlock()
+		return nil
+	}
+	cm.Sockets = copyMap(sc.sockets)
+	// sc.c can be set to &dummyChannel{} when deleteSelfFromMap is called. Save a copy of sc.c when
+	// holding the lock to prevent potential data race.
+ chanCopy := sc.c + c.mu.RUnlock() + cm.ChannelData = chanCopy.ChannelzMetric() + cm.ID = sc.id + cm.RefName = sc.refName + cm.Trace = sc.trace.dumpData() + return cm +} + +func (c *channelMap) GetSocket(id int64) *SocketMetric { + sm := &SocketMetric{} + c.mu.RLock() + if ls, ok := c.listenSockets[id]; ok { + c.mu.RUnlock() + sm.SocketData = ls.s.ChannelzMetric() + sm.ID = ls.id + sm.RefName = ls.refName + return sm + } + if ns, ok := c.normalSockets[id]; ok { + c.mu.RUnlock() + sm.SocketData = ns.s.ChannelzMetric() + sm.ID = ns.id + sm.RefName = ns.refName + return sm + } + c.mu.RUnlock() + return nil +} + +func (c *channelMap) GetServer(id int64) *ServerMetric { + sm := &ServerMetric{} + var svr *server + var ok bool + c.mu.RLock() + if svr, ok = c.servers[id]; !ok { + c.mu.RUnlock() + return nil + } + sm.ListenSockets = copyMap(svr.listenSockets) + c.mu.RUnlock() + sm.ID = svr.id + sm.RefName = svr.refName + sm.ServerData = svr.s.ChannelzMetric() + return sm +} + +type idGenerator struct { + id int64 +} + +func (i *idGenerator) reset() { + atomic.StoreInt64(&i.id, 0) +} + +func (i *idGenerator) genID() int64 { + return atomic.AddInt64(&i.id, 1) +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/logging.go b/vendor/google.golang.org/grpc/internal/channelz/logging.go new file mode 100644 index 00000000..59c7bede --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/logging.go @@ -0,0 +1,100 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "fmt" + + "google.golang.org/grpc/internal/grpclog" +) + +// Info logs through grpclog.Info and adds a trace event if channelz is on. +func Info(id int64, args ...interface{}) { + if IsOn() { + AddTraceEvent(id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtINFO, + }) + } else { + grpclog.InfoDepth(1, args...) + } +} + +// Infof logs through grpclog.Infof and adds a trace event if channelz is on. +func Infof(id int64, format string, args ...interface{}) { + msg := fmt.Sprintf(format, args...) + if IsOn() { + AddTraceEvent(id, 1, &TraceEventDesc{ + Desc: msg, + Severity: CtINFO, + }) + } else { + grpclog.InfoDepth(1, msg) + } +} + +// Warning logs through grpclog.Warning and adds a trace event if channelz is on. +func Warning(id int64, args ...interface{}) { + if IsOn() { + AddTraceEvent(id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtWarning, + }) + } else { + grpclog.WarningDepth(1, args...) + } +} + +// Warningf logs through grpclog.Warningf and adds a trace event if channelz is on. +func Warningf(id int64, format string, args ...interface{}) { + msg := fmt.Sprintf(format, args...) + if IsOn() { + AddTraceEvent(id, 1, &TraceEventDesc{ + Desc: msg, + Severity: CtWarning, + }) + } else { + grpclog.WarningDepth(1, msg) + } +} + +// Error logs through grpclog.Error and adds a trace event if channelz is on. 
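+// A hypothetical call site, with id obtained from one of the Register*
+// functions, might look like:
+//
+//	channelz.Error(id, "grpc: connection error: ", err)
+//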
+func Error(id int64, args ...interface{}) {
+	if IsOn() {
+		AddTraceEvent(id, 1, &TraceEventDesc{
+			Desc:     fmt.Sprint(args...),
+			Severity: CtError,
+		})
+	} else {
+		grpclog.ErrorDepth(1, args...)
+	}
+}
+
+// Errorf logs through grpclog.Errorf and adds a trace event if channelz is on.
+func Errorf(id int64, format string, args ...interface{}) {
+	msg := fmt.Sprintf(format, args...)
+	if IsOn() {
+		AddTraceEvent(id, 1, &TraceEventDesc{
+			Desc:     msg,
+			Severity: CtError,
+		})
+	} else {
+		grpclog.ErrorDepth(1, msg)
+	}
+}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/types.go b/vendor/google.golang.org/grpc/internal/channelz/types.go
new file mode 100644
index 00000000..17c2274c
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/types.go
@@ -0,0 +1,702 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package channelz
+
+import (
+	"net"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"google.golang.org/grpc/connectivity"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/grpclog"
+)
+
+// entry represents a node in the channelz database.
+type entry interface {
+	// addChild adds a child e, whose channelz id is id, to the child list.
+	addChild(id int64, e entry)
+	// deleteChild deletes the child with channelz id id from the child list.
+	deleteChild(id int64)
+	// triggerDelete tries to delete self from the channelz database. However, if the child
+	// list is not empty, then deletion from the database is on hold until the last
+	// child is deleted from the database.
+	triggerDelete()
+	// deleteSelfIfReady checks whether triggerDelete() has been called before, and whether the
+	// child list is now empty. If both conditions are met, it deletes self from the database.
+	deleteSelfIfReady()
+	// getParentID returns the parent ID of the entry. A parent ID of 0 means no parent.
+	getParentID() int64
+}
+
+// dummyEntry is a fake entry to handle the entry-not-found case.
+type dummyEntry struct {
+	idNotFound int64
+}
+
+func (d *dummyEntry) addChild(id int64, e entry) {
+	// Note: It is possible for a normal program to reach here under race condition.
+	// For example, there could be a race between ClientConn.Close() info being propagated
+	// to addrConn and http2Client. ClientConn.Close() cancels the context, which results
+	// in the http2Client erroring out. The error info is then caught by the transport monitor
+	// before addrConn.tearDown() is called inside ClientConn.Close(). Therefore,
+	// the addrConn will create a new transport. And when registering the new transport in
+	// channelz, its parent addrConn could have already been torn down and deleted
+	// from channelz tracking, and thus we reach the code here.
+	grpclog.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound)
+}
+
+func (d *dummyEntry) deleteChild(id int64) {
+	// It is possible for a normal program to reach here under race condition.
+	// Refer to the example described in addChild().
+ grpclog.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound) +} + +func (d *dummyEntry) triggerDelete() { + grpclog.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound) +} + +func (*dummyEntry) deleteSelfIfReady() { + // code should not reach here. deleteSelfIfReady is always called on an existing entry. +} + +func (*dummyEntry) getParentID() int64 { + return 0 +} + +// ChannelMetric defines the info channelz provides for a specific Channel, which +// includes ChannelInternalMetric and channelz-specific data, such as channelz id, +// child list, etc. +type ChannelMetric struct { + // ID is the channelz id of this channel. + ID int64 + // RefName is the human readable reference string of this channel. + RefName string + // ChannelData contains channel internal metric reported by the channel through + // ChannelzMetric(). + ChannelData *ChannelInternalMetric + // NestedChans tracks the nested channel type children of this channel in the format of + // a map from nested channel channelz id to corresponding reference string. + NestedChans map[int64]string + // SubChans tracks the subchannel type children of this channel in the format of a + // map from subchannel channelz id to corresponding reference string. + SubChans map[int64]string + // Sockets tracks the socket type children of this channel in the format of a map + // from socket channelz id to corresponding reference string. + // Note current grpc implementation doesn't allow channel having sockets directly, + // therefore, this is field is unused. + Sockets map[int64]string + // Trace contains the most recent traced events. + Trace *ChannelTrace +} + +// SubChannelMetric defines the info channelz provides for a specific SubChannel, +// which includes ChannelInternalMetric and channelz-specific data, such as +// channelz id, child list, etc. +type SubChannelMetric struct { + // ID is the channelz id of this subchannel. + ID int64 + // RefName is the human readable reference string of this subchannel. + RefName string + // ChannelData contains subchannel internal metric reported by the subchannel + // through ChannelzMetric(). + ChannelData *ChannelInternalMetric + // NestedChans tracks the nested channel type children of this subchannel in the format of + // a map from nested channel channelz id to corresponding reference string. + // Note current grpc implementation doesn't allow subchannel to have nested channels + // as children, therefore, this field is unused. + NestedChans map[int64]string + // SubChans tracks the subchannel type children of this subchannel in the format of a + // map from subchannel channelz id to corresponding reference string. + // Note current grpc implementation doesn't allow subchannel to have subchannels + // as children, therefore, this field is unused. + SubChans map[int64]string + // Sockets tracks the socket type children of this subchannel in the format of a map + // from socket channelz id to corresponding reference string. + Sockets map[int64]string + // Trace contains the most recent traced events. + Trace *ChannelTrace +} + +// ChannelInternalMetric defines the struct that the implementor of Channel interface +// should return from ChannelzMetric(). +type ChannelInternalMetric struct { + // current connectivity state of the channel. + State connectivity.State + // The target this channel originally tried to connect to. May be absent + Target string + // The number of calls started on the channel. 
+	CallsStarted int64
+	// The number of calls that have completed with an OK status.
+	CallsSucceeded int64
+	// The number of calls that have completed with a non-OK status.
+	CallsFailed int64
+	// The last time a call was started on the channel.
+	LastCallStartedTimestamp time.Time
+}
+
+// ChannelTrace stores traced events on a channel/subchannel and related info.
+type ChannelTrace struct {
+	// EventNum is the number of events that ever got traced (i.e. including those that have been deleted).
+	EventNum int64
+	// CreationTime is the creation time of the trace.
+	CreationTime time.Time
+	// Events stores the most recent trace events (up to $maxTraceEntry; a newer event will overwrite
+	// the oldest one).
+	Events []*TraceEvent
+}
+
+// TraceEvent represents a single trace event.
+type TraceEvent struct {
+	// Desc is a simple description of the trace event.
+	Desc string
+	// Severity states the severity of this trace event.
+	Severity Severity
+	// Timestamp is the event time.
+	Timestamp time.Time
+	// RefID is the id of the entity that gets referenced in the event. RefID is 0 if no other entity is
+	// involved in this event.
+	// e.g. SubChannel (id: 4[]) Created. --> RefID = 4, RefName = "" (inside [])
+	RefID int64
+	// RefName is the reference name for the entity that gets referenced in the event.
+	RefName string
+	// RefType indicates the referenced entity type, i.e. Channel or SubChannel.
+	RefType RefChannelType
+}
+
+// Channel is the interface that should be satisfied in order to be tracked by
+// channelz as Channel or SubChannel.
+type Channel interface {
+	ChannelzMetric() *ChannelInternalMetric
+}
+
+type dummyChannel struct{}
+
+func (d *dummyChannel) ChannelzMetric() *ChannelInternalMetric {
+	return &ChannelInternalMetric{}
+}
+
+type channel struct {
+	refName     string
+	c           Channel
+	closeCalled bool
+	nestedChans map[int64]string
+	subChans    map[int64]string
+	id          int64
+	pid         int64
+	cm          *channelMap
+	trace       *channelTrace
+	// traceRefCount is the number of trace events that reference this channel.
+	// Non-zero traceRefCount means the trace of this channel cannot be deleted.
+	traceRefCount int32
+}
+
+func (c *channel) addChild(id int64, e entry) {
+	switch v := e.(type) {
+	case *subChannel:
+		c.subChans[id] = v.refName
+	case *channel:
+		c.nestedChans[id] = v.refName
+	default:
+		grpclog.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e)
+	}
+}
+
+func (c *channel) deleteChild(id int64) {
+	delete(c.subChans, id)
+	delete(c.nestedChans, id)
+	c.deleteSelfIfReady()
+}
+
+func (c *channel) triggerDelete() {
+	c.closeCalled = true
+	c.deleteSelfIfReady()
+}
+
+func (c *channel) getParentID() int64 {
+	return c.pid
+}
+
+// deleteSelfFromTree tries to delete the channel from the channelz entry relation tree, which means
+// deleting the channel reference from its parent's child list.
+//
+// In order for a channel to be deleted from the tree, it must meet two criteria: removal of the
+// corresponding grpc object has been invoked, and the channel does not have any children left.
+//
+// The returned boolean value indicates whether the channel has been successfully deleted from the tree.
+func (c *channel) deleteSelfFromTree() (deleted bool) { + if !c.closeCalled || len(c.subChans)+len(c.nestedChans) != 0 { + return false + } + // not top channel + if c.pid != 0 { + c.cm.findEntry(c.pid).deleteChild(c.id) + } + return true +} + +// deleteSelfFromMap checks whether it is valid to delete the channel from the map, which means +// deleting the channel from channelz's tracking entirely. Users can no longer use id to query the +// channel, and its memory will be garbage collected. +// +// The trace reference count of the channel must be 0 in order to be deleted from the map. This is +// specified in the channel tracing gRFC that as long as some other trace has reference to an entity, +// the trace of the referenced entity must not be deleted. In order to release the resource allocated +// by grpc, the reference to the grpc object is reset to a dummy object. +// +// deleteSelfFromMap must be called after deleteSelfFromTree returns true. +// +// It returns a bool to indicate whether the channel can be safely deleted from map. +func (c *channel) deleteSelfFromMap() (delete bool) { + if c.getTraceRefCount() != 0 { + c.c = &dummyChannel{} + return false + } + return true +} + +// deleteSelfIfReady tries to delete the channel itself from the channelz database. +// The delete process includes two steps: +// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its +// parent's child list. +// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id +// will return entry not found error. +func (c *channel) deleteSelfIfReady() { + if !c.deleteSelfFromTree() { + return + } + if !c.deleteSelfFromMap() { + return + } + c.cm.deleteEntry(c.id) + c.trace.clear() +} + +func (c *channel) getChannelTrace() *channelTrace { + return c.trace +} + +func (c *channel) incrTraceRefCount() { + atomic.AddInt32(&c.traceRefCount, 1) +} + +func (c *channel) decrTraceRefCount() { + atomic.AddInt32(&c.traceRefCount, -1) +} + +func (c *channel) getTraceRefCount() int { + i := atomic.LoadInt32(&c.traceRefCount) + return int(i) +} + +func (c *channel) getRefName() string { + return c.refName +} + +type subChannel struct { + refName string + c Channel + closeCalled bool + sockets map[int64]string + id int64 + pid int64 + cm *channelMap + trace *channelTrace + traceRefCount int32 +} + +func (sc *subChannel) addChild(id int64, e entry) { + if v, ok := e.(*normalSocket); ok { + sc.sockets[id] = v.refName + } else { + grpclog.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e) + } +} + +func (sc *subChannel) deleteChild(id int64) { + delete(sc.sockets, id) + sc.deleteSelfIfReady() +} + +func (sc *subChannel) triggerDelete() { + sc.closeCalled = true + sc.deleteSelfIfReady() +} + +func (sc *subChannel) getParentID() int64 { + return sc.pid +} + +// deleteSelfFromTree tries to delete the subchannel from the channelz entry relation tree, which +// means deleting the subchannel reference from its parent's child list. +// +// In order for a subchannel to be deleted from the tree, it must meet the criteria that, removal of +// the corresponding grpc object has been invoked, and the subchannel does not have any children left. +// +// The returned boolean value indicates whether the channel has been successfully deleted from tree. 
+func (sc *subChannel) deleteSelfFromTree() (deleted bool) { + if !sc.closeCalled || len(sc.sockets) != 0 { + return false + } + sc.cm.findEntry(sc.pid).deleteChild(sc.id) + return true +} + +// deleteSelfFromMap checks whether it is valid to delete the subchannel from the map, which means +// deleting the subchannel from channelz's tracking entirely. Users can no longer use id to query +// the subchannel, and its memory will be garbage collected. +// +// The trace reference count of the subchannel must be 0 in order to be deleted from the map. This is +// specified in the channel tracing gRFC that as long as some other trace has reference to an entity, +// the trace of the referenced entity must not be deleted. In order to release the resource allocated +// by grpc, the reference to the grpc object is reset to a dummy object. +// +// deleteSelfFromMap must be called after deleteSelfFromTree returns true. +// +// It returns a bool to indicate whether the channel can be safely deleted from map. +func (sc *subChannel) deleteSelfFromMap() (delete bool) { + if sc.getTraceRefCount() != 0 { + // free the grpc struct (i.e. addrConn) + sc.c = &dummyChannel{} + return false + } + return true +} + +// deleteSelfIfReady tries to delete the subchannel itself from the channelz database. +// The delete process includes two steps: +// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from +// its parent's child list. +// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup +// by id will return entry not found error. +func (sc *subChannel) deleteSelfIfReady() { + if !sc.deleteSelfFromTree() { + return + } + if !sc.deleteSelfFromMap() { + return + } + sc.cm.deleteEntry(sc.id) + sc.trace.clear() +} + +func (sc *subChannel) getChannelTrace() *channelTrace { + return sc.trace +} + +func (sc *subChannel) incrTraceRefCount() { + atomic.AddInt32(&sc.traceRefCount, 1) +} + +func (sc *subChannel) decrTraceRefCount() { + atomic.AddInt32(&sc.traceRefCount, -1) +} + +func (sc *subChannel) getTraceRefCount() int { + i := atomic.LoadInt32(&sc.traceRefCount) + return int(i) +} + +func (sc *subChannel) getRefName() string { + return sc.refName +} + +// SocketMetric defines the info channelz provides for a specific Socket, which +// includes SocketInternalMetric and channelz-specific data, such as channelz id, etc. +type SocketMetric struct { + // ID is the channelz id of this socket. + ID int64 + // RefName is the human readable reference string of this socket. + RefName string + // SocketData contains socket internal metric reported by the socket through + // ChannelzMetric(). + SocketData *SocketInternalMetric +} + +// SocketInternalMetric defines the struct that the implementor of Socket interface +// should return from ChannelzMetric(). +type SocketInternalMetric struct { + // The number of streams that have been started. + StreamsStarted int64 + // The number of streams that have ended successfully: + // On client side, receiving frame with eos bit set. + // On server side, sending frame with eos bit set. + StreamsSucceeded int64 + // The number of streams that have ended unsuccessfully: + // On client side, termination without receiving frame with eos bit set. + // On server side, termination without sending frame with eos bit set. + StreamsFailed int64 + // The number of messages successfully sent on this socket. + MessagesSent int64 + MessagesReceived int64 + // The number of keep alives sent. 
This is typically implemented with HTTP/2 + // ping messages. + KeepAlivesSent int64 + // The last time a stream was created by this endpoint. Usually unset for + // servers. + LastLocalStreamCreatedTimestamp time.Time + // The last time a stream was created by the remote endpoint. Usually unset + // for clients. + LastRemoteStreamCreatedTimestamp time.Time + // The last time a message was sent by this endpoint. + LastMessageSentTimestamp time.Time + // The last time a message was received by this endpoint. + LastMessageReceivedTimestamp time.Time + // The amount of window, granted to the local endpoint by the remote endpoint. + // This may be slightly out of date due to network latency. This does NOT + // include stream level or TCP level flow control info. + LocalFlowControlWindow int64 + // The amount of window, granted to the remote endpoint by the local endpoint. + // This may be slightly out of date due to network latency. This does NOT + // include stream level or TCP level flow control info. + RemoteFlowControlWindow int64 + // The locally bound address. + LocalAddr net.Addr + // The remote bound address. May be absent. + RemoteAddr net.Addr + // Optional, represents the name of the remote endpoint, if different than + // the original target name. + RemoteName string + SocketOptions *SocketOptionData + Security credentials.ChannelzSecurityValue +} + +// Socket is the interface that should be satisfied in order to be tracked by +// channelz as Socket. +type Socket interface { + ChannelzMetric() *SocketInternalMetric +} + +type listenSocket struct { + refName string + s Socket + id int64 + pid int64 + cm *channelMap +} + +func (ls *listenSocket) addChild(id int64, e entry) { + grpclog.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e) +} + +func (ls *listenSocket) deleteChild(id int64) { + grpclog.Errorf("cannot delete a child (id = %d) from a listen socket", id) +} + +func (ls *listenSocket) triggerDelete() { + ls.cm.deleteEntry(ls.id) + ls.cm.findEntry(ls.pid).deleteChild(ls.id) +} + +func (ls *listenSocket) deleteSelfIfReady() { + grpclog.Errorf("cannot call deleteSelfIfReady on a listen socket") +} + +func (ls *listenSocket) getParentID() int64 { + return ls.pid +} + +type normalSocket struct { + refName string + s Socket + id int64 + pid int64 + cm *channelMap +} + +func (ns *normalSocket) addChild(id int64, e entry) { + grpclog.Errorf("cannot add a child (id = %d) of type %T to a normal socket", id, e) +} + +func (ns *normalSocket) deleteChild(id int64) { + grpclog.Errorf("cannot delete a child (id = %d) from a normal socket", id) +} + +func (ns *normalSocket) triggerDelete() { + ns.cm.deleteEntry(ns.id) + ns.cm.findEntry(ns.pid).deleteChild(ns.id) +} + +func (ns *normalSocket) deleteSelfIfReady() { + grpclog.Errorf("cannot call deleteSelfIfReady on a normal socket") +} + +func (ns *normalSocket) getParentID() int64 { + return ns.pid +} + +// ServerMetric defines the info channelz provides for a specific Server, which +// includes ServerInternalMetric and channelz-specific data, such as channelz id, +// child list, etc. +type ServerMetric struct { + // ID is the channelz id of this server. + ID int64 + // RefName is the human readable reference string of this server. + RefName string + // ServerData contains server internal metric reported by the server through + // ChannelzMetric(). 
+	ServerData *ServerInternalMetric
+	// ListenSockets tracks the listener socket type children of this server in the
+	// format of a map from socket channelz id to corresponding reference string.
+	ListenSockets map[int64]string
+}
+
+// ServerInternalMetric defines the struct that the implementor of Server interface
+// should return from ChannelzMetric().
+type ServerInternalMetric struct {
+	// The number of incoming calls started on the server.
+	CallsStarted int64
+	// The number of incoming calls that have completed with an OK status.
+	CallsSucceeded int64
+	// The number of incoming calls that have completed with a non-OK status.
+	CallsFailed int64
+	// The last time a call was started on the server.
+	LastCallStartedTimestamp time.Time
+}
+
+// Server is the interface to be satisfied in order to be tracked by channelz as
+// Server.
+type Server interface {
+	ChannelzMetric() *ServerInternalMetric
+}
+
+type server struct {
+	refName       string
+	s             Server
+	closeCalled   bool
+	sockets       map[int64]string
+	listenSockets map[int64]string
+	id            int64
+	cm            *channelMap
+}
+
+func (s *server) addChild(id int64, e entry) {
+	switch v := e.(type) {
+	case *normalSocket:
+		s.sockets[id] = v.refName
+	case *listenSocket:
+		s.listenSockets[id] = v.refName
+	default:
+		grpclog.Errorf("cannot add a child (id = %d) of type %T to a server", id, e)
+	}
+}
+
+func (s *server) deleteChild(id int64) {
+	delete(s.sockets, id)
+	delete(s.listenSockets, id)
+	s.deleteSelfIfReady()
+}
+
+func (s *server) triggerDelete() {
+	s.closeCalled = true
+	s.deleteSelfIfReady()
+}
+
+func (s *server) deleteSelfIfReady() {
+	if !s.closeCalled || len(s.sockets)+len(s.listenSockets) != 0 {
+		return
+	}
+	s.cm.deleteEntry(s.id)
+}
+
+func (s *server) getParentID() int64 {
+	return 0
+}
+
+type tracedChannel interface {
+	getChannelTrace() *channelTrace
+	incrTraceRefCount()
+	decrTraceRefCount()
+	getRefName() string
+}
+
+type channelTrace struct {
+	cm          *channelMap
+	createdTime time.Time
+	eventCount  int64
+	mu          sync.Mutex
+	events      []*TraceEvent
+}
+
+func (c *channelTrace) append(e *TraceEvent) {
+	c.mu.Lock()
+	if len(c.events) == getMaxTraceEntry() {
+		del := c.events[0]
+		c.events = c.events[1:]
+		if del.RefID != 0 {
+			// start recursive cleanup in a goroutine so as not to block the call originating from grpc.
+			go func() {
+				// need to acquire the c.cm.mu lock to call decrTraceRefCount, which expects
+				// the lock to be held.
+				c.cm.mu.Lock()
+				c.cm.decrTraceRefCount(del.RefID)
+				c.cm.mu.Unlock()
+			}()
+		}
+	}
+	e.Timestamp = time.Now()
+	c.events = append(c.events, e)
+	c.eventCount++
+	c.mu.Unlock()
+}
+
+func (c *channelTrace) clear() {
+	c.mu.Lock()
+	for _, e := range c.events {
+		if e.RefID != 0 {
+			// the caller should have already held the c.cm.mu lock.
+			c.cm.decrTraceRefCount(e.RefID)
+		}
+	}
+	c.mu.Unlock()
+}
+
+// Severity is the severity level of a trace event.
+// The canonical enumeration of all valid values is here:
+// https://github.com/grpc/grpc-proto/blob/9b13d199cc0d4703c7ea26c9c330ba695866eb23/grpc/channelz/v1/channelz.proto#L126.
+type Severity int
+
+const (
+	// CtUNKNOWN indicates unknown severity of a trace event.
+	CtUNKNOWN Severity = iota
+	// CtINFO indicates info level severity of a trace event.
+	CtINFO
+	// CtWarning indicates warning level severity of a trace event.
+	CtWarning
+	// CtError indicates error level severity of a trace event.
+	CtError
+)
+
+// RefChannelType is the type of the entity being referenced in a trace event.
+type RefChannelType int + +const ( + // RefChannel indicates the referenced entity is a Channel. + RefChannel RefChannelType = iota + // RefSubChannel indicates the referenced entity is a SubChannel. + RefSubChannel +) + +func (c *channelTrace) dumpData() *ChannelTrace { + c.mu.Lock() + ct := &ChannelTrace{EventNum: c.eventCount, CreationTime: c.createdTime} + ct.Events = c.events[:len(c.events)] + c.mu.Unlock() + return ct +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_linux.go b/vendor/google.golang.org/grpc/internal/channelz/types_linux.go new file mode 100644 index 00000000..692dd618 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/types_linux.go @@ -0,0 +1,53 @@ +// +build !appengine + +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +// SocketOptionData defines the struct to hold socket option data, and related +// getter function to obtain info from fd. +type SocketOptionData struct { + Linger *unix.Linger + RecvTimeout *unix.Timeval + SendTimeout *unix.Timeval + TCPInfo *unix.TCPInfo +} + +// Getsockopt defines the function to get socket options requested by channelz. +// It is to be passed to syscall.RawConn.Control(). +func (s *SocketOptionData) Getsockopt(fd uintptr) { + if v, err := unix.GetsockoptLinger(int(fd), syscall.SOL_SOCKET, syscall.SO_LINGER); err == nil { + s.Linger = v + } + if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_RCVTIMEO); err == nil { + s.RecvTimeout = v + } + if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_SNDTIMEO); err == nil { + s.SendTimeout = v + } + if v, err := unix.GetsockoptTCPInfo(int(fd), syscall.SOL_TCP, syscall.TCP_INFO); err == nil { + s.TCPInfo = v + } +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go new file mode 100644 index 00000000..79edbefc --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go @@ -0,0 +1,44 @@ +// +build !linux appengine + +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "sync" + + "google.golang.org/grpc/grpclog" +) + +var once sync.Once + +// SocketOptionData defines the struct to hold socket option data, and related +// getter function to obtain info from fd. 
+// Socket options are not supported on non-linux OSes or App Engine builds.
+type SocketOptionData struct {
+}
+
+// Getsockopt defines the function to get socket options requested by channelz.
+// It is to be passed to syscall.RawConn.Control().
+// Socket options are not supported on non-linux OSes or App Engine builds.
+func (s *SocketOptionData) Getsockopt(fd uintptr) {
+	once.Do(func() {
+		grpclog.Warningln("Channelz: socket options are not supported on non-linux os and appengine.")
+	})
+}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go
new file mode 100644
index 00000000..fdf409d5
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go
@@ -0,0 +1,39 @@
+// +build linux,!appengine
+
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package channelz
+
+import (
+	"syscall"
+)
+
+// GetSocketOption gets the socket option info of the conn.
+func GetSocketOption(socket interface{}) *SocketOptionData {
+	c, ok := socket.(syscall.Conn)
+	if !ok {
+		return nil
+	}
+	data := &SocketOptionData{}
+	if rawConn, err := c.SyscallConn(); err == nil {
+		rawConn.Control(data.Getsockopt)
+		return data
+	}
+	return nil
+}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go
new file mode 100644
index 00000000..8864a081
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go
@@ -0,0 +1,26 @@
+// +build !linux appengine
+
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package channelz
+
+// GetSocketOption gets the socket option info of the conn.
+func GetSocketOption(c interface{}) *SocketOptionData {
+	return nil
+}
diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
new file mode 100644
index 00000000..ae6c8972
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
@@ -0,0 +1,38 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package envconfig contains grpc settings configured by environment variables.
+package envconfig
+
+import (
+	"os"
+	"strings"
+)
+
+const (
+	prefix          = "GRPC_GO_"
+	retryStr        = prefix + "RETRY"
+	txtErrIgnoreStr = prefix + "IGNORE_TXT_ERRORS"
+)
+
+var (
+	// Retry is set if retry is explicitly enabled via "GRPC_GO_RETRY=on".
+	Retry = strings.EqualFold(os.Getenv(retryStr), "on")
+	// TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false").
+	TXTErrIgnore = !strings.EqualFold(os.Getenv(txtErrIgnoreStr), "false")
+)
diff --git a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go
new file mode 100644
index 00000000..8c8e19fc
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go
@@ -0,0 +1,118 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package grpclog (internal) defines depth logging for grpc.
+package grpclog
+
+// Logger is the logger used for the non-depth log functions.
+var Logger LoggerV2
+
+// DepthLogger is the logger used for the depth log functions.
+var DepthLogger DepthLoggerV2
+
+// InfoDepth logs to the INFO log at the specified depth.
+func InfoDepth(depth int, args ...interface{}) {
+	if DepthLogger != nil {
+		DepthLogger.InfoDepth(depth, args...)
+	} else {
+		Logger.Info(args...)
+	}
+}
+
+// WarningDepth logs to the WARNING log at the specified depth.
+func WarningDepth(depth int, args ...interface{}) {
+	if DepthLogger != nil {
+		DepthLogger.WarningDepth(depth, args...)
+	} else {
+		Logger.Warning(args...)
+	}
+}
+
+// ErrorDepth logs to the ERROR log at the specified depth.
+func ErrorDepth(depth int, args ...interface{}) {
+	if DepthLogger != nil {
+		DepthLogger.ErrorDepth(depth, args...)
+	} else {
+		Logger.Error(args...)
+	}
+}
+
+// FatalDepth logs to the FATAL log at the specified depth.
+func FatalDepth(depth int, args ...interface{}) {
+	if DepthLogger != nil {
+		DepthLogger.FatalDepth(depth, args...)
+	} else {
+		Logger.Fatal(args...)
+	}
+}
+
+// LoggerV2 does underlying logging work for grpclog.
+// This is a copy of the LoggerV2 defined in the external grpclog package. It
+// is defined here to avoid a circular dependency.
+type LoggerV2 interface {
+	// Info logs to INFO log. Arguments are handled in the manner of fmt.Print.
+	Info(args ...interface{})
+	// Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println.
+	Infoln(args ...interface{})
+	// Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf.
+	Infof(format string, args ...interface{})
+	// Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print.
+	Warning(args ...interface{})
+	// Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println.
+	Warningln(args ...interface{})
+	// Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf.
+	Warningf(format string, args ...interface{})
+	// Error logs to ERROR log. Arguments are handled in the manner of fmt.Print.
+	Error(args ...interface{})
+	// Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
+	Errorln(args ...interface{})
+	// Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
+	Errorf(format string, args ...interface{})
+	// Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print.
+	// gRPC ensures that all Fatal logs will exit with os.Exit(1).
+	// Implementations may also call os.Exit() with a non-zero exit code.
+	Fatal(args ...interface{})
+	// Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
+	// gRPC ensures that all Fatal logs will exit with os.Exit(1).
+	// Implementations may also call os.Exit() with a non-zero exit code.
+	Fatalln(args ...interface{})
+	// Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
+	// gRPC ensures that all Fatal logs will exit with os.Exit(1).
+	// Implementations may also call os.Exit() with a non-zero exit code.
+	Fatalf(format string, args ...interface{})
+	// V reports whether verbosity level l is at least the requested verbose level.
+	V(l int) bool
+}
+
+// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements
+// DepthLoggerV2, the below functions will be called with the appropriate stack
+// depth set for trivial functions the logger may ignore.
+// This is a copy of the DepthLoggerV2 defined in the external grpclog package.
+// It is defined here to avoid a circular dependency.
+//
+// This API is EXPERIMENTAL.
+type DepthLoggerV2 interface {
+	// InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Print.
+	InfoDepth(depth int, args ...interface{})
+	// WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Print.
+	WarningDepth(depth int, args ...interface{})
+	// ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Print.
+	ErrorDepth(depth int, args ...interface{})
+	// FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Print.
+	FatalDepth(depth int, args ...interface{})
+}
diff --git a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go
new file mode 100644
index 00000000..f6e0dc1d
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go
@@ -0,0 +1,63 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpclog
+
+// PrefixLogger does logging with a prefix.
+//
+// Calling one of the logging methods on a nil *PrefixLogger logs without any prefix.
+type PrefixLogger struct {
+	prefix string
+}
+
+// Infof does info logging.
+func (pl *PrefixLogger) Infof(format string, args ...interface{}) {
+	if pl != nil {
+		// Handle nil, so the tests can pass in a nil logger.
+		format = pl.prefix + format
+	}
+	Logger.Infof(format, args...)
+}
+
+// Warningf does warning logging.
+func (pl *PrefixLogger) Warningf(format string, args ...interface{}) {
+	if pl != nil {
+		format = pl.prefix + format
+	}
+	Logger.Warningf(format, args...)
+}
+
+// Errorf does error logging.
+func (pl *PrefixLogger) Errorf(format string, args ...interface{}) {
+	if pl != nil {
+		format = pl.prefix + format
+	}
+	Logger.Errorf(format, args...)
+}
+
+// Debugf does info logging at verbose level 2.
+func (pl *PrefixLogger) Debugf(format string, args ...interface{}) {
+	if Logger.V(2) {
+		pl.Infof(format, args...)
+	}
+}
+
+// NewPrefixLogger creates a prefix logger with the given prefix.
+func NewPrefixLogger(prefix string) *PrefixLogger {
+	return &PrefixLogger{prefix: prefix}
+}
diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go
new file mode 100644
index 00000000..200b115c
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go
@@ -0,0 +1,56 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package grpcrand implements math/rand functions in a concurrent-safe way
+// with a global random source, independent of math/rand's global source.
+package grpcrand
+
+import (
+	"math/rand"
+	"sync"
+	"time"
+)
+
+var (
+	r  = rand.New(rand.NewSource(time.Now().UnixNano()))
+	mu sync.Mutex
+)
+
+// Int63n implements rand.Int63n on the grpcrand global source.
+func Int63n(n int64) int64 {
+	mu.Lock()
+	res := r.Int63n(n)
+	mu.Unlock()
+	return res
+}
+
+// Intn implements rand.Intn on the grpcrand global source.
+func Intn(n int) int {
+	mu.Lock()
+	res := r.Intn(n)
+	mu.Unlock()
+	return res
+}
+
+// Float64 implements rand.Float64 on the grpcrand global source.
+func Float64() float64 {
+	mu.Lock()
+	res := r.Float64()
+	mu.Unlock()
+	return res
+}
diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/event.go b/vendor/google.golang.org/grpc/internal/grpcsync/event.go
new file mode 100644
index 00000000..fbe697c3
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/grpcsync/event.go
@@ -0,0 +1,61 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpcsync implements additional synchronization primitives built upon +// the sync package. +package grpcsync + +import ( + "sync" + "sync/atomic" +) + +// Event represents a one-time event that may occur in the future. +type Event struct { + fired int32 + c chan struct{} + o sync.Once +} + +// Fire causes e to complete. It is safe to call multiple times, and +// concurrently. It returns true iff this call to Fire caused the signaling +// channel returned by Done to close. +func (e *Event) Fire() bool { + ret := false + e.o.Do(func() { + atomic.StoreInt32(&e.fired, 1) + close(e.c) + ret = true + }) + return ret +} + +// Done returns a channel that will be closed when Fire is called. +func (e *Event) Done() <-chan struct{} { + return e.c +} + +// HasFired returns true if Fire has been called. +func (e *Event) HasFired() bool { + return atomic.LoadInt32(&e.fired) == 1 +} + +// NewEvent returns a new, ready-to-use Event. +func NewEvent() *Event { + return &Event{c: make(chan struct{})} +} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/target.go b/vendor/google.golang.org/grpc/internal/grpcutil/target.go new file mode 100644 index 00000000..80b33cda --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/target.go @@ -0,0 +1,55 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpcutil provides a bunch of utility functions to be used across the +// gRPC codebase. +package grpcutil + +import ( + "strings" + + "google.golang.org/grpc/resolver" +) + +// split2 returns the values from strings.SplitN(s, sep, 2). +// If sep is not found, it returns ("", "", false) instead. +func split2(s, sep string) (string, string, bool) { + spl := strings.SplitN(s, sep, 2) + if len(spl) < 2 { + return "", "", false + } + return spl[0], spl[1], true +} + +// ParseTarget splits target into a resolver.Target struct containing scheme, +// authority and endpoint. +// +// If target is not a valid scheme://authority/endpoint, it returns {Endpoint: +// target}. 
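+//
+// Illustrative examples (derived from the implementation below, not part of
+// the original documentation):
+//
+//	ParseTarget("dns://8.8.8.8/foo.bar") -> Target{Scheme: "dns", Authority: "8.8.8.8", Endpoint: "foo.bar"}
+//	ParseTarget("unix:///run/app.sock")  -> Target{Scheme: "unix", Authority: "", Endpoint: "run/app.sock"}
+//	ParseTarget("foo.bar:443")           -> Target{Endpoint: "foo.bar:443"}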
+func ParseTarget(target string) (ret resolver.Target) {
+	var ok bool
+	ret.Scheme, ret.Endpoint, ok = split2(target, "://")
+	if !ok {
+		return resolver.Target{Endpoint: target}
+	}
+	ret.Authority, ret.Endpoint, ok = split2(ret.Endpoint, "/")
+	if !ok {
+		return resolver.Target{Endpoint: target}
+	}
+	return ret
+}
diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go
new file mode 100644
index 00000000..c6fbe8bb
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/internal.go
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package internal contains gRPC-internal code, to avoid polluting
+// the godoc of the top-level grpc package. It must not import any grpc
+// symbols to avoid circular dependencies.
+package internal
+
+import (
+	"context"
+	"time"
+
+	"google.golang.org/grpc/connectivity"
+)
+
+var (
+	// WithHealthCheckFunc is set by dialoptions.go
+	WithHealthCheckFunc interface{} // func (HealthChecker) DialOption
+	// HealthCheckFunc is used to provide client-side LB channel health checking
+	HealthCheckFunc HealthChecker
+	// BalancerUnregister is exported by package balancer to unregister a balancer.
+	BalancerUnregister func(name string)
+	// KeepaliveMinPingTime is the minimum ping interval. This must be 10s by
+	// default, but tests may wish to set it lower for convenience.
+	KeepaliveMinPingTime = 10 * time.Second
+	// NewRequestInfoContext creates a new context from the argument context,
+	// attaching the passed-in RequestInfo to the new context.
+	NewRequestInfoContext interface{} // func(context.Context, credentials.RequestInfo) context.Context
+	// ParseServiceConfigForTesting is for creating a fake
+	// ClientConn for resolver testing only
+	ParseServiceConfigForTesting interface{} // func(string) *serviceconfig.ParseResult
+)
+
+// HealthChecker defines the signature of the client-side LB channel health checking function.
+//
+// The implementation is expected to create a health checking RPC stream by
+// calling newStream(), watch for the health status of serviceName, and report
+// its health back by calling setConnectivityState().
+//
+// The health checking protocol is defined at:
+// https://github.com/grpc/grpc/blob/master/doc/health-checking.md
+type HealthChecker func(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State, error), serviceName string) error
+
+const (
+	// CredsBundleModeFallback switches GoogleDefaultCreds to fallback mode.
+	CredsBundleModeFallback = "fallback"
+	// CredsBundleModeBalancer switches GoogleDefaultCreds to grpclb balancer
+	// mode.
+	CredsBundleModeBalancer = "balancer"
+	// CredsBundleModeBackendFromBalancer switches GoogleDefaultCreds to a mode
+	// that supports backend returned by grpclb balancer.
+	CredsBundleModeBackendFromBalancer = "backend-from-balancer"
+)
diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
new file mode 100644
index 00000000..c368db62
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
@@ -0,0 +1,441 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package dns implements a dns resolver to be installed as the default resolver
+// in grpc.
+package dns
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net"
+	"os"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/internal/envconfig"
+	"google.golang.org/grpc/internal/grpcrand"
+	"google.golang.org/grpc/resolver"
+	"google.golang.org/grpc/serviceconfig"
+)
+
+// EnableSRVLookups controls whether the DNS resolver attempts to fetch gRPCLB
+// addresses from SRV records. Must not be changed after init time.
+var EnableSRVLookups = false
+
+func init() {
+	resolver.Register(NewBuilder())
+}
+
+const (
+	defaultPort       = "443"
+	defaultDNSSvrPort = "53"
+	golang            = "GO"
+	// txtPrefix is the prefix string to be prepended to the host name for txt record lookup.
+	txtPrefix = "_grpc_config."
+	// In DNS, service config is encoded in a TXT record via the mechanism
+	// described in RFC-1464 using the attribute name grpc_config.
+	txtAttribute = "grpc_config="
+)
+
+var (
+	errMissingAddr = errors.New("dns resolver: missing address")
+
+	// Addresses ending with a colon that is supposed to be the separator
+	// between host and port are not allowed. E.g. "::" is a valid address as
+	// it is an IPv6 address (host only), while "[::]:" is invalid as it ends
+	// with a colon as the host and port separator.
+	errEndsWithColon = errors.New("dns resolver: missing port after port-separator colon")
+)
+
+var (
+	defaultResolver netResolver = net.DefaultResolver
+	// To prevent excessive re-resolution, we enforce a rate limit on DNS
+	// resolution requests.
+	minDNSResRate = 30 * time.Second
+)
+
+var customAuthorityDialler = func(authority string) func(ctx context.Context, network, address string) (net.Conn, error) {
+	return func(ctx context.Context, network, address string) (net.Conn, error) {
+		var dialer net.Dialer
+		return dialer.DialContext(ctx, network, authority)
+	}
+}
+
+var customAuthorityResolver = func(authority string) (netResolver, error) {
+	host, port, err := parseTarget(authority, defaultDNSSvrPort)
+	if err != nil {
+		return nil, err
+	}
+
+	authorityWithPort := net.JoinHostPort(host, port)
+
+	return &net.Resolver{
+		PreferGo: true,
+		Dial:     customAuthorityDialler(authorityWithPort),
+	}, nil
+}
+
+// NewBuilder creates a dnsBuilder which is used to create DNS resolvers.
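+//
+// For instance (an illustrative note, not upstream documentation): a client
+// target such as "dns:///example.com:50051" is handled by this builder; its
+// Authority is empty and its Endpoint is "example.com:50051", so lookups use
+// the system resolver, whereas "dns://8.8.8.8/example.com:50051" would route
+// lookups through 8.8.8.8 via customAuthorityResolver above.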
+func NewBuilder() resolver.Builder {
+	return &dnsBuilder{}
+}
+
+type dnsBuilder struct{}
+
+// Build creates and starts a DNS resolver that watches the name resolution of the target.
+func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
+	host, port, err := parseTarget(target.Endpoint, defaultPort)
+	if err != nil {
+		return nil, err
+	}
+
+	// IP address.
+	if ipAddr, ok := formatIP(host); ok {
+		addr := []resolver.Address{{Addr: ipAddr + ":" + port}}
+		cc.UpdateState(resolver.State{Addresses: addr})
+		return deadResolver{}, nil
+	}
+
+	// DNS address (non-IP).
+	ctx, cancel := context.WithCancel(context.Background())
+	d := &dnsResolver{
+		host:                 host,
+		port:                 port,
+		ctx:                  ctx,
+		cancel:               cancel,
+		cc:                   cc,
+		rn:                   make(chan struct{}, 1),
+		disableServiceConfig: opts.DisableServiceConfig,
+	}
+
+	if target.Authority == "" {
+		d.resolver = defaultResolver
+	} else {
+		d.resolver, err = customAuthorityResolver(target.Authority)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	d.wg.Add(1)
+	go d.watcher()
+	d.ResolveNow(resolver.ResolveNowOptions{})
+	return d, nil
+}
+
+// Scheme returns the naming scheme of this resolver builder, which is "dns".
+func (b *dnsBuilder) Scheme() string {
+	return "dns"
+}
+
+type netResolver interface {
+	LookupHost(ctx context.Context, host string) (addrs []string, err error)
+	LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error)
+	LookupTXT(ctx context.Context, name string) (txts []string, err error)
+}
+
+// deadResolver is a resolver that does nothing.
+type deadResolver struct{}
+
+func (deadResolver) ResolveNow(resolver.ResolveNowOptions) {}
+
+func (deadResolver) Close() {}
+
+// dnsResolver watches for the name resolution update for a non-IP target.
+type dnsResolver struct {
+	host     string
+	port     string
+	resolver netResolver
+	ctx      context.Context
+	cancel   context.CancelFunc
+	cc       resolver.ClientConn
+	// rn channel is used by ResolveNow() to force an immediate resolution of the target.
+	rn chan struct{}
+	// wg is used to ensure Close() returns only after the watcher() goroutine
+	// has finished. Otherwise, a data race is possible. [Race example] In
+	// dns_resolver_test we replace the real lookup functions with mocked ones
+	// to facilitate testing. If Close() didn't wait for the watcher() goroutine
+	// to finish, the race detector would sometimes warn that lookup (which READs
+	// the lookup function pointers) inside the watcher() goroutine races with
+	// replaceNetFunc (which WRITEs the lookup function pointers).
+	wg                   sync.WaitGroup
+	disableServiceConfig bool
+}
+
+// ResolveNow invokes an immediate resolution of the target that this dnsResolver watches.
+func (d *dnsResolver) ResolveNow(resolver.ResolveNowOptions) {
+	select {
+	case d.rn <- struct{}{}:
+	default:
+	}
+}
+
+// Close closes the dnsResolver.
+func (d *dnsResolver) Close() {
+	d.cancel()
+	d.wg.Wait()
+}
+
+func (d *dnsResolver) watcher() {
+	defer d.wg.Done()
+	for {
+		select {
+		case <-d.ctx.Done():
+			return
+		case <-d.rn:
+		}
+
+		state, err := d.lookup()
+		if err != nil {
+			d.cc.ReportError(err)
+		} else {
+			d.cc.UpdateState(*state)
+		}
+
+		// Sleep to prevent excessive re-resolutions. Incoming resolution requests
+		// will be queued in d.rn.
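+		// Because d.rn is buffered with capacity one, ResolveNow() calls that
+		// arrive during this sleep collapse into a single pending
+		// re-resolution rather than piling up.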
+ t := time.NewTimer(minDNSResRate) + select { + case <-t.C: + case <-d.ctx.Done(): + t.Stop() + return + } + } +} + +func (d *dnsResolver) lookupSRV() ([]resolver.Address, error) { + if !EnableSRVLookups { + return nil, nil + } + var newAddrs []resolver.Address + _, srvs, err := d.resolver.LookupSRV(d.ctx, "grpclb", "tcp", d.host) + if err != nil { + err = handleDNSError(err, "SRV") // may become nil + return nil, err + } + for _, s := range srvs { + lbAddrs, err := d.resolver.LookupHost(d.ctx, s.Target) + if err != nil { + err = handleDNSError(err, "A") // may become nil + if err == nil { + // If there are other SRV records, look them up and ignore this + // one that does not exist. + continue + } + return nil, err + } + for _, a := range lbAddrs { + ip, ok := formatIP(a) + if !ok { + return nil, fmt.Errorf("dns: error parsing A record IP address %v", a) + } + addr := ip + ":" + strconv.Itoa(int(s.Port)) + newAddrs = append(newAddrs, resolver.Address{Addr: addr, Type: resolver.GRPCLB, ServerName: s.Target}) + } + } + return newAddrs, nil +} + +var filterError = func(err error) error { + if dnsErr, ok := err.(*net.DNSError); ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary { + // Timeouts and temporary errors should be communicated to gRPC to + // attempt another DNS query (with backoff). Other errors should be + // suppressed (they may represent the absence of a TXT record). + return nil + } + return err +} + +func handleDNSError(err error, lookupType string) error { + err = filterError(err) + if err != nil { + err = fmt.Errorf("dns: %v record lookup error: %v", lookupType, err) + grpclog.Infoln(err) + } + return err +} + +func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult { + ss, err := d.resolver.LookupTXT(d.ctx, txtPrefix+d.host) + if err != nil { + if envconfig.TXTErrIgnore { + return nil + } + if err = handleDNSError(err, "TXT"); err != nil { + return &serviceconfig.ParseResult{Err: err} + } + return nil + } + var res string + for _, s := range ss { + res += s + } + + // TXT record must have "grpc_config=" attribute in order to be used as service config. + if !strings.HasPrefix(res, txtAttribute) { + grpclog.Warningf("dns: TXT record %v missing %v attribute", res, txtAttribute) + // This is not an error; it is the equivalent of not having a service config. + return nil + } + sc := canaryingSC(strings.TrimPrefix(res, txtAttribute)) + return d.cc.ParseServiceConfig(sc) +} + +func (d *dnsResolver) lookupHost() ([]resolver.Address, error) { + var newAddrs []resolver.Address + addrs, err := d.resolver.LookupHost(d.ctx, d.host) + if err != nil { + err = handleDNSError(err, "A") + return nil, err + } + for _, a := range addrs { + ip, ok := formatIP(a) + if !ok { + return nil, fmt.Errorf("dns: error parsing A record IP address %v", a) + } + addr := ip + ":" + d.port + newAddrs = append(newAddrs, resolver.Address{Addr: addr}) + } + return newAddrs, nil +} + +func (d *dnsResolver) lookup() (*resolver.State, error) { + srv, srvErr := d.lookupSRV() + addrs, hostErr := d.lookupHost() + if hostErr != nil && (srvErr != nil || len(srv) == 0) { + return nil, hostErr + } + state := &resolver.State{ + Addresses: append(addrs, srv...), + } + if !d.disableServiceConfig { + state.ServiceConfig = d.lookupTXT() + } + return state, nil +} + +// formatIP returns ok = false if addr is not a valid textual representation of an IP address. +// If addr is an IPv4 address, return the addr and ok = true. +// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true. 
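+// For example (derived from the implementation below):
+//	formatIP("10.0.0.1")    -> ("10.0.0.1", true)
+//	formatIP("2001:db8::1") -> ("[2001:db8::1]", true)
+//	formatIP("not-an-ip")   -> ("", false)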
+func formatIP(addr string) (addrIP string, ok bool) { + ip := net.ParseIP(addr) + if ip == nil { + return "", false + } + if ip.To4() != nil { + return addr, true + } + return "[" + addr + "]", true +} + +// parseTarget takes the user input target string and default port, returns formatted host and port info. +// If target doesn't specify a port, set the port to be the defaultPort. +// If target is in IPv6 format and host-name is enclosed in square brackets, brackets +// are stripped when setting the host. +// examples: +// target: "www.google.com" defaultPort: "443" returns host: "www.google.com", port: "443" +// target: "ipv4-host:80" defaultPort: "443" returns host: "ipv4-host", port: "80" +// target: "[ipv6-host]" defaultPort: "443" returns host: "ipv6-host", port: "443" +// target: ":80" defaultPort: "443" returns host: "localhost", port: "80" +func parseTarget(target, defaultPort string) (host, port string, err error) { + if target == "" { + return "", "", errMissingAddr + } + if ip := net.ParseIP(target); ip != nil { + // target is an IPv4 or IPv6(without brackets) address + return target, defaultPort, nil + } + if host, port, err = net.SplitHostPort(target); err == nil { + if port == "" { + // If the port field is empty (target ends with colon), e.g. "[::1]:", this is an error. + return "", "", errEndsWithColon + } + // target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port + if host == "" { + // Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed. + host = "localhost" + } + return host, port, nil + } + if host, port, err = net.SplitHostPort(target + ":" + defaultPort); err == nil { + // target doesn't have port + return host, port, nil + } + return "", "", fmt.Errorf("invalid target address %v, error info: %v", target, err) +} + +type rawChoice struct { + ClientLanguage *[]string `json:"clientLanguage,omitempty"` + Percentage *int `json:"percentage,omitempty"` + ClientHostName *[]string `json:"clientHostName,omitempty"` + ServiceConfig *json.RawMessage `json:"serviceConfig,omitempty"` +} + +func containsString(a *[]string, b string) bool { + if a == nil { + return true + } + for _, c := range *a { + if c == b { + return true + } + } + return false +} + +func chosenByPercentage(a *int) bool { + if a == nil { + return true + } + return grpcrand.Intn(100)+1 <= *a +} + +func canaryingSC(js string) string { + if js == "" { + return "" + } + var rcs []rawChoice + err := json.Unmarshal([]byte(js), &rcs) + if err != nil { + grpclog.Warningf("dns: error parsing service config json: %v", err) + return "" + } + cliHostname, err := os.Hostname() + if err != nil { + grpclog.Warningf("dns: error getting client hostname: %v", err) + return "" + } + var sc string + for _, c := range rcs { + if !containsString(c.ClientLanguage, golang) || + !chosenByPercentage(c.Percentage) || + !containsString(c.ClientHostName, cliHostname) || + c.ServiceConfig == nil { + continue + } + sc = string(*c.ServiceConfig) + break + } + return sc +} diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/go113.go b/vendor/google.golang.org/grpc/internal/resolver/dns/go113.go new file mode 100644 index 00000000..8783a8cf --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/go113.go @@ -0,0 +1,33 @@ +// +build go1.13 + +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package dns + +import "net" + +func init() { + filterError = func(err error) error { + if dnsErr, ok := err.(*net.DNSError); ok && dnsErr.IsNotFound { + // The name does not exist; not an error. + return nil + } + return err + } +} diff --git a/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go new file mode 100644 index 00000000..520d9229 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go @@ -0,0 +1,57 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package passthrough implements a pass-through resolver. It sends the target +// name without scheme back to gRPC as resolved address. +package passthrough + +import "google.golang.org/grpc/resolver" + +const scheme = "passthrough" + +type passthroughBuilder struct{} + +func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { + r := &passthroughResolver{ + target: target, + cc: cc, + } + r.start() + return r, nil +} + +func (*passthroughBuilder) Scheme() string { + return scheme +} + +type passthroughResolver struct { + target resolver.Target + cc resolver.ClientConn +} + +func (r *passthroughResolver) start() { + r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint}}}) +} + +func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOptions) {} + +func (*passthroughResolver) Close() {} + +func init() { + resolver.Register(&passthroughBuilder{}) +} diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go new file mode 100644 index 00000000..43281a3e --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go @@ -0,0 +1,114 @@ +// +build !appengine + +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ *
+ */
+
+// Package syscall provides functionality that grpc uses to get low-level
+// operating system stats/info.
+package syscall
+
+import (
+	"fmt"
+	"net"
+	"syscall"
+	"time"
+
+	"golang.org/x/sys/unix"
+	"google.golang.org/grpc/grpclog"
+)
+
+// GetCPUTime returns how much CPU time has passed since the start of this process.
+func GetCPUTime() int64 {
+	var ts unix.Timespec
+	if err := unix.ClockGettime(unix.CLOCK_PROCESS_CPUTIME_ID, &ts); err != nil {
+		grpclog.Fatal(err)
+	}
+	return ts.Nano()
+}
+
+// Rusage is an alias for syscall.Rusage under linux non-appengine environment.
+type Rusage syscall.Rusage
+
+// GetRusage returns the resource usage of current process.
+func GetRusage() (rusage *Rusage) {
+	rusage = new(Rusage)
+	syscall.Getrusage(syscall.RUSAGE_SELF, (*syscall.Rusage)(rusage))
+	return
+}
+
+// CPUTimeDiff returns the differences of user CPU time and system CPU time used
+// between two Rusage structs.
+func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) {
+	f := (*syscall.Rusage)(first)
+	l := (*syscall.Rusage)(latest)
+	var (
+		utimeDiffs  = l.Utime.Sec - f.Utime.Sec
+		utimeDiffus = l.Utime.Usec - f.Utime.Usec
+		stimeDiffs  = l.Stime.Sec - f.Stime.Sec
+		stimeDiffus = l.Stime.Usec - f.Stime.Usec
+	)
+
+	uTimeElapsed := float64(utimeDiffs) + float64(utimeDiffus)*1.0e-6
+	sTimeElapsed := float64(stimeDiffs) + float64(stimeDiffus)*1.0e-6
+
+	return uTimeElapsed, sTimeElapsed
+}
+
+// SetTCPUserTimeout sets the TCP user timeout on a connection's socket.
+func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error {
+	tcpconn, ok := conn.(*net.TCPConn)
+	if !ok {
+		// Not a TCP connection. Exit early.
+		return nil
+	}
+	rawConn, err := tcpconn.SyscallConn()
+	if err != nil {
+		return fmt.Errorf("error getting raw connection: %v", err)
+	}
+	err = rawConn.Control(func(fd uintptr) {
+		err = syscall.SetsockoptInt(int(fd), syscall.IPPROTO_TCP, unix.TCP_USER_TIMEOUT, int(timeout/time.Millisecond))
+	})
+	if err != nil {
+		return fmt.Errorf("error setting option on socket: %v", err)
+	}
+
+	return nil
+}
+
+// GetTCPUserTimeout gets the TCP user timeout on a connection's socket.
+func GetTCPUserTimeout(conn net.Conn) (opt int, err error) {
+	tcpconn, ok := conn.(*net.TCPConn)
+	if !ok {
+		err = fmt.Errorf("conn is not *net.TCPConn. got %T", conn)
+		return
+	}
+	rawConn, err := tcpconn.SyscallConn()
+	if err != nil {
+		err = fmt.Errorf("error getting raw connection: %v", err)
+		return
+	}
+	err = rawConn.Control(func(fd uintptr) {
+		opt, err = syscall.GetsockoptInt(int(fd), syscall.IPPROTO_TCP, unix.TCP_USER_TIMEOUT)
+	})
+	if err != nil {
+		err = fmt.Errorf("error getting option on socket: %v", err)
+		return
+	}
+
+	return
+}
diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
new file mode 100644
index 00000000..d3fd9dab
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
@@ -0,0 +1,73 @@
+// +build !linux appengine
+
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package syscall
+
+import (
+	"net"
+	"sync"
+	"time"
+
+	"google.golang.org/grpc/grpclog"
+)
+
+var once sync.Once
+
+func log() {
+	once.Do(func() {
+		grpclog.Info("CPU time info is unavailable on non-linux or appengine environment.")
+	})
+}
+
+// GetCPUTime returns how much CPU time has passed since the start of this process.
+// It always returns 0 under non-linux or appengine environment.
+func GetCPUTime() int64 {
+	log()
+	return 0
+}
+
+// Rusage is an empty struct under non-linux or appengine environment.
+type Rusage struct{}
+
+// GetRusage is a no-op function under non-linux or appengine environment.
+func GetRusage() (rusage *Rusage) {
+	log()
+	return nil
+}
+
+// CPUTimeDiff returns the differences of user CPU time and system CPU time used
+// between two Rusage structs. It is a no-op function under non-linux or appengine
+// environments.
+func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) {
+	log()
+	return 0, 0
+}
+
+// SetTCPUserTimeout is a no-op function under non-linux or appengine environments.
+func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error {
+	log()
+	return nil
+}
+
+// GetTCPUserTimeout is a no-op function under non-linux or appengine environments;
+// a negative return value indicates the operation is not supported.
+func GetTCPUserTimeout(conn net.Conn) (int, error) {
+	log()
+	return -1, nil
+}
diff --git a/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go b/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go
new file mode 100644
index 00000000..070680ed
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go
@@ -0,0 +1,141 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transport
+
+import (
+	"sync"
+	"time"
+)
+
+const (
+	// bdpLimit is the maximum value the flow control windows will be increased
+	// to. TCP typically limits this to 4MB, but some systems go up to 16MB.
+	// Since this is only a limit, it is safe to make it optimistic.
+	bdpLimit = (1 << 20) * 16
+	// alpha is a constant factor used to keep a moving average
+	// of RTTs.
+	alpha = 0.9
+	// If the current bdp sample is greater than or equal to
+	// beta times our estimated bdp and the current bandwidth
+	// sample is the maximum bandwidth observed so far, we
+	// increase our bdp estimate by a factor of gamma.
+	beta = 0.66
+	// To keep our bdp smaller than or equal to twice the real BDP, we should
+	// multiply our current sample by 4/3; however, to round things out, we
+	// use 2 as the multiplication factor.
+	gamma = 2
+)
+
+// Adding arbitrary data to ping so that its ack can be identified.
+// Easter-egg: what does the ping message say?
+var bdpPing = &ping{data: [8]byte{2, 4, 16, 16, 9, 14, 7, 7}}
+
+type bdpEstimator struct {
+	// sentAt is the time when the ping was sent.
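+	// It is set by timesnap() when the bdp ping goes out on the wire and
+	// reset to the zero time by add() at the start of a new sample cycle.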
+	sentAt time.Time
+
+	mu sync.Mutex
+	// bdp is the current bdp estimate.
+	bdp uint32
+	// sample is the number of bytes received in one measurement cycle.
+	sample uint32
+	// bwMax is the maximum bandwidth noted so far (bytes/sec).
+	bwMax float64
+	// bool to keep track of the beginning of a new measurement cycle.
+	isSent bool
+	// Callback to update the window sizes.
+	updateFlowControl func(n uint32)
+	// sampleCount is the number of samples taken so far.
+	sampleCount uint64
+	// round trip time (seconds)
+	rtt float64
+}
+
+// timesnap registers the time the bdp ping was sent out so that
+// network rtt can be calculated when its ack is received.
+// It is called (by controller) when the bdpPing is
+// being written on the wire.
+func (b *bdpEstimator) timesnap(d [8]byte) {
+	if bdpPing.data != d {
+		return
+	}
+	b.sentAt = time.Now()
+}
+
+// add adds bytes to the current sample for calculating bdp.
+// It returns true only if a ping must be sent. This can be used
+// by the caller (handleData) to make a decision about batching
+// a window update with it.
+func (b *bdpEstimator) add(n uint32) bool {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	if b.bdp == bdpLimit {
+		return false
+	}
+	if !b.isSent {
+		b.isSent = true
+		b.sample = n
+		b.sentAt = time.Time{}
+		b.sampleCount++
+		return true
+	}
+	b.sample += n
+	return false
+}
+
+// calculate is called when an ack for a bdp ping is received.
+// Here we calculate the current bdp and bandwidth sample and
+// decide if the flow control windows should go up.
+func (b *bdpEstimator) calculate(d [8]byte) {
+	// Check if the ack is for the bdp ping.
+	if bdpPing.data != d {
+		return
+	}
+	b.mu.Lock()
+	rttSample := time.Since(b.sentAt).Seconds()
+	if b.sampleCount < 10 {
+		// Bootstrap rtt with an average of first 10 rtt samples.
+		b.rtt += (rttSample - b.rtt) / float64(b.sampleCount)
+	} else {
+		// Give more weight to the recent past.
+		b.rtt += (rttSample - b.rtt) * float64(alpha)
+	}
+	b.isSent = false
+	// The number of bytes accumulated so far in the sample is smaller
+	// than or equal to 1.5 times the real BDP on a saturated connection.
+	bwCurrent := float64(b.sample) / (b.rtt * float64(1.5))
+	if bwCurrent > b.bwMax {
+		b.bwMax = bwCurrent
+	}
+	// If the current sample (which is smaller than or equal to 1.5 times the
+	// real BDP) is greater than or equal to 2/3rds of our perceived bdp AND
+	// this is the maximum bandwidth seen so far, we should update our
+	// perception of the network BDP.
+	if float64(b.sample) >= beta*float64(b.bdp) && bwCurrent == b.bwMax && b.bdp != bdpLimit {
+		sampleFloat := float64(b.sample)
+		b.bdp = uint32(gamma * sampleFloat)
+		if b.bdp > bdpLimit {
+			b.bdp = bdpLimit
+		}
+		bdp := b.bdp
+		b.mu.Unlock()
+		b.updateFlowControl(bdp)
+		return
+	}
+	b.mu.Unlock()
+}
diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
new file mode 100644
index 00000000..ddee20b6
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
@@ -0,0 +1,926 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transport
+
+import (
+	"bytes"
+	"fmt"
+	"runtime"
+	"sync"
+	"sync/atomic"
+
+	"golang.org/x/net/http2"
+	"golang.org/x/net/http2/hpack"
+)
+
+var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) {
+	e.SetMaxDynamicTableSizeLimit(v)
+}
+
+type itemNode struct {
+	it   interface{}
+	next *itemNode
+}
+
+type itemList struct {
+	head *itemNode
+	tail *itemNode
+}
+
+func (il *itemList) enqueue(i interface{}) {
+	n := &itemNode{it: i}
+	if il.tail == nil {
+		il.head, il.tail = n, n
+		return
+	}
+	il.tail.next = n
+	il.tail = n
+}
+
+// peek returns the first item in the list without removing it from the
+// list.
+func (il *itemList) peek() interface{} {
+	return il.head.it
+}
+
+func (il *itemList) dequeue() interface{} {
+	if il.head == nil {
+		return nil
+	}
+	i := il.head.it
+	il.head = il.head.next
+	if il.head == nil {
+		il.tail = nil
+	}
+	return i
+}
+
+func (il *itemList) dequeueAll() *itemNode {
+	h := il.head
+	il.head, il.tail = nil, nil
+	return h
+}
+
+func (il *itemList) isEmpty() bool {
+	return il.head == nil
+}
+
+// The following defines various control items which could flow through
+// the control buffer of transport. They represent different aspects of
+// control tasks, e.g., flow control, settings, stream resetting, etc.
+
+// maxQueuedTransportResponseFrames is the maximum number of queued "transport
+// response" frames we will buffer before preventing new reads from occurring
+// on the transport. These are control frames sent in response to client
+// requests, such as RST_STREAM due to bad headers or settings acks.
+const maxQueuedTransportResponseFrames = 50
+
+type cbItem interface {
+	isTransportResponseFrame() bool
+}
+
+// registerStream is used to register an incoming stream with loopy writer.
+type registerStream struct {
+	streamID uint32
+	wq       *writeQuota
+}
+
+func (*registerStream) isTransportResponseFrame() bool { return false }
+
+// headerFrame is also used to register a stream on the client side.
+type headerFrame struct {
+	streamID   uint32
+	hf         []hpack.HeaderField
+	endStream  bool               // Valid on server side.
+	initStream func(uint32) error // Used only on the client side.
+	onWrite    func()
+	wq         *writeQuota    // write quota for the stream created.
+	cleanup    *cleanupStream // Valid on the server side.
+	onOrphaned func(error)    // Valid on client-side
+}
+
+func (h *headerFrame) isTransportResponseFrame() bool {
+	return h.cleanup != nil && h.cleanup.rst // Results in a RST_STREAM
+}
+
+type cleanupStream struct {
+	streamID uint32
+	rst      bool
+	rstCode  http2.ErrCode
+	onWrite  func()
+}
+
+func (c *cleanupStream) isTransportResponseFrame() bool { return c.rst } // Results in a RST_STREAM
+
+type dataFrame struct {
+	streamID  uint32
+	endStream bool
+	h         []byte
+	d         []byte
+	// onEachWrite is called every time
+	// a part of d is written out.
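+	// Since d may be split across multiple HTTP2 data frames, onEachWrite
+	// can fire more than once for a single dataFrame.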
+ onEachWrite func() +} + +func (*dataFrame) isTransportResponseFrame() bool { return false } + +type incomingWindowUpdate struct { + streamID uint32 + increment uint32 +} + +func (*incomingWindowUpdate) isTransportResponseFrame() bool { return false } + +type outgoingWindowUpdate struct { + streamID uint32 + increment uint32 +} + +func (*outgoingWindowUpdate) isTransportResponseFrame() bool { + return false // window updates are throttled by thresholds +} + +type incomingSettings struct { + ss []http2.Setting +} + +func (*incomingSettings) isTransportResponseFrame() bool { return true } // Results in a settings ACK + +type outgoingSettings struct { + ss []http2.Setting +} + +func (*outgoingSettings) isTransportResponseFrame() bool { return false } + +type incomingGoAway struct { +} + +func (*incomingGoAway) isTransportResponseFrame() bool { return false } + +type goAway struct { + code http2.ErrCode + debugData []byte + headsUp bool + closeConn bool +} + +func (*goAway) isTransportResponseFrame() bool { return false } + +type ping struct { + ack bool + data [8]byte +} + +func (*ping) isTransportResponseFrame() bool { return true } + +type outFlowControlSizeRequest struct { + resp chan uint32 +} + +func (*outFlowControlSizeRequest) isTransportResponseFrame() bool { return false } + +type outStreamState int + +const ( + active outStreamState = iota + empty + waitingOnStreamQuota +) + +type outStream struct { + id uint32 + state outStreamState + itl *itemList + bytesOutStanding int + wq *writeQuota + + next *outStream + prev *outStream +} + +func (s *outStream) deleteSelf() { + if s.prev != nil { + s.prev.next = s.next + } + if s.next != nil { + s.next.prev = s.prev + } + s.next, s.prev = nil, nil +} + +type outStreamList struct { + // Following are sentinel objects that mark the + // beginning and end of the list. They do not + // contain any item lists. All valid objects are + // inserted in between them. + // This is needed so that an outStream object can + // deleteSelf() in O(1) time without knowing which + // list it belongs to. + head *outStream + tail *outStream +} + +func newOutStreamList() *outStreamList { + head, tail := new(outStream), new(outStream) + head.next = tail + tail.prev = head + return &outStreamList{ + head: head, + tail: tail, + } +} + +func (l *outStreamList) enqueue(s *outStream) { + e := l.tail.prev + e.next = s + s.prev = e + s.next = l.tail + l.tail.prev = s +} + +// remove from the beginning of the list. +func (l *outStreamList) dequeue() *outStream { + b := l.head.next + if b == l.tail { + return nil + } + b.deleteSelf() + return b +} + +// controlBuffer is a way to pass information to loopy. +// Information is passed as specific struct types called control frames. +// A control frame not only represents data, messages or headers to be sent out +// but can also be used to instruct loopy to update its internal state. +// It shouldn't be confused with an HTTP2 frame, although some of the control frames +// like dataFrame and headerFrame do go out on wire as HTTP2 frames. +type controlBuffer struct { + ch chan struct{} + done <-chan struct{} + mu sync.Mutex + consumerWaiting bool + list *itemList + err error + + // transportResponseFrames counts the number of queued items that represent + // the response of an action initiated by the peer. trfChan is created + // when transportResponseFrames >= maxQueuedTransportResponseFrames and is + // closed and nilled when transportResponseFrames drops below the + // threshold. Both fields are protected by mu. 
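+	//
+	// Illustrative lifecycle (a reading of the code below): executeAndPut()
+	// of the frame that reaches maxQueuedTransportResponseFrames stores a new
+	// trfChan, making subsequent throttle() calls block; get() of a response
+	// frame while still at the threshold closes that channel and unblocks them.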
+ transportResponseFrames int + trfChan atomic.Value // *chan struct{} +} + +func newControlBuffer(done <-chan struct{}) *controlBuffer { + return &controlBuffer{ + ch: make(chan struct{}, 1), + list: &itemList{}, + done: done, + } +} + +// throttle blocks if there are too many incomingSettings/cleanupStreams in the +// controlbuf. +func (c *controlBuffer) throttle() { + ch, _ := c.trfChan.Load().(*chan struct{}) + if ch != nil { + select { + case <-*ch: + case <-c.done: + } + } +} + +func (c *controlBuffer) put(it cbItem) error { + _, err := c.executeAndPut(nil, it) + return err +} + +func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it cbItem) (bool, error) { + var wakeUp bool + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return false, c.err + } + if f != nil { + if !f(it) { // f wasn't successful + c.mu.Unlock() + return false, nil + } + } + if c.consumerWaiting { + wakeUp = true + c.consumerWaiting = false + } + c.list.enqueue(it) + if it.isTransportResponseFrame() { + c.transportResponseFrames++ + if c.transportResponseFrames == maxQueuedTransportResponseFrames { + // We are adding the frame that puts us over the threshold; create + // a throttling channel. + ch := make(chan struct{}) + c.trfChan.Store(&ch) + } + } + c.mu.Unlock() + if wakeUp { + select { + case c.ch <- struct{}{}: + default: + } + } + return true, nil +} + +// Note argument f should never be nil. +func (c *controlBuffer) execute(f func(it interface{}) bool, it interface{}) (bool, error) { + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return false, c.err + } + if !f(it) { // f wasn't successful + c.mu.Unlock() + return false, nil + } + c.mu.Unlock() + return true, nil +} + +func (c *controlBuffer) get(block bool) (interface{}, error) { + for { + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return nil, c.err + } + if !c.list.isEmpty() { + h := c.list.dequeue().(cbItem) + if h.isTransportResponseFrame() { + if c.transportResponseFrames == maxQueuedTransportResponseFrames { + // We are removing the frame that put us over the + // threshold; close and clear the throttling channel. + ch := c.trfChan.Load().(*chan struct{}) + close(*ch) + c.trfChan.Store((*chan struct{})(nil)) + } + c.transportResponseFrames-- + } + c.mu.Unlock() + return h, nil + } + if !block { + c.mu.Unlock() + return nil, nil + } + c.consumerWaiting = true + c.mu.Unlock() + select { + case <-c.ch: + case <-c.done: + c.finish() + return nil, ErrConnClosing + } + } +} + +func (c *controlBuffer) finish() { + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return + } + c.err = ErrConnClosing + // There may be headers for streams in the control buffer. + // These streams need to be cleaned out since the transport + // is still not aware of these yet. + for head := c.list.dequeueAll(); head != nil; head = head.next { + hdr, ok := head.it.(*headerFrame) + if !ok { + continue + } + if hdr.onOrphaned != nil { // It will be nil on the server-side. + hdr.onOrphaned(ErrConnClosing) + } + } + c.mu.Unlock() +} + +type side int + +const ( + clientSide side = iota + serverSide +) + +// Loopy receives frames from the control buffer. +// Each frame is handled individually; most of the work done by loopy goes +// into handling data frames. Loopy maintains a queue of active streams, and each +// stream maintains a queue of data frames; as loopy receives data frames +// it gets added to the queue of the relevant stream. 
+// Loopy goes over this list of active streams by processing one node every iteration,
+// thereby closely resembling round-robin scheduling over all streams. While
+// processing a stream, loopy writes out data bytes from this stream capped by the min
+// of http2MaxFrameLen, connection-level flow control and stream-level flow control.
+type loopyWriter struct {
+	side      side
+	cbuf      *controlBuffer
+	sendQuota uint32
+	oiws      uint32 // outbound initial window size.
+	// estdStreams is a map of all established streams that are not cleaned up yet.
+	// On client-side, this is all streams whose headers were sent out.
+	// On server-side, this is all streams whose headers were received.
+	estdStreams map[uint32]*outStream // Established streams.
+	// activeStreams is a linked-list of all streams that have data to send and some
+	// stream-level flow control quota.
+	// Each of these streams internally has a list of data items (and perhaps trailers
+	// on the server-side) to be sent out.
+	activeStreams *outStreamList
+	framer        *framer
+	hBuf          *bytes.Buffer  // The buffer for HPACK encoding.
+	hEnc          *hpack.Encoder // HPACK encoder.
+	bdpEst        *bdpEstimator
+	draining      bool
+
+	// Side-specific handlers
+	ssGoAwayHandler func(*goAway) (bool, error)
+}
+
+func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator) *loopyWriter {
+	var buf bytes.Buffer
+	l := &loopyWriter{
+		side:          s,
+		cbuf:          cbuf,
+		sendQuota:     defaultWindowSize,
+		oiws:          defaultWindowSize,
+		estdStreams:   make(map[uint32]*outStream),
+		activeStreams: newOutStreamList(),
+		framer:        fr,
+		hBuf:          &buf,
+		hEnc:          hpack.NewEncoder(&buf),
+		bdpEst:        bdpEst,
+	}
+	return l
+}
+
+const minBatchSize = 1000
+
+// run should be run in a separate goroutine.
+// It reads control frames from controlBuf and processes them by:
+// 1. Updating loopy's internal state, and/or
+// 2. Writing out HTTP2 frames on the wire.
+//
+// Loopy keeps all active streams with data to send in a linked-list.
+// All streams in the activeStreams linked-list must have both:
+// 1. Data to send, and
+// 2. Stream level flow control quota available.
+//
+// In each iteration of the run loop, other than processing the incoming control
+// frame, loopy calls processData, which processes one node from the activeStreams linked-list.
+// This results in HTTP2 frames being written into an underlying write buffer.
+// When there are no more control frames to read from controlBuf, loopy flushes the write buffer.
+// As an optimization, to increase the batch size for each flush, loopy yields the processor once,
+// if the batch size is too low, to give stream goroutines a chance to fill it up.
+func (l *loopyWriter) run() (err error) {
+	defer func() {
+		if err == ErrConnClosing {
+			// Don't log ErrConnClosing as an error since it happens:
+			// 1. When the connection is closed by some other known issue.
+			// 2. When the user closed the connection.
+			// 3. On a graceful close of the connection.
+			infof("transport: loopyWriter.run returning.
%v", err) + err = nil + } + }() + for { + it, err := l.cbuf.get(true) + if err != nil { + return err + } + if err = l.handle(it); err != nil { + return err + } + if _, err = l.processData(); err != nil { + return err + } + gosched := true + hasdata: + for { + it, err := l.cbuf.get(false) + if err != nil { + return err + } + if it != nil { + if err = l.handle(it); err != nil { + return err + } + if _, err = l.processData(); err != nil { + return err + } + continue hasdata + } + isEmpty, err := l.processData() + if err != nil { + return err + } + if !isEmpty { + continue hasdata + } + if gosched { + gosched = false + if l.framer.writer.offset < minBatchSize { + runtime.Gosched() + continue hasdata + } + } + l.framer.writer.Flush() + break hasdata + + } + } +} + +func (l *loopyWriter) outgoingWindowUpdateHandler(w *outgoingWindowUpdate) error { + return l.framer.fr.WriteWindowUpdate(w.streamID, w.increment) +} + +func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) error { + // Otherwise update the quota. + if w.streamID == 0 { + l.sendQuota += w.increment + return nil + } + // Find the stream and update it. + if str, ok := l.estdStreams[w.streamID]; ok { + str.bytesOutStanding -= int(w.increment) + if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota > 0 && str.state == waitingOnStreamQuota { + str.state = active + l.activeStreams.enqueue(str) + return nil + } + } + return nil +} + +func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error { + return l.framer.fr.WriteSettings(s.ss...) +} + +func (l *loopyWriter) incomingSettingsHandler(s *incomingSettings) error { + if err := l.applySettings(s.ss); err != nil { + return err + } + return l.framer.fr.WriteSettingsAck() +} + +func (l *loopyWriter) registerStreamHandler(h *registerStream) error { + str := &outStream{ + id: h.streamID, + state: empty, + itl: &itemList{}, + wq: h.wq, + } + l.estdStreams[h.streamID] = str + return nil +} + +func (l *loopyWriter) headerHandler(h *headerFrame) error { + if l.side == serverSide { + str, ok := l.estdStreams[h.streamID] + if !ok { + warningf("transport: loopy doesn't recognize the stream: %d", h.streamID) + return nil + } + // Case 1.A: Server is responding back with headers. + if !h.endStream { + return l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite) + } + // else: Case 1.B: Server wants to close stream. + + if str.state != empty { // either active or waiting on stream quota. + // add it str's list of items. + str.itl.enqueue(h) + return nil + } + if err := l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite); err != nil { + return err + } + return l.cleanupStreamHandler(h.cleanup) + } + // Case 2: Client wants to originate stream. + str := &outStream{ + id: h.streamID, + state: empty, + itl: &itemList{}, + wq: h.wq, + } + str.itl.enqueue(h) + return l.originateStream(str) +} + +func (l *loopyWriter) originateStream(str *outStream) error { + hdr := str.itl.dequeue().(*headerFrame) + if err := hdr.initStream(str.id); err != nil { + if err == ErrConnClosing { + return err + } + // Other errors(errStreamDrain) need not close transport. 
+		return nil
+	}
+	if err := l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil {
+		return err
+	}
+	l.estdStreams[str.id] = str
+	return nil
+}
+
+func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.HeaderField, onWrite func()) error {
+	if onWrite != nil {
+		onWrite()
+	}
+	l.hBuf.Reset()
+	for _, f := range hf {
+		if err := l.hEnc.WriteField(f); err != nil {
+			warningf("transport: loopyWriter.writeHeader encountered error while encoding headers: %v", err)
+		}
+	}
+	var (
+		err               error
+		endHeaders, first bool
+	)
+	first = true
+	for !endHeaders {
+		size := l.hBuf.Len()
+		if size > http2MaxFrameLen {
+			size = http2MaxFrameLen
+		} else {
+			endHeaders = true
+		}
+		if first {
+			first = false
+			err = l.framer.fr.WriteHeaders(http2.HeadersFrameParam{
+				StreamID:      streamID,
+				BlockFragment: l.hBuf.Next(size),
+				EndStream:     endStream,
+				EndHeaders:    endHeaders,
+			})
+		} else {
+			err = l.framer.fr.WriteContinuation(
+				streamID,
+				endHeaders,
+				l.hBuf.Next(size),
+			)
+		}
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (l *loopyWriter) preprocessData(df *dataFrame) error {
+	str, ok := l.estdStreams[df.streamID]
+	if !ok {
+		return nil
+	}
+	// If we got data for a stream, it means that the
+	// stream was originated and the headers were sent out.
+	str.itl.enqueue(df)
+	if str.state == empty {
+		str.state = active
+		l.activeStreams.enqueue(str)
+	}
+	return nil
+}
+
+func (l *loopyWriter) pingHandler(p *ping) error {
+	if !p.ack {
+		l.bdpEst.timesnap(p.data)
+	}
+	return l.framer.fr.WritePing(p.ack, p.data)
+}
+
+func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) error {
+	o.resp <- l.sendQuota
+	return nil
+}
+
+func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error {
+	c.onWrite()
+	if str, ok := l.estdStreams[c.streamID]; ok {
+		// On the server side it could be a trailers-only response or
+		// a RST_STREAM before stream initialization, thus the stream might
+		// not be established yet.
+		delete(l.estdStreams, c.streamID)
+		str.deleteSelf()
+	}
+	if c.rst { // If RST_STREAM needs to be sent.
+		if err := l.framer.fr.WriteRSTStream(c.streamID, c.rstCode); err != nil {
+			return err
+		}
+	}
+	if l.side == clientSide && l.draining && len(l.estdStreams) == 0 {
+		return ErrConnClosing
+	}
+	return nil
+}
+
+func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error {
+	if l.side == clientSide {
+		l.draining = true
+		if len(l.estdStreams) == 0 {
+			return ErrConnClosing
+		}
+	}
+	return nil
+}
+
+func (l *loopyWriter) goAwayHandler(g *goAway) error {
+	// Handling of outgoing GoAway is very specific to the side.
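+	// Editor's note: the client installs no ssGoAwayHandler, so this is a
+	// no-op on the client side; the server-side handler is what can emit a
+	// two-step GOAWAY (a headsUp frame followed by the real one, per the
+	// goAway struct's headsUp field).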
+	if l.ssGoAwayHandler != nil {
+		draining, err := l.ssGoAwayHandler(g)
+		if err != nil {
+			return err
+		}
+		l.draining = draining
+	}
+	return nil
+}
+
+func (l *loopyWriter) handle(i interface{}) error {
+	switch i := i.(type) {
+	case *incomingWindowUpdate:
+		return l.incomingWindowUpdateHandler(i)
+	case *outgoingWindowUpdate:
+		return l.outgoingWindowUpdateHandler(i)
+	case *incomingSettings:
+		return l.incomingSettingsHandler(i)
+	case *outgoingSettings:
+		return l.outgoingSettingsHandler(i)
+	case *headerFrame:
+		return l.headerHandler(i)
+	case *registerStream:
+		return l.registerStreamHandler(i)
+	case *cleanupStream:
+		return l.cleanupStreamHandler(i)
+	case *incomingGoAway:
+		return l.incomingGoAwayHandler(i)
+	case *dataFrame:
+		return l.preprocessData(i)
+	case *ping:
+		return l.pingHandler(i)
+	case *goAway:
+		return l.goAwayHandler(i)
+	case *outFlowControlSizeRequest:
+		return l.outFlowControlSizeRequestHandler(i)
+	default:
+		return fmt.Errorf("transport: unknown control message type %T", i)
+	}
+}
+
+func (l *loopyWriter) applySettings(ss []http2.Setting) error {
+	for _, s := range ss {
+		switch s.ID {
+		case http2.SettingInitialWindowSize:
+			o := l.oiws
+			l.oiws = s.Val
+			if o < l.oiws {
+				// If the new limit is greater, make all depleted streams active.
+				for _, stream := range l.estdStreams {
+					if stream.state == waitingOnStreamQuota {
+						stream.state = active
+						l.activeStreams.enqueue(stream)
+					}
+				}
+			}
+		case http2.SettingHeaderTableSize:
+			updateHeaderTblSize(l.hEnc, s.Val)
+		}
+	}
+	return nil
+}
+
+// processData removes the first stream from active streams, writes out at most 16KB
+// of its data and then puts it at the end of activeStreams if there's still more data
+// to be sent and the stream has some stream-level flow control quota left.
+func (l *loopyWriter) processData() (bool, error) {
+	if l.sendQuota == 0 {
+		return true, nil
+	}
+	str := l.activeStreams.dequeue() // Remove the first stream.
+	if str == nil {
+		return true, nil
+	}
+	dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item in this stream.
+	// A data item is represented by a dataFrame, since it later translates into
+	// multiple HTTP2 data frames.
+	// Every dataFrame has two buffers: h, which keeps the grpc-message header, and d, which is the actual data.
+	// As an optimization to keep wire traffic low, data from d is copied to h to make the frame as big as the
+	// maximum possible HTTP2 frame size.
+
+	if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // Empty data frame
+		// Client sends out an empty data frame with endStream = true.
+		if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil {
+			return false, err
+		}
+		str.itl.dequeue() // remove the empty data item from stream
+		if str.itl.isEmpty() {
+			str.state = empty
+		} else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers.
+			if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil {
+				return false, err
+			}
+			if err := l.cleanupStreamHandler(trailer.cleanup); err != nil {
+				return false, err
+			}
+		} else {
+			l.activeStreams.enqueue(str)
+		}
+		return false, nil
+	}
+	var (
+		idx int
+		buf []byte
+	)
+	if len(dataItem.h) != 0 { // data header has not been written out yet.
+		buf = dataItem.h
+	} else {
+		idx = 1
+		buf = dataItem.d
+	}
+	size := http2MaxFrameLen
+	if len(buf) < size {
+		size = len(buf)
+	}
+	if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { // stream-level flow control.
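+		// Worked example (editor's sketch): with oiws = 65535 and
+		// bytesOutStanding = 65535, strQuota is 0, so the stream is parked
+		// here until an incomingWindowUpdate lowers bytesOutStanding.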
+		str.state = waitingOnStreamQuota
+		return false, nil
+	} else if strQuota < size {
+		size = strQuota
+	}
+
+	if l.sendQuota < uint32(size) { // connection-level flow control.
+		size = int(l.sendQuota)
+	}
+	// Now that the outgoing flow control checks have passed, replenish str's write quota.
+	str.wq.replenish(size)
+	var endStream bool
+	// Set endStream if this is the last data message on this stream and all of it can be written in this iteration.
+	if dataItem.endStream && size == len(buf) {
+		// buf contains either data or it contains header but data is empty.
+		if idx == 1 || len(dataItem.d) == 0 {
+			endStream = true
+		}
+	}
+	if dataItem.onEachWrite != nil {
+		dataItem.onEachWrite()
+	}
+	if err := l.framer.fr.WriteData(dataItem.streamID, endStream, buf[:size]); err != nil {
+		return false, err
+	}
+	buf = buf[size:]
+	str.bytesOutStanding += size
+	l.sendQuota -= uint32(size)
+	if idx == 0 {
+		dataItem.h = buf
+	} else {
+		dataItem.d = buf
+	}
+
+	if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // All the data from that message was written out.
+		str.itl.dequeue()
+	}
+	if str.itl.isEmpty() {
+		str.state = empty
+	} else if trailer, ok := str.itl.peek().(*headerFrame); ok { // The next item is trailers.
+		if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil {
+			return false, err
+		}
+		if err := l.cleanupStreamHandler(trailer.cleanup); err != nil {
+			return false, err
+		}
+	} else if int(l.oiws)-str.bytesOutStanding <= 0 { // Ran out of stream quota.
+		str.state = waitingOnStreamQuota
+	} else { // Otherwise add it back to the list of active streams.
+		l.activeStreams.enqueue(str)
+	}
+	return false, nil
+}
diff --git a/vendor/google.golang.org/grpc/internal/transport/defaults.go b/vendor/google.golang.org/grpc/internal/transport/defaults.go
new file mode 100644
index 00000000..9fa306b2
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/defaults.go
@@ -0,0 +1,49 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transport
+
+import (
+	"math"
+	"time"
+)
+
+const (
+	// The default flow control window size per the HTTP2 spec.
+	defaultWindowSize = 65535
+	// The initial window size for flow control.
+	initialWindowSize             = defaultWindowSize // for an RPC
+	infinity                      = time.Duration(math.MaxInt64)
+	defaultClientKeepaliveTime    = infinity
+	defaultClientKeepaliveTimeout = 20 * time.Second
+	defaultMaxStreamsClient       = 100
+	defaultMaxConnectionIdle      = infinity
+	defaultMaxConnectionAge       = infinity
+	defaultMaxConnectionAgeGrace  = infinity
+	defaultServerKeepaliveTime    = 2 * time.Hour
+	defaultServerKeepaliveTimeout = 20 * time.Second
+	defaultKeepalivePolicyMinTime = 5 * time.Minute
+	// Max window limit set by the HTTP2 spec.
+	maxWindowSize = math.MaxInt32
+	// defaultWriteQuota is the default number of data
+	// bytes that each stream can schedule before some of it is
+	// flushed out.
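+	// (For example, a stream calling wq.get() can schedule up to 64KiB of
+	// message bytes before blocking until loopy replenishes the quota; see
+	// writeQuota in flowcontrol.go. Editor's illustration.)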
+	defaultWriteQuota              = 64 * 1024
+	defaultClientMaxHeaderListSize = uint32(16 << 20)
+	defaultServerMaxHeaderListSize = uint32(16 << 20)
+)
diff --git a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
new file mode 100644
index 00000000..f262edd8
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
@@ -0,0 +1,217 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transport
+
+import (
+	"fmt"
+	"math"
+	"sync"
+	"sync/atomic"
+)
+
+// writeQuota is a soft limit on the amount of data a stream can
+// schedule before some of it is written out.
+type writeQuota struct {
+	quota int32
+	// get waits on reads from ch when quota goes less than or equal to zero.
+	// replenish writes on ch when quota goes positive again.
+	ch chan struct{}
+	// done is triggered in the error case.
+	done <-chan struct{}
+	// replenish is called by loopyWriter to give quota back.
+	// It is implemented as a field so that it can be updated
+	// by tests.
+	replenish func(n int)
+}
+
+func newWriteQuota(sz int32, done <-chan struct{}) *writeQuota {
+	w := &writeQuota{
+		quota: sz,
+		ch:    make(chan struct{}, 1),
+		done:  done,
+	}
+	w.replenish = w.realReplenish
+	return w
+}
+
+func (w *writeQuota) get(sz int32) error {
+	for {
+		if atomic.LoadInt32(&w.quota) > 0 {
+			atomic.AddInt32(&w.quota, -sz)
+			return nil
+		}
+		select {
+		case <-w.ch:
+			continue
+		case <-w.done:
+			return errStreamDone
+		}
+	}
+}
+
+func (w *writeQuota) realReplenish(n int) {
+	sz := int32(n)
+	a := atomic.AddInt32(&w.quota, sz)
+	b := a - sz
+	if b <= 0 && a > 0 {
+		select {
+		case w.ch <- struct{}{}:
+		default:
+		}
+	}
+}
+
+type trInFlow struct {
+	limit               uint32
+	unacked             uint32
+	effectiveWindowSize uint32
+}
+
+func (f *trInFlow) newLimit(n uint32) uint32 {
+	d := n - f.limit
+	f.limit = n
+	f.updateEffectiveWindowSize()
+	return d
+}
+
+func (f *trInFlow) onData(n uint32) uint32 {
+	f.unacked += n
+	if f.unacked >= f.limit/4 {
+		w := f.unacked
+		f.unacked = 0
+		f.updateEffectiveWindowSize()
+		return w
+	}
+	f.updateEffectiveWindowSize()
+	return 0
+}
+
+func (f *trInFlow) reset() uint32 {
+	w := f.unacked
+	f.unacked = 0
+	f.updateEffectiveWindowSize()
+	return w
+}
+
+func (f *trInFlow) updateEffectiveWindowSize() {
+	atomic.StoreUint32(&f.effectiveWindowSize, f.limit-f.unacked)
+}
+
+func (f *trInFlow) getSize() uint32 {
+	return atomic.LoadUint32(&f.effectiveWindowSize)
+}
+
+// TODO(mmukhi): Simplify this code.
+// inFlow deals with inbound flow control.
+type inFlow struct {
+	mu sync.Mutex
+	// The inbound flow control limit for pending data.
+	limit uint32
+	// pendingData is the overall data which has been received but not been
+	// consumed by applications.
+	pendingData uint32
+	// The amount of data the application has consumed but grpc has not sent
+	// a window update for. Used to reduce window update frequency.
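+	// (E.g. with limit = 64KiB, onRead below batches window updates until
+	// limit/4 = 16KiB has been consumed by the application. Editor's
+	// illustration.)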
+	pendingUpdate uint32
+	// delta is the extra window update given by the receiver when an application
+	// is reading data bigger in size than the inFlow limit.
+	delta uint32
+}
+
+// newLimit updates the inflow window to a new value n.
+// It assumes that n is always greater than the old limit.
+func (f *inFlow) newLimit(n uint32) uint32 {
+	f.mu.Lock()
+	d := n - f.limit
+	f.limit = n
+	f.mu.Unlock()
+	return d
+}
+
+func (f *inFlow) maybeAdjust(n uint32) uint32 {
+	if n > uint32(math.MaxInt32) {
+		n = uint32(math.MaxInt32)
+	}
+	f.mu.Lock()
+	defer f.mu.Unlock()
+	// estSenderQuota is the receiver's view of the maximum number of bytes the sender
+	// can send without a window update.
+	estSenderQuota := int32(f.limit - (f.pendingData + f.pendingUpdate))
+	// estUntransmittedData is the maximum number of bytes the sender might not have put
+	// on the wire yet. A value of 0 or less means that we have already received all or
+	// more bytes than the application is requesting to read.
+	estUntransmittedData := int32(n - f.pendingData) // Casting into int32 since it could be negative.
+	// This implies that unless we send a window update, the sender won't be able to send all the bytes
+	// for this message. Therefore we must send an update over the limit since there's an active read
+	// request from the application.
+	if estUntransmittedData > estSenderQuota {
+		// Sender's window shouldn't go more than 2^31 - 1 as specified in the HTTP/2 spec.
+		if f.limit+n > maxWindowSize {
+			f.delta = maxWindowSize - f.limit
+		} else {
+			// Send a window update for the whole message and not just the difference between
+			// estUntransmittedData and estSenderQuota. This will be helpful in case the message
+			// is padded; we will fall back on the current available window (at least a 1/4th of the limit).
+			f.delta = n
+		}
+		return f.delta
+	}
+	return 0
+}
+
+// onData is invoked when some data frame is received. It updates pendingData.
+func (f *inFlow) onData(n uint32) error {
+	f.mu.Lock()
+	f.pendingData += n
+	if f.pendingData+f.pendingUpdate > f.limit+f.delta {
+		limit := f.limit
+		rcvd := f.pendingData + f.pendingUpdate
+		f.mu.Unlock()
+		return fmt.Errorf("received %d-bytes data exceeding the limit %d bytes", rcvd, limit)
+	}
+	f.mu.Unlock()
+	return nil
+}
+
+// onRead is invoked when the application reads the data. It returns the window size
+// to be sent to the peer.
+func (f *inFlow) onRead(n uint32) uint32 {
+	f.mu.Lock()
+	if f.pendingData == 0 {
+		f.mu.Unlock()
+		return 0
+	}
+	f.pendingData -= n
+	if n > f.delta {
+		n -= f.delta
+		f.delta = 0
+	} else {
+		f.delta -= n
+		n = 0
+	}
+	f.pendingUpdate += n
+	if f.pendingUpdate >= f.limit/4 {
+		wu := f.pendingUpdate
+		f.pendingUpdate = 0
+		f.mu.Unlock()
+		return wu
+	}
+	f.mu.Unlock()
+	return 0
+}
diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
new file mode 100644
index 00000000..fc44e976
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
@@ -0,0 +1,462 @@
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// This file is the implementation of a gRPC server using HTTP/2 which +// uses the standard Go http2 Server implementation (via the +// http.Handler interface), rather than speaking low-level HTTP/2 +// frames itself. It is the implementation of *grpc.Server.ServeHTTP. + +package transport + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "net" + "net/http" + "strings" + "sync" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/http2" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" +) + +// NewServerHandlerTransport returns a ServerTransport handling gRPC +// from inside an http.Handler. It requires that the http Server +// supports HTTP/2. +func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats stats.Handler) (ServerTransport, error) { + if r.ProtoMajor != 2 { + return nil, errors.New("gRPC requires HTTP/2") + } + if r.Method != "POST" { + return nil, errors.New("invalid gRPC request method") + } + contentType := r.Header.Get("Content-Type") + // TODO: do we assume contentType is lowercase? we did before + contentSubtype, validContentType := contentSubtype(contentType) + if !validContentType { + return nil, errors.New("invalid gRPC request content-type") + } + if _, ok := w.(http.Flusher); !ok { + return nil, errors.New("gRPC requires a ResponseWriter supporting http.Flusher") + } + + st := &serverHandlerTransport{ + rw: w, + req: r, + closedCh: make(chan struct{}), + writes: make(chan func()), + contentType: contentType, + contentSubtype: contentSubtype, + stats: stats, + } + + if v := r.Header.Get("grpc-timeout"); v != "" { + to, err := decodeTimeout(v) + if err != nil { + return nil, status.Errorf(codes.Internal, "malformed time-out: %v", err) + } + st.timeoutSet = true + st.timeout = to + } + + metakv := []string{"content-type", contentType} + if r.Host != "" { + metakv = append(metakv, ":authority", r.Host) + } + for k, vv := range r.Header { + k = strings.ToLower(k) + if isReservedHeader(k) && !isWhitelistedHeader(k) { + continue + } + for _, v := range vv { + v, err := decodeMetadataHeader(k, v) + if err != nil { + return nil, status.Errorf(codes.Internal, "malformed binary metadata: %v", err) + } + metakv = append(metakv, k, v) + } + } + st.headerMD = metadata.Pairs(metakv...) + + return st, nil +} + +// serverHandlerTransport is an implementation of ServerTransport +// which replies to exactly one gRPC request (exactly one HTTP request), +// using the net/http.Handler interface. This http.Handler is guaranteed +// at this point to be speaking over HTTP/2, so it's able to speak valid +// gRPC. +type serverHandlerTransport struct { + rw http.ResponseWriter + req *http.Request + timeoutSet bool + timeout time.Duration + + headerMD metadata.MD + + closeOnce sync.Once + closedCh chan struct{} // closed on Close + + // writes is a channel of code to run serialized in the + // ServeHTTP (HandleStreams) goroutine. 
 The channel is closed
+	// when WriteStatus is called.
+	writes chan func()
+
+	// block concurrent WriteStatus calls
+	// e.g. grpc/(*serverStream).SendMsg/RecvMsg
+	writeStatusMu sync.Mutex
+
+	// we just mirror the request content-type
+	contentType string
+	// we store both contentType and contentSubtype so we don't keep recreating them
+	// TODO make sure this is consistent across handler_server and http2_server
+	contentSubtype string
+
+	stats stats.Handler
+}
+
+func (ht *serverHandlerTransport) Close() error {
+	ht.closeOnce.Do(ht.closeCloseChanOnce)
+	return nil
+}
+
+func (ht *serverHandlerTransport) closeCloseChanOnce() { close(ht.closedCh) }
+
+func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) }
+
+// strAddr is a net.Addr backed by either a TCP "ip:port" string, or
+// the empty string if unknown.
+type strAddr string
+
+func (a strAddr) Network() string {
+	if a != "" {
+		// Per the documentation on net/http.Request.RemoteAddr, if this is
+		// set, it's set to the IP:port of the peer (hence, TCP):
+		// https://golang.org/pkg/net/http/#Request
+		//
+		// If we want to support Unix sockets later, we can
+		// add our own grpc-specific convention within the
+		// grpc codebase to set RemoteAddr to a different
+		// format, or probably better: we can attach it to the
+		// context and use that from serverHandlerTransport.RemoteAddr.
+		return "tcp"
+	}
+	return ""
+}
+
+func (a strAddr) String() string { return string(a) }
+
+// do runs fn in the ServeHTTP goroutine.
+func (ht *serverHandlerTransport) do(fn func()) error {
+	select {
+	case <-ht.closedCh:
+		return ErrConnClosing
+	case ht.writes <- fn:
+		return nil
+	}
+}
+
+func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) error {
+	ht.writeStatusMu.Lock()
+	defer ht.writeStatusMu.Unlock()
+
+	headersWritten := s.updateHeaderSent()
+	err := ht.do(func() {
+		if !headersWritten {
+			ht.writePendingHeaders(s)
+		}
+
+		// And flush, in case no header or body has been sent yet.
+		// This forces a separation of headers and trailers if this is the
+		// first call (for example, in end2end tests' TestNoService).
+		ht.rw.(http.Flusher).Flush()
+
+		h := ht.rw.Header()
+		h.Set("Grpc-Status", fmt.Sprintf("%d", st.Code()))
+		if m := st.Message(); m != "" {
+			h.Set("Grpc-Message", encodeGrpcMessage(m))
+		}
+
+		if p := st.Proto(); p != nil && len(p.Details) > 0 {
+			stBytes, err := proto.Marshal(p)
+			if err != nil {
+				// TODO: return error instead, when callers are able to handle it.
+				panic(err)
+			}
+
+			h.Set("Grpc-Status-Details-Bin", encodeBinHeader(stBytes))
+		}
+
+		if md := s.Trailer(); len(md) > 0 {
+			for k, vv := range md {
+				// Clients don't tolerate reading restricted headers after some non-restricted ones were sent.
+				if isReservedHeader(k) {
+					continue
+				}
+				for _, v := range vv {
+					// http2 ResponseWriter mechanism to send undeclared Trailers after
+					// the headers have possibly been written.
+					h.Add(http2.TrailerPrefix+k, encodeMetadataHeader(k, v))
+				}
+			}
+		}
+	})
+
+	if err == nil { // transport has not been closed
+		if ht.stats != nil {
+			// Note: The trailer fields are compressed with hpack after this call returns.
+			// No WireLength field is set here.
+ ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{ + Trailer: s.trailer.Copy(), + }) + } + } + ht.Close() + return err +} + +// writePendingHeaders sets common and custom headers on the first +// write call (Write, WriteHeader, or WriteStatus) +func (ht *serverHandlerTransport) writePendingHeaders(s *Stream) { + ht.writeCommonHeaders(s) + ht.writeCustomHeaders(s) +} + +// writeCommonHeaders sets common headers on the first write +// call (Write, WriteHeader, or WriteStatus). +func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { + h := ht.rw.Header() + h["Date"] = nil // suppress Date to make tests happy; TODO: restore + h.Set("Content-Type", ht.contentType) + + // Predeclare trailers we'll set later in WriteStatus (after the body). + // This is a SHOULD in the HTTP RFC, and the way you add (known) + // Trailers per the net/http.ResponseWriter contract. + // See https://golang.org/pkg/net/http/#ResponseWriter + // and https://golang.org/pkg/net/http/#example_ResponseWriter_trailers + h.Add("Trailer", "Grpc-Status") + h.Add("Trailer", "Grpc-Message") + h.Add("Trailer", "Grpc-Status-Details-Bin") + + if s.sendCompress != "" { + h.Set("Grpc-Encoding", s.sendCompress) + } +} + +// writeCustomHeaders sets custom headers set on the stream via SetHeader +// on the first write call (Write, WriteHeader, or WriteStatus). +func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) { + h := ht.rw.Header() + + s.hdrMu.Lock() + for k, vv := range s.header { + if isReservedHeader(k) { + continue + } + for _, v := range vv { + h.Add(k, encodeMetadataHeader(k, v)) + } + } + + s.hdrMu.Unlock() +} + +func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { + headersWritten := s.updateHeaderSent() + return ht.do(func() { + if !headersWritten { + ht.writePendingHeaders(s) + } + ht.rw.Write(hdr) + ht.rw.Write(data) + ht.rw.(http.Flusher).Flush() + }) +} + +func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { + if err := s.SetHeader(md); err != nil { + return err + } + + headersWritten := s.updateHeaderSent() + err := ht.do(func() { + if !headersWritten { + ht.writePendingHeaders(s) + } + + ht.rw.WriteHeader(200) + ht.rw.(http.Flusher).Flush() + }) + + if err == nil { + if ht.stats != nil { + // Note: The header fields are compressed with hpack after this call returns. + // No WireLength field is set here. + ht.stats.HandleRPC(s.Context(), &stats.OutHeader{ + Header: md.Copy(), + Compression: s.sendCompress, + }) + } + } + return err +} + +func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), traceCtx func(context.Context, string) context.Context) { + // With this transport type there will be exactly 1 stream: this HTTP request. + + ctx := ht.req.Context() + var cancel context.CancelFunc + if ht.timeoutSet { + ctx, cancel = context.WithTimeout(ctx, ht.timeout) + } else { + ctx, cancel = context.WithCancel(ctx) + } + + // requestOver is closed when the status has been written via WriteStatus. 
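+	// Editor's sketch of the shutdown paths: WriteStatus closes requestOver,
+	// Close closes closedCh, and the request context can expire; any of the
+	// three cancels ctx below and tears down this single-stream transport.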
+	requestOver := make(chan struct{})
+	go func() {
+		select {
+		case <-requestOver:
+		case <-ht.closedCh:
+		case <-ht.req.Context().Done():
+		}
+		cancel()
+		ht.Close()
+	}()
+
+	req := ht.req
+
+	s := &Stream{
+		id:             0, // irrelevant
+		requestRead:    func(int) {},
+		cancel:         cancel,
+		buf:            newRecvBuffer(),
+		st:             ht,
+		method:         req.URL.Path,
+		recvCompress:   req.Header.Get("grpc-encoding"),
+		contentSubtype: ht.contentSubtype,
+	}
+	pr := &peer.Peer{
+		Addr: ht.RemoteAddr(),
+	}
+	if req.TLS != nil {
+		pr.AuthInfo = credentials.TLSInfo{State: *req.TLS, CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}}
+	}
+	ctx = metadata.NewIncomingContext(ctx, ht.headerMD)
+	s.ctx = peer.NewContext(ctx, pr)
+	if ht.stats != nil {
+		s.ctx = ht.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method})
+		inHeader := &stats.InHeader{
+			FullMethod:  s.method,
+			RemoteAddr:  ht.RemoteAddr(),
+			Compression: s.recvCompress,
+		}
+		ht.stats.HandleRPC(s.ctx, inHeader)
+	}
+	s.trReader = &transportReader{
+		reader:        &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}},
+		windowHandler: func(int) {},
+	}
+
+	// readerDone is closed when the Body.Read-ing goroutine exits.
+	readerDone := make(chan struct{})
+	go func() {
+		defer close(readerDone)
+
+		// TODO: minimize garbage, optimize recvBuffer code/ownership
+		const readSize = 8196
+		for buf := make([]byte, readSize); ; {
+			n, err := req.Body.Read(buf)
+			if n > 0 {
+				s.buf.put(recvMsg{buffer: bytes.NewBuffer(buf[:n:n])})
+				buf = buf[n:]
+			}
+			if err != nil {
+				s.buf.put(recvMsg{err: mapRecvMsgError(err)})
+				return
+			}
+			if len(buf) == 0 {
+				buf = make([]byte, readSize)
+			}
+		}
+	}()
+
+	// startStream is provided by the *grpc.Server's serveStreams.
+	// It starts a goroutine serving s and exits immediately.
+	// The goroutine that is started is the one that then calls
+	// into ht, calling WriteHeader, Write, WriteStatus, Close, etc.
+	startStream(s)
+
+	ht.runStream()
+	close(requestOver)
+
+	// Wait for reading goroutine to finish.
+	req.Body.Close()
+	<-readerDone
+}
+
+func (ht *serverHandlerTransport) runStream() {
+	for {
+		select {
+		case fn := <-ht.writes:
+			fn()
+		case <-ht.closedCh:
+			return
+		}
+	}
+}
+
+func (ht *serverHandlerTransport) IncrMsgSent() {}
+
+func (ht *serverHandlerTransport) IncrMsgRecv() {}
+
+func (ht *serverHandlerTransport) Drain() {
+	panic("Drain() is not implemented")
+}
+
+// mapRecvMsgError maps a non-nil err to the appropriate
+// error value as expected by callers of *grpc.parser.recvMsg.
+// In particular, it can only be:
+//   * io.EOF
+//   * io.ErrUnexpectedEOF
+//   * of type transport.ConnectionError
+//   * an error from the status package
+func mapRecvMsgError(err error) error {
+	if err == io.EOF || err == io.ErrUnexpectedEOF {
+		return err
+	}
+	if se, ok := err.(http2.StreamError); ok {
+		if code, ok := http2ErrConvTab[se.Code]; ok {
+			return status.Error(code, se.Error())
+		}
+	}
+	if strings.Contains(err.Error(), "body closed by handler") {
+		return status.Error(codes.Canceled, err.Error())
+	}
+	return connectionErrorf(true, err, err.Error())
+}
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
new file mode 100644
index 00000000..1cc586f7
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
@@ -0,0 +1,1462 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transport
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"math"
+	"net"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"golang.org/x/net/http2"
+	"golang.org/x/net/http2/hpack"
+
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/internal"
+	"google.golang.org/grpc/internal/channelz"
+	"google.golang.org/grpc/internal/syscall"
+	"google.golang.org/grpc/keepalive"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/peer"
+	"google.golang.org/grpc/stats"
+	"google.golang.org/grpc/status"
+)
+
+// clientConnectionCounter counts the number of connections a client has
+// initiated (equal to the number of http2Clients created). Must be accessed
+// atomically.
+var clientConnectionCounter uint64
+
+// http2Client implements the ClientTransport interface with HTTP2.
+type http2Client struct {
+	lastRead   int64 // Keep this field 64-bit aligned. Accessed atomically.
+	ctx        context.Context
+	cancel     context.CancelFunc
+	ctxDone    <-chan struct{} // Cache the ctx.Done() chan.
+	userAgent  string
+	md         interface{}
+	conn       net.Conn // underlying communication channel
+	loopy      *loopyWriter
+	remoteAddr net.Addr
+	localAddr  net.Addr
+	authInfo   credentials.AuthInfo // auth info about the connection
+
+	readerDone chan struct{} // sync point to enable testing.
+	writerDone chan struct{} // sync point to enable testing.
+	// goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor)
+	// that the server sent GoAway on this transport.
+	goAway chan struct{}
+
+	framer *framer
+	// controlBuf delivers all the control related tasks (e.g., window
+	// updates, reset streams, and various settings) to the controller.
+	controlBuf *controlBuffer
+	fc         *trInFlow
+	// The scheme used: https if TLS is on, http otherwise.
+	scheme string
+
+	isSecure bool
+
+	perRPCCreds []credentials.PerRPCCredentials
+
+	kp               keepalive.ClientParameters
+	keepaliveEnabled bool
+
+	statsHandler stats.Handler
+
+	initialWindowSize int32
+
+	// configured by peer through SETTINGS_MAX_HEADER_LIST_SIZE
+	maxSendHeaderListSize *uint32
+
+	bdpEst *bdpEstimator
+	// onPrefaceReceipt is a callback that the client transport calls upon
+	// receiving the server preface to signal that a successful HTTP2
+	// connection was established.
+	onPrefaceReceipt func()
+
+	maxConcurrentStreams  uint32
+	streamQuota           int64
+	streamsQuotaAvailable chan struct{}
+	waitingStreams        uint32
+	nextID                uint32
+
+	mu            sync.Mutex // guard the following variables
+	state         transportState
+	activeStreams map[uint32]*Stream
+	// prevGoAwayID records the Last-Stream-ID in the previous GOAWAY frame.
+	prevGoAwayID uint32
+	// goAwayReason records the http2.ErrCode and debug data received with the
+	// GoAway frame.
+	goAwayReason GoAwayReason
+	// A condition variable used to signal when the keepalive goroutine should
+	// go dormant.
The condition for dormancy is based on the number of active + // streams and the `PermitWithoutStream` keepalive client parameter. And + // since the number of active streams is guarded by the above mutex, we use + // the same for this condition variable as well. + kpDormancyCond *sync.Cond + // A boolean to track whether the keepalive goroutine is dormant or not. + // This is checked before attempting to signal the above condition + // variable. + kpDormant bool + + // Fields below are for channelz metric collection. + channelzID int64 // channelz unique identification number + czData *channelzData + + onGoAway func(GoAwayReason) + onClose func() + + bufferPool *bufferPool + + connectionID uint64 +} + +func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr string) (net.Conn, error) { + if fn != nil { + return fn(ctx, addr) + } + return (&net.Dialer{}).DialContext(ctx, "tcp", addr) +} + +func isTemporary(err error) bool { + switch err := err.(type) { + case interface { + Temporary() bool + }: + return err.Temporary() + case interface { + Timeout() bool + }: + // Timeouts may be resolved upon retry, and are thus treated as + // temporary. + return err.Timeout() + } + return true +} + +// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 +// and starts to receive messages on it. Non-nil error returns if construction +// fails. +func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) { + scheme := "http" + ctx, cancel := context.WithCancel(ctx) + defer func() { + if err != nil { + cancel() + } + }() + + conn, err := dial(connectCtx, opts.Dialer, addr.Addr) + if err != nil { + if opts.FailOnNonTempDialError { + return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err) + } + return nil, connectionErrorf(true, err, "transport: Error while dialing %v", err) + } + // Any further errors will close the underlying connection + defer func(conn net.Conn) { + if err != nil { + conn.Close() + } + }(conn) + kp := opts.KeepaliveParams + // Validate keepalive parameters. 
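+	// (E.g. an all-zero keepalive.ClientParameters{} becomes {Time: infinity,
+	// Timeout: 20s} here, which leaves keepaliveEnabled false below.
+	// Editor's note.)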
+ if kp.Time == 0 { + kp.Time = defaultClientKeepaliveTime + } + if kp.Timeout == 0 { + kp.Timeout = defaultClientKeepaliveTimeout + } + keepaliveEnabled := false + if kp.Time != infinity { + if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil { + return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err) + } + keepaliveEnabled = true + } + var ( + isSecure bool + authInfo credentials.AuthInfo + ) + transportCreds := opts.TransportCredentials + perRPCCreds := opts.PerRPCCredentials + + if b := opts.CredsBundle; b != nil { + if t := b.TransportCredentials(); t != nil { + transportCreds = t + } + if t := b.PerRPCCredentials(); t != nil { + perRPCCreds = append(perRPCCreds, t) + } + } + if transportCreds != nil { + scheme = "https" + conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.Authority, conn) + if err != nil { + return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err) + } + isSecure = true + } + dynamicWindow := true + icwz := int32(initialWindowSize) + if opts.InitialConnWindowSize >= defaultWindowSize { + icwz = opts.InitialConnWindowSize + dynamicWindow = false + } + writeBufSize := opts.WriteBufferSize + readBufSize := opts.ReadBufferSize + maxHeaderListSize := defaultClientMaxHeaderListSize + if opts.MaxHeaderListSize != nil { + maxHeaderListSize = *opts.MaxHeaderListSize + } + t := &http2Client{ + ctx: ctx, + ctxDone: ctx.Done(), // Cache Done chan. + cancel: cancel, + userAgent: opts.UserAgent, + md: addr.Metadata, + conn: conn, + remoteAddr: conn.RemoteAddr(), + localAddr: conn.LocalAddr(), + authInfo: authInfo, + readerDone: make(chan struct{}), + writerDone: make(chan struct{}), + goAway: make(chan struct{}), + framer: newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize), + fc: &trInFlow{limit: uint32(icwz)}, + scheme: scheme, + activeStreams: make(map[uint32]*Stream), + isSecure: isSecure, + perRPCCreds: perRPCCreds, + kp: kp, + statsHandler: opts.StatsHandler, + initialWindowSize: initialWindowSize, + onPrefaceReceipt: onPrefaceReceipt, + nextID: 1, + maxConcurrentStreams: defaultMaxStreamsClient, + streamQuota: defaultMaxStreamsClient, + streamsQuotaAvailable: make(chan struct{}, 1), + czData: new(channelzData), + onGoAway: onGoAway, + onClose: onClose, + keepaliveEnabled: keepaliveEnabled, + bufferPool: newBufferPool(), + } + t.controlBuf = newControlBuffer(t.ctxDone) + if opts.InitialWindowSize >= defaultWindowSize { + t.initialWindowSize = opts.InitialWindowSize + dynamicWindow = false + } + if dynamicWindow { + t.bdpEst = &bdpEstimator{ + bdp: initialWindowSize, + updateFlowControl: t.updateFlowControl, + } + } + if t.statsHandler != nil { + t.ctx = t.statsHandler.TagConn(t.ctx, &stats.ConnTagInfo{ + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + }) + connBegin := &stats.ConnBegin{ + Client: true, + } + t.statsHandler.HandleConn(t.ctx, connBegin) + } + if channelz.IsOn() { + t.channelzID = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr)) + } + if t.keepaliveEnabled { + t.kpDormancyCond = sync.NewCond(&t.mu) + go t.keepalive() + } + // Start the reader goroutine for incoming message. Each transport has + // a dedicated goroutine which reads HTTP2 frame from network. Then it + // dispatches the frame to the corresponding stream entity. + go t.reader() + + // Send connection preface to server. 
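+	// Editor's sketch of the handshake sequence that follows: write the
+	// client preface, then a SETTINGS frame, then an optional connection
+	// level WINDOW_UPDATE, flush, and only then start loopy on the framer.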
+ n, err := t.conn.Write(clientPreface) + if err != nil { + t.Close() + return nil, connectionErrorf(true, err, "transport: failed to write client preface: %v", err) + } + if n != len(clientPreface) { + t.Close() + return nil, connectionErrorf(true, err, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface)) + } + var ss []http2.Setting + + if t.initialWindowSize != defaultWindowSize { + ss = append(ss, http2.Setting{ + ID: http2.SettingInitialWindowSize, + Val: uint32(t.initialWindowSize), + }) + } + if opts.MaxHeaderListSize != nil { + ss = append(ss, http2.Setting{ + ID: http2.SettingMaxHeaderListSize, + Val: *opts.MaxHeaderListSize, + }) + } + err = t.framer.fr.WriteSettings(ss...) + if err != nil { + t.Close() + return nil, connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err) + } + // Adjust the connection flow control window if needed. + if delta := uint32(icwz - defaultWindowSize); delta > 0 { + if err := t.framer.fr.WriteWindowUpdate(0, delta); err != nil { + t.Close() + return nil, connectionErrorf(true, err, "transport: failed to write window update: %v", err) + } + } + + t.connectionID = atomic.AddUint64(&clientConnectionCounter, 1) + + if err := t.framer.writer.Flush(); err != nil { + return nil, err + } + go func() { + t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst) + err := t.loopy.run() + if err != nil { + errorf("transport: loopyWriter.run returning. Err: %v", err) + } + // If it's a connection error, let reader goroutine handle it + // since there might be data in the buffers. + if _, ok := err.(net.Error); !ok { + t.conn.Close() + } + close(t.writerDone) + }() + return t, nil +} + +func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { + // TODO(zhaoq): Handle uint32 overflow of Stream.id. + s := &Stream{ + ct: t, + done: make(chan struct{}), + method: callHdr.Method, + sendCompress: callHdr.SendCompress, + buf: newRecvBuffer(), + headerChan: make(chan struct{}), + contentSubtype: callHdr.ContentSubtype, + } + s.wq = newWriteQuota(defaultWriteQuota, s.done) + s.requestRead = func(n int) { + t.adjustWindow(s, uint32(n)) + } + // The client side stream context should have exactly the same life cycle with the user provided context. + // That means, s.ctx should be read-only. And s.ctx is done iff ctx is done. + // So we use the original context here instead of creating a copy. 
+	s.ctx = ctx
+	s.trReader = &transportReader{
+		reader: &recvBufferReader{
+			ctx:     s.ctx,
+			ctxDone: s.ctx.Done(),
+			recv:    s.buf,
+			closeStream: func(err error) {
+				t.CloseStream(s, err)
+			},
+			freeBuffer: t.bufferPool.put,
+		},
+		windowHandler: func(n int) {
+			t.updateWindow(s, uint32(n))
+		},
+	}
+	return s
+}
+
+func (t *http2Client) getPeer() *peer.Peer {
+	return &peer.Peer{
+		Addr:     t.remoteAddr,
+		AuthInfo: t.authInfo,
+	}
+}
+
+func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) ([]hpack.HeaderField, error) {
+	aud := t.createAudience(callHdr)
+	ri := credentials.RequestInfo{
+		Method:   callHdr.Method,
+		AuthInfo: t.authInfo,
+	}
+	ctxWithRequestInfo := internal.NewRequestInfoContext.(func(context.Context, credentials.RequestInfo) context.Context)(ctx, ri)
+	authData, err := t.getTrAuthData(ctxWithRequestInfo, aud)
+	if err != nil {
+		return nil, err
+	}
+	callAuthData, err := t.getCallAuthData(ctxWithRequestInfo, aud, callHdr)
+	if err != nil {
+		return nil, err
+	}
+	// TODO(mmukhi): Benchmark if the performance gets better if we count the metadata and other header fields
+	// first and create a slice of that exact size.
+	// Make the slice of certain predictable size to reduce allocations made by append.
+	hfLen := 7 // :method, :scheme, :path, :authority, content-type, user-agent, te
+	hfLen += len(authData) + len(callAuthData)
+	headerFields := make([]hpack.HeaderField, 0, hfLen)
+	headerFields = append(headerFields, hpack.HeaderField{Name: ":method", Value: "POST"})
+	headerFields = append(headerFields, hpack.HeaderField{Name: ":scheme", Value: t.scheme})
+	headerFields = append(headerFields, hpack.HeaderField{Name: ":path", Value: callHdr.Method})
+	headerFields = append(headerFields, hpack.HeaderField{Name: ":authority", Value: callHdr.Host})
+	headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(callHdr.ContentSubtype)})
+	headerFields = append(headerFields, hpack.HeaderField{Name: "user-agent", Value: t.userAgent})
+	headerFields = append(headerFields, hpack.HeaderField{Name: "te", Value: "trailers"})
+	if callHdr.PreviousAttempts > 0 {
+		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-previous-rpc-attempts", Value: strconv.Itoa(callHdr.PreviousAttempts)})
+	}
+
+	if callHdr.SendCompress != "" {
+		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress})
+		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-accept-encoding", Value: callHdr.SendCompress})
+	}
+	if dl, ok := ctx.Deadline(); ok {
+		// Send out the timeout regardless of its value. The server can detect a timeout context by itself.
+		// TODO(mmukhi): Perhaps this field should be updated when actually writing out to the wire.
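+		// (E.g. a deadline 250ms away would be sent as grpc-timeout "250000u";
+		// editor's illustration of encodeTimeout's unit-suffixed format.)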
+ timeout := time.Until(dl) + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-timeout", Value: encodeTimeout(timeout)}) + } + for k, v := range authData { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + for k, v := range callAuthData { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + if b := stats.OutgoingTags(ctx); b != nil { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-tags-bin", Value: encodeBinHeader(b)}) + } + if b := stats.OutgoingTrace(ctx); b != nil { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)}) + } + + if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok { + var k string + for k, vv := range md { + // HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set. + if isReservedHeader(k) { + continue + } + for _, v := range vv { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + } + for _, vv := range added { + for i, v := range vv { + if i%2 == 0 { + k = v + continue + } + // HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set. + if isReservedHeader(k) { + continue + } + headerFields = append(headerFields, hpack.HeaderField{Name: strings.ToLower(k), Value: encodeMetadataHeader(k, v)}) + } + } + } + if md, ok := t.md.(*metadata.MD); ok { + for k, vv := range *md { + if isReservedHeader(k) { + continue + } + for _, v := range vv { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + } + } + return headerFields, nil +} + +func (t *http2Client) createAudience(callHdr *CallHdr) string { + // Create an audience string only if needed. + if len(t.perRPCCreds) == 0 && callHdr.Creds == nil { + return "" + } + // Construct URI required to get auth request metadata. + // Omit port if it is the default one. + host := strings.TrimSuffix(callHdr.Host, ":443") + pos := strings.LastIndex(callHdr.Method, "/") + if pos == -1 { + pos = len(callHdr.Method) + } + return "https://" + host + callHdr.Method[:pos] +} + +func (t *http2Client) getTrAuthData(ctx context.Context, audience string) (map[string]string, error) { + if len(t.perRPCCreds) == 0 { + return nil, nil + } + authData := map[string]string{} + for _, c := range t.perRPCCreds { + data, err := c.GetRequestMetadata(ctx, audience) + if err != nil { + if _, ok := status.FromError(err); ok { + return nil, err + } + + return nil, status.Errorf(codes.Unauthenticated, "transport: %v", err) + } + for k, v := range data { + // Capital header names are illegal in HTTP/2. + k = strings.ToLower(k) + authData[k] = v + } + } + return authData, nil +} + +func (t *http2Client) getCallAuthData(ctx context.Context, audience string, callHdr *CallHdr) (map[string]string, error) { + var callAuthData map[string]string + // Check if credentials.PerRPCCredentials were provided via call options. + // Note: if these credentials are provided both via dial options and call + // options, then both sets of credentials will be applied. 
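+	// (Illustrative: callHdr.Creds is typically populated via the
+	// grpc.PerRPCCredentials call option, while t.perRPCCreds above comes
+	// from dial options. Editor's note.)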
+ if callCreds := callHdr.Creds; callCreds != nil { + if !t.isSecure && callCreds.RequireTransportSecurity() { + return nil, status.Error(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure connection") + } + data, err := callCreds.GetRequestMetadata(ctx, audience) + if err != nil { + return nil, status.Errorf(codes.Internal, "transport: %v", err) + } + callAuthData = make(map[string]string, len(data)) + for k, v := range data { + // Capital header names are illegal in HTTP/2 + k = strings.ToLower(k) + callAuthData[k] = v + } + } + return callAuthData, nil +} + +// NewStream creates a stream and registers it into the transport as "active" +// streams. +func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) { + ctx = peer.NewContext(ctx, t.getPeer()) + headerFields, err := t.createHeaderFields(ctx, callHdr) + if err != nil { + return nil, err + } + s := t.newStream(ctx, callHdr) + cleanup := func(err error) { + if s.swapState(streamDone) == streamDone { + // If it was already done, return. + return + } + // The stream was unprocessed by the server. + atomic.StoreUint32(&s.unprocessed, 1) + s.write(recvMsg{err: err}) + close(s.done) + // If headerChan isn't closed, then close it. + if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { + close(s.headerChan) + } + } + hdr := &headerFrame{ + hf: headerFields, + endStream: false, + initStream: func(id uint32) error { + t.mu.Lock() + if state := t.state; state != reachable { + t.mu.Unlock() + // Do a quick cleanup. + err := error(errStreamDrain) + if state == closing { + err = ErrConnClosing + } + cleanup(err) + return err + } + t.activeStreams[id] = s + if channelz.IsOn() { + atomic.AddInt64(&t.czData.streamsStarted, 1) + atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano()) + } + // If the keepalive goroutine has gone dormant, wake it up. + if t.kpDormant { + t.kpDormancyCond.Signal() + } + t.mu.Unlock() + return nil + }, + onOrphaned: cleanup, + wq: s.wq, + } + firstTry := true + var ch chan struct{} + checkForStreamQuota := func(it interface{}) bool { + if t.streamQuota <= 0 { // Can go negative if server decreases it. 
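+			// Worked example (editor's sketch): if the server lowers
+			// MAX_CONCURRENT_STREAMS from 100 to 10 while 50 streams are
+			// active, streamQuota goes negative and new RPCs wait here.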
+ if firstTry { + t.waitingStreams++ + } + ch = t.streamsQuotaAvailable + return false + } + if !firstTry { + t.waitingStreams-- + } + t.streamQuota-- + h := it.(*headerFrame) + h.streamID = t.nextID + t.nextID += 2 + s.id = h.streamID + s.fc = &inFlow{limit: uint32(t.initialWindowSize)} + if t.streamQuota > 0 && t.waitingStreams > 0 { + select { + case t.streamsQuotaAvailable <- struct{}{}: + default: + } + } + return true + } + var hdrListSizeErr error + checkForHeaderListSize := func(it interface{}) bool { + if t.maxSendHeaderListSize == nil { + return true + } + hdrFrame := it.(*headerFrame) + var sz int64 + for _, f := range hdrFrame.hf { + if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) { + hdrListSizeErr = status.Errorf(codes.Internal, "header list size to send violates the maximum size (%d bytes) set by server", *t.maxSendHeaderListSize) + return false + } + } + return true + } + for { + success, err := t.controlBuf.executeAndPut(func(it interface{}) bool { + if !checkForStreamQuota(it) { + return false + } + if !checkForHeaderListSize(it) { + return false + } + return true + }, hdr) + if err != nil { + return nil, err + } + if success { + break + } + if hdrListSizeErr != nil { + return nil, hdrListSizeErr + } + firstTry = false + select { + case <-ch: + case <-s.ctx.Done(): + return nil, ContextErr(s.ctx.Err()) + case <-t.goAway: + return nil, errStreamDrain + case <-t.ctx.Done(): + return nil, ErrConnClosing + } + } + if t.statsHandler != nil { + header, ok := metadata.FromOutgoingContext(ctx) + if ok { + header.Set("user-agent", t.userAgent) + } else { + header = metadata.Pairs("user-agent", t.userAgent) + } + // Note: The header fields are compressed with hpack after this call returns. + // No WireLength field is set here. + outHeader := &stats.OutHeader{ + Client: true, + FullMethod: callHdr.Method, + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + Compression: callHdr.SendCompress, + Header: header, + } + t.statsHandler.HandleRPC(s.ctx, outHeader) + } + return s, nil +} + +// CloseStream clears the footprint of a stream when the stream is not needed any more. +// This must not be executed in reader's goroutine. +func (t *http2Client) CloseStream(s *Stream, err error) { + var ( + rst bool + rstCode http2.ErrCode + ) + if err != nil { + rst = true + rstCode = http2.ErrCodeCancel + } + t.closeStream(s, err, rst, rstCode, status.Convert(err), nil, false) +} + +func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.ErrCode, st *status.Status, mdata map[string][]string, eosReceived bool) { + // Set stream status to done. + if s.swapState(streamDone) == streamDone { + // If it was already done, return. If multiple closeStream calls + // happen simultaneously, wait for the first to finish. + <-s.done + return + } + // status and trailers can be updated here without any synchronization because the stream goroutine will + // only read it after it sees an io.EOF error from read or write and we'll write those errors + // only after updating this. + s.status = st + if len(mdata) > 0 { + s.trailer = mdata + } + if err != nil { + // This will unblock reads eventually. + s.write(recvMsg{err: err}) + } + // If headerChan isn't closed, then close it. 
+ if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { + s.noHeaders = true + close(s.headerChan) + } + cleanup := &cleanupStream{ + streamID: s.id, + onWrite: func() { + t.mu.Lock() + if t.activeStreams != nil { + delete(t.activeStreams, s.id) + } + t.mu.Unlock() + if channelz.IsOn() { + if eosReceived { + atomic.AddInt64(&t.czData.streamsSucceeded, 1) + } else { + atomic.AddInt64(&t.czData.streamsFailed, 1) + } + } + }, + rst: rst, + rstCode: rstCode, + } + addBackStreamQuota := func(interface{}) bool { + t.streamQuota++ + if t.streamQuota > 0 && t.waitingStreams > 0 { + select { + case t.streamsQuotaAvailable <- struct{}{}: + default: + } + } + return true + } + t.controlBuf.executeAndPut(addBackStreamQuota, cleanup) + // This will unblock write. + close(s.done) +} + +// Close kicks off the shutdown process of the transport. This should be called +// only once on a transport. Once it is called, the transport should not be +// accessed any more. +// +// This method blocks until the addrConn that initiated this transport is +// re-connected. This happens because t.onClose() begins reconnect logic at the +// addrConn level and blocks until the addrConn is successfully connected. +func (t *http2Client) Close() error { + t.mu.Lock() + // Make sure we only Close once. + if t.state == closing { + t.mu.Unlock() + return nil + } + // Call t.onClose before setting the state to closing to prevent the client + // from attempting to create new streams ASAP. + t.onClose() + t.state = closing + streams := t.activeStreams + t.activeStreams = nil + if t.kpDormant { + // If the keepalive goroutine is blocked on this condition variable, we + // should unblock it so that the goroutine eventually exits. + t.kpDormancyCond.Signal() + } + t.mu.Unlock() + t.controlBuf.finish() + t.cancel() + err := t.conn.Close() + if channelz.IsOn() { + channelz.RemoveEntry(t.channelzID) + } + // Notify all active streams. + for _, s := range streams { + t.closeStream(s, ErrConnClosing, false, http2.ErrCodeNo, status.New(codes.Unavailable, ErrConnClosing.Desc), nil, false) + } + if t.statsHandler != nil { + connEnd := &stats.ConnEnd{ + Client: true, + } + t.statsHandler.HandleConn(t.ctx, connEnd) + } + return err +} + +// GracefulClose sets the state to draining, which prevents new streams from +// being created and causes the transport to be closed when the last active +// stream is closed. If there are no active streams, the transport is closed +// immediately. This does nothing if the transport is already draining or +// closing. +func (t *http2Client) GracefulClose() { + t.mu.Lock() + // Make sure we move to draining only from active. + if t.state == draining || t.state == closing { + t.mu.Unlock() + return + } + t.state = draining + active := len(t.activeStreams) + t.mu.Unlock() + if active == 0 { + t.Close() + return + } + t.controlBuf.put(&incomingGoAway{}) +} + +// Write formats the data into HTTP2 data frame(s) and sends it out. The caller +// should proceed only if Write returns nil. +func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { + if opts.Last { + // If it's the last message, update stream state. + if !s.compareAndSwapState(streamActive, streamWriteDone) { + return errStreamDone + } + } else if s.getState() != streamActive { + return errStreamDone + } + df := &dataFrame{ + streamID: s.id, + endStream: opts.Last, + } + if hdr != nil || data != nil { // If it's not an empty data frame. 
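+ // The split below folds up to http2MaxFrameLen bytes of data into hdr.
+ // Worked example (assuming hdr carries the usual 5-byte gRPC message
+ // header): emptyLen starts at 16384-5 = 16379; for a 100-byte message it
+ // is clamped to 100, so the whole payload rides in hdr and loopy can
+ // emit a single DATA frame.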
+ // Add some data to grpc message header so that we can equally + // distribute bytes across frames. + emptyLen := http2MaxFrameLen - len(hdr) + if emptyLen > len(data) { + emptyLen = len(data) + } + hdr = append(hdr, data[:emptyLen]...) + data = data[emptyLen:] + df.h, df.d = hdr, data + // TODO(mmukhi): The above logic in this if can be moved to loopyWriter's data handler. + if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { + return err + } + } + return t.controlBuf.put(df) +} + +func (t *http2Client) getStream(f http2.Frame) *Stream { + t.mu.Lock() + s := t.activeStreams[f.Header().StreamID] + t.mu.Unlock() + return s +} + +// adjustWindow sends out extra window update over the initial window size +// of stream if the application is requesting data larger in size than +// the window. +func (t *http2Client) adjustWindow(s *Stream, n uint32) { + if w := s.fc.maybeAdjust(n); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) + } +} + +// updateWindow adjusts the inbound quota for the stream. +// Window updates will be sent out when the cumulative quota +// exceeds the corresponding threshold. +func (t *http2Client) updateWindow(s *Stream, n uint32) { + if w := s.fc.onRead(n); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) + } +} + +// updateFlowControl updates the incoming flow control windows +// for the transport and the stream based on the current bdp +// estimation. +func (t *http2Client) updateFlowControl(n uint32) { + t.mu.Lock() + for _, s := range t.activeStreams { + s.fc.newLimit(n) + } + t.mu.Unlock() + updateIWS := func(interface{}) bool { + t.initialWindowSize = int32(n) + return true + } + t.controlBuf.executeAndPut(updateIWS, &outgoingWindowUpdate{streamID: 0, increment: t.fc.newLimit(n)}) + t.controlBuf.put(&outgoingSettings{ + ss: []http2.Setting{ + { + ID: http2.SettingInitialWindowSize, + Val: n, + }, + }, + }) +} + +func (t *http2Client) handleData(f *http2.DataFrame) { + size := f.Header().Length + var sendBDPPing bool + if t.bdpEst != nil { + sendBDPPing = t.bdpEst.add(size) + } + // Decouple connection's flow control from application's read. + // An update on connection's flow control should not depend on + // whether user application has read the data or not. Such a + // restriction is already imposed on the stream's flow control, + // and therefore the sender will be blocked anyways. + // Decoupling the connection flow control will prevent other + // active(fast) streams from starving in presence of slow or + // inactive streams. + // + if w := t.fc.onData(size); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: w, + }) + } + if sendBDPPing { + // Avoid excessive ping detection (e.g. in an L7 proxy) + // by sending a window update prior to the BDP ping. + + if w := t.fc.reset(); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: w, + }) + } + + t.controlBuf.put(bdpPing) + } + // Select the right stream to dispatch. 
+ s := t.getStream(f) + if s == nil { + return + } + if size > 0 { + if err := s.fc.onData(size); err != nil { + t.closeStream(s, io.EOF, true, http2.ErrCodeFlowControl, status.New(codes.Internal, err.Error()), nil, false) + return + } + if f.Header().Flags.Has(http2.FlagDataPadded) { + if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{s.id, w}) + } + } + // TODO(bradfitz, zhaoq): A copy is required here because there is no + // guarantee f.Data() is consumed before the arrival of next frame. + // Can this copy be eliminated? + if len(f.Data()) > 0 { + buffer := t.bufferPool.get() + buffer.Reset() + buffer.Write(f.Data()) + s.write(recvMsg{buffer: buffer}) + } + } + // The server has closed the stream without sending trailers. Record that + // the read direction is closed, and set the status appropriately. + if f.FrameHeader.Flags.Has(http2.FlagDataEndStream) { + t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.New(codes.Internal, "server closed the stream without sending trailers"), nil, true) + } +} + +func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { + s := t.getStream(f) + if s == nil { + return + } + if f.ErrCode == http2.ErrCodeRefusedStream { + // The stream was unprocessed by the server. + atomic.StoreUint32(&s.unprocessed, 1) + } + statusCode, ok := http2ErrConvTab[f.ErrCode] + if !ok { + warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode) + statusCode = codes.Unknown + } + if statusCode == codes.Canceled { + if d, ok := s.ctx.Deadline(); ok && !d.After(time.Now()) { + // Our deadline was already exceeded, and that was likely the cause + // of this cancelation. Alter the status code accordingly. + statusCode = codes.DeadlineExceeded + } + } + t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.Newf(statusCode, "stream terminated by RST_STREAM with error code: %v", f.ErrCode), nil, false) +} + +func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) { + if f.IsAck() { + return + } + var maxStreams *uint32 + var ss []http2.Setting + var updateFuncs []func() + f.ForeachSetting(func(s http2.Setting) error { + switch s.ID { + case http2.SettingMaxConcurrentStreams: + maxStreams = new(uint32) + *maxStreams = s.Val + case http2.SettingMaxHeaderListSize: + updateFuncs = append(updateFuncs, func() { + t.maxSendHeaderListSize = new(uint32) + *t.maxSendHeaderListSize = s.Val + }) + default: + ss = append(ss, s) + } + return nil + }) + if isFirst && maxStreams == nil { + maxStreams = new(uint32) + *maxStreams = math.MaxUint32 + } + sf := &incomingSettings{ + ss: ss, + } + if maxStreams != nil { + updateStreamQuota := func() { + delta := int64(*maxStreams) - int64(t.maxConcurrentStreams) + t.maxConcurrentStreams = *maxStreams + t.streamQuota += delta + if delta > 0 && t.waitingStreams > 0 { + close(t.streamsQuotaAvailable) // wake all of them up. + t.streamsQuotaAvailable = make(chan struct{}, 1) + } + } + updateFuncs = append(updateFuncs, updateStreamQuota) + } + t.controlBuf.executeAndPut(func(interface{}) bool { + for _, f := range updateFuncs { + f() + } + return true + }, sf) +} + +func (t *http2Client) handlePing(f *http2.PingFrame) { + if f.IsAck() { + // Maybe it's a BDP ping. 
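+ // bdpEst.calculate is expected to act only when the ack payload
+ // matches the bdpPing data; acks for other pings should be ignored
+ // by the estimator.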
+ if t.bdpEst != nil {
+ t.bdpEst.calculate(f.Data)
+ }
+ return
+ }
+ pingAck := &ping{ack: true}
+ copy(pingAck.data[:], f.Data[:])
+ t.controlBuf.put(pingAck)
+}
+
+func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
+ t.mu.Lock()
+ if t.state == closing {
+ t.mu.Unlock()
+ return
+ }
+ if f.ErrCode == http2.ErrCodeEnhanceYourCalm {
+ infof("Client received GoAway with http2.ErrCodeEnhanceYourCalm.")
+ }
+ id := f.LastStreamID
+ if id > 0 && id%2 != 1 {
+ t.mu.Unlock()
+ t.Close()
+ return
+ }
+ // A client can receive multiple GoAways from the server (see
+ // https://github.com/grpc/grpc-go/issues/1387). The idea is that the first
+ // GoAway will be sent with an ID of MaxInt32 and the second GoAway will be
+ // sent after an RTT delay with the ID of the last stream the server will
+ // process.
+ //
+ // Therefore, when we get the first GoAway we don't necessarily close any
+ // streams. On the second GoAway, however, we close all streams created after
+ // the GoAway ID. This way streams that were in-flight while the GoAway from
+ // server was being sent don't get killed.
+ select {
+ case <-t.goAway: // t.goAway has been closed (i.e., multiple GoAways).
+ // If there are multiple GoAways the first one should always have an ID greater than the following ones.
+ if id > t.prevGoAwayID {
+ t.mu.Unlock()
+ t.Close()
+ return
+ }
+ default:
+ t.setGoAwayReason(f)
+ close(t.goAway)
+ t.controlBuf.put(&incomingGoAway{})
+ // Notify the clientconn about the GOAWAY before we set the state to
+ // draining, to allow the client to stop attempting to create streams
+ // before disallowing new streams on this connection.
+ t.onGoAway(t.goAwayReason)
+ t.state = draining
+ }
+ // All streams with IDs greater than the GoAway ID
+ // and smaller than the previous GoAway ID should be killed.
+ upperLimit := t.prevGoAwayID
+ if upperLimit == 0 { // This is the first GoAway Frame.
+ upperLimit = math.MaxUint32 // Kill all streams after the GoAway ID.
+ }
+ for streamID, stream := range t.activeStreams {
+ if streamID > id && streamID <= upperLimit {
+ // The stream was unprocessed by the server.
+ atomic.StoreUint32(&stream.unprocessed, 1)
+ t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false)
+ }
+ }
+ t.prevGoAwayID = id
+ active := len(t.activeStreams)
+ t.mu.Unlock()
+ if active == 0 {
+ t.Close()
+ }
+}
+
+// setGoAwayReason sets the value of t.goAwayReason based
+// on the GoAway frame received.
+// It expects a lock on the transport's mutex to be held by
+// the caller.
+func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) {
+ t.goAwayReason = GoAwayNoReason
+ switch f.ErrCode {
+ case http2.ErrCodeEnhanceYourCalm:
+ if string(f.DebugData()) == "too_many_pings" {
+ t.goAwayReason = GoAwayTooManyPings
+ }
+ }
+}
+
+func (t *http2Client) GetGoAwayReason() GoAwayReason {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ return t.goAwayReason
+}
+
+func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) {
+ t.controlBuf.put(&incomingWindowUpdate{
+ streamID: f.Header().StreamID,
+ increment: f.Increment,
+ })
+}
+
+// operateHeaders takes action on the decoded headers.
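+// On the client, a HEADERS frame is one of three things: initial
+// Response-Headers (no END_STREAM), a Trailers-Only response (END_STREAM on
+// the very first HEADERS frame), or the trailers that terminate an open
+// stream; a HEADERS frame in the middle of a stream is a protocol error.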
+func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { + s := t.getStream(frame) + if s == nil { + return + } + endStream := frame.StreamEnded() + atomic.StoreUint32(&s.bytesReceived, 1) + initialHeader := atomic.LoadUint32(&s.headerChanClosed) == 0 + + if !initialHeader && !endStream { + // As specified by gRPC over HTTP2, a HEADERS frame (and associated CONTINUATION frames) can only appear at the start or end of a stream. Therefore, second HEADERS frame must have EOS bit set. + st := status.New(codes.Internal, "a HEADERS frame cannot appear in the middle of a stream") + t.closeStream(s, st.Err(), true, http2.ErrCodeProtocol, st, nil, false) + return + } + + state := &decodeState{} + // Initialize isGRPC value to be !initialHeader, since if a gRPC Response-Headers has already been received, then it means that the peer is speaking gRPC and we are in gRPC mode. + state.data.isGRPC = !initialHeader + if err := state.decodeHeader(frame); err != nil { + t.closeStream(s, err, true, http2.ErrCodeProtocol, status.Convert(err), nil, endStream) + return + } + + isHeader := false + defer func() { + if t.statsHandler != nil { + if isHeader { + inHeader := &stats.InHeader{ + Client: true, + WireLength: int(frame.Header().Length), + Header: s.header.Copy(), + Compression: s.recvCompress, + } + t.statsHandler.HandleRPC(s.ctx, inHeader) + } else { + inTrailer := &stats.InTrailer{ + Client: true, + WireLength: int(frame.Header().Length), + Trailer: s.trailer.Copy(), + } + t.statsHandler.HandleRPC(s.ctx, inTrailer) + } + } + }() + + // If headerChan hasn't been closed yet + if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { + s.headerValid = true + if !endStream { + // HEADERS frame block carries a Response-Headers. + isHeader = true + // These values can be set without any synchronization because + // stream goroutine will read it only after seeing a closed + // headerChan which we'll close after setting this. + s.recvCompress = state.data.encoding + if len(state.data.mdata) > 0 { + s.header = state.data.mdata + } + } else { + // HEADERS frame block carries a Trailers-Only. + s.noHeaders = true + } + close(s.headerChan) + } + + if !endStream { + return + } + + // if client received END_STREAM from server while stream was still active, send RST_STREAM + rst := s.getState() == streamActive + t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, state.status(), state.data.mdata, true) +} + +// reader runs as a separate goroutine in charge of reading data from network +// connection. +// +// TODO(zhaoq): currently one reader per transport. Investigate whether this is +// optimal. +// TODO(zhaoq): Check the validity of the incoming frame sequence. +func (t *http2Client) reader() { + defer close(t.readerDone) + // Check the validity of server preface. + frame, err := t.framer.fr.ReadFrame() + if err != nil { + t.Close() // this kicks off resetTransport, so must be last before return + return + } + t.conn.SetReadDeadline(time.Time{}) // reset deadline once we get the settings frame (we didn't time out, yay!) + if t.keepaliveEnabled { + atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) + } + sf, ok := frame.(*http2.SettingsFrame) + if !ok { + t.Close() // this kicks off resetTransport, so must be last before return + return + } + t.onPrefaceReceipt() + t.handleSettings(sf, true) + + // loop to keep reading incoming messages on this transport. 
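+ // controlBuf.throttle() below applies backpressure: it is meant to
+ // block the reader while too many response frames (e.g. ping acks)
+ // are queued for the writer, so a frame-flooding peer cannot balloon
+ // our memory.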
+ for {
+ t.controlBuf.throttle()
+ frame, err := t.framer.fr.ReadFrame()
+ if t.keepaliveEnabled {
+ atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
+ }
+ if err != nil {
+ // Abort an active stream if the http2.Framer returns an
+ // http2.StreamError. This can happen only if the server's response
+ // is malformed http2.
+ if se, ok := err.(http2.StreamError); ok {
+ t.mu.Lock()
+ s := t.activeStreams[se.StreamID]
+ t.mu.Unlock()
+ if s != nil {
+ // Use the error detail to provide a better error message.
+ code := http2ErrConvTab[se.Code]
+ msg := t.framer.fr.ErrorDetail().Error()
+ t.closeStream(s, status.Error(code, msg), true, http2.ErrCodeProtocol, status.New(code, msg), nil, false)
+ }
+ continue
+ } else {
+ // Transport error.
+ t.Close()
+ return
+ }
+ }
+ switch frame := frame.(type) {
+ case *http2.MetaHeadersFrame:
+ t.operateHeaders(frame)
+ case *http2.DataFrame:
+ t.handleData(frame)
+ case *http2.RSTStreamFrame:
+ t.handleRSTStream(frame)
+ case *http2.SettingsFrame:
+ t.handleSettings(frame, false)
+ case *http2.PingFrame:
+ t.handlePing(frame)
+ case *http2.GoAwayFrame:
+ t.handleGoAway(frame)
+ case *http2.WindowUpdateFrame:
+ t.handleWindowUpdate(frame)
+ default:
+ errorf("transport: http2Client.reader got unhandled frame type %v.", frame)
+ }
+ }
+}
+
+func minTime(a, b time.Duration) time.Duration {
+ if a < b {
+ return a
+ }
+ return b
+}
+
+// keepalive, running in a separate goroutine, makes sure the connection is alive by sending pings.
+func (t *http2Client) keepalive() {
+ p := &ping{data: [8]byte{}}
+ // True iff a ping has been sent, and no data has been received since then.
+ outstandingPing := false
+ // Amount of time remaining before which we should receive an ACK for the
+ // last sent ping.
+ timeoutLeft := time.Duration(0)
+ // Records the last value of t.lastRead before we block on the timer.
+ // This is required to check for read activity since then.
+ prevNano := time.Now().UnixNano()
+ timer := time.NewTimer(t.kp.Time)
+ for {
+ select {
+ case <-timer.C:
+ lastRead := atomic.LoadInt64(&t.lastRead)
+ if lastRead > prevNano {
+ // There has been read activity since the last time we were here.
+ outstandingPing = false
+ // The next timer should fire kp.Time after the lastRead time.
+ timer.Reset(time.Duration(lastRead) + t.kp.Time - time.Duration(time.Now().UnixNano()))
+ prevNano = lastRead
+ continue
+ }
+ if outstandingPing && timeoutLeft <= 0 {
+ t.Close()
+ return
+ }
+ t.mu.Lock()
+ if t.state == closing {
+ // If the transport is closing, we should exit from the
+ // keepalive goroutine here. If not, we could have a race
+ // between the call to Signal() from Close() and the call to
+ // Wait() here, whereby the keepalive goroutine ends up
+ // blocking on the condition variable which will never be
+ // signalled again.
+ t.mu.Unlock()
+ return
+ }
+ if len(t.activeStreams) < 1 && !t.kp.PermitWithoutStream {
+ // If a ping was sent out previously (because there were active
+ // streams at that point) which wasn't acked and its timeout
+ // hadn't fired, but we got here and are about to go dormant,
+ // we should make sure that we unconditionally send a ping once
+ // we awaken.
+ outstandingPing = false
+ t.kpDormant = true
+ t.kpDormancyCond.Wait()
+ }
+ t.kpDormant = false
+ t.mu.Unlock()
+
+ // We get here either because we were dormant and a new stream was
+ // created which unblocked the Wait() call, or because the
+ // keepalive timer expired. In both cases, we need to send a ping.
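+ // Worked example (illustrative values): with kp.Time = 10s and
+ // kp.Timeout = 3s, an idle connection pings at t=10s, then sleeps
+ // min(10s, 3s) = 3s; if nothing has been read by t=13s, timeoutLeft
+ // has hit 0 and the transport is closed.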
+ if !outstandingPing { + if channelz.IsOn() { + atomic.AddInt64(&t.czData.kpCount, 1) + } + t.controlBuf.put(p) + timeoutLeft = t.kp.Timeout + outstandingPing = true + } + // The amount of time to sleep here is the minimum of kp.Time and + // timeoutLeft. This will ensure that we wait only for kp.Time + // before sending out the next ping (for cases where the ping is + // acked). + sleepDuration := minTime(t.kp.Time, timeoutLeft) + timeoutLeft -= sleepDuration + timer.Reset(sleepDuration) + case <-t.ctx.Done(): + if !timer.Stop() { + <-timer.C + } + return + } + } +} + +func (t *http2Client) Error() <-chan struct{} { + return t.ctx.Done() +} + +func (t *http2Client) GoAway() <-chan struct{} { + return t.goAway +} + +func (t *http2Client) ChannelzMetric() *channelz.SocketInternalMetric { + s := channelz.SocketInternalMetric{ + StreamsStarted: atomic.LoadInt64(&t.czData.streamsStarted), + StreamsSucceeded: atomic.LoadInt64(&t.czData.streamsSucceeded), + StreamsFailed: atomic.LoadInt64(&t.czData.streamsFailed), + MessagesSent: atomic.LoadInt64(&t.czData.msgSent), + MessagesReceived: atomic.LoadInt64(&t.czData.msgRecv), + KeepAlivesSent: atomic.LoadInt64(&t.czData.kpCount), + LastLocalStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)), + LastMessageSentTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)), + LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)), + LocalFlowControlWindow: int64(t.fc.getSize()), + SocketOptions: channelz.GetSocketOption(t.conn), + LocalAddr: t.localAddr, + RemoteAddr: t.remoteAddr, + // RemoteName : + } + if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok { + s.Security = au.GetSecurityValue() + } + s.RemoteFlowControlWindow = t.getOutFlowWindow() + return &s +} + +func (t *http2Client) RemoteAddr() net.Addr { return t.remoteAddr } + +func (t *http2Client) IncrMsgSent() { + atomic.AddInt64(&t.czData.msgSent, 1) + atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano()) +} + +func (t *http2Client) IncrMsgRecv() { + atomic.AddInt64(&t.czData.msgRecv, 1) + atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano()) +} + +func (t *http2Client) getOutFlowWindow() int64 { + resp := make(chan uint32, 1) + timer := time.NewTimer(time.Second) + defer timer.Stop() + t.controlBuf.put(&outFlowControlSizeRequest{resp}) + select { + case sz := <-resp: + return int64(sz) + case <-t.ctxDone: + return -1 + case <-timer.C: + return -2 + } +} diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go new file mode 100644 index 00000000..fa33ffb1 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -0,0 +1,1251 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package transport + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "math" + "net" + "strconv" + "sync" + "sync/atomic" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" + "google.golang.org/grpc/tap" +) + +var ( + // ErrIllegalHeaderWrite indicates that setting header is illegal because of + // the stream's state. + ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHeader was already called") + // ErrHeaderListSizeLimitViolation indicates that the header list size is larger + // than the limit set by peer. + ErrHeaderListSizeLimitViolation = errors.New("transport: trying to send header list size larger than the limit set by peer") +) + +// serverConnectionCounter counts the number of connections a server has seen +// (equal to the number of http2Servers created). Must be accessed atomically. +var serverConnectionCounter uint64 + +// http2Server implements the ServerTransport interface with HTTP2. +type http2Server struct { + lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. + ctx context.Context + done chan struct{} + conn net.Conn + loopy *loopyWriter + readerDone chan struct{} // sync point to enable testing. + writerDone chan struct{} // sync point to enable testing. + remoteAddr net.Addr + localAddr net.Addr + maxStreamID uint32 // max stream ID ever seen + authInfo credentials.AuthInfo // auth info about the connection + inTapHandle tap.ServerInHandle + framer *framer + // The max number of concurrent streams. + maxStreams uint32 + // controlBuf delivers all the control related tasks (e.g., window + // updates, reset streams, and various settings) to the controller. + controlBuf *controlBuffer + fc *trInFlow + stats stats.Handler + // Keepalive and max-age parameters for the server. + kp keepalive.ServerParameters + // Keepalive enforcement policy. + kep keepalive.EnforcementPolicy + // The time instance last ping was received. + lastPingAt time.Time + // Number of times the client has violated keepalive ping policy so far. + pingStrikes uint8 + // Flag to signify that number of ping strikes should be reset to 0. + // This is set whenever data or header frames are sent. + // 1 means yes. + resetPingStrikes uint32 // Accessed atomically. + initialWindowSize int32 + bdpEst *bdpEstimator + maxSendHeaderListSize *uint32 + + mu sync.Mutex // guard the following + + // drainChan is initialized when drain(...) is called the first time. + // After which the server writes out the first GoAway(with ID 2^31-1) frame. + // Then an independent goroutine will be launched to later send the second GoAway. + // During this time we don't want to write another first GoAway(with ID 2^31 -1) frame. + // Thus call to drain(...) will be a no-op if drainChan is already initialized since draining is + // already underway. + drainChan chan struct{} + state transportState + activeStreams map[uint32]*Stream + // idle is the time instant when the connection went idle. + // This is either the beginning of the connection or when the number of + // RPCs go down to 0. + // When the connection is busy, this value is set to 0. 
+ idle time.Time + + // Fields below are for channelz metric collection. + channelzID int64 // channelz unique identification number + czData *channelzData + bufferPool *bufferPool + + connectionID uint64 +} + +// newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is +// returned if something goes wrong. +func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) { + writeBufSize := config.WriteBufferSize + readBufSize := config.ReadBufferSize + maxHeaderListSize := defaultServerMaxHeaderListSize + if config.MaxHeaderListSize != nil { + maxHeaderListSize = *config.MaxHeaderListSize + } + framer := newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize) + // Send initial settings as connection preface to client. + isettings := []http2.Setting{{ + ID: http2.SettingMaxFrameSize, + Val: http2MaxFrameLen, + }} + // TODO(zhaoq): Have a better way to signal "no limit" because 0 is + // permitted in the HTTP2 spec. + maxStreams := config.MaxStreams + if maxStreams == 0 { + maxStreams = math.MaxUint32 + } else { + isettings = append(isettings, http2.Setting{ + ID: http2.SettingMaxConcurrentStreams, + Val: maxStreams, + }) + } + dynamicWindow := true + iwz := int32(initialWindowSize) + if config.InitialWindowSize >= defaultWindowSize { + iwz = config.InitialWindowSize + dynamicWindow = false + } + icwz := int32(initialWindowSize) + if config.InitialConnWindowSize >= defaultWindowSize { + icwz = config.InitialConnWindowSize + dynamicWindow = false + } + if iwz != defaultWindowSize { + isettings = append(isettings, http2.Setting{ + ID: http2.SettingInitialWindowSize, + Val: uint32(iwz)}) + } + if config.MaxHeaderListSize != nil { + isettings = append(isettings, http2.Setting{ + ID: http2.SettingMaxHeaderListSize, + Val: *config.MaxHeaderListSize, + }) + } + if config.HeaderTableSize != nil { + isettings = append(isettings, http2.Setting{ + ID: http2.SettingHeaderTableSize, + Val: *config.HeaderTableSize, + }) + } + if err := framer.fr.WriteSettings(isettings...); err != nil { + return nil, connectionErrorf(false, err, "transport: %v", err) + } + // Adjust the connection flow control window if needed. + if delta := uint32(icwz - defaultWindowSize); delta > 0 { + if err := framer.fr.WriteWindowUpdate(0, delta); err != nil { + return nil, connectionErrorf(false, err, "transport: %v", err) + } + } + kp := config.KeepaliveParams + if kp.MaxConnectionIdle == 0 { + kp.MaxConnectionIdle = defaultMaxConnectionIdle + } + if kp.MaxConnectionAge == 0 { + kp.MaxConnectionAge = defaultMaxConnectionAge + } + // Add a jitter to MaxConnectionAge. 
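+ // getJitter (defined at the bottom of this file) returns a value in
+ // roughly [-10%, +10%) of its input, so e.g. a 30m MaxConnectionAge
+ // lands somewhere in [27m, 33m); the spread keeps a fleet of clients
+ // from being cycled in lockstep.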
+ kp.MaxConnectionAge += getJitter(kp.MaxConnectionAge) + if kp.MaxConnectionAgeGrace == 0 { + kp.MaxConnectionAgeGrace = defaultMaxConnectionAgeGrace + } + if kp.Time == 0 { + kp.Time = defaultServerKeepaliveTime + } + if kp.Timeout == 0 { + kp.Timeout = defaultServerKeepaliveTimeout + } + kep := config.KeepalivePolicy + if kep.MinTime == 0 { + kep.MinTime = defaultKeepalivePolicyMinTime + } + done := make(chan struct{}) + t := &http2Server{ + ctx: context.Background(), + done: done, + conn: conn, + remoteAddr: conn.RemoteAddr(), + localAddr: conn.LocalAddr(), + authInfo: config.AuthInfo, + framer: framer, + readerDone: make(chan struct{}), + writerDone: make(chan struct{}), + maxStreams: maxStreams, + inTapHandle: config.InTapHandle, + fc: &trInFlow{limit: uint32(icwz)}, + state: reachable, + activeStreams: make(map[uint32]*Stream), + stats: config.StatsHandler, + kp: kp, + idle: time.Now(), + kep: kep, + initialWindowSize: iwz, + czData: new(channelzData), + bufferPool: newBufferPool(), + } + t.controlBuf = newControlBuffer(t.done) + if dynamicWindow { + t.bdpEst = &bdpEstimator{ + bdp: initialWindowSize, + updateFlowControl: t.updateFlowControl, + } + } + if t.stats != nil { + t.ctx = t.stats.TagConn(t.ctx, &stats.ConnTagInfo{ + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + }) + connBegin := &stats.ConnBegin{} + t.stats.HandleConn(t.ctx, connBegin) + } + if channelz.IsOn() { + t.channelzID = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr)) + } + + t.connectionID = atomic.AddUint64(&serverConnectionCounter, 1) + + t.framer.writer.Flush() + + defer func() { + if err != nil { + t.Close() + } + }() + + // Check the validity of client preface. + preface := make([]byte, len(clientPreface)) + if _, err := io.ReadFull(t.conn, preface); err != nil { + return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to receive the preface from client: %v", err) + } + if !bytes.Equal(preface, clientPreface) { + return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams received bogus greeting from client: %q", preface) + } + + frame, err := t.framer.fr.ReadFrame() + if err == io.EOF || err == io.ErrUnexpectedEOF { + return nil, err + } + if err != nil { + return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to read initial settings frame: %v", err) + } + atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) + sf, ok := frame.(*http2.SettingsFrame) + if !ok { + return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams saw invalid preface type %T from client", frame) + } + t.handleSettings(sf) + + go func() { + t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst) + t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler + if err := t.loopy.run(); err != nil { + errorf("transport: loopyWriter.run returning. Err: %v", err) + } + t.conn.Close() + close(t.writerDone) + }() + go t.keepalive() + return t, nil +} + +// operateHeader takes action on the decoded headers. 
+func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (fatal bool) { + streamID := frame.Header().StreamID + state := &decodeState{ + serverSide: true, + } + if err := state.decodeHeader(frame); err != nil { + if se, ok := status.FromError(err); ok { + t.controlBuf.put(&cleanupStream{ + streamID: streamID, + rst: true, + rstCode: statusCodeConvTab[se.Code()], + onWrite: func() {}, + }) + } + return false + } + + buf := newRecvBuffer() + s := &Stream{ + id: streamID, + st: t, + buf: buf, + fc: &inFlow{limit: uint32(t.initialWindowSize)}, + recvCompress: state.data.encoding, + method: state.data.method, + contentSubtype: state.data.contentSubtype, + } + if frame.StreamEnded() { + // s is just created by the caller. No lock needed. + s.state = streamReadDone + } + if state.data.timeoutSet { + s.ctx, s.cancel = context.WithTimeout(t.ctx, state.data.timeout) + } else { + s.ctx, s.cancel = context.WithCancel(t.ctx) + } + pr := &peer.Peer{ + Addr: t.remoteAddr, + } + // Attach Auth info if there is any. + if t.authInfo != nil { + pr.AuthInfo = t.authInfo + } + s.ctx = peer.NewContext(s.ctx, pr) + // Attach the received metadata to the context. + if len(state.data.mdata) > 0 { + s.ctx = metadata.NewIncomingContext(s.ctx, state.data.mdata) + } + if state.data.statsTags != nil { + s.ctx = stats.SetIncomingTags(s.ctx, state.data.statsTags) + } + if state.data.statsTrace != nil { + s.ctx = stats.SetIncomingTrace(s.ctx, state.data.statsTrace) + } + if t.inTapHandle != nil { + var err error + info := &tap.Info{ + FullMethodName: state.data.method, + } + s.ctx, err = t.inTapHandle(s.ctx, info) + if err != nil { + warningf("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err) + t.controlBuf.put(&cleanupStream{ + streamID: s.id, + rst: true, + rstCode: http2.ErrCodeRefusedStream, + onWrite: func() {}, + }) + s.cancel() + return false + } + } + t.mu.Lock() + if t.state != reachable { + t.mu.Unlock() + s.cancel() + return false + } + if uint32(len(t.activeStreams)) >= t.maxStreams { + t.mu.Unlock() + t.controlBuf.put(&cleanupStream{ + streamID: streamID, + rst: true, + rstCode: http2.ErrCodeRefusedStream, + onWrite: func() {}, + }) + s.cancel() + return false + } + if streamID%2 != 1 || streamID <= t.maxStreamID { + t.mu.Unlock() + // illegal gRPC stream id. 
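+ // Client-initiated HTTP/2 streams must use odd, strictly increasing
+ // IDs; a violation means the peer is broken, so returning true below
+ // makes HandleStreams tear down the whole connection rather than just
+ // this stream.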
+ errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID) + s.cancel() + return true + } + t.maxStreamID = streamID + t.activeStreams[streamID] = s + if len(t.activeStreams) == 1 { + t.idle = time.Time{} + } + t.mu.Unlock() + if channelz.IsOn() { + atomic.AddInt64(&t.czData.streamsStarted, 1) + atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano()) + } + s.requestRead = func(n int) { + t.adjustWindow(s, uint32(n)) + } + s.ctx = traceCtx(s.ctx, s.method) + if t.stats != nil { + s.ctx = t.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) + inHeader := &stats.InHeader{ + FullMethod: s.method, + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + Compression: s.recvCompress, + WireLength: int(frame.Header().Length), + Header: metadata.MD(state.data.mdata).Copy(), + } + t.stats.HandleRPC(s.ctx, inHeader) + } + s.ctxDone = s.ctx.Done() + s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone) + s.trReader = &transportReader{ + reader: &recvBufferReader{ + ctx: s.ctx, + ctxDone: s.ctxDone, + recv: s.buf, + freeBuffer: t.bufferPool.put, + }, + windowHandler: func(n int) { + t.updateWindow(s, uint32(n)) + }, + } + // Register the stream with loopy. + t.controlBuf.put(®isterStream{ + streamID: s.id, + wq: s.wq, + }) + handle(s) + return false +} + +// HandleStreams receives incoming streams using the given handler. This is +// typically run in a separate goroutine. +// traceCtx attaches trace to ctx and returns the new context. +func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.Context, string) context.Context) { + defer close(t.readerDone) + for { + t.controlBuf.throttle() + frame, err := t.framer.fr.ReadFrame() + atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) + if err != nil { + if se, ok := err.(http2.StreamError); ok { + warningf("transport: http2Server.HandleStreams encountered http2.StreamError: %v", se) + t.mu.Lock() + s := t.activeStreams[se.StreamID] + t.mu.Unlock() + if s != nil { + t.closeStream(s, true, se.Code, false) + } else { + t.controlBuf.put(&cleanupStream{ + streamID: se.StreamID, + rst: true, + rstCode: se.Code, + onWrite: func() {}, + }) + } + continue + } + if err == io.EOF || err == io.ErrUnexpectedEOF { + t.Close() + return + } + warningf("transport: http2Server.HandleStreams failed to read frame: %v", err) + t.Close() + return + } + switch frame := frame.(type) { + case *http2.MetaHeadersFrame: + if t.operateHeaders(frame, handle, traceCtx) { + t.Close() + break + } + case *http2.DataFrame: + t.handleData(frame) + case *http2.RSTStreamFrame: + t.handleRSTStream(frame) + case *http2.SettingsFrame: + t.handleSettings(frame) + case *http2.PingFrame: + t.handlePing(frame) + case *http2.WindowUpdateFrame: + t.handleWindowUpdate(frame) + case *http2.GoAwayFrame: + // TODO: Handle GoAway from the client appropriately. + default: + errorf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame) + } + } +} + +func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) { + t.mu.Lock() + defer t.mu.Unlock() + if t.activeStreams == nil { + // The transport is closing. + return nil, false + } + s, ok := t.activeStreams[f.Header().StreamID] + if !ok { + // The stream is already done. + return nil, false + } + return s, true +} + +// adjustWindow sends out extra window update over the initial window size +// of stream if the application is requesting data larger in size than +// the window. 
+func (t *http2Server) adjustWindow(s *Stream, n uint32) { + if w := s.fc.maybeAdjust(n); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) + } + +} + +// updateWindow adjusts the inbound quota for the stream and the transport. +// Window updates will deliver to the controller for sending when +// the cumulative quota exceeds the corresponding threshold. +func (t *http2Server) updateWindow(s *Stream, n uint32) { + if w := s.fc.onRead(n); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, + increment: w, + }) + } +} + +// updateFlowControl updates the incoming flow control windows +// for the transport and the stream based on the current bdp +// estimation. +func (t *http2Server) updateFlowControl(n uint32) { + t.mu.Lock() + for _, s := range t.activeStreams { + s.fc.newLimit(n) + } + t.initialWindowSize = int32(n) + t.mu.Unlock() + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: t.fc.newLimit(n), + }) + t.controlBuf.put(&outgoingSettings{ + ss: []http2.Setting{ + { + ID: http2.SettingInitialWindowSize, + Val: n, + }, + }, + }) + +} + +func (t *http2Server) handleData(f *http2.DataFrame) { + size := f.Header().Length + var sendBDPPing bool + if t.bdpEst != nil { + sendBDPPing = t.bdpEst.add(size) + } + // Decouple connection's flow control from application's read. + // An update on connection's flow control should not depend on + // whether user application has read the data or not. Such a + // restriction is already imposed on the stream's flow control, + // and therefore the sender will be blocked anyways. + // Decoupling the connection flow control will prevent other + // active(fast) streams from starving in presence of slow or + // inactive streams. + if w := t.fc.onData(size); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: w, + }) + } + if sendBDPPing { + // Avoid excessive ping detection (e.g. in an L7 proxy) + // by sending a window update prior to the BDP ping. + if w := t.fc.reset(); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: w, + }) + } + t.controlBuf.put(bdpPing) + } + // Select the right stream to dispatch. + s, ok := t.getStream(f) + if !ok { + return + } + if size > 0 { + if err := s.fc.onData(size); err != nil { + t.closeStream(s, true, http2.ErrCodeFlowControl, false) + return + } + if f.Header().Flags.Has(http2.FlagDataPadded) { + if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{s.id, w}) + } + } + // TODO(bradfitz, zhaoq): A copy is required here because there is no + // guarantee f.Data() is consumed before the arrival of next frame. + // Can this copy be eliminated? + if len(f.Data()) > 0 { + buffer := t.bufferPool.get() + buffer.Reset() + buffer.Write(f.Data()) + s.write(recvMsg{buffer: buffer}) + } + } + if f.Header().Flags.Has(http2.FlagDataEndStream) { + // Received the end of stream from the client. + s.compareAndSwapState(streamActive, streamReadDone) + s.write(recvMsg{err: io.EOF}) + } +} + +func (t *http2Server) handleRSTStream(f *http2.RSTStreamFrame) { + // If the stream is not deleted from the transport's active streams map, then do a regular close stream. + if s, ok := t.getStream(f); ok { + t.closeStream(s, false, 0, false) + return + } + // If the stream is already deleted from the active streams map, then put a cleanupStream item into controlbuf to delete the stream from loopy writer's established streams map. 
+ t.controlBuf.put(&cleanupStream{
+ streamID: f.Header().StreamID,
+ rst: false,
+ rstCode: 0,
+ onWrite: func() {},
+ })
+}
+
+func (t *http2Server) handleSettings(f *http2.SettingsFrame) {
+ if f.IsAck() {
+ return
+ }
+ var ss []http2.Setting
+ var updateFuncs []func()
+ f.ForeachSetting(func(s http2.Setting) error {
+ switch s.ID {
+ case http2.SettingMaxHeaderListSize:
+ updateFuncs = append(updateFuncs, func() {
+ t.maxSendHeaderListSize = new(uint32)
+ *t.maxSendHeaderListSize = s.Val
+ })
+ default:
+ ss = append(ss, s)
+ }
+ return nil
+ })
+ t.controlBuf.executeAndPut(func(interface{}) bool {
+ for _, f := range updateFuncs {
+ f()
+ }
+ return true
+ }, &incomingSettings{
+ ss: ss,
+ })
+}
+
+const (
+ maxPingStrikes = 2
+ defaultPingTimeout = 2 * time.Hour
+)
+
+func (t *http2Server) handlePing(f *http2.PingFrame) {
+ if f.IsAck() {
+ if f.Data == goAwayPing.data && t.drainChan != nil {
+ close(t.drainChan)
+ return
+ }
+ // Maybe it's a BDP ping.
+ if t.bdpEst != nil {
+ t.bdpEst.calculate(f.Data)
+ }
+ return
+ }
+ pingAck := &ping{ack: true}
+ copy(pingAck.data[:], f.Data[:])
+ t.controlBuf.put(pingAck)
+
+ now := time.Now()
+ defer func() {
+ t.lastPingAt = now
+ }()
+ // If resetPingStrikes was set, we don't need to check for a policy
+ // violation for this ping; the pingStrikes counter is simply reset
+ // to 0.
+ if atomic.CompareAndSwapUint32(&t.resetPingStrikes, 1, 0) {
+ t.pingStrikes = 0
+ return
+ }
+ t.mu.Lock()
+ ns := len(t.activeStreams)
+ t.mu.Unlock()
+ if ns < 1 && !t.kep.PermitWithoutStream {
+ // Keepalive shouldn't be active; thus, this new ping should
+ // have come after at least defaultPingTimeout.
+ if t.lastPingAt.Add(defaultPingTimeout).After(now) {
+ t.pingStrikes++
+ }
+ } else {
+ // Check if keepalive policy is respected.
+ if t.lastPingAt.Add(t.kep.MinTime).After(now) {
+ t.pingStrikes++
+ }
+ }
+
+ if t.pingStrikes > maxPingStrikes {
+ // Send goaway and close the connection.
+ errorf("transport: Got too many pings from the client, closing the connection.")
+ t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: true})
+ }
+}
+
+func (t *http2Server) handleWindowUpdate(f *http2.WindowUpdateFrame) {
+ t.controlBuf.put(&incomingWindowUpdate{
+ streamID: f.Header().StreamID,
+ increment: f.Increment,
+ })
+}
+
+func appendHeaderFieldsFromMD(headerFields []hpack.HeaderField, md metadata.MD) []hpack.HeaderField {
+ for k, vv := range md {
+ if isReservedHeader(k) {
+ // Clients don't tolerate reading restricted headers after some non-restricted ones were sent.
+ continue
+ }
+ for _, v := range vv {
+ headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
+ }
+ }
+ return headerFields
+}
+
+func (t *http2Server) checkForHeaderListSize(it interface{}) bool {
+ if t.maxSendHeaderListSize == nil {
+ return true
+ }
+ hdrFrame := it.(*headerFrame)
+ var sz int64
+ for _, f := range hdrFrame.hf {
+ if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) {
+ errorf("header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize)
+ return false
+ }
+ }
+ return true
+}
+
+// WriteHeader sends the header metadata md back to the client.
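+// It returns ErrIllegalHeaderWrite if headers were already sent or the stream
+// is already done; otherwise md is merged into any header metadata staged on
+// the stream via metadata.Join.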
+func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
+ if s.updateHeaderSent() || s.getState() == streamDone {
+ return ErrIllegalHeaderWrite
+ }
+ s.hdrMu.Lock()
+ if md.Len() > 0 {
+ if s.header.Len() > 0 {
+ s.header = metadata.Join(s.header, md)
+ } else {
+ s.header = md
+ }
+ }
+ if err := t.writeHeaderLocked(s); err != nil {
+ s.hdrMu.Unlock()
+ return err
+ }
+ s.hdrMu.Unlock()
+ return nil
+}
+
+func (t *http2Server) setResetPingStrikes() {
+ atomic.StoreUint32(&t.resetPingStrikes, 1)
+}
+
+func (t *http2Server) writeHeaderLocked(s *Stream) error {
+ // TODO(mmukhi): Benchmark if the performance gets better if we count the metadata and other header fields
+ // first and create a slice of that exact size.
+ headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else.
+ headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"})
+ headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(s.contentSubtype)})
+ if s.sendCompress != "" {
+ headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress})
+ }
+ headerFields = appendHeaderFieldsFromMD(headerFields, s.header)
+ success, err := t.controlBuf.executeAndPut(t.checkForHeaderListSize, &headerFrame{
+ streamID: s.id,
+ hf: headerFields,
+ endStream: false,
+ onWrite: t.setResetPingStrikes,
+ })
+ if !success {
+ if err != nil {
+ return err
+ }
+ t.closeStream(s, true, http2.ErrCodeInternal, false)
+ return ErrHeaderListSizeLimitViolation
+ }
+ if t.stats != nil {
+ // Note: Headers are compressed with hpack after this call returns.
+ // No WireLength field is set here.
+ outHeader := &stats.OutHeader{
+ Header: s.header.Copy(),
+ Compression: s.sendCompress,
+ }
+ t.stats.HandleRPC(s.Context(), outHeader)
+ }
+ return nil
+}
+
+// WriteStatus sends stream status to the client and terminates the stream.
+// No further I/O operations can be performed on this stream.
+// TODO(zhaoq): Now it indicates the end of the entire stream. Revisit if early
+// OK is adopted.
+func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
+ if s.getState() == streamDone {
+ return nil
+ }
+ s.hdrMu.Lock()
+ // TODO(mmukhi): Benchmark if the performance gets better if we count the metadata and other header fields
+ // first and create a slice of that exact size.
+ headerFields := make([]hpack.HeaderField, 0, 2) // grpc-status and grpc-message will be there if none else.
+ if !s.updateHeaderSent() { // No headers have been sent.
+ if len(s.header) > 0 { // Send a separate header frame.
+ if err := t.writeHeaderLocked(s); err != nil {
+ s.hdrMu.Unlock()
+ return err
+ }
+ } else { // Send a trailer only response.
+ headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"})
+ headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(s.contentSubtype)})
+ }
+ }
+ headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status", Value: strconv.Itoa(int(st.Code()))})
+ headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())})
+
+ if p := st.Proto(); p != nil && len(p.Details) > 0 {
+ stBytes, err := proto.Marshal(p)
+ if err != nil {
+ // TODO: return error instead, when callers are able to handle it.
+ grpclog.Errorf("transport: failed to marshal rpc status: %v, error: %v", p, err)
+ } else {
+ headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)})
+ }
+ }
+
+ // Attach the trailer metadata.
+ headerFields = appendHeaderFieldsFromMD(headerFields, s.trailer)
+ trailingHeader := &headerFrame{
+ streamID: s.id,
+ hf: headerFields,
+ endStream: true,
+ onWrite: t.setResetPingStrikes,
+ }
+ s.hdrMu.Unlock()
+ success, err := t.controlBuf.execute(t.checkForHeaderListSize, trailingHeader)
+ if !success {
+ if err != nil {
+ return err
+ }
+ t.closeStream(s, true, http2.ErrCodeInternal, false)
+ return ErrHeaderListSizeLimitViolation
+ }
+ // Send a RST_STREAM after the trailers if the client has not already half-closed.
+ rst := s.getState() == streamActive
+ t.finishStream(s, rst, http2.ErrCodeNo, trailingHeader, true)
+ if t.stats != nil {
+ // Note: The trailer fields are compressed with hpack after this call returns.
+ // No WireLength field is set here.
+ t.stats.HandleRPC(s.Context(), &stats.OutTrailer{
+ Trailer: s.trailer.Copy(),
+ })
+ }
+ return nil
+}
+
+// Write converts the data into HTTP2 data frame(s) and sends it out. A non-nil
+// error is returned if it fails (e.g., framing error, transport error).
+func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
+ if !s.isHeaderSent() { // Headers haven't been written yet.
+ if err := t.WriteHeader(s, nil); err != nil {
+ if _, ok := err.(ConnectionError); ok {
+ return err
+ }
+ // TODO(mmukhi, dfawley): Make sure this is the right code to return.
+ return status.Errorf(codes.Internal, "transport: %v", err)
+ }
+ } else {
+ // Writing headers checks for this condition.
+ if s.getState() == streamDone {
+ // TODO(mmukhi, dfawley): Should the server write also return io.EOF?
+ s.cancel()
+ select {
+ case <-t.done:
+ return ErrConnClosing
+ default:
+ }
+ return ContextErr(s.ctx.Err())
+ }
+ }
+ // Add some data to header frame so that we can equally distribute bytes across frames.
+ emptyLen := http2MaxFrameLen - len(hdr)
+ if emptyLen > len(data) {
+ emptyLen = len(data)
+ }
+ hdr = append(hdr, data[:emptyLen]...)
+ data = data[emptyLen:]
+ df := &dataFrame{
+ streamID: s.id,
+ h: hdr,
+ d: data,
+ onEachWrite: t.setResetPingStrikes,
+ }
+ if err := s.wq.get(int32(len(hdr) + len(data))); err != nil {
+ select {
+ case <-t.done:
+ return ErrConnClosing
+ default:
+ }
+ return ContextErr(s.ctx.Err())
+ }
+ return t.controlBuf.put(df)
+}
+
+// keepalive, running in a separate goroutine, does the following:
+// 1. Gracefully closes an idle connection after a duration of keepalive.MaxConnectionIdle.
+// 2. Gracefully closes any connection after a duration of keepalive.MaxConnectionAge.
+// 3. Forcibly closes a connection after an additive period of keepalive.MaxConnectionAgeGrace over keepalive.MaxConnectionAge.
+// 4. Makes sure a connection is alive by sending pings with a frequency of keepalive.Time and closes a non-responsive connection
+// after an additional duration of keepalive.Timeout.
+func (t *http2Server) keepalive() {
+ p := &ping{}
+ // True iff a ping has been sent, and no data has been received since then.
+ outstandingPing := false
+ // Amount of time remaining before which we should receive an ACK for the
+ // last sent ping.
+ kpTimeoutLeft := time.Duration(0)
+ // Records the last value of t.lastRead before we block on the timer.
+ // This is required to check for read activity since then.
+ prevNano := time.Now().UnixNano() + // Initialize the different timers to their default values. + idleTimer := time.NewTimer(t.kp.MaxConnectionIdle) + ageTimer := time.NewTimer(t.kp.MaxConnectionAge) + kpTimer := time.NewTimer(t.kp.Time) + defer func() { + // We need to drain the underlying channel in these timers after a call + // to Stop(), only if we are interested in resetting them. Clearly we + // are not interested in resetting them here. + idleTimer.Stop() + ageTimer.Stop() + kpTimer.Stop() + }() + + for { + select { + case <-idleTimer.C: + t.mu.Lock() + idle := t.idle + if idle.IsZero() { // The connection is non-idle. + t.mu.Unlock() + idleTimer.Reset(t.kp.MaxConnectionIdle) + continue + } + val := t.kp.MaxConnectionIdle - time.Since(idle) + t.mu.Unlock() + if val <= 0 { + // The connection has been idle for a duration of keepalive.MaxConnectionIdle or more. + // Gracefully close the connection. + t.drain(http2.ErrCodeNo, []byte{}) + return + } + idleTimer.Reset(val) + case <-ageTimer.C: + t.drain(http2.ErrCodeNo, []byte{}) + ageTimer.Reset(t.kp.MaxConnectionAgeGrace) + select { + case <-ageTimer.C: + // Close the connection after grace period. + infof("transport: closing server transport due to maximum connection age.") + t.Close() + case <-t.done: + } + return + case <-kpTimer.C: + lastRead := atomic.LoadInt64(&t.lastRead) + if lastRead > prevNano { + // There has been read activity since the last time we were + // here. Setup the timer to fire at kp.Time seconds from + // lastRead time and continue. + outstandingPing = false + kpTimer.Reset(time.Duration(lastRead) + t.kp.Time - time.Duration(time.Now().UnixNano())) + prevNano = lastRead + continue + } + if outstandingPing && kpTimeoutLeft <= 0 { + infof("transport: closing server transport due to idleness.") + t.Close() + return + } + if !outstandingPing { + if channelz.IsOn() { + atomic.AddInt64(&t.czData.kpCount, 1) + } + t.controlBuf.put(p) + kpTimeoutLeft = t.kp.Timeout + outstandingPing = true + } + // The amount of time to sleep here is the minimum of kp.Time and + // timeoutLeft. This will ensure that we wait only for kp.Time + // before sending out the next ping (for cases where the ping is + // acked). + sleepDuration := minTime(t.kp.Time, kpTimeoutLeft) + kpTimeoutLeft -= sleepDuration + kpTimer.Reset(sleepDuration) + case <-t.done: + return + } + } +} + +// Close starts shutting down the http2Server transport. +// TODO(zhaoq): Now the destruction is not blocked on any pending streams. This +// could cause some resource issue. Revisit this later. +func (t *http2Server) Close() error { + t.mu.Lock() + if t.state == closing { + t.mu.Unlock() + return errors.New("transport: Close() was already called") + } + t.state = closing + streams := t.activeStreams + t.activeStreams = nil + t.mu.Unlock() + t.controlBuf.finish() + close(t.done) + err := t.conn.Close() + if channelz.IsOn() { + channelz.RemoveEntry(t.channelzID) + } + // Cancel all active streams. + for _, s := range streams { + s.cancel() + } + if t.stats != nil { + connEnd := &stats.ConnEnd{} + t.stats.HandleConn(t.ctx, connEnd) + } + return err +} + +// deleteStream deletes the stream s from transport's active streams. +func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { + // In case stream sending and receiving are invoked in separate + // goroutines (e.g., bi-directional streaming), cancel needs to be + // called to interrupt the potential blocking on other goroutines. 
+ s.cancel()
+
+ t.mu.Lock()
+ if _, ok := t.activeStreams[s.id]; ok {
+ delete(t.activeStreams, s.id)
+ if len(t.activeStreams) == 0 {
+ t.idle = time.Now()
+ }
+ }
+ t.mu.Unlock()
+
+ if channelz.IsOn() {
+ if eosReceived {
+ atomic.AddInt64(&t.czData.streamsSucceeded, 1)
+ } else {
+ atomic.AddInt64(&t.czData.streamsFailed, 1)
+ }
+ }
+}
+
+// finishStream closes the stream and puts the trailing headerFrame into controlbuf.
+func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) {
+ oldState := s.swapState(streamDone)
+ if oldState == streamDone {
+ // If the stream was already done, return.
+ return
+ }
+
+ hdr.cleanup = &cleanupStream{
+ streamID: s.id,
+ rst: rst,
+ rstCode: rstCode,
+ onWrite: func() {
+ t.deleteStream(s, eosReceived)
+ },
+ }
+ t.controlBuf.put(hdr)
+}
+
+// closeStream clears the footprint of a stream when the stream is not needed any more.
+func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eosReceived bool) {
+ s.swapState(streamDone)
+ t.deleteStream(s, eosReceived)
+
+ t.controlBuf.put(&cleanupStream{
+ streamID: s.id,
+ rst: rst,
+ rstCode: rstCode,
+ onWrite: func() {},
+ })
+}
+
+func (t *http2Server) RemoteAddr() net.Addr {
+ return t.remoteAddr
+}
+
+func (t *http2Server) Drain() {
+ t.drain(http2.ErrCodeNo, []byte{})
+}
+
+func (t *http2Server) drain(code http2.ErrCode, debugData []byte) {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ if t.drainChan != nil {
+ return
+ }
+ t.drainChan = make(chan struct{})
+ t.controlBuf.put(&goAway{code: code, debugData: debugData, headsUp: true})
+}
+
+var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}}
+
+// Handles outgoing GoAway and returns true if loopy needs to put itself
+// in draining mode.
+func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) {
+ t.mu.Lock()
+ if t.state == closing { // TODO(mmukhi): This seems unnecessary.
+ t.mu.Unlock()
+ // The transport is closing.
+ return false, ErrConnClosing
+ }
+ sid := t.maxStreamID
+ if !g.headsUp {
+ // Stop accepting more streams now.
+ t.state = draining
+ if len(t.activeStreams) == 0 {
+ g.closeConn = true
+ }
+ t.mu.Unlock()
+ if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil {
+ return false, err
+ }
+ if g.closeConn {
+ // Abruptly close the connection following the GoAway (via
+ // loopywriter). But flush out what's inside the buffer first.
+ t.framer.writer.Flush()
+ return false, fmt.Errorf("transport: Connection closing")
+ }
+ return true, nil
+ }
+ t.mu.Unlock()
+ // For a graceful close, send out a GoAway with a stream ID of MaxUint32.
+ // Follow that with a ping and wait for the ack to come back or a timer
+ // to expire. During this time, accept new streams since they might have
+ // originated before the GoAway reaches the client.
+ // After getting the ack or timer expiration, send out another GoAway,
+ // this time with the ID of the max stream the server intends to process.
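+ // Concretely: the GoAway with last-stream-id MaxUint32 goes out below,
+ // immediately followed by goAwayPing; handlePing closes t.drainChan on
+ // the matching ack, and that (or a one-minute timer) triggers the
+ // second, binding GoAway carrying t.maxStreamID.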
+ if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, []byte{}); err != nil { + return false, err + } + if err := t.framer.fr.WritePing(false, goAwayPing.data); err != nil { + return false, err + } + go func() { + timer := time.NewTimer(time.Minute) + defer timer.Stop() + select { + case <-t.drainChan: + case <-timer.C: + case <-t.done: + return + } + t.controlBuf.put(&goAway{code: g.code, debugData: g.debugData}) + }() + return false, nil +} + +func (t *http2Server) ChannelzMetric() *channelz.SocketInternalMetric { + s := channelz.SocketInternalMetric{ + StreamsStarted: atomic.LoadInt64(&t.czData.streamsStarted), + StreamsSucceeded: atomic.LoadInt64(&t.czData.streamsSucceeded), + StreamsFailed: atomic.LoadInt64(&t.czData.streamsFailed), + MessagesSent: atomic.LoadInt64(&t.czData.msgSent), + MessagesReceived: atomic.LoadInt64(&t.czData.msgRecv), + KeepAlivesSent: atomic.LoadInt64(&t.czData.kpCount), + LastRemoteStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)), + LastMessageSentTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)), + LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)), + LocalFlowControlWindow: int64(t.fc.getSize()), + SocketOptions: channelz.GetSocketOption(t.conn), + LocalAddr: t.localAddr, + RemoteAddr: t.remoteAddr, + // RemoteName : + } + if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok { + s.Security = au.GetSecurityValue() + } + s.RemoteFlowControlWindow = t.getOutFlowWindow() + return &s +} + +func (t *http2Server) IncrMsgSent() { + atomic.AddInt64(&t.czData.msgSent, 1) + atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano()) +} + +func (t *http2Server) IncrMsgRecv() { + atomic.AddInt64(&t.czData.msgRecv, 1) + atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano()) +} + +func (t *http2Server) getOutFlowWindow() int64 { + resp := make(chan uint32, 1) + timer := time.NewTimer(time.Second) + defer timer.Stop() + t.controlBuf.put(&outFlowControlSizeRequest{resp}) + select { + case sz := <-resp: + return int64(sz) + case <-t.done: + return -1 + case <-timer.C: + return -2 + } +} + +func getJitter(v time.Duration) time.Duration { + if v == infinity { + return 0 + } + // Generate a jitter between +/- 10% of the value. + r := int64(v / 10) + j := grpcrand.Int63n(2*r) - r + return time.Duration(j) +} diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go new file mode 100644 index 00000000..8f5f3349 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -0,0 +1,677 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ *
+ */
+
+package transport
+
+import (
+ "bufio"
+ "bytes"
+ "encoding/base64"
+ "fmt"
+ "io"
+ "math"
+ "net"
+ "net/http"
+ "strconv"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/http2"
+ "golang.org/x/net/http2/hpack"
+ spb "google.golang.org/genproto/googleapis/rpc/status"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+)
+
+const (
+ // http2MaxFrameLen specifies the max length of an HTTP2 frame.
+ http2MaxFrameLen = 16384 // 16KB frame
+ // http://http2.github.io/http2-spec/#SettingValues
+ http2InitHeaderTableSize = 4096
+ // baseContentType is the base content-type for gRPC. This is a valid
+ // content-type on its own, but can also include a content-subtype such as
+ // "proto" as a suffix after "+" or ";". See
+ // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests
+ // for more details.
+ baseContentType = "application/grpc"
+)
+
+var (
+ clientPreface = []byte(http2.ClientPreface)
+ http2ErrConvTab = map[http2.ErrCode]codes.Code{
+ http2.ErrCodeNo: codes.Internal,
+ http2.ErrCodeProtocol: codes.Internal,
+ http2.ErrCodeInternal: codes.Internal,
+ http2.ErrCodeFlowControl: codes.ResourceExhausted,
+ http2.ErrCodeSettingsTimeout: codes.Internal,
+ http2.ErrCodeStreamClosed: codes.Internal,
+ http2.ErrCodeFrameSize: codes.Internal,
+ http2.ErrCodeRefusedStream: codes.Unavailable,
+ http2.ErrCodeCancel: codes.Canceled,
+ http2.ErrCodeCompression: codes.Internal,
+ http2.ErrCodeConnect: codes.Internal,
+ http2.ErrCodeEnhanceYourCalm: codes.ResourceExhausted,
+ http2.ErrCodeInadequateSecurity: codes.PermissionDenied,
+ http2.ErrCodeHTTP11Required: codes.Internal,
+ }
+ statusCodeConvTab = map[codes.Code]http2.ErrCode{
+ codes.Internal: http2.ErrCodeInternal,
+ codes.Canceled: http2.ErrCodeCancel,
+ codes.Unavailable: http2.ErrCodeRefusedStream,
+ codes.ResourceExhausted: http2.ErrCodeEnhanceYourCalm,
+ codes.PermissionDenied: http2.ErrCodeInadequateSecurity,
+ }
+ // HTTPStatusConvTab is the HTTP status code to gRPC error code conversion table.
+ HTTPStatusConvTab = map[int]codes.Code{
+ // 400 Bad Request - INTERNAL.
+ http.StatusBadRequest: codes.Internal,
+ // 401 Unauthorized - UNAUTHENTICATED.
+ http.StatusUnauthorized: codes.Unauthenticated,
+ // 403 Forbidden - PERMISSION_DENIED.
+ http.StatusForbidden: codes.PermissionDenied,
+ // 404 Not Found - UNIMPLEMENTED.
+ http.StatusNotFound: codes.Unimplemented,
+ // 429 Too Many Requests - UNAVAILABLE.
+ http.StatusTooManyRequests: codes.Unavailable,
+ // 502 Bad Gateway - UNAVAILABLE.
+ http.StatusBadGateway: codes.Unavailable,
+ // 503 Service Unavailable - UNAVAILABLE.
+ http.StatusServiceUnavailable: codes.Unavailable,
+ // 504 Gateway Timeout - UNAVAILABLE.
+ http.StatusGatewayTimeout: codes.Unavailable,
+ }
+)
+
+type parsedHeaderData struct {
+ encoding string
+ // statusGen caches the stream status received from the trailer the server
+ // sent. Client side only. Do not access directly. After all trailers are
+ // parsed, use the status method to retrieve the status.
+ statusGen *status.Status
+ // rawStatusCode and rawStatusMsg are set from the raw trailer fields and are not
+ // intended for direct access outside of parsing.
+ rawStatusCode *int
+ rawStatusMsg string
+ httpStatus *int
+ // Server side only fields.
+ timeoutSet bool
+ timeout time.Duration
+ method string
+ // key-value metadata map from the peer.
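+ // Keys are lowercase on the wire (HTTP/2 requires lowercase field names),
+ // and values of "-bin" keys are stored here already base64-decoded.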
+ mdata map[string][]string
+ statsTags []byte
+ statsTrace []byte
+ contentSubtype string
+
+ // isGRPC field indicates whether the peer is speaking gRPC (otherwise HTTP).
+ //
+ // We are in gRPC mode (peer speaking gRPC) if:
+ // * We are client side and have already received a HEADER frame that indicates a gRPC peer.
+ // * The header contains a valid content-type, i.e. a string that starts with "application/grpc",
+ // in which case we should handle errors specific to gRPC.
+ //
+ // Otherwise (i.e. the content-type string does not start with "application/grpc", or is absent),
+ // we are in HTTP fallback mode, and should handle errors specific to HTTP.
+ isGRPC bool
+ grpcErr error
+ httpErr error
+ contentTypeErr string
+}
+
+// decodeState configures decoding criteria and records the decoded data.
+type decodeState struct {
+ // whether decoding on server side or not
+ serverSide bool
+
+ // Records the state of HPACK decoding. It is filled with the info parsed from the HTTP HEADERS
+ // frame once the decodeHeader function has been invoked and has returned.
+ data parsedHeaderData
+}
+
+// isReservedHeader checks whether hdr belongs to the HTTP2 headers
+// reserved by the gRPC protocol. Any other headers are classified as
+// user-specified metadata.
+func isReservedHeader(hdr string) bool {
+ if hdr != "" && hdr[0] == ':' {
+ return true
+ }
+ switch hdr {
+ case "content-type",
+ "user-agent",
+ "grpc-message-type",
+ "grpc-encoding",
+ "grpc-message",
+ "grpc-status",
+ "grpc-timeout",
+ "grpc-status-details-bin",
+ // Intentionally exclude grpc-previous-rpc-attempts and
+ // grpc-retry-pushback-ms, which are "reserved", but their API
+ // intentionally works via metadata.
+ "te":
+ return true
+ default:
+ return false
+ }
+}
+
+// isWhitelistedHeader checks whether hdr should be propagated into metadata
+// visible to users, even though it is classified as "reserved", above.
+func isWhitelistedHeader(hdr string) bool {
+ switch hdr {
+ case ":authority", "user-agent":
+ return true
+ default:
+ return false
+ }
+}
+
+// contentSubtype returns the content-subtype for the given content-type. The
+// given content-type must be a valid content-type that starts with
+// "application/grpc". A content-subtype will follow "application/grpc" after a
+// "+" or ";". See
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
+// more details.
+//
+// If contentType is not a valid content-type for gRPC, the boolean
+// will be false, otherwise true. If content-type == "application/grpc",
+// "application/grpc+", or "application/grpc;", the boolean will be true,
+// but no content-subtype will be returned.
+//
+// contentType is assumed to be lowercase already.
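+//
+// Examples:
+//   "application/grpc"        -> ("", true)
+//   "application/grpc+proto"  -> ("proto", true)
+//   "application/grpc;json"   -> ("json", true)
+//   "application/grpc-x"      -> ("", false)
+//   "text/html"               -> ("", false)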
+func contentSubtype(contentType string) (string, bool) { + if contentType == baseContentType { + return "", true + } + if !strings.HasPrefix(contentType, baseContentType) { + return "", false + } + // guaranteed since != baseContentType and has baseContentType prefix + switch contentType[len(baseContentType)] { + case '+', ';': + // this will return true for "application/grpc+" or "application/grpc;" + // which the previous validContentType function tested to be valid, so we + // just say that no content-subtype is specified in this case + return contentType[len(baseContentType)+1:], true + default: + return "", false + } +} + +// contentSubtype is assumed to be lowercase +func contentType(contentSubtype string) string { + if contentSubtype == "" { + return baseContentType + } + return baseContentType + "+" + contentSubtype +} + +func (d *decodeState) status() *status.Status { + if d.data.statusGen == nil { + // No status-details were provided; generate status using code/msg. + d.data.statusGen = status.New(codes.Code(int32(*(d.data.rawStatusCode))), d.data.rawStatusMsg) + } + return d.data.statusGen +} + +const binHdrSuffix = "-bin" + +func encodeBinHeader(v []byte) string { + return base64.RawStdEncoding.EncodeToString(v) +} + +func decodeBinHeader(v string) ([]byte, error) { + if len(v)%4 == 0 { + // Input was padded, or padding was not necessary. + return base64.StdEncoding.DecodeString(v) + } + return base64.RawStdEncoding.DecodeString(v) +} + +func encodeMetadataHeader(k, v string) string { + if strings.HasSuffix(k, binHdrSuffix) { + return encodeBinHeader(([]byte)(v)) + } + return v +} + +func decodeMetadataHeader(k, v string) (string, error) { + if strings.HasSuffix(k, binHdrSuffix) { + b, err := decodeBinHeader(v) + return string(b), err + } + return v, nil +} + +func (d *decodeState) decodeHeader(frame *http2.MetaHeadersFrame) error { + // frame.Truncated is set to true when framer detects that the current header + // list size hits MaxHeaderListSize limit. + if frame.Truncated { + return status.Error(codes.Internal, "peer header list size exceeded limit") + } + + for _, hf := range frame.Fields { + d.processHeaderField(hf) + } + + if d.data.isGRPC { + if d.data.grpcErr != nil { + return d.data.grpcErr + } + if d.serverSide { + return nil + } + if d.data.rawStatusCode == nil && d.data.statusGen == nil { + // gRPC status doesn't exist. + // Set rawStatusCode to be unknown and return nil error. + // So that, if the stream has ended this Unknown status + // will be propagated to the user. + // Otherwise, it will be ignored. In which case, status from + // a later trailer, that has StreamEnded flag set, is propagated. + code := int(codes.Unknown) + d.data.rawStatusCode = &code + } + return nil + } + + // HTTP fallback mode + if d.data.httpErr != nil { + return d.data.httpErr + } + + var ( + code = codes.Internal // when header does not include HTTP status, return INTERNAL + ok bool + ) + + if d.data.httpStatus != nil { + code, ok = HTTPStatusConvTab[*(d.data.httpStatus)] + if !ok { + code = codes.Unknown + } + } + + return status.Error(code, d.constructHTTPErrMsg()) +} + +// constructErrMsg constructs error message to be returned in HTTP fallback mode. +// Format: HTTP status code and its corresponding message + content-type error message. 
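Editorial aside: the asymmetry in the binary-header helpers above (emit unpadded base64, accept padded or unpadded) is easy to check in isolation. A minimal, self-contained sketch; decodeBin is an illustrative stand-in for the unexported helper above:

package main

import (
    "encoding/base64"
    "fmt"
)

// decodeBin mirrors the padded-or-raw acceptance rule described above.
func decodeBin(v string) ([]byte, error) {
    if len(v)%4 == 0 {
        // Input was padded, or padding was not necessary.
        return base64.StdEncoding.DecodeString(v)
    }
    return base64.RawStdEncoding.DecodeString(v)
}

func main() {
    raw := []byte{0xde, 0xad, 0xbe, 0xef, 0x01}

    // Values for "-bin" metadata keys are sent without '=' padding.
    wire := base64.RawStdEncoding.EncodeToString(raw)
    fmt.Println("wire value:", wire)

    dec, err := decodeBin(wire)
    fmt.Printf("unpadded round-trip: % x, err=%v\n", dec, err)

    // A padded form of the same value is accepted as well.
    dec, err = decodeBin(base64.StdEncoding.EncodeToString(raw))
    fmt.Printf("padded round-trip:   % x, err=%v\n", dec, err)
}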
+func (d *decodeState) constructHTTPErrMsg() string { + var errMsgs []string + + if d.data.httpStatus == nil { + errMsgs = append(errMsgs, "malformed header: missing HTTP status") + } else { + errMsgs = append(errMsgs, fmt.Sprintf("%s: HTTP status code %d", http.StatusText(*(d.data.httpStatus)), *d.data.httpStatus)) + } + + if d.data.contentTypeErr == "" { + errMsgs = append(errMsgs, "transport: missing content-type field") + } else { + errMsgs = append(errMsgs, d.data.contentTypeErr) + } + + return strings.Join(errMsgs, "; ") +} + +func (d *decodeState) addMetadata(k, v string) { + if d.data.mdata == nil { + d.data.mdata = make(map[string][]string) + } + d.data.mdata[k] = append(d.data.mdata[k], v) +} + +func (d *decodeState) processHeaderField(f hpack.HeaderField) { + switch f.Name { + case "content-type": + contentSubtype, validContentType := contentSubtype(f.Value) + if !validContentType { + d.data.contentTypeErr = fmt.Sprintf("transport: received the unexpected content-type %q", f.Value) + return + } + d.data.contentSubtype = contentSubtype + // TODO: do we want to propagate the whole content-type in the metadata, + // or come up with a way to just propagate the content-subtype if it was set? + // ie {"content-type": "application/grpc+proto"} or {"content-subtype": "proto"} + // in the metadata? + d.addMetadata(f.Name, f.Value) + d.data.isGRPC = true + case "grpc-encoding": + d.data.encoding = f.Value + case "grpc-status": + code, err := strconv.Atoi(f.Value) + if err != nil { + d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status: %v", err) + return + } + d.data.rawStatusCode = &code + case "grpc-message": + d.data.rawStatusMsg = decodeGrpcMessage(f.Value) + case "grpc-status-details-bin": + v, err := decodeBinHeader(f.Value) + if err != nil { + d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err) + return + } + s := &spb.Status{} + if err := proto.Unmarshal(v, s); err != nil { + d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err) + return + } + d.data.statusGen = status.FromProto(s) + case "grpc-timeout": + d.data.timeoutSet = true + var err error + if d.data.timeout, err = decodeTimeout(f.Value); err != nil { + d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed time-out: %v", err) + } + case ":path": + d.data.method = f.Value + case ":status": + code, err := strconv.Atoi(f.Value) + if err != nil { + d.data.httpErr = status.Errorf(codes.Internal, "transport: malformed http-status: %v", err) + return + } + d.data.httpStatus = &code + case "grpc-tags-bin": + v, err := decodeBinHeader(f.Value) + if err != nil { + d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-tags-bin: %v", err) + return + } + d.data.statsTags = v + d.addMetadata(f.Name, string(v)) + case "grpc-trace-bin": + v, err := decodeBinHeader(f.Value) + if err != nil { + d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-trace-bin: %v", err) + return + } + d.data.statsTrace = v + d.addMetadata(f.Name, string(v)) + default: + if isReservedHeader(f.Name) && !isWhitelistedHeader(f.Name) { + break + } + v, err := decodeMetadataHeader(f.Name, f.Value) + if err != nil { + errorf("Failed to decode metadata header (%q, %q): %v", f.Name, f.Value, err) + return + } + d.addMetadata(f.Name, v) + } +} + +type timeoutUnit uint8 + +const ( + hour timeoutUnit = 'H' + minute timeoutUnit = 'M' + second timeoutUnit = 'S' + millisecond timeoutUnit = 
'm'
+ microsecond timeoutUnit = 'u'
+ nanosecond timeoutUnit = 'n'
+)
+
+func timeoutUnitToDuration(u timeoutUnit) (d time.Duration, ok bool) {
+ switch u {
+ case hour:
+ return time.Hour, true
+ case minute:
+ return time.Minute, true
+ case second:
+ return time.Second, true
+ case millisecond:
+ return time.Millisecond, true
+ case microsecond:
+ return time.Microsecond, true
+ case nanosecond:
+ return time.Nanosecond, true
+ default:
+ }
+ return
+}
+
+const maxTimeoutValue int64 = 100000000 - 1
+
+// div does integer division and rounds up the result. Note that this is
+// equivalent to (d+r-1)/r but has less chance to overflow.
+func div(d, r time.Duration) int64 {
+ if m := d % r; m > 0 {
+ return int64(d/r + 1)
+ }
+ return int64(d / r)
+}
+
+// TODO(zhaoq): This is simplistic and not bandwidth-efficient. Improve it.
+func encodeTimeout(t time.Duration) string {
+ if t <= 0 {
+ return "0n"
+ }
+ if d := div(t, time.Nanosecond); d <= maxTimeoutValue {
+ return strconv.FormatInt(d, 10) + "n"
+ }
+ if d := div(t, time.Microsecond); d <= maxTimeoutValue {
+ return strconv.FormatInt(d, 10) + "u"
+ }
+ if d := div(t, time.Millisecond); d <= maxTimeoutValue {
+ return strconv.FormatInt(d, 10) + "m"
+ }
+ if d := div(t, time.Second); d <= maxTimeoutValue {
+ return strconv.FormatInt(d, 10) + "S"
+ }
+ if d := div(t, time.Minute); d <= maxTimeoutValue {
+ return strconv.FormatInt(d, 10) + "M"
+ }
+ // Note that maxTimeoutValue * time.Hour > MaxInt64.
+ return strconv.FormatInt(div(t, time.Hour), 10) + "H"
+}
+
+func decodeTimeout(s string) (time.Duration, error) {
+ size := len(s)
+ if size < 2 {
+ return 0, fmt.Errorf("transport: timeout string is too short: %q", s)
+ }
+ if size > 9 {
+ // Spec allows for 8 digits plus the unit.
+ return 0, fmt.Errorf("transport: timeout string is too long: %q", s)
+ }
+ unit := timeoutUnit(s[size-1])
+ d, ok := timeoutUnitToDuration(unit)
+ if !ok {
+ return 0, fmt.Errorf("transport: timeout unit is not recognized: %q", s)
+ }
+ t, err := strconv.ParseInt(s[:size-1], 10, 64)
+ if err != nil {
+ return 0, err
+ }
+ const maxHours = math.MaxInt64 / int64(time.Hour)
+ if d == time.Hour && t > maxHours {
+ // This timeout would overflow math.MaxInt64; clamp it.
+ return time.Duration(math.MaxInt64), nil
+ }
+ return d * time.Duration(t), nil
+}
+
+const (
+ spaceByte = ' '
+ tildeByte = '~'
+ percentByte = '%'
+)
+
+// encodeGrpcMessage is used to encode the status code in the header field
+// "grpc-message". It does percent encoding and also replaces invalid utf-8
+// characters with the Unicode replacement character.
+//
+// It checks to see if each individual byte in msg is an allowable byte, and
+// then either percent-encodes it or passes it through. When percent encoding,
+// the byte is converted into hexadecimal notation with a '%' prepended.
+func encodeGrpcMessage(msg string) string {
+ if msg == "" {
+ return ""
+ }
+ lenMsg := len(msg)
+ for i := 0; i < lenMsg; i++ {
+ c := msg[i]
+ if !(c >= spaceByte && c <= tildeByte && c != percentByte) {
+ return encodeGrpcMessageUnchecked(msg)
+ }
+ }
+ return msg
+}
+
+func encodeGrpcMessageUnchecked(msg string) string {
+ var buf bytes.Buffer
+ for len(msg) > 0 {
+ r, size := utf8.DecodeRuneInString(msg)
+ for _, b := range []byte(string(r)) {
+ if size > 1 {
+ // If size > 1, r is not ASCII. Always do percent encoding.
+ buf.WriteString(fmt.Sprintf("%%%02X", b))
+ continue
+ }
+
+ // The for loop is necessary even if size == 1. r could be
+ // utf8.RuneError.
+ // + // fmt.Sprintf("%%%02X", utf8.RuneError) gives "%FFFD". + if b >= spaceByte && b <= tildeByte && b != percentByte { + buf.WriteByte(b) + } else { + buf.WriteString(fmt.Sprintf("%%%02X", b)) + } + } + msg = msg[size:] + } + return buf.String() +} + +// decodeGrpcMessage decodes the msg encoded by encodeGrpcMessage. +func decodeGrpcMessage(msg string) string { + if msg == "" { + return "" + } + lenMsg := len(msg) + for i := 0; i < lenMsg; i++ { + if msg[i] == percentByte && i+2 < lenMsg { + return decodeGrpcMessageUnchecked(msg) + } + } + return msg +} + +func decodeGrpcMessageUnchecked(msg string) string { + var buf bytes.Buffer + lenMsg := len(msg) + for i := 0; i < lenMsg; i++ { + c := msg[i] + if c == percentByte && i+2 < lenMsg { + parsed, err := strconv.ParseUint(msg[i+1:i+3], 16, 8) + if err != nil { + buf.WriteByte(c) + } else { + buf.WriteByte(byte(parsed)) + i += 2 + } + } else { + buf.WriteByte(c) + } + } + return buf.String() +} + +type bufWriter struct { + buf []byte + offset int + batchSize int + conn net.Conn + err error + + onFlush func() +} + +func newBufWriter(conn net.Conn, batchSize int) *bufWriter { + return &bufWriter{ + buf: make([]byte, batchSize*2), + batchSize: batchSize, + conn: conn, + } +} + +func (w *bufWriter) Write(b []byte) (n int, err error) { + if w.err != nil { + return 0, w.err + } + if w.batchSize == 0 { // Buffer has been disabled. + return w.conn.Write(b) + } + for len(b) > 0 { + nn := copy(w.buf[w.offset:], b) + b = b[nn:] + w.offset += nn + n += nn + if w.offset >= w.batchSize { + err = w.Flush() + } + } + return n, err +} + +func (w *bufWriter) Flush() error { + if w.err != nil { + return w.err + } + if w.offset == 0 { + return nil + } + if w.onFlush != nil { + w.onFlush() + } + _, w.err = w.conn.Write(w.buf[:w.offset]) + w.offset = 0 + return w.err +} + +type framer struct { + writer *bufWriter + fr *http2.Framer +} + +func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderListSize uint32) *framer { + if writeBufferSize < 0 { + writeBufferSize = 0 + } + var r io.Reader = conn + if readBufferSize > 0 { + r = bufio.NewReaderSize(r, readBufferSize) + } + w := newBufWriter(conn, writeBufferSize) + f := &framer{ + writer: w, + fr: http2.NewFramer(w, r), + } + f.fr.SetMaxReadFrameSize(http2MaxFrameLen) + // Opt-in to Frame reuse API on framer to reduce garbage. + // Frames aren't safe to read from after a subsequent call to ReadFrame. + f.fr.SetReuseFrames() + f.fr.MaxHeaderListSize = maxHeaderListSize + f.fr.ReadMetaHeaders = hpack.NewDecoder(http2InitHeaderTableSize, nil) + return f +} diff --git a/vendor/google.golang.org/grpc/internal/transport/log.go b/vendor/google.golang.org/grpc/internal/transport/log.go new file mode 100644 index 00000000..879df80c --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/log.go @@ -0,0 +1,44 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// This file contains wrappers for grpclog functions. 
+// The transport package only logs to verbose level 2 by default. + +package transport + +import "google.golang.org/grpc/grpclog" + +const logLevel = 2 + +func infof(format string, args ...interface{}) { + if grpclog.V(logLevel) { + grpclog.Infof(format, args...) + } +} + +func warningf(format string, args ...interface{}) { + if grpclog.V(logLevel) { + grpclog.Warningf(format, args...) + } +} + +func errorf(format string, args ...interface{}) { + if grpclog.V(logLevel) { + grpclog.Errorf(format, args...) + } +} diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go new file mode 100644 index 00000000..a30da9eb --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -0,0 +1,808 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package transport defines and implements message oriented communication +// channel to complete various transactions (e.g., an RPC). It is meant for +// grpc-internal usage and is not intended to be imported directly by users. +package transport + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "net" + "sync" + "sync/atomic" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" + "google.golang.org/grpc/tap" +) + +type bufferPool struct { + pool sync.Pool +} + +func newBufferPool() *bufferPool { + return &bufferPool{ + pool: sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, + }, + } +} + +func (p *bufferPool) get() *bytes.Buffer { + return p.pool.Get().(*bytes.Buffer) +} + +func (p *bufferPool) put(b *bytes.Buffer) { + p.pool.Put(b) +} + +// recvMsg represents the received msg from the transport. All transport +// protocol specific info has been removed. +type recvMsg struct { + buffer *bytes.Buffer + // nil: received some data + // io.EOF: stream is completed. data is nil. + // other non-nil error: transport failure. data is nil. + err error +} + +// recvBuffer is an unbounded channel of recvMsg structs. +// +// Note: recvBuffer differs from buffer.Unbounded only in the fact that it +// holds a channel of recvMsg structs instead of objects implementing "item" +// interface. recvBuffer is written to much more often and using strict recvMsg +// structs helps avoid allocation in "recvBuffer.put" +type recvBuffer struct { + c chan recvMsg + mu sync.Mutex + backlog []recvMsg + err error +} + +func newRecvBuffer() *recvBuffer { + b := &recvBuffer{ + c: make(chan recvMsg, 1), + } + return b +} + +func (b *recvBuffer) put(r recvMsg) { + b.mu.Lock() + if b.err != nil { + b.mu.Unlock() + // An error had occurred earlier, don't accept more + // data or errors. 
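+ // (The message r is silently dropped here; the first recorded error wins.)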
+ return
+ }
+ b.err = r.err
+ if len(b.backlog) == 0 {
+ select {
+ case b.c <- r:
+ b.mu.Unlock()
+ return
+ default:
+ }
+ }
+ b.backlog = append(b.backlog, r)
+ b.mu.Unlock()
+}
+
+func (b *recvBuffer) load() {
+ b.mu.Lock()
+ if len(b.backlog) > 0 {
+ select {
+ case b.c <- b.backlog[0]:
+ b.backlog[0] = recvMsg{}
+ b.backlog = b.backlog[1:]
+ default:
+ }
+ }
+ b.mu.Unlock()
+}
+
+// get returns the channel that receives a recvMsg in the buffer.
+//
+// Upon receipt of a recvMsg, the caller should call load to send another
+// recvMsg onto the channel if there is any.
+func (b *recvBuffer) get() <-chan recvMsg {
+ return b.c
+}
+
+// recvBufferReader implements the io.Reader interface to read the data from
+// recvBuffer.
+type recvBufferReader struct {
+ closeStream func(error) // Closes the client transport stream with the given error and nil trailer metadata.
+ ctx context.Context
+ ctxDone <-chan struct{} // cache of ctx.Done() (for performance).
+ recv *recvBuffer
+ last *bytes.Buffer // Stores the remaining data in the previous calls.
+ err error
+ freeBuffer func(*bytes.Buffer)
+}
+
+// Read reads the next len(p) bytes from last. If last is drained, it tries to
+// read additional data from recv. It blocks if there is no additional data
+// available in recv. If Read returns any non-nil error, it will continue to
+// return that error.
+func (r *recvBufferReader) Read(p []byte) (n int, err error) {
+ if r.err != nil {
+ return 0, r.err
+ }
+ if r.last != nil {
+ // Read remaining data left in last call.
+ copied, _ := r.last.Read(p)
+ if r.last.Len() == 0 {
+ r.freeBuffer(r.last)
+ r.last = nil
+ }
+ return copied, nil
+ }
+ if r.closeStream != nil {
+ n, r.err = r.readClient(p)
+ } else {
+ n, r.err = r.read(p)
+ }
+ return n, r.err
+}
+
+func (r *recvBufferReader) read(p []byte) (n int, err error) {
+ select {
+ case <-r.ctxDone:
+ return 0, ContextErr(r.ctx.Err())
+ case m := <-r.recv.get():
+ return r.readAdditional(m, p)
+ }
+}
+
+func (r *recvBufferReader) readClient(p []byte) (n int, err error) {
+ // If the context is canceled, the stream is closed with nil trailer metadata.
+ // closeStream writes its error parameter to r.recv as a recvMsg.
+ // r.readAdditional acts on that message and returns the necessary error.
+ select {
+ case <-r.ctxDone:
+ // Note that this adds the ctx error to the end of recv buffer, and
+ // reads from the head. This will delay the error until recv buffer is
+ // empty, thus will delay ctx cancellation in Recv().
+ //
+ // It's done this way to fix a race between ctx cancel and trailer. The
+ // race was, stream.Recv() may return ctx error if ctxDone wins the
+ // race, but stream.Trailer() may return a non-nil md because the stream
+ // was not marked as done when trailer is received. This closeStream
+ // call will mark stream as done, thus fix the race.
+ //
+ // TODO: delaying ctx error seems like an unnecessary side effect. What
+ // we really want is to mark the stream as done, and return ctx error
+ // faster.
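+ // In short: the ctx error is queued behind data already received, so
+ // Recv() drains buffered messages before surfacing the cancellation.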
+ r.closeStream(ContextErr(r.ctx.Err()))
+ m := <-r.recv.get()
+ return r.readAdditional(m, p)
+ case m := <-r.recv.get():
+ return r.readAdditional(m, p)
+ }
+}
+
+func (r *recvBufferReader) readAdditional(m recvMsg, p []byte) (n int, err error) {
+ r.recv.load()
+ if m.err != nil {
+ return 0, m.err
+ }
+ copied, _ := m.buffer.Read(p)
+ if m.buffer.Len() == 0 {
+ r.freeBuffer(m.buffer)
+ r.last = nil
+ } else {
+ r.last = m.buffer
+ }
+ return copied, nil
+}
+
+type streamState uint32
+
+const (
+ streamActive streamState = iota
+ streamWriteDone // EndStream sent
+ streamReadDone // EndStream received
+ streamDone // the entire stream is finished.
+)
+
+// Stream represents an RPC in the transport layer.
+type Stream struct {
+ id uint32
+ st ServerTransport // nil for client side Stream
+ ct *http2Client // nil for server side Stream
+ ctx context.Context // the associated context of the stream
+ cancel context.CancelFunc // always nil for client side Stream
+ done chan struct{} // closed at the end of stream to unblock writers. On the client side.
+ ctxDone <-chan struct{} // same as done chan but for server side. Cache of ctx.Done() (for performance)
+ method string // the associated RPC method of the stream
+ recvCompress string
+ sendCompress string
+ buf *recvBuffer
+ trReader io.Reader
+ fc *inFlow
+ wq *writeQuota
+
+ // Callback to state the application's intention to read data. This
+ // is used to adjust flow control, if needed.
+ requestRead func(int)
+
+ headerChan chan struct{} // closed to indicate the end of header metadata.
+ headerChanClosed uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times.
+ // headerValid indicates whether a valid header was received. Only
+ // meaningful after headerChan is closed (always call waitOnHeader() before
+ // reading its value). Not valid on server side.
+ headerValid bool
+
+ // hdrMu protects header and trailer metadata on the server-side.
+ hdrMu sync.Mutex
+ // On client side, header keeps the received header metadata.
+ //
+ // On server side, header keeps the header set by SetHeader(). The complete
+ // header will be merged into this after t.WriteHeader() is called.
+ header metadata.MD
+ trailer metadata.MD // the key-value map of trailer metadata.
+
+ noHeaders bool // set if the client never received headers (set only after the stream is done).
+
+ // On the server-side, headerSent is atomically set to 1 when the headers are sent out.
+ headerSent uint32
+
+ state streamState
+
+ // On client-side it is the status error received from the server.
+ // On server-side it is unused.
+ status *status.Status
+
+ bytesReceived uint32 // indicates whether any bytes have been received on this stream
+ unprocessed uint32 // set if the server sends a refused stream or GOAWAY including this stream
+
+ // contentSubtype is the content-subtype for requests.
+ // This must be lowercase or the behavior is undefined.
+ contentSubtype string
+}
+
+// isHeaderSent is only valid on the server-side.
+func (s *Stream) isHeaderSent() bool {
+ return atomic.LoadUint32(&s.headerSent) == 1
+}
+
+// updateHeaderSent updates headerSent and returns true
+// if it was already set. It is valid only on server-side.
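+// (atomic.SwapUint32 returns the previous value, so when several goroutines
+// race to send headers, exactly one of them observes false here.)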
+func (s *Stream) updateHeaderSent() bool {
+ return atomic.SwapUint32(&s.headerSent, 1) == 1
+}
+
+func (s *Stream) swapState(st streamState) streamState {
+ return streamState(atomic.SwapUint32((*uint32)(&s.state), uint32(st)))
+}
+
+func (s *Stream) compareAndSwapState(oldState, newState streamState) bool {
+ return atomic.CompareAndSwapUint32((*uint32)(&s.state), uint32(oldState), uint32(newState))
+}
+
+func (s *Stream) getState() streamState {
+ return streamState(atomic.LoadUint32((*uint32)(&s.state)))
+}
+
+func (s *Stream) waitOnHeader() {
+ if s.headerChan == nil {
+ // On the server headerChan is always nil since a stream originates
+ // only after having received headers.
+ return
+ }
+ select {
+ case <-s.ctx.Done():
+ // Close the stream to prevent headers/trailers from changing after
+ // this function returns.
+ s.ct.CloseStream(s, ContextErr(s.ctx.Err()))
+ // headerChan could possibly not be closed yet if closeStream raced
+ // with operateHeaders; wait until it is closed explicitly here.
+ <-s.headerChan
+ case <-s.headerChan:
+ }
+}
+
+// RecvCompress returns the compression algorithm applied to the inbound
+// message. It is the empty string if no compression is applied.
+func (s *Stream) RecvCompress() string {
+ s.waitOnHeader()
+ return s.recvCompress
+}
+
+// SetSendCompress sets the compression algorithm for the stream.
+func (s *Stream) SetSendCompress(str string) {
+ s.sendCompress = str
+}
+
+// Done returns a channel which is closed when it receives the final status
+// from the server.
+func (s *Stream) Done() <-chan struct{} {
+ return s.done
+}
+
+// Header returns the header metadata of the stream.
+//
+// On client side, it acquires the key-value pairs of header metadata once it is
+// available. It blocks until i) the metadata is ready or ii) there is no header
+// metadata or iii) the stream is canceled/expired.
+//
+// On server side, it returns the out header after t.WriteHeader is called. It
+// does not block and must not be called until after WriteHeader.
+func (s *Stream) Header() (metadata.MD, error) {
+ if s.headerChan == nil {
+ // On server side, return the header in stream. It will be the out
+ // header after t.WriteHeader is called.
+ return s.header.Copy(), nil
+ }
+ s.waitOnHeader()
+ if !s.headerValid {
+ return nil, s.status.Err()
+ }
+ return s.header.Copy(), nil
+}
+
+// TrailersOnly blocks until a header or trailers-only frame is received and
+// then returns true if the stream was trailers-only. If the stream ends
+// before headers are received, it returns true. Client-side only.
+func (s *Stream) TrailersOnly() bool {
+ s.waitOnHeader()
+ return s.noHeaders
+}
+
+// Trailer returns the cached trailer metadata. Note that if it is not called
+// after the entire stream is done, it could return an empty MD. Client
+// side only.
+// It can be safely read only after the stream has ended, that is, after either
+// read or write has returned io.EOF.
+func (s *Stream) Trailer() metadata.MD {
+ c := s.trailer.Copy()
+ return c
+}
+
+// ContentSubtype returns the content-subtype for a request. For example, a
+// content-subtype of "proto" will result in a content-type of
+// "application/grpc+proto". This will always be lowercase. See
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
+// more details.
+func (s *Stream) ContentSubtype() string {
+ return s.contentSubtype
+}
+
+// Context returns the context of the stream.
+func (s *Stream) Context() context.Context {
+ return s.ctx
+}
+
+// Method returns the method for the stream.
+func (s *Stream) Method() string {
+ return s.method
+}
+
+// Status returns the status received from the server.
+// Status can be read safely only after the stream has ended,
+// that is, after Done() is closed.
+func (s *Stream) Status() *status.Status {
+ return s.status
+}
+
+// SetHeader sets the header metadata. This can be called multiple times.
+// Server side only.
+// This should not be called in parallel to other data writes.
+func (s *Stream) SetHeader(md metadata.MD) error {
+ if md.Len() == 0 {
+ return nil
+ }
+ if s.isHeaderSent() || s.getState() == streamDone {
+ return ErrIllegalHeaderWrite
+ }
+ s.hdrMu.Lock()
+ s.header = metadata.Join(s.header, md)
+ s.hdrMu.Unlock()
+ return nil
+}
+
+// SendHeader sends the given header metadata. The given metadata is
+// combined with any metadata set by previous calls to SetHeader and
+// then written to the transport stream.
+func (s *Stream) SendHeader(md metadata.MD) error {
+ return s.st.WriteHeader(s, md)
+}
+
+// SetTrailer sets the trailer metadata which will be sent with the RPC status
+// by the server. This can be called multiple times. Server side only.
+// This should not be called in parallel to other data writes.
+func (s *Stream) SetTrailer(md metadata.MD) error {
+ if md.Len() == 0 {
+ return nil
+ }
+ if s.getState() == streamDone {
+ return ErrIllegalHeaderWrite
+ }
+ s.hdrMu.Lock()
+ s.trailer = metadata.Join(s.trailer, md)
+ s.hdrMu.Unlock()
+ return nil
+}
+
+func (s *Stream) write(m recvMsg) {
+ s.buf.put(m)
+}
+
+// Read reads all len(p) bytes from the wire for this stream.
+func (s *Stream) Read(p []byte) (n int, err error) {
+ // Don't request a read if there was an error earlier.
+ if er := s.trReader.(*transportReader).er; er != nil {
+ return 0, er
+ }
+ s.requestRead(len(p))
+ return io.ReadFull(s.trReader, p)
+}
+
+// transportReader reads all the data available for this Stream from the transport and
+// passes them into the decoder, which converts them into a gRPC message stream.
+// The error is io.EOF when the stream is done or another non-nil error if
+// the stream broke.
+type transportReader struct {
+ reader io.Reader
+ // The handler to control the window update procedure for both this
+ // particular stream and the associated transport.
+ windowHandler func(int)
+ er error
+}
+
+func (t *transportReader) Read(p []byte) (n int, err error) {
+ n, err = t.reader.Read(p)
+ if err != nil {
+ t.er = err
+ return
+ }
+ t.windowHandler(n)
+ return
+}
+
+// BytesReceived indicates whether any bytes have been received on this stream.
+func (s *Stream) BytesReceived() bool {
+ return atomic.LoadUint32(&s.bytesReceived) == 1
+}
+
+// Unprocessed indicates whether the server did not process this stream --
+// i.e. it sent a refused stream or GOAWAY including this stream ID.
+func (s *Stream) Unprocessed() bool {
+ return atomic.LoadUint32(&s.unprocessed) == 1
+}
+
+// GoString is implemented by Stream so context.String() won't
+// race when printing %#v.
+func (s *Stream) GoString() string {
+ return fmt.Sprintf("<stream: %p, %v>", s, s.method)
+}
+
+// state of transport
+type transportState int
+
+const (
+ reachable transportState = iota
+ closing
+ draining
+)
+
+// ServerConfig consists of all the configurations to establish a server transport.
+type ServerConfig struct {
+ MaxStreams uint32
+ AuthInfo credentials.AuthInfo
+ InTapHandle tap.ServerInHandle
+ StatsHandler stats.Handler
+ KeepaliveParams keepalive.ServerParameters
+ KeepalivePolicy keepalive.EnforcementPolicy
+ InitialWindowSize int32
+ InitialConnWindowSize int32
+ WriteBufferSize int
+ ReadBufferSize int
+ ChannelzParentID int64
+ MaxHeaderListSize *uint32
+ HeaderTableSize *uint32
+}
+
+// NewServerTransport creates a ServerTransport from conn, or returns a
+// non-nil error if it fails.
+func NewServerTransport(protocol string, conn net.Conn, config *ServerConfig) (ServerTransport, error) {
+ return newHTTP2Server(conn, config)
+}
+
+// ConnectOptions covers all relevant options for communicating with the server.
+type ConnectOptions struct {
+ // UserAgent is the application user agent.
+ UserAgent string
+ // Dialer specifies how to dial a network address.
+ Dialer func(context.Context, string) (net.Conn, error)
+ // FailOnNonTempDialError specifies if gRPC fails on non-temporary dial errors.
+ FailOnNonTempDialError bool
+ // PerRPCCredentials stores the PerRPCCredentials required to issue RPCs.
+ PerRPCCredentials []credentials.PerRPCCredentials
+ // TransportCredentials stores the Authenticator required to setup a client
+ // connection. Only one of TransportCredentials and CredsBundle is non-nil.
+ TransportCredentials credentials.TransportCredentials
+ // CredsBundle is the credentials bundle to be used. Only one of
+ // TransportCredentials and CredsBundle is non-nil.
+ CredsBundle credentials.Bundle
+ // KeepaliveParams stores the keepalive parameters.
+ KeepaliveParams keepalive.ClientParameters
+ // StatsHandler stores the handler for stats.
+ StatsHandler stats.Handler
+ // InitialWindowSize sets the initial window size for a stream.
+ InitialWindowSize int32
+ // InitialConnWindowSize sets the initial window size for a connection.
+ InitialConnWindowSize int32
+ // WriteBufferSize sets the size of the write buffer, which in turn determines how much data can be batched before it's written on the wire.
+ WriteBufferSize int
+ // ReadBufferSize sets the size of the read buffer, which in turn determines how much data can be read at most for one read syscall.
+ ReadBufferSize int
+ // ChannelzParentID sets the addrConn id which initiated the creation of this client transport.
+ ChannelzParentID int64
+ // MaxHeaderListSize sets the max (uncompressed) size of the header list that the client is prepared to receive.
+ MaxHeaderListSize *uint32
+}
+
+// TargetInfo contains the information of the target such as network address and metadata.
+type TargetInfo struct {
+ Addr string
+ Metadata interface{}
+ Authority string
+}
+
+// NewClientTransport establishes the transport with the required ConnectOptions
+// and returns it to the caller.
+func NewClientTransport(connectCtx, ctx context.Context, target TargetInfo, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) {
+ return newHTTP2Client(connectCtx, ctx, target, opts, onPrefaceReceipt, onGoAway, onClose)
+}
+
+// Options provides additional hints and information for message
+// transmission.
+type Options struct {
+ // Last indicates whether this write is the last piece for
+ // this stream.
+ Last bool
+}
+
+// CallHdr carries the information of a particular RPC.
+type CallHdr struct {
+ // Host specifies the peer's host.
+ Host string
+
+ // Method specifies the operation to perform.
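+ // It is the full method path, e.g. "/package.Service/Method".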
+ Method string
+
+ // SendCompress specifies the compression algorithm applied on
+ // outbound messages.
+ SendCompress string
+
+ // Creds specifies credentials.PerRPCCredentials for a call.
+ Creds credentials.PerRPCCredentials
+
+ // ContentSubtype specifies the content-subtype for a request. For example, a
+ // content-subtype of "proto" will result in a content-type of
+ // "application/grpc+proto". The value of ContentSubtype must be all
+ // lowercase, otherwise the behavior is undefined. See
+ // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests
+ // for more details.
+ ContentSubtype string
+
+ PreviousAttempts int // value of grpc-previous-rpc-attempts header to set
+}
+
+// ClientTransport is the common interface for all gRPC client-side transport
+// implementations.
+type ClientTransport interface {
+ // Close tears down this transport. Once it returns, the transport
+ // should not be accessed any more. The caller must make sure this
+ // is called only once.
+ Close() error
+
+ // GracefulClose starts to tear down the transport: the transport will stop
+ // accepting new RPCs and NewStream will return an error. Once all streams
+ // are finished, the transport will close.
+ //
+ // It does not block.
+ GracefulClose()
+
+ // Write sends the data for the given stream. A nil stream indicates
+ // the write is to be performed on the transport as a whole.
+ Write(s *Stream, hdr []byte, data []byte, opts *Options) error
+
+ // NewStream creates a Stream for an RPC.
+ NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error)
+
+ // CloseStream clears the footprint of a stream when the stream is
+ // not needed any more. The err indicates the error incurred when
+ // CloseStream is called. Must be called when a stream is finished
+ // unless the associated transport is closing.
+ CloseStream(stream *Stream, err error)
+
+ // Error returns a channel that is closed when some I/O error
+ // happens. Typically the caller should have a goroutine to monitor
+ // this in order to take action (e.g., close the current transport
+ // and create a new one) in the error case. It should not return nil
+ // once the transport is initiated.
+ Error() <-chan struct{}
+
+ // GoAway returns a channel that is closed when ClientTransport
+ // receives the draining signal from the server (e.g., GOAWAY frame in
+ // HTTP/2).
+ GoAway() <-chan struct{}
+
+ // GetGoAwayReason returns the reason why the GoAway frame was received.
+ GetGoAwayReason() GoAwayReason
+
+ // RemoteAddr returns the remote network address.
+ RemoteAddr() net.Addr
+
+ // IncrMsgSent increments the number of messages sent through this transport.
+ IncrMsgSent()
+
+ // IncrMsgRecv increments the number of messages received through this transport.
+ IncrMsgRecv()
+}
+
+// ServerTransport is the common interface for all gRPC server-side transport
+// implementations.
+//
+// Methods may be called concurrently from multiple goroutines, but
+// Write methods for a given Stream will be called serially.
+type ServerTransport interface {
+ // HandleStreams receives incoming streams using the given handler.
+ HandleStreams(func(*Stream), func(context.Context, string) context.Context)
+
+ // WriteHeader sends the header metadata for the given stream.
+ // WriteHeader may not be called on all streams.
+ WriteHeader(s *Stream, md metadata.MD) error
+
+ // Write sends the data for the given stream.
+ // Write may not be called on all streams.
+ Write(s *Stream, hdr []byte, data []byte, opts *Options) error
+
+ // WriteStatus sends the status of a stream to the client. WriteStatus is
+ // the final call made on a stream and always occurs.
+ WriteStatus(s *Stream, st *status.Status) error
+
+ // Close tears down the transport. Once it is called, the transport
+ // should not be accessed any more. All the pending streams and their
+ // handlers will be terminated asynchronously.
+ Close() error
+
+ // RemoteAddr returns the remote network address.
+ RemoteAddr() net.Addr
+
+ // Drain notifies the client this ServerTransport stops accepting new RPCs.
+ Drain()
+
+ // IncrMsgSent increments the number of messages sent through this transport.
+ IncrMsgSent()
+
+ // IncrMsgRecv increments the number of messages received through this transport.
+ IncrMsgRecv()
+}
+
+// connectionErrorf creates a ConnectionError with the specified error description.
+func connectionErrorf(temp bool, e error, format string, a ...interface{}) ConnectionError {
+ return ConnectionError{
+ Desc: fmt.Sprintf(format, a...),
+ temp: temp,
+ err: e,
+ }
+}
+
+// ConnectionError is an error that results in the termination of the
+// entire connection and the retry of all the active streams.
+type ConnectionError struct {
+ Desc string
+ temp bool
+ err error
+}
+
+func (e ConnectionError) Error() string {
+ return fmt.Sprintf("connection error: desc = %q", e.Desc)
+}
+
+// Temporary indicates if this connection error is temporary or fatal.
+func (e ConnectionError) Temporary() bool {
+ return e.temp
+}
+
+// Origin returns the original error of this connection error.
+func (e ConnectionError) Origin() error {
+ // Never return nil error here.
+ // If the original error is nil, return itself.
+ if e.err == nil {
+ return e
+ }
+ return e.err
+}
+
+var (
+ // ErrConnClosing indicates that the transport is closing.
+ ErrConnClosing = connectionErrorf(true, nil, "transport is closing")
+ // errStreamDrain indicates that the stream is rejected because the
+ // connection is draining. This could be caused by a GOAWAY or by the
+ // balancer removing the address.
+ errStreamDrain = status.Error(codes.Unavailable, "the connection is draining")
+ // errStreamDone is returned from write at the client side to indicate
+ // an application-layer error.
+ errStreamDone = errors.New("the stream is done")
+ // statusGoAway indicates that the server sent a GOAWAY that included this
+ // stream's ID in unprocessed RPCs.
+ statusGoAway = status.New(codes.Unavailable, "the stream is rejected because server is draining the connection")
+)
+
+// GoAwayReason contains the reason for the GoAway frame received.
+type GoAwayReason uint8
+
+const (
+ // GoAwayInvalid indicates that no GoAway frame is received.
+ GoAwayInvalid GoAwayReason = 0
+ // GoAwayNoReason is the default value when GoAway frame is received.
+ GoAwayNoReason GoAwayReason = 1
+ // GoAwayTooManyPings indicates that a GoAway frame with
+ // ErrCodeEnhanceYourCalm was received and that the debug data said
+ // "too_many_pings".
+ GoAwayTooManyPings GoAwayReason = 2
+)
+
+// channelzData is used to store channelz related data for http2Client and http2Server.
+// These fields cannot be embedded in the original structs (e.g. http2Client), since atomic
+// operations on an int64 variable on a 32-bit machine require the caller to enforce memory
+// alignment. Here, by grouping those int64 fields inside a struct, we are enforcing the alignment.
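+// (This is the 64-bit alignment requirement documented in the sync/atomic
+// package: the first word of an allocated struct can be relied upon to be
+// 64-bit aligned, which is why the int64 fields are grouped here.)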
+type channelzData struct { + kpCount int64 + // The number of streams that have started, including already finished ones. + streamsStarted int64 + // Client side: The number of streams that have ended successfully by receiving + // EoS bit set frame from server. + // Server side: The number of streams that have ended successfully by sending + // frame with EoS bit set. + streamsSucceeded int64 + streamsFailed int64 + // lastStreamCreatedTime stores the timestamp that the last stream gets created. It is of int64 type + // instead of time.Time since it's more costly to atomically update time.Time variable than int64 + // variable. The same goes for lastMsgSentTime and lastMsgRecvTime. + lastStreamCreatedTime int64 + msgSent int64 + msgRecv int64 + lastMsgSentTime int64 + lastMsgRecvTime int64 +} + +// ContextErr converts the error from context package into a status error. +func ContextErr(err error) error { + switch err { + case context.DeadlineExceeded: + return status.Error(codes.DeadlineExceeded, err.Error()) + case context.Canceled: + return status.Error(codes.Canceled, err.Error()) + } + return status.Errorf(codes.Internal, "Unexpected error from context packet: %v", err) +} diff --git a/vendor/google.golang.org/grpc/keepalive/keepalive.go b/vendor/google.golang.org/grpc/keepalive/keepalive.go new file mode 100644 index 00000000..34d31b5e --- /dev/null +++ b/vendor/google.golang.org/grpc/keepalive/keepalive.go @@ -0,0 +1,85 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package keepalive defines configurable parameters for point-to-point +// healthcheck. +package keepalive + +import ( + "time" +) + +// ClientParameters is used to set keepalive parameters on the client-side. +// These configure how the client will actively probe to notice when a +// connection is broken and send pings so intermediaries will be aware of the +// liveness of the connection. Make sure these parameters are set in +// coordination with the keepalive policy on the server, as incompatible +// settings can result in closing of connection. +type ClientParameters struct { + // After a duration of this time if the client doesn't see any activity it + // pings the server to see if the transport is still alive. + // If set below 10s, a minimum value of 10s will be used instead. + Time time.Duration // The current default value is infinity. + // After having pinged for keepalive check, the client waits for a duration + // of Timeout and if no activity is seen even after that the connection is + // closed. + Timeout time.Duration // The current default value is 20 seconds. + // If true, client sends keepalive pings even with no active RPCs. If false, + // when there are no active RPCs, Time and Timeout will be ignored and no + // keepalive pings will be sent. + PermitWithoutStream bool // false by default. +} + +// ServerParameters is used to set keepalive and max-age parameters on the +// server-side. 
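Editorial aside: applications normally reach these parameters through the grpc.KeepaliveParams and grpc.KeepaliveEnforcementPolicy server options rather than constructing transports directly. A minimal sketch against the ServerParameters and EnforcementPolicy types below; all values are illustrative:

package main

import (
    "log"
    "net"
    "time"

    "google.golang.org/grpc"
    "google.golang.org/grpc/keepalive"
)

func main() {
    lis, err := net.Listen("tcp", ":50051")
    if err != nil {
        log.Fatal(err)
    }
    srv := grpc.NewServer(
        grpc.KeepaliveParams(keepalive.ServerParameters{
            MaxConnectionIdle:     5 * time.Minute,  // GOAWAY idle connections
            MaxConnectionAge:      30 * time.Minute, // +/-10% jitter is applied
            MaxConnectionAgeGrace: time.Minute,      // hard-close after this grace
            Time:                  2 * time.Hour,    // ping an inactive client
            Timeout:               20 * time.Second, // wait this long for activity
        }),
        grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
            MinTime:             time.Minute, // clients pinging faster get GOAWAY
            PermitWithoutStream: false,
        }),
    )
    log.Fatal(srv.Serve(lis))
}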
+type ServerParameters struct {
+ // MaxConnectionIdle is a duration for the amount of time after which an
+ // idle connection would be closed by sending a GoAway. Idleness duration is
+ // defined since the most recent time the number of outstanding RPCs became
+ // zero, or since connection establishment.
+ MaxConnectionIdle time.Duration // The current default value is infinity.
+ // MaxConnectionAge is a duration for the maximum amount of time a
+ // connection may exist before it will be closed by sending a GoAway. A
+ // random jitter of +/-10% will be added to MaxConnectionAge to spread out
+ // connection storms.
+ MaxConnectionAge time.Duration // The current default value is infinity.
+ // MaxConnectionAgeGrace is an additive period after MaxConnectionAge after
+ // which the connection will be forcibly closed.
+ MaxConnectionAgeGrace time.Duration // The current default value is infinity.
+ // After a duration of this time if the server doesn't see any activity it
+ // pings the client to see if the transport is still alive.
+ // If set below 1s, a minimum value of 1s will be used instead.
+ Time time.Duration // The current default value is 2 hours.
+ // After having pinged for keepalive check, the server waits for a duration
+ // of Timeout and if no activity is seen even after that the connection is
+ // closed.
+ Timeout time.Duration // The current default value is 20 seconds.
+}
+
+// EnforcementPolicy is used to set keepalive enforcement policy on the
+// server-side. The server will close the connection with a client that
+// violates this policy.
+type EnforcementPolicy struct {
+ // MinTime is the minimum amount of time a client should wait before sending
+ // a keepalive ping.
+ MinTime time.Duration // The current default value is 5 minutes.
+ // If true, server allows keepalive pings even when there are no active
+ // streams (RPCs). If false, and client sends ping when there are no active
+ // streams, server will send GOAWAY and close the connection.
+ PermitWithoutStream bool // false by default.
+}
diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go
new file mode 100644
index 00000000..cf6d1b94
--- /dev/null
+++ b/vendor/google.golang.org/grpc/metadata/metadata.go
@@ -0,0 +1,209 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package metadata defines the structure of the metadata supported by the gRPC library.
+// Please refer to https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md
+// for more information about custom-metadata.
+package metadata // import "google.golang.org/grpc/metadata"
+
+import (
+ "context"
+ "fmt"
+ "strings"
+)
+
+// DecodeKeyValue returns k, v, nil.
+//
+// Deprecated: use k and v directly instead.
+func DecodeKeyValue(k, v string) (string, string, error) {
+ return k, v, nil
+}
+
+// MD is a mapping from metadata keys to values.
+// two convenience functions New and Pairs to generate MD.
+type MD map[string][]string
+
+// New creates an MD from a given key-value map.
+//
+// Only the following ASCII characters are allowed in keys:
+//  - digits: 0-9
+//  - uppercase letters: A-Z (normalized to lower)
+//  - lowercase letters: a-z
+//  - special characters: -_.
+// Uppercase letters are automatically converted to lowercase.
+//
+// Keys beginning with "grpc-" are reserved for grpc-internal use only and may
+// result in errors if set in metadata.
+func New(m map[string]string) MD {
+	md := MD{}
+	for k, val := range m {
+		key := strings.ToLower(k)
+		md[key] = append(md[key], val)
+	}
+	return md
+}
+
+// Pairs returns an MD formed by the mapping of key, value ...
+// Pairs panics if len(kv) is odd.
+//
+// Only the following ASCII characters are allowed in keys:
+//  - digits: 0-9
+//  - uppercase letters: A-Z (normalized to lower)
+//  - lowercase letters: a-z
+//  - special characters: -_.
+// Uppercase letters are automatically converted to lowercase.
+//
+// Keys beginning with "grpc-" are reserved for grpc-internal use only and may
+// result in errors if set in metadata.
+func Pairs(kv ...string) MD {
+	if len(kv)%2 == 1 {
+		panic(fmt.Sprintf("metadata: Pairs got an odd number of input pairs for metadata: %d", len(kv)))
+	}
+	md := MD{}
+	var key string
+	for i, s := range kv {
+		if i%2 == 0 {
+			key = strings.ToLower(s)
+			continue
+		}
+		md[key] = append(md[key], s)
+	}
+	return md
+}
+
+// Len returns the number of items in md.
+func (md MD) Len() int {
+	return len(md)
+}
+
+// Copy returns a copy of md.
+func (md MD) Copy() MD {
+	return Join(md)
+}
+
+// Get obtains the values for a given key.
+func (md MD) Get(k string) []string {
+	k = strings.ToLower(k)
+	return md[k]
+}
+
+// Set sets the value of a given key with a slice of values.
+func (md MD) Set(k string, vals ...string) {
+	if len(vals) == 0 {
+		return
+	}
+	k = strings.ToLower(k)
+	md[k] = vals
+}
+
+// Append adds the values to key k, not overwriting what was already stored at that key.
+func (md MD) Append(k string, vals ...string) {
+	if len(vals) == 0 {
+		return
+	}
+	k = strings.ToLower(k)
+	md[k] = append(md[k], vals...)
+}
+
+// Join joins any number of mds into a single MD.
+// The order of values for each key is determined by the order in which
+// the mds containing those values are presented to Join.
+func Join(mds ...MD) MD {
+	out := MD{}
+	for _, md := range mds {
+		for k, v := range md {
+			out[k] = append(out[k], v...)
+		}
+	}
+	return out
+}
+
+type mdIncomingKey struct{}
+type mdOutgoingKey struct{}
+
+// NewIncomingContext creates a new context with incoming md attached.
+func NewIncomingContext(ctx context.Context, md MD) context.Context {
+	return context.WithValue(ctx, mdIncomingKey{}, md)
+}
+
+// NewOutgoingContext creates a new context with outgoing md attached. If used
+// in conjunction with AppendToOutgoingContext, NewOutgoingContext will
+// overwrite any previously-appended metadata.
+func NewOutgoingContext(ctx context.Context, md MD) context.Context {
+	return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md})
+}
+
+// AppendToOutgoingContext returns a new context with the provided kv merged
+// with any existing metadata in the context. Please refer to the
+// documentation of Pairs for a description of kv.
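+//
+// For example, a minimal sketch (keys and values are illustrative):
+//
+//	ctx = metadata.AppendToOutgoingContext(ctx, "k1", "v1", "k1", "v2")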
+func AppendToOutgoingContext(ctx context.Context, kv ...string) context.Context { + if len(kv)%2 == 1 { + panic(fmt.Sprintf("metadata: AppendToOutgoingContext got an odd number of input pairs for metadata: %d", len(kv))) + } + md, _ := ctx.Value(mdOutgoingKey{}).(rawMD) + added := make([][]string, len(md.added)+1) + copy(added, md.added) + added[len(added)-1] = make([]string, len(kv)) + copy(added[len(added)-1], kv) + return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md.md, added: added}) +} + +// FromIncomingContext returns the incoming metadata in ctx if it exists. The +// returned MD should not be modified. Writing to it may cause races. +// Modification should be made to copies of the returned MD. +func FromIncomingContext(ctx context.Context) (md MD, ok bool) { + md, ok = ctx.Value(mdIncomingKey{}).(MD) + return +} + +// FromOutgoingContextRaw returns the un-merged, intermediary contents +// of rawMD. Remember to perform strings.ToLower on the keys. The returned +// MD should not be modified. Writing to it may cause races. Modification +// should be made to copies of the returned MD. +// +// This is intended for gRPC-internal use ONLY. +func FromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) { + raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD) + if !ok { + return nil, nil, false + } + + return raw.md, raw.added, true +} + +// FromOutgoingContext returns the outgoing metadata in ctx if it exists. The +// returned MD should not be modified. Writing to it may cause races. +// Modification should be made to copies of the returned MD. +func FromOutgoingContext(ctx context.Context) (MD, bool) { + raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD) + if !ok { + return nil, false + } + + mds := make([]MD, 0, len(raw.added)+1) + mds = append(mds, raw.md) + for _, vv := range raw.added { + mds = append(mds, Pairs(vv...)) + } + return Join(mds...), ok +} + +type rawMD struct { + md MD + added [][]string +} diff --git a/vendor/google.golang.org/grpc/naming/dns_resolver.go b/vendor/google.golang.org/grpc/naming/dns_resolver.go new file mode 100644 index 00000000..c9f79dc5 --- /dev/null +++ b/vendor/google.golang.org/grpc/naming/dns_resolver.go @@ -0,0 +1,293 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package naming + +import ( + "context" + "errors" + "fmt" + "net" + "strconv" + "time" + + "google.golang.org/grpc/grpclog" +) + +const ( + defaultPort = "443" + defaultFreq = time.Minute * 30 +) + +var ( + errMissingAddr = errors.New("missing address") + errWatcherClose = errors.New("watcher has been closed") + + lookupHost = net.DefaultResolver.LookupHost + lookupSRV = net.DefaultResolver.LookupSRV +) + +// NewDNSResolverWithFreq creates a DNS Resolver that can resolve DNS names, and +// create watchers that poll the DNS server using the frequency set by freq. 
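+//
+// For example, a sketch that polls every 10 minutes instead of the default
+// 30 minutes:
+//
+//	r, err := naming.NewDNSResolverWithFreq(10 * time.Minute)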
+func NewDNSResolverWithFreq(freq time.Duration) (Resolver, error) { + return &dnsResolver{freq: freq}, nil +} + +// NewDNSResolver creates a DNS Resolver that can resolve DNS names, and create +// watchers that poll the DNS server using the default frequency defined by defaultFreq. +func NewDNSResolver() (Resolver, error) { + return NewDNSResolverWithFreq(defaultFreq) +} + +// dnsResolver handles name resolution for names following the DNS scheme +type dnsResolver struct { + // frequency of polling the DNS server that the watchers created by this resolver will use. + freq time.Duration +} + +// formatIP returns ok = false if addr is not a valid textual representation of an IP address. +// If addr is an IPv4 address, return the addr and ok = true. +// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true. +func formatIP(addr string) (addrIP string, ok bool) { + ip := net.ParseIP(addr) + if ip == nil { + return "", false + } + if ip.To4() != nil { + return addr, true + } + return "[" + addr + "]", true +} + +// parseTarget takes the user input target string, returns formatted host and port info. +// If target doesn't specify a port, set the port to be the defaultPort. +// If target is in IPv6 format and host-name is enclosed in square brackets, brackets +// are stripped when setting the host. +// examples: +// target: "www.google.com" returns host: "www.google.com", port: "443" +// target: "ipv4-host:80" returns host: "ipv4-host", port: "80" +// target: "[ipv6-host]" returns host: "ipv6-host", port: "443" +// target: ":80" returns host: "localhost", port: "80" +// target: ":" returns host: "localhost", port: "443" +func parseTarget(target string) (host, port string, err error) { + if target == "" { + return "", "", errMissingAddr + } + + if ip := net.ParseIP(target); ip != nil { + // target is an IPv4 or IPv6(without brackets) address + return target, defaultPort, nil + } + if host, port, err := net.SplitHostPort(target); err == nil { + // target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port + if host == "" { + // Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed. + host = "localhost" + } + if port == "" { + // If the port field is empty(target ends with colon), e.g. "[::1]:", defaultPort is used. + port = defaultPort + } + return host, port, nil + } + if host, port, err := net.SplitHostPort(target + ":" + defaultPort); err == nil { + // target doesn't have port + return host, port, nil + } + return "", "", fmt.Errorf("invalid target address %v", target) +} + +// Resolve creates a watcher that watches the name resolution of the target. 
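+//
+// A minimal sketch (the target is illustrative):
+//
+//	w, err := r.Resolve("example.com:50051")
+//	if err != nil {
+//		// handle the error
+//	}
+//	updates, err := w.Next() // the first call returns the full address set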
+func (r *dnsResolver) Resolve(target string) (Watcher, error) { + host, port, err := parseTarget(target) + if err != nil { + return nil, err + } + + if net.ParseIP(host) != nil { + ipWatcher := &ipWatcher{ + updateChan: make(chan *Update, 1), + } + host, _ = formatIP(host) + ipWatcher.updateChan <- &Update{Op: Add, Addr: host + ":" + port} + return ipWatcher, nil + } + + ctx, cancel := context.WithCancel(context.Background()) + return &dnsWatcher{ + r: r, + host: host, + port: port, + ctx: ctx, + cancel: cancel, + t: time.NewTimer(0), + }, nil +} + +// dnsWatcher watches for the name resolution update for a specific target +type dnsWatcher struct { + r *dnsResolver + host string + port string + // The latest resolved address set + curAddrs map[string]*Update + ctx context.Context + cancel context.CancelFunc + t *time.Timer +} + +// ipWatcher watches for the name resolution update for an IP address. +type ipWatcher struct { + updateChan chan *Update +} + +// Next returns the address resolution Update for the target. For IP address, +// the resolution is itself, thus polling name server is unnecessary. Therefore, +// Next() will return an Update the first time it is called, and will be blocked +// for all following calls as no Update exists until watcher is closed. +func (i *ipWatcher) Next() ([]*Update, error) { + u, ok := <-i.updateChan + if !ok { + return nil, errWatcherClose + } + return []*Update{u}, nil +} + +// Close closes the ipWatcher. +func (i *ipWatcher) Close() { + close(i.updateChan) +} + +// AddressType indicates the address type returned by name resolution. +type AddressType uint8 + +const ( + // Backend indicates the server is a backend server. + Backend AddressType = iota + // GRPCLB indicates the server is a grpclb load balancer. + GRPCLB +) + +// AddrMetadataGRPCLB contains the information the name resolver for grpclb should provide. The +// name resolver used by the grpclb balancer is required to provide this type of metadata in +// its address updates. +type AddrMetadataGRPCLB struct { + // AddrType is the type of server (grpc load balancer or backend). + AddrType AddressType + // ServerName is the name of the grpc load balancer. Used for authentication. 
+ ServerName string +} + +// compileUpdate compares the old resolved addresses and newly resolved addresses, +// and generates an update list +func (w *dnsWatcher) compileUpdate(newAddrs map[string]*Update) []*Update { + var res []*Update + for a, u := range w.curAddrs { + if _, ok := newAddrs[a]; !ok { + u.Op = Delete + res = append(res, u) + } + } + for a, u := range newAddrs { + if _, ok := w.curAddrs[a]; !ok { + res = append(res, u) + } + } + return res +} + +func (w *dnsWatcher) lookupSRV() map[string]*Update { + newAddrs := make(map[string]*Update) + _, srvs, err := lookupSRV(w.ctx, "grpclb", "tcp", w.host) + if err != nil { + grpclog.Infof("grpc: failed dns SRV record lookup due to %v.\n", err) + return nil + } + for _, s := range srvs { + lbAddrs, err := lookupHost(w.ctx, s.Target) + if err != nil { + grpclog.Warningf("grpc: failed load balancer address dns lookup due to %v.\n", err) + continue + } + for _, a := range lbAddrs { + a, ok := formatIP(a) + if !ok { + grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err) + continue + } + addr := a + ":" + strconv.Itoa(int(s.Port)) + newAddrs[addr] = &Update{Addr: addr, + Metadata: AddrMetadataGRPCLB{AddrType: GRPCLB, ServerName: s.Target}} + } + } + return newAddrs +} + +func (w *dnsWatcher) lookupHost() map[string]*Update { + newAddrs := make(map[string]*Update) + addrs, err := lookupHost(w.ctx, w.host) + if err != nil { + grpclog.Warningf("grpc: failed dns A record lookup due to %v.\n", err) + return nil + } + for _, a := range addrs { + a, ok := formatIP(a) + if !ok { + grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err) + continue + } + addr := a + ":" + w.port + newAddrs[addr] = &Update{Addr: addr} + } + return newAddrs +} + +func (w *dnsWatcher) lookup() []*Update { + newAddrs := w.lookupSRV() + if newAddrs == nil { + // If failed to get any balancer address (either no corresponding SRV for the + // target, or caused by failure during resolution/parsing of the balancer target), + // return any A record info available. + newAddrs = w.lookupHost() + } + result := w.compileUpdate(newAddrs) + w.curAddrs = newAddrs + return result +} + +// Next returns the resolved address update(delta) for the target. If there's no +// change, it will sleep for 30 mins and try to resolve again after that. +func (w *dnsWatcher) Next() ([]*Update, error) { + for { + select { + case <-w.ctx.Done(): + return nil, errWatcherClose + case <-w.t.C: + } + result := w.lookup() + // Next lookup should happen after an interval defined by w.r.freq. + w.t.Reset(w.r.freq) + if len(result) > 0 { + return result, nil + } + } +} + +func (w *dnsWatcher) Close() { + w.cancel() +} diff --git a/vendor/google.golang.org/grpc/naming/naming.go b/vendor/google.golang.org/grpc/naming/naming.go new file mode 100644 index 00000000..f4c1c8b6 --- /dev/null +++ b/vendor/google.golang.org/grpc/naming/naming.go @@ -0,0 +1,68 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +// Package naming defines the naming API and related data structures for gRPC. +// +// This package is deprecated: please use package resolver instead. +package naming + +// Operation defines the corresponding operations for a name resolution change. +// +// Deprecated: please use package resolver. +type Operation uint8 + +const ( + // Add indicates a new address is added. + Add Operation = iota + // Delete indicates an existing address is deleted. + Delete +) + +// Update defines a name resolution update. Notice that it is not valid having both +// empty string Addr and nil Metadata in an Update. +// +// Deprecated: please use package resolver. +type Update struct { + // Op indicates the operation of the update. + Op Operation + // Addr is the updated address. It is empty string if there is no address update. + Addr string + // Metadata is the updated metadata. It is nil if there is no metadata update. + // Metadata is not required for a custom naming implementation. + Metadata interface{} +} + +// Resolver creates a Watcher for a target to track its resolution changes. +// +// Deprecated: please use package resolver. +type Resolver interface { + // Resolve creates a Watcher for target. + Resolve(target string) (Watcher, error) +} + +// Watcher watches for the updates on the specified target. +// +// Deprecated: please use package resolver. +type Watcher interface { + // Next blocks until an update or error happens. It may return one or more + // updates. The first call should get the full set of the results. It should + // return an error if and only if Watcher cannot recover. + Next() ([]*Update, error) + // Close closes the Watcher. + Close() +} diff --git a/vendor/google.golang.org/grpc/peer/peer.go b/vendor/google.golang.org/grpc/peer/peer.go new file mode 100644 index 00000000..e01d219f --- /dev/null +++ b/vendor/google.golang.org/grpc/peer/peer.go @@ -0,0 +1,51 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package peer defines various peer information associated with RPCs and +// corresponding utils. +package peer + +import ( + "context" + "net" + + "google.golang.org/grpc/credentials" +) + +// Peer contains the information of the peer for an RPC, such as the address +// and authentication information. +type Peer struct { + // Addr is the peer address. + Addr net.Addr + // AuthInfo is the authentication information of the transport. + // It is nil if there is no transport security being used. + AuthInfo credentials.AuthInfo +} + +type peerKey struct{} + +// NewContext creates a new context with peer information attached. +func NewContext(ctx context.Context, p *Peer) context.Context { + return context.WithValue(ctx, peerKey{}, p) +} + +// FromContext returns the peer information in ctx if it exists. 
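+//
+// For example, inside a server handler (a minimal sketch):
+//
+//	if p, ok := peer.FromContext(ctx); ok {
+//		log.Printf("request from %v", p.Addr)
+//	}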
+func FromContext(ctx context.Context) (p *Peer, ok bool) {
+	p, ok = ctx.Value(peerKey{}).(*Peer)
+	return
+}
diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go
new file mode 100644
index 00000000..00447894
--- /dev/null
+++ b/vendor/google.golang.org/grpc/picker_wrapper.go
@@ -0,0 +1,229 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"sync"
+
+	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/internal/channelz"
+	"google.golang.org/grpc/internal/transport"
+	"google.golang.org/grpc/status"
+)
+
+// v2PickerWrapper wraps a balancer.Picker while providing the
+// balancer.V2Picker API. It requires a pickerWrapper to generate errors
+// including the latest connectionError. To be deleted when balancer.Picker is
+// updated to the balancer.V2Picker API.
+type v2PickerWrapper struct {
+	picker  balancer.Picker
+	connErr *connErr
+}
+
+func (v *v2PickerWrapper) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
+	sc, done, err := v.picker.Pick(info.Ctx, info)
+	if err != nil {
+		if err == balancer.ErrTransientFailure {
+			return balancer.PickResult{}, balancer.TransientFailureError(fmt.Errorf("%v, latest connection error: %v", err, v.connErr.connectionError()))
+		}
+		return balancer.PickResult{}, err
+	}
+	return balancer.PickResult{SubConn: sc, Done: done}, nil
+}
+
+// pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick
+// actions and unblocks when there's a picker update.
+type pickerWrapper struct {
+	mu         sync.Mutex
+	done       bool
+	blockingCh chan struct{}
+	picker     balancer.V2Picker
+
+	// The latest connection error. TODO: remove when V1 picker is deprecated;
+	// balancer should be responsible for providing the error.
+	*connErr
+}
+
+type connErr struct {
+	mu  sync.Mutex
+	err error
+}
+
+func (c *connErr) updateConnectionError(err error) {
+	c.mu.Lock()
+	c.err = err
+	c.mu.Unlock()
+}
+
+func (c *connErr) connectionError() error {
+	c.mu.Lock()
+	err := c.err
+	c.mu.Unlock()
+	return err
+}
+
+func newPickerWrapper() *pickerWrapper {
+	return &pickerWrapper{blockingCh: make(chan struct{}), connErr: &connErr{}}
+}
+
+// updatePicker is called by UpdateBalancerState. It unblocks all blocked picks.
+func (pw *pickerWrapper) updatePicker(p balancer.Picker) {
+	pw.updatePickerV2(&v2PickerWrapper{picker: p, connErr: pw.connErr})
+}
+
+// updatePickerV2 is called by UpdateBalancerState. It unblocks all blocked picks.
+func (pw *pickerWrapper) updatePickerV2(p balancer.V2Picker) {
+	pw.mu.Lock()
+	if pw.done {
+		pw.mu.Unlock()
+		return
+	}
+	pw.picker = p
+	// pw.blockingCh should never be nil.
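+	// Closing the channel wakes every goroutine blocked in pick waiting for a
+	// picker update; a fresh channel is then installed for future picks to
+	// block on until the next update.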
+ close(pw.blockingCh) + pw.blockingCh = make(chan struct{}) + pw.mu.Unlock() +} + +func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) func(balancer.DoneInfo) { + acw.mu.Lock() + ac := acw.ac + acw.mu.Unlock() + ac.incrCallsStarted() + return func(b balancer.DoneInfo) { + if b.Err != nil && b.Err != io.EOF { + ac.incrCallsFailed() + } else { + ac.incrCallsSucceeded() + } + if done != nil { + done(b) + } + } +} + +// pick returns the transport that will be used for the RPC. +// It may block in the following cases: +// - there's no picker +// - the current picker returns ErrNoSubConnAvailable +// - the current picker returns other errors and failfast is false. +// - the subConn returned by the current picker is not READY +// When one of these situations happens, pick blocks until the picker gets updated. +func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (transport.ClientTransport, func(balancer.DoneInfo), error) { + var ch chan struct{} + + var lastPickErr error + for { + pw.mu.Lock() + if pw.done { + pw.mu.Unlock() + return nil, nil, ErrClientConnClosing + } + + if pw.picker == nil { + ch = pw.blockingCh + } + if ch == pw.blockingCh { + // This could happen when either: + // - pw.picker is nil (the previous if condition), or + // - has called pick on the current picker. + pw.mu.Unlock() + select { + case <-ctx.Done(): + var errStr string + if lastPickErr != nil { + errStr = "latest balancer error: " + lastPickErr.Error() + } else if connectionErr := pw.connectionError(); connectionErr != nil { + errStr = "latest connection error: " + connectionErr.Error() + } else { + errStr = ctx.Err().Error() + } + switch ctx.Err() { + case context.DeadlineExceeded: + return nil, nil, status.Error(codes.DeadlineExceeded, errStr) + case context.Canceled: + return nil, nil, status.Error(codes.Canceled, errStr) + } + case <-ch: + } + continue + } + + ch = pw.blockingCh + p := pw.picker + pw.mu.Unlock() + + pickResult, err := p.Pick(info) + + if err != nil { + if err == balancer.ErrNoSubConnAvailable { + continue + } + if tfe, ok := err.(interface{ IsTransientFailure() bool }); ok && tfe.IsTransientFailure() { + if !failfast { + lastPickErr = err + continue + } + return nil, nil, status.Error(codes.Unavailable, err.Error()) + } + if _, ok := status.FromError(err); ok { + return nil, nil, err + } + // err is some other error. + return nil, nil, status.Error(codes.Unknown, err.Error()) + } + + acw, ok := pickResult.SubConn.(*acBalancerWrapper) + if !ok { + grpclog.Error("subconn returned from pick is not *acBalancerWrapper") + continue + } + if t, ok := acw.getAddrConn().getReadyTransport(); ok { + if channelz.IsOn() { + return t, doneChannelzWrapper(acw, pickResult.Done), nil + } + return t, pickResult.Done, nil + } + if pickResult.Done != nil { + // Calling done with nil error, no bytes sent and no bytes received. + // DoneInfo with default value works. + pickResult.Done(balancer.DoneInfo{}) + } + grpclog.Infof("blockingPicker: the picked transport is not ready, loop back to repick") + // If ok == false, ac.state is not READY. + // A valid picker always returns READY subConn. This means the state of ac + // just changed, and picker will be updated shortly. + // continue back to the beginning of the for loop to repick. 
+ } +} + +func (pw *pickerWrapper) close() { + pw.mu.Lock() + defer pw.mu.Unlock() + if pw.done { + return + } + pw.done = true + close(pw.blockingCh) +} diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go new file mode 100644 index 00000000..c43dac9a --- /dev/null +++ b/vendor/google.golang.org/grpc/pickfirst.go @@ -0,0 +1,159 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "errors" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/status" +) + +// PickFirstBalancerName is the name of the pick_first balancer. +const PickFirstBalancerName = "pick_first" + +func newPickfirstBuilder() balancer.Builder { + return &pickfirstBuilder{} +} + +type pickfirstBuilder struct{} + +func (*pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { + return &pickfirstBalancer{cc: cc} +} + +func (*pickfirstBuilder) Name() string { + return PickFirstBalancerName +} + +type pickfirstBalancer struct { + state connectivity.State + cc balancer.ClientConn + sc balancer.SubConn +} + +var _ balancer.V2Balancer = &pickfirstBalancer{} // Assert we implement v2 + +func (b *pickfirstBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) { + if err != nil { + b.ResolverError(err) + return + } + b.UpdateClientConnState(balancer.ClientConnState{ResolverState: resolver.State{Addresses: addrs}}) // Ignore error +} + +func (b *pickfirstBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { + b.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: s}) +} + +func (b *pickfirstBalancer) ResolverError(err error) { + switch b.state { + case connectivity.TransientFailure, connectivity.Idle, connectivity.Connecting: + // Set a failing picker if we don't have a good picker. 
+ b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: status.Errorf(codes.Unavailable, "name resolver error: %v", err)}}, + ) + } + if grpclog.V(2) { + grpclog.Infof("pickfirstBalancer: ResolverError called with error %v", err) + } +} + +func (b *pickfirstBalancer) UpdateClientConnState(cs balancer.ClientConnState) error { + if len(cs.ResolverState.Addresses) == 0 { + b.ResolverError(errors.New("produced zero addresses")) + return balancer.ErrBadResolverState + } + if b.sc == nil { + var err error + b.sc, err = b.cc.NewSubConn(cs.ResolverState.Addresses, balancer.NewSubConnOptions{}) + if err != nil { + if grpclog.V(2) { + grpclog.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) + } + b.state = connectivity.TransientFailure + b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: status.Errorf(codes.Unavailable, "error creating connection: %v", err)}}, + ) + return balancer.ErrBadResolverState + } + b.state = connectivity.Idle + b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.Idle, Picker: &picker{result: balancer.PickResult{SubConn: b.sc}}}) + b.sc.Connect() + } else { + b.sc.UpdateAddresses(cs.ResolverState.Addresses) + b.sc.Connect() + } + return nil +} + +func (b *pickfirstBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.SubConnState) { + if grpclog.V(2) { + grpclog.Infof("pickfirstBalancer: HandleSubConnStateChange: %p, %v", sc, s) + } + if b.sc != sc { + if grpclog.V(2) { + grpclog.Infof("pickfirstBalancer: ignored state change because sc is not recognized") + } + return + } + b.state = s.ConnectivityState + if s.ConnectivityState == connectivity.Shutdown { + b.sc = nil + return + } + + switch s.ConnectivityState { + case connectivity.Ready, connectivity.Idle: + b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{result: balancer.PickResult{SubConn: sc}}}) + case connectivity.Connecting: + b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{err: balancer.ErrNoSubConnAvailable}}) + case connectivity.TransientFailure: + err := balancer.ErrTransientFailure + // TODO: this can be unconditional after the V1 API is removed, as + // SubConnState will always contain a connection error. + if s.ConnectionError != nil { + err = balancer.TransientFailureError(s.ConnectionError) + } + b.cc.UpdateState(balancer.State{ + ConnectivityState: s.ConnectivityState, + Picker: &picker{err: err}, + }) + } +} + +func (b *pickfirstBalancer) Close() { +} + +type picker struct { + result balancer.PickResult + err error +} + +func (p *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { + return p.result, p.err +} + +func init() { + balancer.Register(newPickfirstBuilder()) +} diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go new file mode 100644 index 00000000..76acbbcc --- /dev/null +++ b/vendor/google.golang.org/grpc/preloader.go @@ -0,0 +1,64 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+// PreparedMsg is responsible for creating a Marshalled and Compressed object.
+//
+// This API is EXPERIMENTAL.
+type PreparedMsg struct {
+	// Struct for preparing msg before sending them
+	encodedData []byte
+	hdr         []byte
+	payload     []byte
+}
+
+// Encode marshals and compresses the message using the codec and compressor for the stream.
+func (p *PreparedMsg) Encode(s Stream, msg interface{}) error {
+	ctx := s.Context()
+	rpcInfo, ok := rpcInfoFromContext(ctx)
+	if !ok {
+		return status.Errorf(codes.Internal, "grpc: unable to get rpcInfo")
+	}
+
+	// check if the context has the relevant information to prepareMsg
+	if rpcInfo.preloaderInfo == nil {
+		return status.Errorf(codes.Internal, "grpc: rpcInfo.preloaderInfo is nil")
+	}
+	if rpcInfo.preloaderInfo.codec == nil {
+		return status.Errorf(codes.Internal, "grpc: rpcInfo.preloaderInfo.codec is nil")
+	}
+
+	// prepare the msg
+	data, err := encode(rpcInfo.preloaderInfo.codec, msg)
+	if err != nil {
+		return err
+	}
+	p.encodedData = data
+	compData, err := compress(data, rpcInfo.preloaderInfo.cp, rpcInfo.preloaderInfo.comp)
+	if err != nil {
+		return err
+	}
+	p.hdr, p.payload = msgHeader(data, compData)
+	return nil
+}
diff --git a/vendor/google.golang.org/grpc/proxy.go b/vendor/google.golang.org/grpc/proxy.go
new file mode 100644
index 00000000..f8f69bfb
--- /dev/null
+++ b/vendor/google.golang.org/grpc/proxy.go
@@ -0,0 +1,152 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"bufio"
+	"context"
+	"encoding/base64"
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+	"net/http/httputil"
+	"net/url"
+)
+
+const proxyAuthHeaderKey = "Proxy-Authorization"
+
+var (
+	// errDisabled indicates that proxy is disabled for the address.
+	errDisabled = errors.New("proxy is disabled for the address")
+	// The following variable will be overwritten in the tests.
+	httpProxyFromEnvironment = http.ProxyFromEnvironment
+)
+
+func mapAddress(ctx context.Context, address string) (*url.URL, error) {
+	req := &http.Request{
+		URL: &url.URL{
+			Scheme: "https",
+			Host:   address,
+		},
+	}
+	url, err := httpProxyFromEnvironment(req)
+	if err != nil {
+		return nil, err
+	}
+	if url == nil {
+		return nil, errDisabled
+	}
+	return url, nil
+}
+
+// To read a response from a net.Conn, http.ReadResponse() takes a bufio.Reader.
+// It's possible that this reader reads more than what's needed for the response and stores
+// those bytes in the buffer.
+// bufConn wraps the original net.Conn and the bufio.Reader to make sure we don't lose the
+// bytes in the buffer.
+type bufConn struct {
+	net.Conn
+	r io.Reader
+}
+
+func (c *bufConn) Read(b []byte) (int, error) {
+	return c.r.Read(b)
+}
+
+func basicAuth(username, password string) string {
+	auth := username + ":" + password
+	return base64.StdEncoding.EncodeToString([]byte(auth))
+}
+
+func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr string, proxyURL *url.URL) (_ net.Conn, err error) {
+	defer func() {
+		if err != nil {
+			conn.Close()
+		}
+	}()
+
+	req := &http.Request{
+		Method: http.MethodConnect,
+		URL:    &url.URL{Host: backendAddr},
+		Header: map[string][]string{"User-Agent": {grpcUA}},
+	}
+	if t := proxyURL.User; t != nil {
+		u := t.Username()
+		p, _ := t.Password()
+		req.Header.Add(proxyAuthHeaderKey, "Basic "+basicAuth(u, p))
+	}
+
+	if err := sendHTTPRequest(ctx, req, conn); err != nil {
+		return nil, fmt.Errorf("failed to write the HTTP request: %v", err)
+	}
+
+	r := bufio.NewReader(conn)
+	resp, err := http.ReadResponse(r, req)
+	if err != nil {
+		return nil, fmt.Errorf("reading server HTTP response: %v", err)
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode != http.StatusOK {
+		dump, err := httputil.DumpResponse(resp, true)
+		if err != nil {
+			return nil, fmt.Errorf("failed to do connect handshake, status code: %s", resp.Status)
+		}
+		return nil, fmt.Errorf("failed to do connect handshake, response: %q", dump)
+	}
+
+	return &bufConn{Conn: conn, r: r}, nil
+}
+
+// newProxyDialer returns a dialer that connects to the proxy first if necessary.
+// The returned dialer checks if a proxy is necessary, dials the proxy with the
+// provided dialer, performs the HTTP CONNECT handshake, and returns the connection.
+func newProxyDialer(dialer func(context.Context, string) (net.Conn, error)) func(context.Context, string) (net.Conn, error) {
+	return func(ctx context.Context, addr string) (conn net.Conn, err error) {
+		var newAddr string
+		proxyURL, err := mapAddress(ctx, addr)
+		if err != nil {
+			if err != errDisabled {
+				return nil, err
+			}
+			newAddr = addr
+		} else {
+			newAddr = proxyURL.Host
+		}
+
+		conn, err = dialer(ctx, newAddr)
+		if err != nil {
+			return
+		}
+		if proxyURL != nil {
+			// proxy is disabled if proxyURL is nil.
+			conn, err = doHTTPConnectHandshake(ctx, conn, addr, proxyURL)
+		}
+		return
+	}
+}
+
+func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error {
+	req = req.WithContext(ctx)
+	if err := req.Write(conn); err != nil {
+		return fmt.Errorf("failed to write the HTTP request: %v", err)
+	}
+	return nil
+}
diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go
new file mode 100644
index 00000000..fe14b2fb
--- /dev/null
+++ b/vendor/google.golang.org/grpc/resolver/resolver.go
@@ -0,0 +1,253 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package resolver defines APIs for name resolution in gRPC.
+// All APIs in this package are experimental.
+package resolver
+
+import (
+	"context"
+	"net"
+
+	"google.golang.org/grpc/attributes"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/serviceconfig"
+)
+
+var (
+	// m is a map from scheme to resolver builder.
+	m = make(map[string]Builder)
+	// defaultScheme is the default scheme to use.
+	defaultScheme = "passthrough"
+)
+
+// TODO(bar) install dns resolver in init(){}.
+
+// Register registers the resolver builder to the resolver map. b.Scheme will be
+// used as the scheme registered with this builder.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple Resolvers are
+// registered with the same name, the one registered last will take effect.
+func Register(b Builder) {
+	m[b.Scheme()] = b
+}
+
+// Get returns the resolver builder registered with the given scheme.
+//
+// If no builder is registered with the scheme, nil will be returned.
+func Get(scheme string) Builder {
+	if b, ok := m[scheme]; ok {
+		return b
+	}
+	return nil
+}
+
+// SetDefaultScheme sets the default scheme that will be used. The default
+// scheme is "passthrough".
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. The scheme set last overrides
+// previously set values.
+func SetDefaultScheme(scheme string) {
+	defaultScheme = scheme
+}
+
+// GetDefaultScheme gets the default scheme that will be used.
+func GetDefaultScheme() string {
+	return defaultScheme
+}
+
+// AddressType indicates the address type returned by name resolution.
+//
+// Deprecated: use Attributes in Address instead.
+type AddressType uint8
+
+const (
+	// Backend indicates the address is for a backend server.
+	//
+	// Deprecated: use Attributes in Address instead.
+	Backend AddressType = iota
+	// GRPCLB indicates the address is for a grpclb load balancer.
+	//
+	// Deprecated: use Attributes in Address instead.
+	GRPCLB
+)
+
+// Address represents a server the client connects to.
+// This is the EXPERIMENTAL API and may be changed or extended in the future.
+type Address struct {
+	// Addr is the server address on which a connection will be established.
+	Addr string
+
+	// ServerName is the name of this address.
+	// If non-empty, the ServerName is used as the transport certification authority for
+	// the address, instead of the hostname from the Dial target string. In most cases,
+	// this should not be set.
+	//
+	// If Type is GRPCLB, ServerName should be the name of the remote load
+	// balancer, not the name of the backend.
+	//
+	// WARNING: ServerName must only be populated with trusted values. It
+	// is insecure to populate it with data from untrusted inputs since untrusted
+	// values could be used to bypass the authority checks performed by TLS.
+	ServerName string
+
+	// Attributes contains arbitrary data about this address intended for
+	// consumption by the load balancing policy.
+	Attributes *attributes.Attributes
+
+	// Type is the type of this address.
+	//
+	// Deprecated: use Attributes instead.
+	Type AddressType
+
+	// Metadata is the information associated with Addr, which may be used
+	// to make load balancing decision.
+	//
+	// Deprecated: use Attributes instead.
+	Metadata interface{}
+}
+
+// BuildOptions includes additional information for the builder to create
+// the resolver.
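+//
+// A resolver's Build method receives these options; a hypothetical sketch
+// (exampleBuilder is not a real type):
+//
+//	func (b *exampleBuilder) Build(t Target, cc ClientConn, opts BuildOptions) (Resolver, error) {
+//		if opts.DisableServiceConfig {
+//			// skip fetching service config data
+//		}
+//		// ...
+//	}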
+type BuildOptions struct { + // DisableServiceConfig indicates whether a resolver implementation should + // fetch service config data. + DisableServiceConfig bool + // DialCreds is the transport credentials used by the ClientConn for + // communicating with the target gRPC service (set via + // WithTransportCredentials). In cases where a name resolution service + // requires the same credentials, the resolver may use this field. In most + // cases though, it is not appropriate, and this field may be ignored. + DialCreds credentials.TransportCredentials + // CredsBundle is the credentials bundle used by the ClientConn for + // communicating with the target gRPC service (set via + // WithCredentialsBundle). In cases where a name resolution service + // requires the same credentials, the resolver may use this field. In most + // cases though, it is not appropriate, and this field may be ignored. + CredsBundle credentials.Bundle + // Dialer is the custom dialer used by the ClientConn for dialling the + // target gRPC service (set via WithDialer). In cases where a name + // resolution service requires the same dialer, the resolver may use this + // field. In most cases though, it is not appropriate, and this field may + // be ignored. + Dialer func(context.Context, string) (net.Conn, error) +} + +// State contains the current Resolver state relevant to the ClientConn. +type State struct { + // Addresses is the latest set of resolved addresses for the target. + Addresses []Address + + // ServiceConfig contains the result from parsing the latest service + // config. If it is nil, it indicates no service config is present or the + // resolver does not provide service configs. + ServiceConfig *serviceconfig.ParseResult + + // Attributes contains arbitrary data about the resolver intended for + // consumption by the load balancing policy. + Attributes *attributes.Attributes +} + +// ClientConn contains the callbacks for resolver to notify any updates +// to the gRPC ClientConn. +// +// This interface is to be implemented by gRPC. Users should not need a +// brand new implementation of this interface. For the situations like +// testing, the new implementation should embed this interface. This allows +// gRPC to add new methods to this interface. +type ClientConn interface { + // UpdateState updates the state of the ClientConn appropriately. + UpdateState(State) + // ReportError notifies the ClientConn that the Resolver encountered an + // error. The ClientConn will notify the load balancer and begin calling + // ResolveNow on the Resolver with exponential backoff. + ReportError(error) + // NewAddress is called by resolver to notify ClientConn a new list + // of resolved addresses. + // The address list should be the complete list of resolved addresses. + // + // Deprecated: Use UpdateState instead. + NewAddress(addresses []Address) + // NewServiceConfig is called by resolver to notify ClientConn a new + // service config. The service config should be provided as a json string. + // + // Deprecated: Use UpdateState instead. + NewServiceConfig(serviceConfig string) + // ParseServiceConfig parses the provided service config and returns an + // object that provides the parsed config. + ParseServiceConfig(serviceConfigJSON string) *serviceconfig.ParseResult +} + +// Target represents a target for gRPC, as specified in: +// https://github.com/grpc/grpc/blob/master/doc/naming.md. +// It is parsed from the target string that gets passed into Dial or DialContext by the user. 
And +// grpc passes it to the resolver and the balancer. +// +// If the target follows the naming spec, and the parsed scheme is registered with grpc, we will +// parse the target string according to the spec. e.g. "dns://some_authority/foo.bar" will be parsed +// into &Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"} +// +// If the target does not contain a scheme, we will apply the default scheme, and set the Target to +// be the full target string. e.g. "foo.bar" will be parsed into +// &Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"}. +// +// If the parsed scheme is not registered (i.e. no corresponding resolver available to resolve the +// endpoint), we set the Scheme to be the default scheme, and set the Endpoint to be the full target +// string. e.g. target string "unknown_scheme://authority/endpoint" will be parsed into +// &Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"}. +type Target struct { + Scheme string + Authority string + Endpoint string +} + +// Builder creates a resolver that will be used to watch name resolution updates. +type Builder interface { + // Build creates a new resolver for the given target. + // + // gRPC dial calls Build synchronously, and fails if the returned error is + // not nil. + Build(target Target, cc ClientConn, opts BuildOptions) (Resolver, error) + // Scheme returns the scheme supported by this resolver. + // Scheme is defined at https://github.com/grpc/grpc/blob/master/doc/naming.md. + Scheme() string +} + +// ResolveNowOptions includes additional information for ResolveNow. +type ResolveNowOptions struct{} + +// Resolver watches for the updates on the specified target. +// Updates include address updates and service config updates. +type Resolver interface { + // ResolveNow will be called by gRPC to try to resolve the target name + // again. It's just a hint, resolver can ignore this if it's not necessary. + // + // It could be called multiple times concurrently. + ResolveNow(ResolveNowOptions) + // Close closes the resolver. + Close() +} + +// UnregisterForTesting removes the resolver builder with the given scheme from the +// resolver map. +// This function is for testing only. +func UnregisterForTesting(scheme string) { + delete(m, scheme) +} diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go new file mode 100644 index 00000000..edfda866 --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go @@ -0,0 +1,222 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package grpc + +import ( + "fmt" + "strings" + "sync" + "time" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +// ccResolverWrapper is a wrapper on top of cc for resolvers. +// It implements resolver.ClientConn interface. +type ccResolverWrapper struct { + cc *ClientConn + resolverMu sync.Mutex + resolver resolver.Resolver + done *grpcsync.Event + curState resolver.State + + pollingMu sync.Mutex + polling chan struct{} +} + +// newCCResolverWrapper uses the resolver.Builder to build a Resolver and +// returns a ccResolverWrapper object which wraps the newly built resolver. +func newCCResolverWrapper(cc *ClientConn, rb resolver.Builder) (*ccResolverWrapper, error) { + ccr := &ccResolverWrapper{ + cc: cc, + done: grpcsync.NewEvent(), + } + + var credsClone credentials.TransportCredentials + if creds := cc.dopts.copts.TransportCredentials; creds != nil { + credsClone = creds.Clone() + } + rbo := resolver.BuildOptions{ + DisableServiceConfig: cc.dopts.disableServiceConfig, + DialCreds: credsClone, + CredsBundle: cc.dopts.copts.CredsBundle, + Dialer: cc.dopts.copts.Dialer, + } + + var err error + // We need to hold the lock here while we assign to the ccr.resolver field + // to guard against a data race caused by the following code path, + // rb.Build-->ccr.ReportError-->ccr.poll-->ccr.resolveNow, would end up + // accessing ccr.resolver which is being assigned here. + ccr.resolverMu.Lock() + defer ccr.resolverMu.Unlock() + ccr.resolver, err = rb.Build(cc.parsedTarget, ccr, rbo) + if err != nil { + return nil, err + } + return ccr, nil +} + +func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) { + ccr.resolverMu.Lock() + if !ccr.done.HasFired() { + ccr.resolver.ResolveNow(o) + } + ccr.resolverMu.Unlock() +} + +func (ccr *ccResolverWrapper) close() { + ccr.resolverMu.Lock() + ccr.resolver.Close() + ccr.done.Fire() + ccr.resolverMu.Unlock() +} + +// poll begins or ends asynchronous polling of the resolver based on whether +// err is ErrBadResolverState. +func (ccr *ccResolverWrapper) poll(err error) { + ccr.pollingMu.Lock() + defer ccr.pollingMu.Unlock() + if err != balancer.ErrBadResolverState { + // stop polling + if ccr.polling != nil { + close(ccr.polling) + ccr.polling = nil + } + return + } + if ccr.polling != nil { + // already polling + return + } + p := make(chan struct{}) + ccr.polling = p + go func() { + for i := 0; ; i++ { + ccr.resolveNow(resolver.ResolveNowOptions{}) + t := time.NewTimer(ccr.cc.dopts.resolveNowBackoff(i)) + select { + case <-p: + t.Stop() + return + case <-ccr.done.Done(): + // Resolver has been closed. + t.Stop() + return + case <-t.C: + select { + case <-p: + return + default: + } + // Timer expired; re-resolve. 
+ } + } + }() +} + +func (ccr *ccResolverWrapper) UpdateState(s resolver.State) { + if ccr.done.HasFired() { + return + } + channelz.Infof(ccr.cc.channelzID, "ccResolverWrapper: sending update to cc: %v", s) + if channelz.IsOn() { + ccr.addChannelzTraceEvent(s) + } + ccr.curState = s + ccr.poll(ccr.cc.updateResolverState(ccr.curState, nil)) +} + +func (ccr *ccResolverWrapper) ReportError(err error) { + if ccr.done.HasFired() { + return + } + channelz.Warningf(ccr.cc.channelzID, "ccResolverWrapper: reporting error to cc: %v", err) + ccr.poll(ccr.cc.updateResolverState(resolver.State{}, err)) +} + +// NewAddress is called by the resolver implementation to send addresses to gRPC. +func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { + if ccr.done.HasFired() { + return + } + channelz.Infof(ccr.cc.channelzID, "ccResolverWrapper: sending new addresses to cc: %v", addrs) + if channelz.IsOn() { + ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) + } + ccr.curState.Addresses = addrs + ccr.poll(ccr.cc.updateResolverState(ccr.curState, nil)) +} + +// NewServiceConfig is called by the resolver implementation to send service +// configs to gRPC. +func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { + if ccr.done.HasFired() { + return + } + channelz.Infof(ccr.cc.channelzID, "ccResolverWrapper: got new service config: %v", sc) + if ccr.cc.dopts.disableServiceConfig { + channelz.Info(ccr.cc.channelzID, "Service config lookups disabled; ignoring config") + return + } + scpr := parseServiceConfig(sc) + if scpr.Err != nil { + channelz.Warningf(ccr.cc.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err) + ccr.poll(balancer.ErrBadResolverState) + return + } + if channelz.IsOn() { + ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr}) + } + ccr.curState.ServiceConfig = scpr + ccr.poll(ccr.cc.updateResolverState(ccr.curState, nil)) +} + +func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult { + return parseServiceConfig(scJSON) +} + +func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { + var updates []string + var oldSC, newSC *ServiceConfig + var oldOK, newOK bool + if ccr.curState.ServiceConfig != nil { + oldSC, oldOK = ccr.curState.ServiceConfig.Config.(*ServiceConfig) + } + if s.ServiceConfig != nil { + newSC, newOK = s.ServiceConfig.Config.(*ServiceConfig) + } + if oldOK != newOK || (oldOK && newOK && oldSC.rawJSONString != newSC.rawJSONString) { + updates = append(updates, "service config updated") + } + if len(ccr.curState.Addresses) > 0 && len(s.Addresses) == 0 { + updates = append(updates, "resolver returned an empty address list") + } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { + updates = append(updates, "resolver returned new addresses") + } + channelz.AddTraceEvent(ccr.cc.channelzID, 0, &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Resolver state updated: %+v (%v)", s, strings.Join(updates, "; ")), + Severity: channelz.CtINFO, + }) +} diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go new file mode 100644 index 00000000..cf9dbe7f --- /dev/null +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -0,0 +1,889 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"bytes"
+	"compress/gzip"
+	"context"
+	"encoding/binary"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"math"
+	"net/url"
+	"strings"
+	"sync"
+	"time"
+
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/encoding"
+	"google.golang.org/grpc/encoding/proto"
+	"google.golang.org/grpc/internal/transport"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/peer"
+	"google.golang.org/grpc/stats"
+	"google.golang.org/grpc/status"
+)
+
+// Compressor defines the interface gRPC uses to compress a message.
+//
+// Deprecated: use package encoding.
+type Compressor interface {
+	// Do compresses p into w.
+	Do(w io.Writer, p []byte) error
+	// Type returns the compression algorithm the Compressor uses.
+	Type() string
+}
+
+type gzipCompressor struct {
+	pool sync.Pool
+}
+
+// NewGZIPCompressor creates a Compressor based on GZIP.
+//
+// Deprecated: use package encoding/gzip.
+func NewGZIPCompressor() Compressor {
+	c, _ := NewGZIPCompressorWithLevel(gzip.DefaultCompression)
+	return c
+}
+
+// NewGZIPCompressorWithLevel is like NewGZIPCompressor but specifies the gzip compression level instead
+// of assuming DefaultCompression.
+//
+// The error returned will be nil if the level is valid.
+//
+// Deprecated: use package encoding/gzip.
+func NewGZIPCompressorWithLevel(level int) (Compressor, error) {
+	if level < gzip.DefaultCompression || level > gzip.BestCompression {
+		return nil, fmt.Errorf("grpc: invalid compression level: %d", level)
+	}
+	return &gzipCompressor{
+		pool: sync.Pool{
+			New: func() interface{} {
+				w, err := gzip.NewWriterLevel(ioutil.Discard, level)
+				if err != nil {
+					panic(err)
+				}
+				return w
+			},
+		},
+	}, nil
+}
+
+func (c *gzipCompressor) Do(w io.Writer, p []byte) error {
+	z := c.pool.Get().(*gzip.Writer)
+	defer c.pool.Put(z)
+	z.Reset(w)
+	if _, err := z.Write(p); err != nil {
+		return err
+	}
+	return z.Close()
+}
+
+func (c *gzipCompressor) Type() string {
+	return "gzip"
+}
+
+// Decompressor defines the interface gRPC uses to decompress a message.
+//
+// Deprecated: use package encoding.
+type Decompressor interface {
+	// Do reads the data from r and uncompresses it.
+	Do(r io.Reader) ([]byte, error)
+	// Type returns the compression algorithm the Decompressor uses.
+	Type() string
+}
+
+type gzipDecompressor struct {
+	pool sync.Pool
+}
+
+// NewGZIPDecompressor creates a Decompressor based on GZIP.
+//
+// Deprecated: use package encoding/gzip.
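+//
+// A sketch of the (deprecated) dial options this pairs with; the target is
+// illustrative:
+//
+//	conn, err := grpc.Dial("example.com:443",
+//		grpc.WithCompressor(grpc.NewGZIPCompressor()),
+//		grpc.WithDecompressor(grpc.NewGZIPDecompressor()))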
+func NewGZIPDecompressor() Decompressor { + return &gzipDecompressor{} +} + +func (d *gzipDecompressor) Do(r io.Reader) ([]byte, error) { + var z *gzip.Reader + switch maybeZ := d.pool.Get().(type) { + case nil: + newZ, err := gzip.NewReader(r) + if err != nil { + return nil, err + } + z = newZ + case *gzip.Reader: + z = maybeZ + if err := z.Reset(r); err != nil { + d.pool.Put(z) + return nil, err + } + } + + defer func() { + z.Close() + d.pool.Put(z) + }() + return ioutil.ReadAll(z) +} + +func (d *gzipDecompressor) Type() string { + return "gzip" +} + +// callInfo contains all related configuration and information about an RPC. +type callInfo struct { + compressorType string + failFast bool + stream ClientStream + maxReceiveMessageSize *int + maxSendMessageSize *int + creds credentials.PerRPCCredentials + contentSubtype string + codec baseCodec + maxRetryRPCBufferSize int +} + +func defaultCallInfo() *callInfo { + return &callInfo{ + failFast: true, + maxRetryRPCBufferSize: 256 * 1024, // 256KB + } +} + +// CallOption configures a Call before it starts or extracts information from +// a Call after it completes. +type CallOption interface { + // before is called before the call is sent to any server. If before + // returns a non-nil error, the RPC fails with that error. + before(*callInfo) error + + // after is called after the call has completed. after cannot return an + // error, so any failures should be reported via output parameters. + after(*callInfo) +} + +// EmptyCallOption does not alter the Call configuration. +// It can be embedded in another structure to carry satellite data for use +// by interceptors. +type EmptyCallOption struct{} + +func (EmptyCallOption) before(*callInfo) error { return nil } +func (EmptyCallOption) after(*callInfo) {} + +// Header returns a CallOptions that retrieves the header metadata +// for a unary RPC. +func Header(md *metadata.MD) CallOption { + return HeaderCallOption{HeaderAddr: md} +} + +// HeaderCallOption is a CallOption for collecting response header metadata. +// The metadata field will be populated *after* the RPC completes. +// This is an EXPERIMENTAL API. +type HeaderCallOption struct { + HeaderAddr *metadata.MD +} + +func (o HeaderCallOption) before(c *callInfo) error { return nil } +func (o HeaderCallOption) after(c *callInfo) { + if c.stream != nil { + *o.HeaderAddr, _ = c.stream.Header() + } +} + +// Trailer returns a CallOptions that retrieves the trailer metadata +// for a unary RPC. +func Trailer(md *metadata.MD) CallOption { + return TrailerCallOption{TrailerAddr: md} +} + +// TrailerCallOption is a CallOption for collecting response trailer metadata. +// The metadata field will be populated *after* the RPC completes. +// This is an EXPERIMENTAL API. +type TrailerCallOption struct { + TrailerAddr *metadata.MD +} + +func (o TrailerCallOption) before(c *callInfo) error { return nil } +func (o TrailerCallOption) after(c *callInfo) { + if c.stream != nil { + *o.TrailerAddr = c.stream.Trailer() + } +} + +// Peer returns a CallOption that retrieves peer information for a unary RPC. +// The peer field will be populated *after* the RPC completes. +func Peer(p *peer.Peer) CallOption { + return PeerCallOption{PeerAddr: p} +} + +// PeerCallOption is a CallOption for collecting the identity of the remote +// peer. The peer field will be populated *after* the RPC completes. +// This is an EXPERIMENTAL API. 
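+//
+// Editorial example (a sketch): collecting header, trailer and peer metadata
+// from a unary call; MyMethod stands in for a generated client method:
+//
+//	var hdr, trl metadata.MD
+//	var p peer.Peer
+//	resp, err := client.MyMethod(ctx, req,
+//		grpc.Header(&hdr), grpc.Trailer(&trl), grpc.Peer(&p))
+//	// hdr, trl and p are populated only after the RPC completes.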
+type PeerCallOption struct { + PeerAddr *peer.Peer +} + +func (o PeerCallOption) before(c *callInfo) error { return nil } +func (o PeerCallOption) after(c *callInfo) { + if c.stream != nil { + if x, ok := peer.FromContext(c.stream.Context()); ok { + *o.PeerAddr = *x + } + } +} + +// WaitForReady configures the action to take when an RPC is attempted on broken +// connections or unreachable servers. If waitForReady is false, the RPC will fail +// immediately. Otherwise, the RPC client will block the call until a +// connection is available (or the call is canceled or times out) and will +// retry the call if it fails due to a transient error. gRPC will not retry if +// data was written to the wire unless the server indicates it did not process +// the data. Please refer to +// https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md. +// +// By default, RPCs don't "wait for ready". +func WaitForReady(waitForReady bool) CallOption { + return FailFastCallOption{FailFast: !waitForReady} +} + +// FailFast is the opposite of WaitForReady. +// +// Deprecated: use WaitForReady. +func FailFast(failFast bool) CallOption { + return FailFastCallOption{FailFast: failFast} +} + +// FailFastCallOption is a CallOption for indicating whether an RPC should fail +// fast or not. +// This is an EXPERIMENTAL API. +type FailFastCallOption struct { + FailFast bool +} + +func (o FailFastCallOption) before(c *callInfo) error { + c.failFast = o.FailFast + return nil +} +func (o FailFastCallOption) after(c *callInfo) {} + +// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size +// in bytes the client can receive. +func MaxCallRecvMsgSize(bytes int) CallOption { + return MaxRecvMsgSizeCallOption{MaxRecvMsgSize: bytes} +} + +// MaxRecvMsgSizeCallOption is a CallOption that indicates the maximum message +// size in bytes the client can receive. +// This is an EXPERIMENTAL API. +type MaxRecvMsgSizeCallOption struct { + MaxRecvMsgSize int +} + +func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error { + c.maxReceiveMessageSize = &o.MaxRecvMsgSize + return nil +} +func (o MaxRecvMsgSizeCallOption) after(c *callInfo) {} + +// MaxCallSendMsgSize returns a CallOption which sets the maximum message size +// in bytes the client can send. +func MaxCallSendMsgSize(bytes int) CallOption { + return MaxSendMsgSizeCallOption{MaxSendMsgSize: bytes} +} + +// MaxSendMsgSizeCallOption is a CallOption that indicates the maximum message +// size in bytes the client can send. +// This is an EXPERIMENTAL API. +type MaxSendMsgSizeCallOption struct { + MaxSendMsgSize int +} + +func (o MaxSendMsgSizeCallOption) before(c *callInfo) error { + c.maxSendMessageSize = &o.MaxSendMsgSize + return nil +} +func (o MaxSendMsgSizeCallOption) after(c *callInfo) {} + +// PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials +// for a call. +func PerRPCCredentials(creds credentials.PerRPCCredentials) CallOption { + return PerRPCCredsCallOption{Creds: creds} +} + +// PerRPCCredsCallOption is a CallOption that indicates the per-RPC +// credentials to use for the call. +// This is an EXPERIMENTAL API. +type PerRPCCredsCallOption struct { + Creds credentials.PerRPCCredentials +} + +func (o PerRPCCredsCallOption) before(c *callInfo) error { + c.creds = o.Creds + return nil +} +func (o PerRPCCredsCallOption) after(c *callInfo) {} + +// UseCompressor returns a CallOption which sets the compressor used when +// sending the request. If WithCompressor is also set, UseCompressor has +// higher priority. 
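+//
+// Editorial example (a sketch; sizes are illustrative): per-call options
+// compose, e.g. waiting for a ready connection while raising the default
+// 4MB receive limit:
+//
+//	resp, err := client.MyMethod(ctx, req,
+//		grpc.WaitForReady(true),
+//		grpc.MaxCallRecvMsgSize(16*1024*1024),
+//	)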
+// +// This API is EXPERIMENTAL. +func UseCompressor(name string) CallOption { + return CompressorCallOption{CompressorType: name} +} + +// CompressorCallOption is a CallOption that indicates the compressor to use. +// This is an EXPERIMENTAL API. +type CompressorCallOption struct { + CompressorType string +} + +func (o CompressorCallOption) before(c *callInfo) error { + c.compressorType = o.CompressorType + return nil +} +func (o CompressorCallOption) after(c *callInfo) {} + +// CallContentSubtype returns a CallOption that will set the content-subtype +// for a call. For example, if content-subtype is "json", the Content-Type over +// the wire will be "application/grpc+json". The content-subtype is converted +// to lowercase before being included in Content-Type. See Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +// +// If ForceCodec is not also used, the content-subtype will be used to look up +// the Codec to use in the registry controlled by RegisterCodec. See the +// documentation on RegisterCodec for details on registration. The lookup of +// content-subtype is case-insensitive. If no such Codec is found, the call +// will result in an error with code codes.Internal. +// +// If ForceCodec is also used, that Codec will be used for all request and +// response messages, with the content-subtype set to the given contentSubtype +// here for requests. +func CallContentSubtype(contentSubtype string) CallOption { + return ContentSubtypeCallOption{ContentSubtype: strings.ToLower(contentSubtype)} +} + +// ContentSubtypeCallOption is a CallOption that indicates the content-subtype +// used for marshaling messages. +// This is an EXPERIMENTAL API. +type ContentSubtypeCallOption struct { + ContentSubtype string +} + +func (o ContentSubtypeCallOption) before(c *callInfo) error { + c.contentSubtype = o.ContentSubtype + return nil +} +func (o ContentSubtypeCallOption) after(c *callInfo) {} + +// ForceCodec returns a CallOption that will set the given Codec to be +// used for all request and response messages for a call. The result of calling +// String() will be used as the content-subtype in a case-insensitive manner. +// +// See Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. Also see the documentation on RegisterCodec and +// CallContentSubtype for more details on the interaction between Codec and +// content-subtype. +// +// This function is provided for advanced users; prefer to use only +// CallContentSubtype to select a registered codec instead. +// +// This is an EXPERIMENTAL API. +func ForceCodec(codec encoding.Codec) CallOption { + return ForceCodecCallOption{Codec: codec} +} + +// ForceCodecCallOption is a CallOption that indicates the codec used for +// marshaling messages. +// +// This is an EXPERIMENTAL API. +type ForceCodecCallOption struct { + Codec encoding.Codec +} + +func (o ForceCodecCallOption) before(c *callInfo) error { + c.codec = o.Codec + return nil +} +func (o ForceCodecCallOption) after(c *callInfo) {} + +// CallCustomCodec behaves like ForceCodec, but accepts a grpc.Codec instead of +// an encoding.Codec. +// +// Deprecated: use ForceCodec instead. +func CallCustomCodec(codec Codec) CallOption { + return CustomCodecCallOption{Codec: codec} +} + +// CustomCodecCallOption is a CallOption that indicates the codec used for +// marshaling messages. +// +// This is an EXPERIMENTAL API. 
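+//
+// Editorial example (a sketch): CallContentSubtype only succeeds if a codec
+// named "json" was previously registered; jsonCodec here is hypothetical:
+//
+//	// encoding.RegisterCodec(jsonCodec{}) // done once, e.g. in init()
+//	resp, err := client.MyMethod(ctx, req, grpc.CallContentSubtype("json"))
+//	// The request is sent with Content-Type: application/grpc+json.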
+type CustomCodecCallOption struct { + Codec Codec +} + +func (o CustomCodecCallOption) before(c *callInfo) error { + c.codec = o.Codec + return nil +} +func (o CustomCodecCallOption) after(c *callInfo) {} + +// MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory +// used for buffering this RPC's requests for retry purposes. +// +// This API is EXPERIMENTAL. +func MaxRetryRPCBufferSize(bytes int) CallOption { + return MaxRetryRPCBufferSizeCallOption{bytes} +} + +// MaxRetryRPCBufferSizeCallOption is a CallOption indicating the amount of +// memory to be used for caching this RPC for retry purposes. +// This is an EXPERIMENTAL API. +type MaxRetryRPCBufferSizeCallOption struct { + MaxRetryRPCBufferSize int +} + +func (o MaxRetryRPCBufferSizeCallOption) before(c *callInfo) error { + c.maxRetryRPCBufferSize = o.MaxRetryRPCBufferSize + return nil +} +func (o MaxRetryRPCBufferSizeCallOption) after(c *callInfo) {} + +// The format of the payload: compressed or not? +type payloadFormat uint8 + +const ( + compressionNone payloadFormat = 0 // no compression + compressionMade payloadFormat = 1 // compressed +) + +// parser reads complete gRPC messages from the underlying reader. +type parser struct { + // r is the underlying reader. + // See the comment on recvMsg for the permissible + // error types. + r io.Reader + + // The header of a gRPC message. Find more detail at + // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md + header [5]byte +} + +// recvMsg reads a complete gRPC message from the stream. +// +// It returns the message and its payload (compression/encoding) +// format. The caller owns the returned msg memory. +// +// If there is an error, possible values are: +// * io.EOF, when no messages remain +// * io.ErrUnexpectedEOF +// * of type transport.ConnectionError +// * an error from the status package +// No other error values or types must be returned, which also means +// that the underlying io.Reader must not return an incompatible +// error. +func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byte, err error) { + if _, err := p.r.Read(p.header[:]); err != nil { + return 0, nil, err + } + + pf = payloadFormat(p.header[0]) + length := binary.BigEndian.Uint32(p.header[1:]) + + if length == 0 { + return pf, nil, nil + } + if int64(length) > int64(maxInt) { + return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max length allowed on current machine (%d vs. %d)", length, maxInt) + } + if int(length) > maxReceiveMessageSize { + return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize) + } + // TODO(bradfitz,zhaoq): garbage. reuse buffer after proto decoding instead + // of making it for each message: + msg = make([]byte, int(length)) + if _, err := p.r.Read(msg); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return 0, nil, err + } + return pf, msg, nil +} + +// encode serializes msg and returns a buffer containing the message, or an +// error if it is too large to be transmitted by grpc. If msg is nil, it +// generates an empty message. 
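+//
+// Editorial note (a sketch of the framing recvMsg parses above): each gRPC
+// message is prefixed by 1 flag byte (0 = plain, 1 = compressed) and a
+// big-endian uint32 length. An uncompressed 5-byte payload therefore
+// travels on the wire as:
+//
+//	0x00 0x00 0x00 0x00 0x05 <5 payload bytes>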
+func encode(c baseCodec, msg interface{}) ([]byte, error) { + if msg == nil { // NOTE: typed nils will not be caught by this check + return nil, nil + } + b, err := c.Marshal(msg) + if err != nil { + return nil, status.Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error()) + } + if uint(len(b)) > math.MaxUint32 { + return nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b)) + } + return b, nil +} + +// compress returns the input bytes compressed by compressor or cp. If both +// compressors are nil, returns nil. +// +// TODO(dfawley): eliminate cp parameter by wrapping Compressor in an encoding.Compressor. +func compress(in []byte, cp Compressor, compressor encoding.Compressor) ([]byte, error) { + if compressor == nil && cp == nil { + return nil, nil + } + wrapErr := func(err error) error { + return status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error()) + } + cbuf := &bytes.Buffer{} + if compressor != nil { + z, err := compressor.Compress(cbuf) + if err != nil { + return nil, wrapErr(err) + } + if _, err := z.Write(in); err != nil { + return nil, wrapErr(err) + } + if err := z.Close(); err != nil { + return nil, wrapErr(err) + } + } else { + if err := cp.Do(cbuf, in); err != nil { + return nil, wrapErr(err) + } + } + return cbuf.Bytes(), nil +} + +const ( + payloadLen = 1 + sizeLen = 4 + headerLen = payloadLen + sizeLen +) + +// msgHeader returns a 5-byte header for the message being transmitted and the +// payload, which is compData if non-nil or data otherwise. +func msgHeader(data, compData []byte) (hdr []byte, payload []byte) { + hdr = make([]byte, headerLen) + if compData != nil { + hdr[0] = byte(compressionMade) + data = compData + } else { + hdr[0] = byte(compressionNone) + } + + // Write length of payload into buf + binary.BigEndian.PutUint32(hdr[payloadLen:], uint32(len(data))) + return hdr, data +} + +func outPayload(client bool, msg interface{}, data, payload []byte, t time.Time) *stats.OutPayload { + return &stats.OutPayload{ + Client: client, + Payload: msg, + Data: data, + Length: len(data), + WireLength: len(payload) + headerLen, + SentTime: t, + } +} + +func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool) *status.Status { + switch pf { + case compressionNone: + case compressionMade: + if recvCompress == "" || recvCompress == encoding.Identity { + return status.New(codes.Internal, "grpc: compressed flag set with identity or empty encoding") + } + if !haveCompressor { + return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) + } + default: + return status.Newf(codes.Internal, "grpc: received unexpected payload format %d", pf) + } + return nil +} + +type payloadInfo struct { + wireLength int // The compressed length got from wire. + uncompressedBytes []byte +} + +func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) ([]byte, error) { + pf, d, err := p.recvMsg(maxReceiveMessageSize) + if err != nil { + return nil, err + } + if payInfo != nil { + payInfo.wireLength = len(d) + } + + if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil { + return nil, st.Err() + } + + var size int + if pf == compressionMade { + // To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor, + // use this decompressor as the default. 
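+		// Editorial note: precedence at this step is the legacy Decompressor
+		// (dc, installed via WithDecompressor/RPCDecompressor) first, falling
+		// back to the registered encoding.Compressor; only one of the two is
+		// consulted for a given message.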
+ if dc != nil { + d, err = dc.Do(bytes.NewReader(d)) + size = len(d) + } else { + d, size, err = decompress(compressor, d, maxReceiveMessageSize) + } + if err != nil { + return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) + } + } else { + size = len(d) + } + if size > maxReceiveMessageSize { + // TODO: Revisit the error code. Currently keep it consistent with java + // implementation. + return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", size, maxReceiveMessageSize) + } + return d, nil +} + +// Using compressor, decompress d, returning data and size. +// Optionally, if data will be over maxReceiveMessageSize, just return the size. +func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize int) ([]byte, int, error) { + dcReader, err := compressor.Decompress(bytes.NewReader(d)) + if err != nil { + return nil, 0, err + } + if sizer, ok := compressor.(interface { + DecompressedSize(compressedBytes []byte) int + }); ok { + if size := sizer.DecompressedSize(d); size >= 0 { + if size > maxReceiveMessageSize { + return nil, size, nil + } + // size is used as an estimate to size the buffer, but we + // will read more data if available. + // +MinRead so ReadFrom will not reallocate if size is correct. + buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead)) + bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) + return buf.Bytes(), int(bytesRead), err + } + } + // Read from LimitReader with limit max+1. So if the underlying + // reader is over limit, the result will be bigger than max. + d, err = ioutil.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) + return d, len(d), err +} + +// For the two compressor parameters, both should not be set, but if they are, +// dc takes precedence over compressor. +// TODO(dfawley): wrap the old compressor/decompressor using the new API? +func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error { + d, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor) + if err != nil { + return err + } + if err := c.Unmarshal(d, m); err != nil { + return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err) + } + if payInfo != nil { + payInfo.uncompressedBytes = d + } + return nil +} + +// Information about RPC +type rpcInfo struct { + failfast bool + preloaderInfo *compressorInfo +} + +// Information about Preloader +// Responsible for storing codec, and compressors +// If stream (s) has context s.Context which stores rpcInfo that has non nil +// pointers to codec, and compressors, then we can use preparedMsg for Async message prep +// and reuse marshalled bytes +type compressorInfo struct { + codec baseCodec + cp Compressor + comp encoding.Compressor +} + +type rpcInfoContextKey struct{} + +func newContextWithRPCInfo(ctx context.Context, failfast bool, codec baseCodec, cp Compressor, comp encoding.Compressor) context.Context { + return context.WithValue(ctx, rpcInfoContextKey{}, &rpcInfo{ + failfast: failfast, + preloaderInfo: &compressorInfo{ + codec: codec, + cp: cp, + comp: comp, + }, + }) +} + +func rpcInfoFromContext(ctx context.Context) (s *rpcInfo, ok bool) { + s, ok = ctx.Value(rpcInfoContextKey{}).(*rpcInfo) + return +} + +// Code returns the error code for err if it was produced by the rpc system. 
+// Otherwise, it returns codes.Unknown. +// +// Deprecated: use status.Code instead. +func Code(err error) codes.Code { + return status.Code(err) +} + +// ErrorDesc returns the error description of err if it was produced by the rpc system. +// Otherwise, it returns err.Error() or empty string when err is nil. +// +// Deprecated: use status.Convert and Message method instead. +func ErrorDesc(err error) string { + return status.Convert(err).Message() +} + +// Errorf returns an error containing an error code and a description; +// Errorf returns nil if c is OK. +// +// Deprecated: use status.Errorf instead. +func Errorf(c codes.Code, format string, a ...interface{}) error { + return status.Errorf(c, format, a...) +} + +// toRPCErr converts an error into an error from the status package. +func toRPCErr(err error) error { + if err == nil || err == io.EOF { + return err + } + if err == io.ErrUnexpectedEOF { + return status.Error(codes.Internal, err.Error()) + } + if _, ok := status.FromError(err); ok { + return err + } + switch e := err.(type) { + case transport.ConnectionError: + return status.Error(codes.Unavailable, e.Desc) + default: + switch err { + case context.DeadlineExceeded: + return status.Error(codes.DeadlineExceeded, err.Error()) + case context.Canceled: + return status.Error(codes.Canceled, err.Error()) + } + } + return status.Error(codes.Unknown, err.Error()) +} + +// setCallInfoCodec should only be called after CallOptions have been applied. +func setCallInfoCodec(c *callInfo) error { + if c.codec != nil { + // codec was already set by a CallOption; use it. + return nil + } + + if c.contentSubtype == "" { + // No codec specified in CallOptions; use proto by default. + c.codec = encoding.GetCodec(proto.Name) + return nil + } + + // c.contentSubtype is already lowercased in CallContentSubtype + c.codec = encoding.GetCodec(c.contentSubtype) + if c.codec == nil { + return status.Errorf(codes.Internal, "no codec registered for content-subtype %s", c.contentSubtype) + } + return nil +} + +// parseDialTarget returns the network and address to pass to dialer +func parseDialTarget(target string) (net string, addr string) { + net = "tcp" + + m1 := strings.Index(target, ":") + m2 := strings.Index(target, ":/") + + // handle unix:addr which will fail with url.Parse + if m1 >= 0 && m2 < 0 { + if n := target[0:m1]; n == "unix" { + net = n + addr = target[m1+1:] + return net, addr + } + } + if m2 >= 0 { + t, err := url.Parse(target) + if err != nil { + return net, target + } + scheme := t.Scheme + addr = t.Path + if scheme == "unix" { + net = scheme + if addr == "" { + addr = t.Host + } + return net, addr + } + } + + return net, target +} + +// channelzData is used to store channelz related data for ClientConn, addrConn and Server. +// These fields cannot be embedded in the original structs (e.g. ClientConn), since to do atomic +// operation on int64 variable on 32-bit machine, user is responsible to enforce memory alignment. +// Here, by grouping those int64 fields inside a struct, we are enforcing the alignment. +type channelzData struct { + callsStarted int64 + callsFailed int64 + callsSucceeded int64 + // lastCallStartedTime stores the timestamp that last call starts. It is of int64 type instead of + // time.Time since it's more costly to atomically update time.Time variable than int64 variable. + lastCallStartedTime int64 +} + +// The SupportPackageIsVersion variables are referenced from generated protocol +// buffer files to ensure compatibility with the gRPC version used. 
The latest +// support package version is 6. +// +// Older versions are kept for compatibility. They may be removed if +// compatibility cannot be maintained. +// +// These constants should not be referenced from any other code. +const ( + SupportPackageIsVersion3 = true + SupportPackageIsVersion4 = true + SupportPackageIsVersion5 = true + SupportPackageIsVersion6 = true +) + +const grpcUA = "grpc-go/" + Version diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go new file mode 100644 index 00000000..edfcdcae --- /dev/null +++ b/vendor/google.golang.org/grpc/server.go @@ -0,0 +1,1640 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "errors" + "fmt" + "io" + "math" + "net" + "net/http" + "reflect" + "runtime" + "strings" + "sync" + "sync/atomic" + "time" + + "golang.org/x/net/trace" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/encoding" + "google.golang.org/grpc/encoding/proto" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/binarylog" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" + "google.golang.org/grpc/tap" +) + +const ( + defaultServerMaxReceiveMessageSize = 1024 * 1024 * 4 + defaultServerMaxSendMessageSize = math.MaxInt32 +) + +var statusOK = status.New(codes.OK, "") + +type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error) + +// MethodDesc represents an RPC service's method specification. +type MethodDesc struct { + MethodName string + Handler methodHandler +} + +// ServiceDesc represents an RPC service's specification. +type ServiceDesc struct { + ServiceName string + // The pointer to the service interface. Used to check whether the user + // provided implementation satisfies the interface requirements. + HandlerType interface{} + Methods []MethodDesc + Streams []StreamDesc + Metadata interface{} +} + +// service consists of the information of the server serving this service and +// the methods in this service. +type service struct { + server interface{} // the server for service methods + md map[string]*MethodDesc + sd map[string]*StreamDesc + mdata interface{} +} + +// Server is a gRPC server to serve RPC requests. 
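+//
+// Editorial example (a sketch; pb and greeterImpl are placeholders for
+// generated code and a user implementation):
+//
+//	lis, err := net.Listen("tcp", ":50051")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	s := grpc.NewServer()
+//	pb.RegisterGreeterServer(s, &greeterImpl{})
+//	log.Fatal(s.Serve(lis))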
+type Server struct { + opts serverOptions + + mu sync.Mutex // guards following + lis map[net.Listener]bool + conns map[transport.ServerTransport]bool + serve bool + drain bool + cv *sync.Cond // signaled when connections close for GracefulStop + m map[string]*service // service name -> service info + events trace.EventLog + + quit *grpcsync.Event + done *grpcsync.Event + channelzRemoveOnce sync.Once + serveWG sync.WaitGroup // counts active Serve goroutines for GracefulStop + + channelzID int64 // channelz unique identification number + czData *channelzData +} + +type serverOptions struct { + creds credentials.TransportCredentials + codec baseCodec + cp Compressor + dc Decompressor + unaryInt UnaryServerInterceptor + streamInt StreamServerInterceptor + chainUnaryInts []UnaryServerInterceptor + chainStreamInts []StreamServerInterceptor + inTapHandle tap.ServerInHandle + statsHandler stats.Handler + maxConcurrentStreams uint32 + maxReceiveMessageSize int + maxSendMessageSize int + unknownStreamDesc *StreamDesc + keepaliveParams keepalive.ServerParameters + keepalivePolicy keepalive.EnforcementPolicy + initialWindowSize int32 + initialConnWindowSize int32 + writeBufferSize int + readBufferSize int + connectionTimeout time.Duration + maxHeaderListSize *uint32 + headerTableSize *uint32 +} + +var defaultServerOptions = serverOptions{ + maxReceiveMessageSize: defaultServerMaxReceiveMessageSize, + maxSendMessageSize: defaultServerMaxSendMessageSize, + connectionTimeout: 120 * time.Second, + writeBufferSize: defaultWriteBufSize, + readBufferSize: defaultReadBufSize, +} + +// A ServerOption sets options such as credentials, codec and keepalive parameters, etc. +type ServerOption interface { + apply(*serverOptions) +} + +// EmptyServerOption does not alter the server configuration. It can be embedded +// in another structure to build custom server options. +// +// This API is EXPERIMENTAL. +type EmptyServerOption struct{} + +func (EmptyServerOption) apply(*serverOptions) {} + +// funcServerOption wraps a function that modifies serverOptions into an +// implementation of the ServerOption interface. +type funcServerOption struct { + f func(*serverOptions) +} + +func (fdo *funcServerOption) apply(do *serverOptions) { + fdo.f(do) +} + +func newFuncServerOption(f func(*serverOptions)) *funcServerOption { + return &funcServerOption{ + f: f, + } +} + +// WriteBufferSize determines how much data can be batched before doing a write on the wire. +// The corresponding memory allocation for this buffer will be twice the size to keep syscalls low. +// The default value for this buffer is 32KB. +// Zero will disable the write buffer such that each write will be on underlying connection. +// Note: A Send call may not directly translate to a write. +func WriteBufferSize(s int) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.writeBufferSize = s + }) +} + +// ReadBufferSize lets you set the size of read buffer, this determines how much data can be read at most +// for one read syscall. +// The default value for this buffer is 32KB. +// Zero will disable read buffer for a connection so data framer can access the underlying +// conn directly. +func ReadBufferSize(s int) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.readBufferSize = s + }) +} + +// InitialWindowSize returns a ServerOption that sets window size for stream. +// The lower bound for window size is 64K and any value smaller than that will be ignored. 
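+//
+// Editorial example (a sketch; all values illustrative): ServerOptions
+// compose at construction time, so buffers and window sizes can be tuned
+// together:
+//
+//	s := grpc.NewServer(
+//		grpc.WriteBufferSize(64*1024),
+//		grpc.ReadBufferSize(64*1024),
+//		grpc.InitialWindowSize(1<<20),
+//	)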
+func InitialWindowSize(s int32) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.initialWindowSize = s + }) +} + +// InitialConnWindowSize returns a ServerOption that sets window size for a connection. +// The lower bound for window size is 64K and any value smaller than that will be ignored. +func InitialConnWindowSize(s int32) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.initialConnWindowSize = s + }) +} + +// KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server. +func KeepaliveParams(kp keepalive.ServerParameters) ServerOption { + if kp.Time > 0 && kp.Time < time.Second { + grpclog.Warning("Adjusting keepalive ping interval to minimum period of 1s") + kp.Time = time.Second + } + + return newFuncServerOption(func(o *serverOptions) { + o.keepaliveParams = kp + }) +} + +// KeepaliveEnforcementPolicy returns a ServerOption that sets keepalive enforcement policy for the server. +func KeepaliveEnforcementPolicy(kep keepalive.EnforcementPolicy) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.keepalivePolicy = kep + }) +} + +// CustomCodec returns a ServerOption that sets a codec for message marshaling and unmarshaling. +// +// This will override any lookups by content-subtype for Codecs registered with RegisterCodec. +func CustomCodec(codec Codec) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.codec = codec + }) +} + +// RPCCompressor returns a ServerOption that sets a compressor for outbound +// messages. For backward compatibility, all outbound messages will be sent +// using this compressor, regardless of incoming message compression. By +// default, server messages will be sent using the same compressor with which +// request messages were sent. +// +// Deprecated: use encoding.RegisterCompressor instead. +func RPCCompressor(cp Compressor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.cp = cp + }) +} + +// RPCDecompressor returns a ServerOption that sets a decompressor for inbound +// messages. It has higher priority than decompressors registered via +// encoding.RegisterCompressor. +// +// Deprecated: use encoding.RegisterCompressor instead. +func RPCDecompressor(dc Decompressor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.dc = dc + }) +} + +// MaxMsgSize returns a ServerOption to set the max message size in bytes the server can receive. +// If this is not set, gRPC uses the default limit. +// +// Deprecated: use MaxRecvMsgSize instead. +func MaxMsgSize(m int) ServerOption { + return MaxRecvMsgSize(m) +} + +// MaxRecvMsgSize returns a ServerOption to set the max message size in bytes the server can receive. +// If this is not set, gRPC uses the default 4MB. +func MaxRecvMsgSize(m int) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.maxReceiveMessageSize = m + }) +} + +// MaxSendMsgSize returns a ServerOption to set the max message size in bytes the server can send. +// If this is not set, gRPC uses the default `math.MaxInt32`. +func MaxSendMsgSize(m int) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.maxSendMessageSize = m + }) +} + +// MaxConcurrentStreams returns a ServerOption that will apply a limit on the number +// of concurrent streams to each ServerTransport. 
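+//
+// Editorial example (a sketch; durations illustrative): pairing keepalive
+// parameters with an enforcement policy:
+//
+//	s := grpc.NewServer(
+//		grpc.KeepaliveParams(keepalive.ServerParameters{Time: 2 * time.Hour}),
+//		grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
+//			MinTime:             5 * time.Minute,
+//			PermitWithoutStream: true,
+//		}),
+//	)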
+func MaxConcurrentStreams(n uint32) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.maxConcurrentStreams = n + }) +} + +// Creds returns a ServerOption that sets credentials for server connections. +func Creds(c credentials.TransportCredentials) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.creds = c + }) +} + +// UnaryInterceptor returns a ServerOption that sets the UnaryServerInterceptor for the +// server. Only one unary interceptor can be installed. The construction of multiple +// interceptors (e.g., chaining) can be implemented at the caller. +func UnaryInterceptor(i UnaryServerInterceptor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + if o.unaryInt != nil { + panic("The unary server interceptor was already set and may not be reset.") + } + o.unaryInt = i + }) +} + +// ChainUnaryInterceptor returns a ServerOption that specifies the chained interceptor +// for unary RPCs. The first interceptor will be the outer most, +// while the last interceptor will be the inner most wrapper around the real call. +// All unary interceptors added by this method will be chained. +func ChainUnaryInterceptor(interceptors ...UnaryServerInterceptor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.chainUnaryInts = append(o.chainUnaryInts, interceptors...) + }) +} + +// StreamInterceptor returns a ServerOption that sets the StreamServerInterceptor for the +// server. Only one stream interceptor can be installed. +func StreamInterceptor(i StreamServerInterceptor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + if o.streamInt != nil { + panic("The stream server interceptor was already set and may not be reset.") + } + o.streamInt = i + }) +} + +// ChainStreamInterceptor returns a ServerOption that specifies the chained interceptor +// for stream RPCs. The first interceptor will be the outer most, +// while the last interceptor will be the inner most wrapper around the real call. +// All stream interceptors added by this method will be chained. +func ChainStreamInterceptor(interceptors ...StreamServerInterceptor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.chainStreamInts = append(o.chainStreamInts, interceptors...) + }) +} + +// InTapHandle returns a ServerOption that sets the tap handle for all the server +// transport to be created. Only one can be installed. +func InTapHandle(h tap.ServerInHandle) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + if o.inTapHandle != nil { + panic("The tap handle was already set and may not be reset.") + } + o.inTapHandle = h + }) +} + +// StatsHandler returns a ServerOption that sets the stats handler for the server. +func StatsHandler(h stats.Handler) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.statsHandler = h + }) +} + +// UnknownServiceHandler returns a ServerOption that allows for adding a custom +// unknown service handler. The provided method is a bidi-streaming RPC service +// handler that will be invoked instead of returning the "unimplemented" gRPC +// error whenever a request is received for an unregistered service or method. +// The handling function and stream interceptor (if set) have full access to +// the ServerStream, including its Context. 
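+//
+// Editorial example (a sketch; the log format is illustrative): a timing
+// interceptor installed at construction:
+//
+//	timing := func(ctx context.Context, req interface{},
+//		info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
+//		start := time.Now()
+//		resp, err := handler(ctx, req)
+//		log.Printf("%s took %v (err=%v)", info.FullMethod, time.Since(start), err)
+//		return resp, err
+//	}
+//	s := grpc.NewServer(grpc.UnaryInterceptor(timing))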
+func UnknownServiceHandler(streamHandler StreamHandler) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.unknownStreamDesc = &StreamDesc{ + StreamName: "unknown_service_handler", + Handler: streamHandler, + // We need to assume that the users of the streamHandler will want to use both. + ClientStreams: true, + ServerStreams: true, + } + }) +} + +// ConnectionTimeout returns a ServerOption that sets the timeout for +// connection establishment (up to and including HTTP/2 handshaking) for all +// new connections. If this is not set, the default is 120 seconds. A zero or +// negative value will result in an immediate timeout. +// +// This API is EXPERIMENTAL. +func ConnectionTimeout(d time.Duration) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.connectionTimeout = d + }) +} + +// MaxHeaderListSize returns a ServerOption that sets the max (uncompressed) size +// of header list that the server is prepared to accept. +func MaxHeaderListSize(s uint32) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.maxHeaderListSize = &s + }) +} + +// HeaderTableSize returns a ServerOption that sets the size of dynamic +// header table for stream. +// +// This API is EXPERIMENTAL. +func HeaderTableSize(s uint32) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.headerTableSize = &s + }) +} + +// NewServer creates a gRPC server which has no service registered and has not +// started to accept requests yet. +func NewServer(opt ...ServerOption) *Server { + opts := defaultServerOptions + for _, o := range opt { + o.apply(&opts) + } + s := &Server{ + lis: make(map[net.Listener]bool), + opts: opts, + conns: make(map[transport.ServerTransport]bool), + m: make(map[string]*service), + quit: grpcsync.NewEvent(), + done: grpcsync.NewEvent(), + czData: new(channelzData), + } + chainUnaryServerInterceptors(s) + chainStreamServerInterceptors(s) + s.cv = sync.NewCond(&s.mu) + if EnableTracing { + _, file, line, _ := runtime.Caller(1) + s.events = trace.NewEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line)) + } + + if channelz.IsOn() { + s.channelzID = channelz.RegisterServer(&channelzServer{s}, "") + } + return s +} + +// printf records an event in s's event log, unless s has been stopped. +// REQUIRES s.mu is held. +func (s *Server) printf(format string, a ...interface{}) { + if s.events != nil { + s.events.Printf(format, a...) + } +} + +// errorf records an error in s's event log, unless s has been stopped. +// REQUIRES s.mu is held. +func (s *Server) errorf(format string, a ...interface{}) { + if s.events != nil { + s.events.Errorf(format, a...) + } +} + +// RegisterService registers a service and its implementation to the gRPC +// server. It is called from the IDL generated code. This must be called before +// invoking Serve. 
+func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) { + ht := reflect.TypeOf(sd.HandlerType).Elem() + st := reflect.TypeOf(ss) + if !st.Implements(ht) { + grpclog.Fatalf("grpc: Server.RegisterService found the handler of type %v that does not satisfy %v", st, ht) + } + s.register(sd, ss) +} + +func (s *Server) register(sd *ServiceDesc, ss interface{}) { + s.mu.Lock() + defer s.mu.Unlock() + s.printf("RegisterService(%q)", sd.ServiceName) + if s.serve { + grpclog.Fatalf("grpc: Server.RegisterService after Server.Serve for %q", sd.ServiceName) + } + if _, ok := s.m[sd.ServiceName]; ok { + grpclog.Fatalf("grpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName) + } + srv := &service{ + server: ss, + md: make(map[string]*MethodDesc), + sd: make(map[string]*StreamDesc), + mdata: sd.Metadata, + } + for i := range sd.Methods { + d := &sd.Methods[i] + srv.md[d.MethodName] = d + } + for i := range sd.Streams { + d := &sd.Streams[i] + srv.sd[d.StreamName] = d + } + s.m[sd.ServiceName] = srv +} + +// MethodInfo contains the information of an RPC including its method name and type. +type MethodInfo struct { + // Name is the method name only, without the service name or package name. + Name string + // IsClientStream indicates whether the RPC is a client streaming RPC. + IsClientStream bool + // IsServerStream indicates whether the RPC is a server streaming RPC. + IsServerStream bool +} + +// ServiceInfo contains unary RPC method info, streaming RPC method info and metadata for a service. +type ServiceInfo struct { + Methods []MethodInfo + // Metadata is the metadata specified in ServiceDesc when registering service. + Metadata interface{} +} + +// GetServiceInfo returns a map from service names to ServiceInfo. +// Service names include the package names, in the form of .. +func (s *Server) GetServiceInfo() map[string]ServiceInfo { + ret := make(map[string]ServiceInfo) + for n, srv := range s.m { + methods := make([]MethodInfo, 0, len(srv.md)+len(srv.sd)) + for m := range srv.md { + methods = append(methods, MethodInfo{ + Name: m, + IsClientStream: false, + IsServerStream: false, + }) + } + for m, d := range srv.sd { + methods = append(methods, MethodInfo{ + Name: m, + IsClientStream: d.ClientStreams, + IsServerStream: d.ServerStreams, + }) + } + + ret[n] = ServiceInfo{ + Methods: methods, + Metadata: srv.mdata, + } + } + return ret +} + +// ErrServerStopped indicates that the operation is now illegal because of +// the server being stopped. +var ErrServerStopped = errors.New("grpc: the server has been stopped") + +func (s *Server) useTransportAuthenticator(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { + if s.opts.creds == nil { + return rawConn, nil, nil + } + return s.opts.creds.ServerHandshake(rawConn) +} + +type listenSocket struct { + net.Listener + channelzID int64 +} + +func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric { + return &channelz.SocketInternalMetric{ + SocketOptions: channelz.GetSocketOption(l.Listener), + LocalAddr: l.Listener.Addr(), + } +} + +func (l *listenSocket) Close() error { + err := l.Listener.Close() + if channelz.IsOn() { + channelz.RemoveEntry(l.channelzID) + } + return err +} + +// Serve accepts incoming connections on the listener lis, creating a new +// ServerTransport and service goroutine for each. The service goroutines +// read gRPC requests and then call the registered handlers to reply to them. +// Serve returns when lis.Accept fails with fatal errors. 
lis will be closed when +// this method returns. +// Serve will return a non-nil error unless Stop or GracefulStop is called. +func (s *Server) Serve(lis net.Listener) error { + s.mu.Lock() + s.printf("serving") + s.serve = true + if s.lis == nil { + // Serve called after Stop or GracefulStop. + s.mu.Unlock() + lis.Close() + return ErrServerStopped + } + + s.serveWG.Add(1) + defer func() { + s.serveWG.Done() + if s.quit.HasFired() { + // Stop or GracefulStop called; block until done and return nil. + <-s.done.Done() + } + }() + + ls := &listenSocket{Listener: lis} + s.lis[ls] = true + + if channelz.IsOn() { + ls.channelzID = channelz.RegisterListenSocket(ls, s.channelzID, lis.Addr().String()) + } + s.mu.Unlock() + + defer func() { + s.mu.Lock() + if s.lis != nil && s.lis[ls] { + ls.Close() + delete(s.lis, ls) + } + s.mu.Unlock() + }() + + var tempDelay time.Duration // how long to sleep on accept failure + + for { + rawConn, err := lis.Accept() + if err != nil { + if ne, ok := err.(interface { + Temporary() bool + }); ok && ne.Temporary() { + if tempDelay == 0 { + tempDelay = 5 * time.Millisecond + } else { + tempDelay *= 2 + } + if max := 1 * time.Second; tempDelay > max { + tempDelay = max + } + s.mu.Lock() + s.printf("Accept error: %v; retrying in %v", err, tempDelay) + s.mu.Unlock() + timer := time.NewTimer(tempDelay) + select { + case <-timer.C: + case <-s.quit.Done(): + timer.Stop() + return nil + } + continue + } + s.mu.Lock() + s.printf("done serving; Accept = %v", err) + s.mu.Unlock() + + if s.quit.HasFired() { + return nil + } + return err + } + tempDelay = 0 + // Start a new goroutine to deal with rawConn so we don't stall this Accept + // loop goroutine. + // + // Make sure we account for the goroutine so GracefulStop doesn't nil out + // s.conns before this conn can be added. + s.serveWG.Add(1) + go func() { + s.handleRawConn(rawConn) + s.serveWG.Done() + }() + } +} + +// handleRawConn forks a goroutine to handle a just-accepted connection that +// has not had any I/O performed on it yet. +func (s *Server) handleRawConn(rawConn net.Conn) { + if s.quit.HasFired() { + rawConn.Close() + return + } + rawConn.SetDeadline(time.Now().Add(s.opts.connectionTimeout)) + conn, authInfo, err := s.useTransportAuthenticator(rawConn) + if err != nil { + // ErrConnDispatched means that the connection was dispatched away from + // gRPC; those connections should be left open. + if err != credentials.ErrConnDispatched { + s.mu.Lock() + s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err) + s.mu.Unlock() + channelz.Warningf(s.channelzID, "grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err) + rawConn.Close() + } + rawConn.SetDeadline(time.Time{}) + return + } + + // Finish handshaking (HTTP2) + st := s.newHTTP2Transport(conn, authInfo) + if st == nil { + return + } + + rawConn.SetDeadline(time.Time{}) + if !s.addConn(st) { + return + } + go func() { + s.serveStreams(st) + s.removeConn(st) + }() +} + +// newHTTP2Transport sets up a http/2 transport (using the +// gRPC http2 server transport in transport/http2_server.go). 
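+//
+// Editorial note (restating the Accept loop above): temporary Accept errors
+// are retried with exponential backoff, starting at 5ms and doubling up to a
+// 1s cap; the delay resets to zero after any successful Accept.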
+func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) transport.ServerTransport { + config := &transport.ServerConfig{ + MaxStreams: s.opts.maxConcurrentStreams, + AuthInfo: authInfo, + InTapHandle: s.opts.inTapHandle, + StatsHandler: s.opts.statsHandler, + KeepaliveParams: s.opts.keepaliveParams, + KeepalivePolicy: s.opts.keepalivePolicy, + InitialWindowSize: s.opts.initialWindowSize, + InitialConnWindowSize: s.opts.initialConnWindowSize, + WriteBufferSize: s.opts.writeBufferSize, + ReadBufferSize: s.opts.readBufferSize, + ChannelzParentID: s.channelzID, + MaxHeaderListSize: s.opts.maxHeaderListSize, + HeaderTableSize: s.opts.headerTableSize, + } + st, err := transport.NewServerTransport("http2", c, config) + if err != nil { + s.mu.Lock() + s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err) + s.mu.Unlock() + c.Close() + channelz.Warning(s.channelzID, "grpc: Server.Serve failed to create ServerTransport: ", err) + return nil + } + + return st +} + +func (s *Server) serveStreams(st transport.ServerTransport) { + defer st.Close() + var wg sync.WaitGroup + st.HandleStreams(func(stream *transport.Stream) { + wg.Add(1) + go func() { + defer wg.Done() + s.handleStream(st, stream, s.traceInfo(st, stream)) + }() + }, func(ctx context.Context, method string) context.Context { + if !EnableTracing { + return ctx + } + tr := trace.New("grpc.Recv."+methodFamily(method), method) + return trace.NewContext(ctx, tr) + }) + wg.Wait() +} + +var _ http.Handler = (*Server)(nil) + +// ServeHTTP implements the Go standard library's http.Handler +// interface by responding to the gRPC request r, by looking up +// the requested gRPC method in the gRPC server s. +// +// The provided HTTP request must have arrived on an HTTP/2 +// connection. When using the Go standard library's server, +// practically this means that the Request must also have arrived +// over TLS. +// +// To share one port (such as 443 for https) between gRPC and an +// existing http.Handler, use a root http.Handler such as: +// +// if r.ProtoMajor == 2 && strings.HasPrefix( +// r.Header.Get("Content-Type"), "application/grpc") { +// grpcServer.ServeHTTP(w, r) +// } else { +// yourMux.ServeHTTP(w, r) +// } +// +// Note that ServeHTTP uses Go's HTTP/2 server implementation which is totally +// separate from grpc-go's HTTP/2 server. Performance and features may vary +// between the two paths. ServeHTTP does not support some gRPC features +// available through grpc-go's HTTP/2 server, and it is currently EXPERIMENTAL +// and subject to change. +func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { + st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandler) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + if !s.addConn(st) { + return + } + defer s.removeConn(st) + s.serveStreams(st) +} + +// traceInfo returns a traceInfo and associates it with stream, if tracing is enabled. +// If tracing is not enabled, it returns nil. 
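+//
+// Editorial example (a sketch; certFile/keyFile are placeholders): serving
+// the mixed handler shown in the ServeHTTP comment above with the standard
+// library's TLS server:
+//
+//	h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+//		if r.ProtoMajor == 2 && strings.HasPrefix(
+//			r.Header.Get("Content-Type"), "application/grpc") {
+//			grpcServer.ServeHTTP(w, r)
+//		} else {
+//			yourMux.ServeHTTP(w, r)
+//		}
+//	})
+//	log.Fatal(http.ListenAndServeTLS(":443", certFile, keyFile, h))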
+func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Stream) (trInfo *traceInfo) { + if !EnableTracing { + return nil + } + tr, ok := trace.FromContext(stream.Context()) + if !ok { + return nil + } + + trInfo = &traceInfo{ + tr: tr, + firstLine: firstLine{ + client: false, + remoteAddr: st.RemoteAddr(), + }, + } + if dl, ok := stream.Context().Deadline(); ok { + trInfo.firstLine.deadline = time.Until(dl) + } + return trInfo +} + +func (s *Server) addConn(st transport.ServerTransport) bool { + s.mu.Lock() + defer s.mu.Unlock() + if s.conns == nil { + st.Close() + return false + } + if s.drain { + // Transport added after we drained our existing conns: drain it + // immediately. + st.Drain() + } + s.conns[st] = true + return true +} + +func (s *Server) removeConn(st transport.ServerTransport) { + s.mu.Lock() + defer s.mu.Unlock() + if s.conns != nil { + delete(s.conns, st) + s.cv.Broadcast() + } +} + +func (s *Server) channelzMetric() *channelz.ServerInternalMetric { + return &channelz.ServerInternalMetric{ + CallsStarted: atomic.LoadInt64(&s.czData.callsStarted), + CallsSucceeded: atomic.LoadInt64(&s.czData.callsSucceeded), + CallsFailed: atomic.LoadInt64(&s.czData.callsFailed), + LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&s.czData.lastCallStartedTime)), + } +} + +func (s *Server) incrCallsStarted() { + atomic.AddInt64(&s.czData.callsStarted, 1) + atomic.StoreInt64(&s.czData.lastCallStartedTime, time.Now().UnixNano()) +} + +func (s *Server) incrCallsSucceeded() { + atomic.AddInt64(&s.czData.callsSucceeded, 1) +} + +func (s *Server) incrCallsFailed() { + atomic.AddInt64(&s.czData.callsFailed, 1) +} + +func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { + data, err := encode(s.getCodec(stream.ContentSubtype()), msg) + if err != nil { + channelz.Error(s.channelzID, "grpc: server failed to encode response: ", err) + return err + } + compData, err := compress(data, cp, comp) + if err != nil { + channelz.Error(s.channelzID, "grpc: server failed to compress response: ", err) + return err + } + hdr, payload := msgHeader(data, compData) + // TODO(dfawley): should we be checking len(data) instead? + if len(payload) > s.opts.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(payload), s.opts.maxSendMessageSize) + } + err = t.Write(stream, hdr, payload, opts) + if err == nil && s.opts.statsHandler != nil { + s.opts.statsHandler.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now())) + } + return err +} + +// chainUnaryServerInterceptors chains all unary server interceptors into one. +func chainUnaryServerInterceptors(s *Server) { + // Prepend opts.unaryInt to the chaining interceptors if it exists, since unaryInt will + // be executed before any other chained interceptors. + interceptors := s.opts.chainUnaryInts + if s.opts.unaryInt != nil { + interceptors = append([]UnaryServerInterceptor{s.opts.unaryInt}, s.opts.chainUnaryInts...) 
+ } + + var chainedInt UnaryServerInterceptor + if len(interceptors) == 0 { + chainedInt = nil + } else if len(interceptors) == 1 { + chainedInt = interceptors[0] + } else { + chainedInt = func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (interface{}, error) { + return interceptors[0](ctx, req, info, getChainUnaryHandler(interceptors, 0, info, handler)) + } + } + + s.opts.unaryInt = chainedInt +} + +// getChainUnaryHandler recursively generate the chained UnaryHandler +func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info *UnaryServerInfo, finalHandler UnaryHandler) UnaryHandler { + if curr == len(interceptors)-1 { + return finalHandler + } + + return func(ctx context.Context, req interface{}) (interface{}, error) { + return interceptors[curr+1](ctx, req, info, getChainUnaryHandler(interceptors, curr+1, info, finalHandler)) + } +} + +func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, md *MethodDesc, trInfo *traceInfo) (err error) { + sh := s.opts.statsHandler + if sh != nil || trInfo != nil || channelz.IsOn() { + if channelz.IsOn() { + s.incrCallsStarted() + } + var statsBegin *stats.Begin + if sh != nil { + beginTime := time.Now() + statsBegin = &stats.Begin{ + BeginTime: beginTime, + } + sh.HandleRPC(stream.Context(), statsBegin) + } + if trInfo != nil { + trInfo.tr.LazyLog(&trInfo.firstLine, false) + } + // The deferred error handling for tracing, stats handler and channelz are + // combined into one function to reduce stack usage -- a defer takes ~56-64 + // bytes on the stack, so overflowing the stack will require a stack + // re-allocation, which is expensive. + // + // To maintain behavior similar to separate deferred statements, statements + // should be executed in the reverse order. That is, tracing first, stats + // handler second, and channelz last. Note that panics *within* defers will + // lead to different behavior, but that's an acceptable compromise; that + // would be undefined behavior territory anyway. + defer func() { + if trInfo != nil { + if err != nil && err != io.EOF { + trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.SetError() + } + trInfo.tr.Finish() + } + + if sh != nil { + end := &stats.End{ + BeginTime: statsBegin.BeginTime, + EndTime: time.Now(), + } + if err != nil && err != io.EOF { + end.Error = toRPCErr(err) + } + sh.HandleRPC(stream.Context(), end) + } + + if channelz.IsOn() { + if err != nil && err != io.EOF { + s.incrCallsFailed() + } else { + s.incrCallsSucceeded() + } + } + }() + } + + binlog := binarylog.GetMethodLogger(stream.Method()) + if binlog != nil { + ctx := stream.Context() + md, _ := metadata.FromIncomingContext(ctx) + logEntry := &binarylog.ClientHeader{ + Header: md, + MethodName: stream.Method(), + PeerAddr: nil, + } + if deadline, ok := ctx.Deadline(); ok { + logEntry.Timeout = time.Until(deadline) + if logEntry.Timeout < 0 { + logEntry.Timeout = 0 + } + } + if a := md[":authority"]; len(a) > 0 { + logEntry.Authority = a[0] + } + if peer, ok := peer.FromContext(ctx); ok { + logEntry.PeerAddr = peer.Addr + } + binlog.Log(logEntry) + } + + // comp and cp are used for compression. decomp and dc are used for + // decompression. If comp and decomp are both set, they are the same; + // however they are kept separate to ensure that at most one of the + // compressor/decompressor variable pairs are set for use later. 
+ var comp, decomp encoding.Compressor + var cp Compressor + var dc Decompressor + + // If dc is set and matches the stream's compression, use it. Otherwise, try + // to find a matching registered compressor for decomp. + if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc { + dc = s.opts.dc + } else if rc != "" && rc != encoding.Identity { + decomp = encoding.GetCompressor(rc) + if decomp == nil { + st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc) + t.WriteStatus(stream, st) + return st.Err() + } + } + + // If cp is set, use it. Otherwise, attempt to compress the response using + // the incoming message compression method. + // + // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686. + if s.opts.cp != nil { + cp = s.opts.cp + stream.SetSendCompress(cp.Type()) + } else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity { + // Legacy compressor not specified; attempt to respond with same encoding. + comp = encoding.GetCompressor(rc) + if comp != nil { + stream.SetSendCompress(rc) + } + } + + var payInfo *payloadInfo + if sh != nil || binlog != nil { + payInfo = &payloadInfo{} + } + d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) + if err != nil { + if st, ok := status.FromError(err); ok { + if e := t.WriteStatus(stream, st); e != nil { + channelz.Warningf(s.channelzID, "grpc: Server.processUnaryRPC failed to write status %v", e) + } + } + return err + } + if channelz.IsOn() { + t.IncrMsgRecv() + } + df := func(v interface{}) error { + if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil { + return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) + } + if sh != nil { + sh.HandleRPC(stream.Context(), &stats.InPayload{ + RecvTime: time.Now(), + Payload: v, + WireLength: payInfo.wireLength, + Data: d, + Length: len(d), + }) + } + if binlog != nil { + binlog.Log(&binarylog.ClientMessage{ + Message: d, + }) + } + if trInfo != nil { + trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true) + } + return nil + } + ctx := NewContextWithServerTransportStream(stream.Context(), stream) + reply, appErr := md.Handler(srv.server, ctx, df, s.opts.unaryInt) + if appErr != nil { + appStatus, ok := status.FromError(appErr) + if !ok { + // Convert appErr if it is not a grpc status error. + appErr = status.Error(codes.Unknown, appErr.Error()) + appStatus, _ = status.FromError(appErr) + } + if trInfo != nil { + trInfo.tr.LazyLog(stringer(appStatus.Message()), true) + trInfo.tr.SetError() + } + if e := t.WriteStatus(stream, appStatus); e != nil { + channelz.Warningf(s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) + } + if binlog != nil { + if h, _ := stream.Header(); h.Len() > 0 { + // Only log serverHeader if there was header. Otherwise it can + // be trailer only. + binlog.Log(&binarylog.ServerHeader{ + Header: h, + }) + } + binlog.Log(&binarylog.ServerTrailer{ + Trailer: stream.Trailer(), + Err: appErr, + }) + } + return appErr + } + if trInfo != nil { + trInfo.tr.LazyLog(stringer("OK"), false) + } + opts := &transport.Options{Last: true} + + if err := s.sendResponse(t, stream, reply, cp, opts, comp); err != nil { + if err == io.EOF { + // The entire stream is done (for unary RPC only). 
+ return err
+ }
+ if sts, ok := status.FromError(err); ok {
+ if e := t.WriteStatus(stream, sts); e != nil {
+ channelz.Warningf(s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e)
+ }
+ } else {
+ switch st := err.(type) {
+ case transport.ConnectionError:
+ // Nothing to do here.
+ default:
+ panic(fmt.Sprintf("grpc: Unexpected error (%T) from sendResponse: %v", st, st))
+ }
+ }
+ if binlog != nil {
+ h, _ := stream.Header()
+ binlog.Log(&binarylog.ServerHeader{
+ Header: h,
+ })
+ binlog.Log(&binarylog.ServerTrailer{
+ Trailer: stream.Trailer(),
+ Err: appErr,
+ })
+ }
+ return err
+ }
+ if binlog != nil {
+ h, _ := stream.Header()
+ binlog.Log(&binarylog.ServerHeader{
+ Header: h,
+ })
+ binlog.Log(&binarylog.ServerMessage{
+ Message: reply,
+ })
+ }
+ if channelz.IsOn() {
+ t.IncrMsgSent()
+ }
+ if trInfo != nil {
+ trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true)
+ }
+ // TODO: Should we be logging if writing status failed here, like above?
+ // Should the logging be in WriteStatus? Should we ignore the WriteStatus
+ // error or allow the stats handler to see it?
+ err = t.WriteStatus(stream, statusOK)
+ if binlog != nil {
+ binlog.Log(&binarylog.ServerTrailer{
+ Trailer: stream.Trailer(),
+ Err: appErr,
+ })
+ }
+ return err
+}
+
+// chainStreamServerInterceptors chains all stream server interceptors into one.
+func chainStreamServerInterceptors(s *Server) {
+ // Prepend opts.streamInt to the chaining interceptors if it exists, since streamInt will
+ // be executed before any other chained interceptors.
+ interceptors := s.opts.chainStreamInts
+ if s.opts.streamInt != nil {
+ interceptors = append([]StreamServerInterceptor{s.opts.streamInt}, s.opts.chainStreamInts...)
+ }
+
+ var chainedInt StreamServerInterceptor
+ if len(interceptors) == 0 {
+ chainedInt = nil
+ } else if len(interceptors) == 1 {
+ chainedInt = interceptors[0]
+ } else {
+ chainedInt = func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error {
+ return interceptors[0](srv, ss, info, getChainStreamHandler(interceptors, 0, info, handler))
+ }
+ }
+
+ s.opts.streamInt = chainedInt
+}
+
+// getChainStreamHandler recursively generates the chained StreamHandler
+func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, info *StreamServerInfo, finalHandler StreamHandler) StreamHandler {
+ if curr == len(interceptors)-1 {
+ return finalHandler
+ }
+
+ return func(srv interface{}, ss ServerStream) error {
+ return interceptors[curr+1](srv, ss, info, getChainStreamHandler(interceptors, curr+1, info, finalHandler))
+ }
+}
+
+func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) {
+ if channelz.IsOn() {
+ s.incrCallsStarted()
+ }
+ sh := s.opts.statsHandler
+ var statsBegin *stats.Begin
+ if sh != nil {
+ beginTime := time.Now()
+ statsBegin = &stats.Begin{
+ BeginTime: beginTime,
+ }
+ sh.HandleRPC(stream.Context(), statsBegin)
+ }
+ ctx := NewContextWithServerTransportStream(stream.Context(), stream)
+ ss := &serverStream{
+ ctx: ctx,
+ t: t,
+ s: stream,
+ p: &parser{r: stream},
+ codec: s.getCodec(stream.ContentSubtype()),
+ maxReceiveMessageSize: s.opts.maxReceiveMessageSize,
+ maxSendMessageSize: s.opts.maxSendMessageSize,
+ trInfo: trInfo,
+ statsHandler: sh,
+ }
+
+ if sh != nil || trInfo != nil || channelz.IsOn() {
+ // See comment in processUnaryRPC on defers.
+ defer func() { + if trInfo != nil { + ss.mu.Lock() + if err != nil && err != io.EOF { + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.SetError() + } + ss.trInfo.tr.Finish() + ss.trInfo.tr = nil + ss.mu.Unlock() + } + + if sh != nil { + end := &stats.End{ + BeginTime: statsBegin.BeginTime, + EndTime: time.Now(), + } + if err != nil && err != io.EOF { + end.Error = toRPCErr(err) + } + sh.HandleRPC(stream.Context(), end) + } + + if channelz.IsOn() { + if err != nil && err != io.EOF { + s.incrCallsFailed() + } else { + s.incrCallsSucceeded() + } + } + }() + } + + ss.binlog = binarylog.GetMethodLogger(stream.Method()) + if ss.binlog != nil { + md, _ := metadata.FromIncomingContext(ctx) + logEntry := &binarylog.ClientHeader{ + Header: md, + MethodName: stream.Method(), + PeerAddr: nil, + } + if deadline, ok := ctx.Deadline(); ok { + logEntry.Timeout = time.Until(deadline) + if logEntry.Timeout < 0 { + logEntry.Timeout = 0 + } + } + if a := md[":authority"]; len(a) > 0 { + logEntry.Authority = a[0] + } + if peer, ok := peer.FromContext(ss.Context()); ok { + logEntry.PeerAddr = peer.Addr + } + ss.binlog.Log(logEntry) + } + + // If dc is set and matches the stream's compression, use it. Otherwise, try + // to find a matching registered compressor for decomp. + if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc { + ss.dc = s.opts.dc + } else if rc != "" && rc != encoding.Identity { + ss.decomp = encoding.GetCompressor(rc) + if ss.decomp == nil { + st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc) + t.WriteStatus(ss.s, st) + return st.Err() + } + } + + // If cp is set, use it. Otherwise, attempt to compress the response using + // the incoming message compression method. + // + // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686. + if s.opts.cp != nil { + ss.cp = s.opts.cp + stream.SetSendCompress(s.opts.cp.Type()) + } else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity { + // Legacy compressor not specified; attempt to respond with same encoding. + ss.comp = encoding.GetCompressor(rc) + if ss.comp != nil { + stream.SetSendCompress(rc) + } + } + + if trInfo != nil { + trInfo.tr.LazyLog(&trInfo.firstLine, false) + } + var appErr error + var server interface{} + if srv != nil { + server = srv.server + } + if s.opts.streamInt == nil { + appErr = sd.Handler(server, ss) + } else { + info := &StreamServerInfo{ + FullMethod: stream.Method(), + IsClientStream: sd.ClientStreams, + IsServerStream: sd.ServerStreams, + } + appErr = s.opts.streamInt(server, ss, info, sd.Handler) + } + if appErr != nil { + appStatus, ok := status.FromError(appErr) + if !ok { + appStatus = status.New(codes.Unknown, appErr.Error()) + appErr = appStatus.Err() + } + if trInfo != nil { + ss.mu.Lock() + ss.trInfo.tr.LazyLog(stringer(appStatus.Message()), true) + ss.trInfo.tr.SetError() + ss.mu.Unlock() + } + t.WriteStatus(ss.s, appStatus) + if ss.binlog != nil { + ss.binlog.Log(&binarylog.ServerTrailer{ + Trailer: ss.s.Trailer(), + Err: appErr, + }) + } + // TODO: Should we log an error from WriteStatus here and below? 
+ return appErr
+ }
+ if trInfo != nil {
+ ss.mu.Lock()
+ ss.trInfo.tr.LazyLog(stringer("OK"), false)
+ ss.mu.Unlock()
+ }
+ err = t.WriteStatus(ss.s, statusOK)
+ if ss.binlog != nil {
+ ss.binlog.Log(&binarylog.ServerTrailer{
+ Trailer: ss.s.Trailer(),
+ Err: appErr,
+ })
+ }
+ return err
+}
+
+func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) {
+ sm := stream.Method()
+ if sm != "" && sm[0] == '/' {
+ sm = sm[1:]
+ }
+ pos := strings.LastIndex(sm, "/")
+ if pos == -1 {
+ if trInfo != nil {
+ trInfo.tr.LazyLog(&fmtStringer{"Malformed method name %q", []interface{}{sm}}, true)
+ trInfo.tr.SetError()
+ }
+ errDesc := fmt.Sprintf("malformed method name: %q", stream.Method())
+ if err := t.WriteStatus(stream, status.New(codes.ResourceExhausted, errDesc)); err != nil {
+ if trInfo != nil {
+ trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
+ trInfo.tr.SetError()
+ }
+ channelz.Warningf(s.channelzID, "grpc: Server.handleStream failed to write status: %v", err)
+ }
+ if trInfo != nil {
+ trInfo.tr.Finish()
+ }
+ return
+ }
+ service := sm[:pos]
+ method := sm[pos+1:]
+
+ srv, knownService := s.m[service]
+ if knownService {
+ if md, ok := srv.md[method]; ok {
+ s.processUnaryRPC(t, stream, srv, md, trInfo)
+ return
+ }
+ if sd, ok := srv.sd[method]; ok {
+ s.processStreamingRPC(t, stream, srv, sd, trInfo)
+ return
+ }
+ }
+ // Unknown service, or known service with an unknown method.
+ if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil {
+ s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo)
+ return
+ }
+ var errDesc string
+ if !knownService {
+ errDesc = fmt.Sprintf("unknown service %v", service)
+ } else {
+ errDesc = fmt.Sprintf("unknown method %v for service %v", method, service)
+ }
+ if trInfo != nil {
+ trInfo.tr.LazyPrintf("%s", errDesc)
+ trInfo.tr.SetError()
+ }
+ if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil {
+ if trInfo != nil {
+ trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
+ trInfo.tr.SetError()
+ }
+ channelz.Warningf(s.channelzID, "grpc: Server.handleStream failed to write status: %v", err)
+ }
+ if trInfo != nil {
+ trInfo.tr.Finish()
+ }
+}
+
+// The key to save ServerTransportStream in the context.
+type streamKey struct{}
+
+// NewContextWithServerTransportStream creates a new context from ctx and
+// attaches stream to it.
+//
+// This API is EXPERIMENTAL.
+func NewContextWithServerTransportStream(ctx context.Context, stream ServerTransportStream) context.Context {
+ return context.WithValue(ctx, streamKey{}, stream)
+}
+
+// ServerTransportStream is a minimal interface that a transport stream must
+// implement. This can be used to mock an actual transport stream for tests of
+// handler code that uses, for example, grpc.SetHeader (which requires some
+// stream to be in context).
+//
+// See also NewContextWithServerTransportStream.
+//
+// This API is EXPERIMENTAL.
+type ServerTransportStream interface {
+ Method() string
+ SetHeader(md metadata.MD) error
+ SendHeader(md metadata.MD) error
+ SetTrailer(md metadata.MD) error
+}
+
+// ServerTransportStreamFromContext returns the ServerTransportStream saved in
+// ctx. Returns nil if the given context has no stream associated with it
+// (which implies it is not an RPC invocation context).
+//
+// This API is EXPERIMENTAL.
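+// A hedged usage sketch (the handler and message types below are
+// hypothetical): helpers like SetHeader reach the stream through this
+// context value, e.g.
+//
+//	func (s *demoServer) Echo(ctx context.Context, in *pb.Msg) (*pb.Msg, error) {
+//		grpc.SetHeader(ctx, metadata.Pairs("x-request-id", "42"))
+//		return in, nil
+//	}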
+func ServerTransportStreamFromContext(ctx context.Context) ServerTransportStream { + s, _ := ctx.Value(streamKey{}).(ServerTransportStream) + return s +} + +// Stop stops the gRPC server. It immediately closes all open +// connections and listeners. +// It cancels all active RPCs on the server side and the corresponding +// pending RPCs on the client side will get notified by connection +// errors. +func (s *Server) Stop() { + s.quit.Fire() + + defer func() { + s.serveWG.Wait() + s.done.Fire() + }() + + s.channelzRemoveOnce.Do(func() { + if channelz.IsOn() { + channelz.RemoveEntry(s.channelzID) + } + }) + + s.mu.Lock() + listeners := s.lis + s.lis = nil + st := s.conns + s.conns = nil + // interrupt GracefulStop if Stop and GracefulStop are called concurrently. + s.cv.Broadcast() + s.mu.Unlock() + + for lis := range listeners { + lis.Close() + } + for c := range st { + c.Close() + } + + s.mu.Lock() + if s.events != nil { + s.events.Finish() + s.events = nil + } + s.mu.Unlock() +} + +// GracefulStop stops the gRPC server gracefully. It stops the server from +// accepting new connections and RPCs and blocks until all the pending RPCs are +// finished. +func (s *Server) GracefulStop() { + s.quit.Fire() + defer s.done.Fire() + + s.channelzRemoveOnce.Do(func() { + if channelz.IsOn() { + channelz.RemoveEntry(s.channelzID) + } + }) + s.mu.Lock() + if s.conns == nil { + s.mu.Unlock() + return + } + + for lis := range s.lis { + lis.Close() + } + s.lis = nil + if !s.drain { + for st := range s.conns { + st.Drain() + } + s.drain = true + } + + // Wait for serving threads to be ready to exit. Only then can we be sure no + // new conns will be created. + s.mu.Unlock() + s.serveWG.Wait() + s.mu.Lock() + + for len(s.conns) != 0 { + s.cv.Wait() + } + s.conns = nil + if s.events != nil { + s.events.Finish() + s.events = nil + } + s.mu.Unlock() +} + +// contentSubtype must be lowercase +// cannot return nil +func (s *Server) getCodec(contentSubtype string) baseCodec { + if s.opts.codec != nil { + return s.opts.codec + } + if contentSubtype == "" { + return encoding.GetCodec(proto.Name) + } + codec := encoding.GetCodec(contentSubtype) + if codec == nil { + return encoding.GetCodec(proto.Name) + } + return codec +} + +// SetHeader sets the header metadata. +// When called multiple times, all the provided metadata will be merged. +// All the metadata will be sent out when one of the following happens: +// - grpc.SendHeader() is called; +// - The first response is sent out; +// - An RPC status is sent out (error or success). +func SetHeader(ctx context.Context, md metadata.MD) error { + if md.Len() == 0 { + return nil + } + stream := ServerTransportStreamFromContext(ctx) + if stream == nil { + return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx) + } + return stream.SetHeader(md) +} + +// SendHeader sends header metadata. It may be called at most once. +// The provided md and headers set by SetHeader() will be sent. +func SendHeader(ctx context.Context, md metadata.MD) error { + stream := ServerTransportStreamFromContext(ctx) + if stream == nil { + return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx) + } + if err := stream.SendHeader(md); err != nil { + return toRPCErr(err) + } + return nil +} + +// SetTrailer sets the trailer metadata that will be sent when an RPC returns. +// When called more than once, all the provided metadata will be merged. 
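+// A hedged illustration (the key and value are hypothetical):
+//
+//	grpc.SetTrailer(ctx, metadata.Pairs("x-checksum", "abc123"))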
+func SetTrailer(ctx context.Context, md metadata.MD) error {
+ if md.Len() == 0 {
+ return nil
+ }
+ stream := ServerTransportStreamFromContext(ctx)
+ if stream == nil {
+ return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
+ }
+ return stream.SetTrailer(md)
+}
+
+// Method returns the method string for the server context. The returned
+// string is in the format of "/service/method".
+func Method(ctx context.Context) (string, bool) {
+ s := ServerTransportStreamFromContext(ctx)
+ if s == nil {
+ return "", false
+ }
+ return s.Method(), true
+}
+
+type channelzServer struct {
+ s *Server
+}
+
+func (c *channelzServer) ChannelzMetric() *channelz.ServerInternalMetric {
+ return c.s.channelzMetric()
+}
diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go
new file mode 100644
index 00000000..5a80a575
--- /dev/null
+++ b/vendor/google.golang.org/grpc/service_config.go
@@ -0,0 +1,434 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+ "encoding/json"
+ "fmt"
+ "strconv"
+ "strings"
+ "time"
+
+ "google.golang.org/grpc/balancer"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/internal"
+ "google.golang.org/grpc/serviceconfig"
+)
+
+const maxInt = int(^uint(0) >> 1)
+
+// MethodConfig defines the configuration recommended by service providers for a
+// particular method.
+//
+// Deprecated: Users should not use this struct. Service config should be received
+// through the name resolver, as specified here:
+// https://github.com/grpc/grpc/blob/master/doc/service_config.md
+type MethodConfig struct {
+ // WaitForReady indicates whether RPCs sent to this method should wait until
+ // the connection is ready by default (!failfast). The value specified via the
+ // gRPC client API will override the value set here.
+ WaitForReady *bool
+ // Timeout is the default timeout for RPCs sent to this method. The actual
+ // deadline used will be the minimum of the value specified here and the value
+ // set by the application via the gRPC client API. If either one is not set,
+ // then the other will be used. If neither is set, then the RPC has no deadline.
+ Timeout *time.Duration
+ // MaxReqSize is the maximum allowed payload size for an individual request in a
+ // stream (client->server) in bytes. The size which is measured is the serialized
+ // payload after per-message compression (but before stream compression) in bytes.
+ // The actual value used is the minimum of the value specified here and the value set
+ // by the application via the gRPC client API. If either one is not set, then the other
+ // will be used. If neither is set, then the built-in default is used.
+ MaxReqSize *int
+ // MaxRespSize is the maximum allowed payload size for an individual response in a
+ // stream (server->client) in bytes.
+ MaxRespSize *int
+ // RetryPolicy configures retry options for the method.
+ retryPolicy *retryPolicy
+}
+
+type lbConfig struct {
+ name string
+ cfg serviceconfig.LoadBalancingConfig
+}
+
+// ServiceConfig is provided by the service provider and contains parameters for how
+// clients that connect to the service should behave.
+//
+// Deprecated: Users should not use this struct. Service config should be received
+// through the name resolver, as specified here:
+// https://github.com/grpc/grpc/blob/master/doc/service_config.md
+type ServiceConfig struct {
+ serviceconfig.Config
+
+ // LB is the load balancer the service provider recommends. The balancer
+ // specified via grpc.WithBalancer will override this. This is deprecated;
+ // lbConfigs is preferred. If lbConfig and LB are both present, lbConfig
+ // will be used.
+ LB *string
+
+ // lbConfig is the service config's load balancing configuration. If
+ // lbConfig and LB are both present, lbConfig will be used.
+ lbConfig *lbConfig
+
+ // Methods contains a map for the methods in this service. If there is an
+ // exact match for a method (i.e. /service/method) in the map, use the
+ // corresponding MethodConfig. If there's no exact match, look for the
+ // default config for the service (/service/) and use the corresponding
+ // MethodConfig if it exists. Otherwise, the method has no MethodConfig to
+ // use.
+ Methods map[string]MethodConfig
+
+ // If a retryThrottlingPolicy is provided, gRPC will automatically throttle
+ // retry attempts and hedged RPCs when the client’s ratio of failures to
+ // successes exceeds a threshold.
+ //
+ // For each server name, the gRPC client will maintain a token_count which is
+ // initially set to maxTokens, and can take values between 0 and maxTokens.
+ //
+ // Every outgoing RPC (regardless of service or method invoked) will change
+ // token_count as follows:
+ //
+ // - Every failed RPC will decrement the token_count by 1.
+ // - Every successful RPC will increment the token_count by tokenRatio.
+ //
+ // If token_count is less than or equal to maxTokens / 2, then RPCs will not
+ // be retried and hedged RPCs will not be sent.
+ retryThrottling *retryThrottlingPolicy
+ // healthCheckConfig must be set as one of the requirements to enable LB channel
+ // health check.
+ healthCheckConfig *healthCheckConfig
+ // rawJSONString stores the service config JSON string that gets parsed into
+ // this service config struct.
+ rawJSONString string
+}
+
+// healthCheckConfig defines the go-native version of the LB channel health check config.
+type healthCheckConfig struct {
+ // serviceName is the service name to use in the health-checking request.
+ ServiceName string
+}
+
+// retryPolicy defines the go-native version of the retry policy defined by the
+// service config here:
+// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config
+type retryPolicy struct {
+ // MaxAttempts is the maximum number of attempts, including the original RPC.
+ //
+ // This field is required and must be two or greater.
+ maxAttempts int
+
+ // Exponential backoff parameters. The initial retry attempt will occur at
+ // random(0, initialBackoff). In general, the nth attempt will occur at
+ // random(0,
+ // min(initialBackoff*backoffMultiplier**(n-1), maxBackoff)).
+ //
+ // These fields are required and must be greater than zero.
+ initialBackoff time.Duration
+ maxBackoff time.Duration
+ backoffMultiplier float64
+
+ // The set of status codes which may be retried.
+ // + // Status codes are specified as strings, e.g., "UNAVAILABLE". + // + // This field is required and must be non-empty. + // Note: a set is used to store this for easy lookup. + retryableStatusCodes map[codes.Code]bool +} + +type jsonRetryPolicy struct { + MaxAttempts int + InitialBackoff string + MaxBackoff string + BackoffMultiplier float64 + RetryableStatusCodes []codes.Code +} + +// retryThrottlingPolicy defines the go-native version of the retry throttling +// policy defined by the service config here: +// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config +type retryThrottlingPolicy struct { + // The number of tokens starts at maxTokens. The token_count will always be + // between 0 and maxTokens. + // + // This field is required and must be greater than zero. + MaxTokens float64 + // The amount of tokens to add on each successful RPC. Typically this will + // be some number between 0 and 1, e.g., 0.1. + // + // This field is required and must be greater than zero. Up to 3 decimal + // places are supported. + TokenRatio float64 +} + +func parseDuration(s *string) (*time.Duration, error) { + if s == nil { + return nil, nil + } + if !strings.HasSuffix(*s, "s") { + return nil, fmt.Errorf("malformed duration %q", *s) + } + ss := strings.SplitN((*s)[:len(*s)-1], ".", 3) + if len(ss) > 2 { + return nil, fmt.Errorf("malformed duration %q", *s) + } + // hasDigits is set if either the whole or fractional part of the number is + // present, since both are optional but one is required. + hasDigits := false + var d time.Duration + if len(ss[0]) > 0 { + i, err := strconv.ParseInt(ss[0], 10, 32) + if err != nil { + return nil, fmt.Errorf("malformed duration %q: %v", *s, err) + } + d = time.Duration(i) * time.Second + hasDigits = true + } + if len(ss) == 2 && len(ss[1]) > 0 { + if len(ss[1]) > 9 { + return nil, fmt.Errorf("malformed duration %q", *s) + } + f, err := strconv.ParseInt(ss[1], 10, 64) + if err != nil { + return nil, fmt.Errorf("malformed duration %q: %v", *s, err) + } + for i := 9; i > len(ss[1]); i-- { + f *= 10 + } + d += time.Duration(f) + hasDigits = true + } + if !hasDigits { + return nil, fmt.Errorf("malformed duration %q", *s) + } + + return &d, nil +} + +type jsonName struct { + Service *string + Method *string +} + +func (j jsonName) generatePath() (string, bool) { + if j.Service == nil { + return "", false + } + res := "/" + *j.Service + "/" + if j.Method != nil { + res += *j.Method + } + return res, true +} + +// TODO(lyuxuan): delete this struct after cleaning up old service config implementation. +type jsonMC struct { + Name *[]jsonName + WaitForReady *bool + Timeout *string + MaxRequestMessageBytes *int64 + MaxResponseMessageBytes *int64 + RetryPolicy *jsonRetryPolicy +} + +type loadBalancingConfig map[string]json.RawMessage + +// TODO(lyuxuan): delete this struct after cleaning up old service config implementation. 
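+// A hedged example of the service config JSON these structs decode (all
+// values are hypothetical; keys are matched case-insensitively by
+// encoding/json):
+//
+//	{
+//	  "loadBalancingPolicy": "round_robin",
+//	  "methodConfig": [{
+//	    "name": [{ "service": "echo.Echo", "method": "UnaryEcho" }],
+//	    "waitForReady": true,
+//	    "timeout": "1.5s",
+//	    "retryPolicy": {
+//	      "maxAttempts": 3,
+//	      "initialBackoff": "0.1s",
+//	      "maxBackoff": "1s",
+//	      "backoffMultiplier": 2.0,
+//	      "retryableStatusCodes": ["UNAVAILABLE"]
+//	    }
+//	  }]
+//	}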
+type jsonSC struct { + LoadBalancingPolicy *string + LoadBalancingConfig *[]loadBalancingConfig + MethodConfig *[]jsonMC + RetryThrottling *retryThrottlingPolicy + HealthCheckConfig *healthCheckConfig +} + +func init() { + internal.ParseServiceConfigForTesting = parseServiceConfig +} +func parseServiceConfig(js string) *serviceconfig.ParseResult { + if len(js) == 0 { + return &serviceconfig.ParseResult{Err: fmt.Errorf("no JSON service config provided")} + } + var rsc jsonSC + err := json.Unmarshal([]byte(js), &rsc) + if err != nil { + grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) + return &serviceconfig.ParseResult{Err: err} + } + sc := ServiceConfig{ + LB: rsc.LoadBalancingPolicy, + Methods: make(map[string]MethodConfig), + retryThrottling: rsc.RetryThrottling, + healthCheckConfig: rsc.HealthCheckConfig, + rawJSONString: js, + } + if rsc.LoadBalancingConfig != nil { + for i, lbcfg := range *rsc.LoadBalancingConfig { + if len(lbcfg) != 1 { + err := fmt.Errorf("invalid loadBalancingConfig: entry %v does not contain exactly 1 policy/config pair: %q", i, lbcfg) + grpclog.Warningf(err.Error()) + return &serviceconfig.ParseResult{Err: err} + } + var name string + var jsonCfg json.RawMessage + for name, jsonCfg = range lbcfg { + } + builder := balancer.Get(name) + if builder == nil { + continue + } + sc.lbConfig = &lbConfig{name: name} + if parser, ok := builder.(balancer.ConfigParser); ok { + var err error + sc.lbConfig.cfg, err = parser.ParseConfig(jsonCfg) + if err != nil { + return &serviceconfig.ParseResult{Err: fmt.Errorf("error parsing loadBalancingConfig for policy %q: %v", name, err)} + } + } else if string(jsonCfg) != "{}" { + grpclog.Warningf("non-empty balancer configuration %q, but balancer does not implement ParseConfig", string(jsonCfg)) + } + break + } + if sc.lbConfig == nil { + // We had a loadBalancingConfig field but did not encounter a + // supported policy. The config is considered invalid in this + // case. 
+ err := fmt.Errorf("invalid loadBalancingConfig: no supported policies found")
+ grpclog.Warningf(err.Error())
+ return &serviceconfig.ParseResult{Err: err}
+ }
+ }
+
+ if rsc.MethodConfig == nil {
+ return &serviceconfig.ParseResult{Config: &sc}
+ }
+ for _, m := range *rsc.MethodConfig {
+ if m.Name == nil {
+ continue
+ }
+ d, err := parseDuration(m.Timeout)
+ if err != nil {
+ grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
+ return &serviceconfig.ParseResult{Err: err}
+ }
+
+ mc := MethodConfig{
+ WaitForReady: m.WaitForReady,
+ Timeout: d,
+ }
+ if mc.retryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil {
+ grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
+ return &serviceconfig.ParseResult{Err: err}
+ }
+ if m.MaxRequestMessageBytes != nil {
+ if *m.MaxRequestMessageBytes > int64(maxInt) {
+ mc.MaxReqSize = newInt(maxInt)
+ } else {
+ mc.MaxReqSize = newInt(int(*m.MaxRequestMessageBytes))
+ }
+ }
+ if m.MaxResponseMessageBytes != nil {
+ if *m.MaxResponseMessageBytes > int64(maxInt) {
+ mc.MaxRespSize = newInt(maxInt)
+ } else {
+ mc.MaxRespSize = newInt(int(*m.MaxResponseMessageBytes))
+ }
+ }
+ for _, n := range *m.Name {
+ if path, valid := n.generatePath(); valid {
+ sc.Methods[path] = mc
+ }
+ }
+ }
+
+ if sc.retryThrottling != nil {
+ if mt := sc.retryThrottling.MaxTokens; mt <= 0 || mt > 1000 {
+ return &serviceconfig.ParseResult{Err: fmt.Errorf("invalid retry throttling config: maxTokens (%v) out of range (0, 1000]", mt)}
+ }
+ if tr := sc.retryThrottling.TokenRatio; tr <= 0 {
+ return &serviceconfig.ParseResult{Err: fmt.Errorf("invalid retry throttling config: tokenRatio (%v) must be greater than zero", tr)}
+ }
+ }
+ return &serviceconfig.ParseResult{Config: &sc}
+}
+
+func convertRetryPolicy(jrp *jsonRetryPolicy) (p *retryPolicy, err error) {
+ if jrp == nil {
+ return nil, nil
+ }
+ ib, err := parseDuration(&jrp.InitialBackoff)
+ if err != nil {
+ return nil, err
+ }
+ mb, err := parseDuration(&jrp.MaxBackoff)
+ if err != nil {
+ return nil, err
+ }
+
+ if jrp.MaxAttempts <= 1 ||
+ *ib <= 0 ||
+ *mb <= 0 ||
+ jrp.BackoffMultiplier <= 0 ||
+ len(jrp.RetryableStatusCodes) == 0 {
+ grpclog.Warningf("grpc: ignoring retry policy %v due to illegal configuration", jrp)
+ return nil, nil
+ }
+
+ rp := &retryPolicy{
+ maxAttempts: jrp.MaxAttempts,
+ initialBackoff: *ib,
+ maxBackoff: *mb,
+ backoffMultiplier: jrp.BackoffMultiplier,
+ retryableStatusCodes: make(map[codes.Code]bool),
+ }
+ if rp.maxAttempts > 5 {
+ // TODO(retry): Make the max maxAttempts configurable.
+ rp.maxAttempts = 5
+ }
+ for _, code := range jrp.RetryableStatusCodes {
+ rp.retryableStatusCodes[code] = true
+ }
+ return rp, nil
+}
+
+func min(a, b *int) *int {
+ if *a < *b {
+ return a
+ }
+ return b
+}
+
+func getMaxSize(mcMax, doptMax *int, defaultVal int) *int {
+ if mcMax == nil && doptMax == nil {
+ return &defaultVal
+ }
+ if mcMax != nil && doptMax != nil {
+ return min(mcMax, doptMax)
+ }
+ if mcMax != nil {
+ return mcMax
+ }
+ return doptMax
+}
+
+func newInt(b int) *int {
+ return &b
+}
diff --git a/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go b/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go
new file mode 100644
index 00000000..187c3044
--- /dev/null
+++ b/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go
@@ -0,0 +1,41 @@
+/*
+ *
+ * Copyright 2019 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package serviceconfig defines types and methods for operating on gRPC
+// service configs.
+//
+// This package is EXPERIMENTAL.
+package serviceconfig
+
+// Config represents an opaque data structure holding a service config.
+type Config interface {
+ isServiceConfig()
+}
+
+// LoadBalancingConfig represents an opaque data structure holding a load
+// balancing config.
+type LoadBalancingConfig interface {
+ isLoadBalancingConfig()
+}
+
+// ParseResult contains a service config or an error. Exactly one must be
+// non-nil.
+type ParseResult struct {
+ Config Config
+ Err error
+}
diff --git a/vendor/google.golang.org/grpc/stats/handlers.go b/vendor/google.golang.org/grpc/stats/handlers.go
new file mode 100644
index 00000000..dc03731e
--- /dev/null
+++ b/vendor/google.golang.org/grpc/stats/handlers.go
@@ -0,0 +1,63 @@
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package stats
+
+import (
+ "context"
+ "net"
+)
+
+// ConnTagInfo defines the relevant information needed by the connection context tagger.
+type ConnTagInfo struct {
+ // RemoteAddr is the remote address of the corresponding connection.
+ RemoteAddr net.Addr
+ // LocalAddr is the local address of the corresponding connection.
+ LocalAddr net.Addr
+}
+
+// RPCTagInfo defines the relevant information needed by the RPC context tagger.
+type RPCTagInfo struct {
+ // FullMethodName is the RPC method in the format of /package.service/method.
+ FullMethodName string
+ // FailFast indicates if this RPC is failfast.
+ // This field is only valid on the client side; it is always false on the server side.
+ FailFast bool
+}
+
+// Handler defines the interface for the related stats handling (e.g., RPCs, connections).
+type Handler interface {
+ // TagRPC can attach some information to the given context.
+ // The context used for the remaining lifetime of the RPC will be derived
+ // from the returned context.
+ TagRPC(context.Context, *RPCTagInfo) context.Context
+ // HandleRPC processes the RPC stats.
+ HandleRPC(context.Context, RPCStats)
+
+ // TagConn can attach some information to the given context.
+ // The returned context will be used for stats handling.
+ // For conn stats handling, the context used in HandleConn for this
+ // connection will be derived from the context returned.
+ // For RPC stats handling, + // - On server side, the context used in HandleRPC for all RPCs on this + // connection will be derived from the context returned. + // - On client side, the context is not derived from the context returned. + TagConn(context.Context, *ConnTagInfo) context.Context + // HandleConn processes the Conn stats. + HandleConn(context.Context, ConnStats) +} diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go new file mode 100644 index 00000000..a7970c79 --- /dev/null +++ b/vendor/google.golang.org/grpc/stats/stats.go @@ -0,0 +1,314 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +//go:generate protoc --go_out=plugins=grpc:. grpc_testing/test.proto + +// Package stats is for collecting and reporting various network and RPC stats. +// This package is for monitoring purpose only. All fields are read-only. +// All APIs are experimental. +package stats // import "google.golang.org/grpc/stats" + +import ( + "context" + "net" + "time" + + "google.golang.org/grpc/metadata" +) + +// RPCStats contains stats information about RPCs. +type RPCStats interface { + isRPCStats() + // IsClient returns true if this RPCStats is from client side. + IsClient() bool +} + +// Begin contains stats when an RPC begins. +// FailFast is only valid if this Begin is from client side. +type Begin struct { + // Client is true if this Begin is from client side. + Client bool + // BeginTime is the time when the RPC begins. + BeginTime time.Time + // FailFast indicates if this RPC is failfast. + FailFast bool +} + +// IsClient indicates if the stats information is from client side. +func (s *Begin) IsClient() bool { return s.Client } + +func (s *Begin) isRPCStats() {} + +// InPayload contains the information for an incoming payload. +type InPayload struct { + // Client is true if this InPayload is from client side. + Client bool + // Payload is the payload with original type. + Payload interface{} + // Data is the serialized message payload. + Data []byte + // Length is the length of uncompressed data. + Length int + // WireLength is the length of data on wire (compressed, signed, encrypted). + WireLength int + // RecvTime is the time when the payload is received. + RecvTime time.Time +} + +// IsClient indicates if the stats information is from client side. +func (s *InPayload) IsClient() bool { return s.Client } + +func (s *InPayload) isRPCStats() {} + +// InHeader contains stats when a header is received. +type InHeader struct { + // Client is true if this InHeader is from client side. + Client bool + // WireLength is the wire length of header. + WireLength int + // Compression is the compression algorithm used for the RPC. + Compression string + // Header contains the header metadata received. + Header metadata.MD + + // The following fields are valid only if Client is false. + // FullMethod is the full RPC method string, i.e., /package.service/method. 
+ FullMethod string + // RemoteAddr is the remote address of the corresponding connection. + RemoteAddr net.Addr + // LocalAddr is the local address of the corresponding connection. + LocalAddr net.Addr +} + +// IsClient indicates if the stats information is from client side. +func (s *InHeader) IsClient() bool { return s.Client } + +func (s *InHeader) isRPCStats() {} + +// InTrailer contains stats when a trailer is received. +type InTrailer struct { + // Client is true if this InTrailer is from client side. + Client bool + // WireLength is the wire length of trailer. + WireLength int + // Trailer contains the trailer metadata received from the server. This + // field is only valid if this InTrailer is from the client side. + Trailer metadata.MD +} + +// IsClient indicates if the stats information is from client side. +func (s *InTrailer) IsClient() bool { return s.Client } + +func (s *InTrailer) isRPCStats() {} + +// OutPayload contains the information for an outgoing payload. +type OutPayload struct { + // Client is true if this OutPayload is from client side. + Client bool + // Payload is the payload with original type. + Payload interface{} + // Data is the serialized message payload. + Data []byte + // Length is the length of uncompressed data. + Length int + // WireLength is the length of data on wire (compressed, signed, encrypted). + WireLength int + // SentTime is the time when the payload is sent. + SentTime time.Time +} + +// IsClient indicates if this stats information is from client side. +func (s *OutPayload) IsClient() bool { return s.Client } + +func (s *OutPayload) isRPCStats() {} + +// OutHeader contains stats when a header is sent. +type OutHeader struct { + // Client is true if this OutHeader is from client side. + Client bool + // Compression is the compression algorithm used for the RPC. + Compression string + // Header contains the header metadata sent. + Header metadata.MD + + // The following fields are valid only if Client is true. + // FullMethod is the full RPC method string, i.e., /package.service/method. + FullMethod string + // RemoteAddr is the remote address of the corresponding connection. + RemoteAddr net.Addr + // LocalAddr is the local address of the corresponding connection. + LocalAddr net.Addr +} + +// IsClient indicates if this stats information is from client side. +func (s *OutHeader) IsClient() bool { return s.Client } + +func (s *OutHeader) isRPCStats() {} + +// OutTrailer contains stats when a trailer is sent. +type OutTrailer struct { + // Client is true if this OutTrailer is from client side. + Client bool + // WireLength is the wire length of trailer. + // + // Deprecated: This field is never set. The length is not known when this message is + // emitted because the trailer fields are compressed with hpack after that. + WireLength int + // Trailer contains the trailer metadata sent to the client. This + // field is only valid if this OutTrailer is from the server side. + Trailer metadata.MD +} + +// IsClient indicates if this stats information is from client side. +func (s *OutTrailer) IsClient() bool { return s.Client } + +func (s *OutTrailer) isRPCStats() {} + +// End contains stats when an RPC ends. +type End struct { + // Client is true if this End is from client side. + Client bool + // BeginTime is the time when the RPC began. + BeginTime time.Time + // EndTime is the time when the RPC ends. + EndTime time.Time + // Trailer contains the trailer metadata received from the server. 
This + // field is only valid if this End is from the client side. + // Deprecated: use Trailer in InTrailer instead. + Trailer metadata.MD + // Error is the error the RPC ended with. It is an error generated from + // status.Status and can be converted back to status.Status using + // status.FromError if non-nil. + Error error +} + +// IsClient indicates if this is from client side. +func (s *End) IsClient() bool { return s.Client } + +func (s *End) isRPCStats() {} + +// ConnStats contains stats information about connections. +type ConnStats interface { + isConnStats() + // IsClient returns true if this ConnStats is from client side. + IsClient() bool +} + +// ConnBegin contains the stats of a connection when it is established. +type ConnBegin struct { + // Client is true if this ConnBegin is from client side. + Client bool +} + +// IsClient indicates if this is from client side. +func (s *ConnBegin) IsClient() bool { return s.Client } + +func (s *ConnBegin) isConnStats() {} + +// ConnEnd contains the stats of a connection when it ends. +type ConnEnd struct { + // Client is true if this ConnEnd is from client side. + Client bool +} + +// IsClient indicates if this is from client side. +func (s *ConnEnd) IsClient() bool { return s.Client } + +func (s *ConnEnd) isConnStats() {} + +type incomingTagsKey struct{} +type outgoingTagsKey struct{} + +// SetTags attaches stats tagging data to the context, which will be sent in +// the outgoing RPC with the header grpc-tags-bin. Subsequent calls to +// SetTags will overwrite the values from earlier calls. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. +func SetTags(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, outgoingTagsKey{}, b) +} + +// Tags returns the tags from the context for the inbound RPC. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. +func Tags(ctx context.Context) []byte { + b, _ := ctx.Value(incomingTagsKey{}).([]byte) + return b +} + +// SetIncomingTags attaches stats tagging data to the context, to be read by +// the application (not sent in outgoing RPCs). +// +// This is intended for gRPC-internal use ONLY. +func SetIncomingTags(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, incomingTagsKey{}, b) +} + +// OutgoingTags returns the tags from the context for the outbound RPC. +// +// This is intended for gRPC-internal use ONLY. +func OutgoingTags(ctx context.Context) []byte { + b, _ := ctx.Value(outgoingTagsKey{}).([]byte) + return b +} + +type incomingTraceKey struct{} +type outgoingTraceKey struct{} + +// SetTrace attaches stats tagging data to the context, which will be sent in +// the outgoing RPC with the header grpc-trace-bin. Subsequent calls to +// SetTrace will overwrite the values from earlier calls. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. 
+func SetTrace(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, outgoingTraceKey{}, b) +} + +// Trace returns the trace from the context for the inbound RPC. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. +func Trace(ctx context.Context) []byte { + b, _ := ctx.Value(incomingTraceKey{}).([]byte) + return b +} + +// SetIncomingTrace attaches stats tagging data to the context, to be read by +// the application (not sent in outgoing RPCs). It is intended for +// gRPC-internal use. +func SetIncomingTrace(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, incomingTraceKey{}, b) +} + +// OutgoingTrace returns the trace from the context for the outbound RPC. It is +// intended for gRPC-internal use. +func OutgoingTrace(ctx context.Context) []byte { + b, _ := ctx.Value(outgoingTraceKey{}).([]byte) + return b +} diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go new file mode 100644 index 00000000..934ef683 --- /dev/null +++ b/vendor/google.golang.org/grpc/stream.go @@ -0,0 +1,1528 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "errors" + "io" + "math" + "strconv" + "sync" + "time" + + "golang.org/x/net/trace" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/encoding" + "google.golang.org/grpc/internal/balancerload" + "google.golang.org/grpc/internal/binarylog" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" +) + +// StreamHandler defines the handler called by gRPC server to complete the +// execution of a streaming RPC. If a StreamHandler returns an error, it +// should be produced by the status package, or else gRPC will use +// codes.Unknown as the status code and err.Error() as the status message +// of the RPC. +type StreamHandler func(srv interface{}, stream ServerStream) error + +// StreamDesc represents a streaming RPC service's method specification. +type StreamDesc struct { + StreamName string + Handler StreamHandler + + // At least one of these is true. + ServerStreams bool + ClientStreams bool +} + +// Stream defines the common interface a client or server stream has to satisfy. +// +// Deprecated: See ClientStream and ServerStream documentation instead. +type Stream interface { + // Deprecated: See ClientStream and ServerStream documentation instead. + Context() context.Context + // Deprecated: See ClientStream and ServerStream documentation instead. 
+ SendMsg(m interface{}) error
+ // Deprecated: See ClientStream and ServerStream documentation instead.
+ RecvMsg(m interface{}) error
+}
+
+// ClientStream defines the client-side behavior of a streaming RPC.
+//
+// All errors returned from ClientStream methods are compatible with the
+// status package.
+type ClientStream interface {
+ // Header returns the header metadata received from the server if there
+ // is any. It blocks if the metadata is not ready to read.
+ Header() (metadata.MD, error)
+ // Trailer returns the trailer metadata from the server, if there is any.
+ // It must only be called after stream.CloseAndRecv has returned, or
+ // stream.Recv has returned a non-nil error (including io.EOF).
+ Trailer() metadata.MD
+ // CloseSend closes the send direction of the stream. It closes the stream
+ // when a non-nil error is encountered. It is also not safe to call CloseSend
+ // concurrently with SendMsg.
+ CloseSend() error
+ // Context returns the context for this stream.
+ //
+ // It should not be called until after Header or RecvMsg has returned. Once
+ // called, subsequent client-side retries are disabled.
+ Context() context.Context
+ // SendMsg is generally called by generated code. On error, SendMsg aborts
+ // the stream. If the error was generated by the client, the status is
+ // returned directly; otherwise, io.EOF is returned and the status of
+ // the stream may be discovered using RecvMsg.
+ //
+ // SendMsg blocks until:
+ // - There is sufficient flow control to schedule m with the transport, or
+ // - The stream is done, or
+ // - The stream breaks.
+ //
+ // SendMsg does not wait until the message is received by the server. An
+ // untimely stream closure may result in lost messages. To ensure delivery,
+ // users should ensure the RPC completed successfully using RecvMsg.
+ //
+ // It is safe to have a goroutine calling SendMsg and another goroutine
+ // calling RecvMsg on the same stream at the same time, but it is not safe
+ // to call SendMsg on the same stream in different goroutines. It is also
+ // not safe to call CloseSend concurrently with SendMsg.
+ SendMsg(m interface{}) error
+ // RecvMsg blocks until it receives a message into m or the stream is
+ // done. It returns io.EOF when the stream completes successfully. On
+ // any other error, the stream is aborted and the error contains the RPC
+ // status.
+ //
+ // It is safe to have a goroutine calling SendMsg and another goroutine
+ // calling RecvMsg on the same stream at the same time, but it is not
+ // safe to call RecvMsg on the same stream in different goroutines.
+ RecvMsg(m interface{}) error
+}
+
+// NewStream creates a new Stream for the client side. This is typically
+// called by generated code. ctx is used for the lifetime of the stream.
+//
+// To ensure resources are not leaked due to the stream returned, one of the following
+// actions must be performed:
+//
+// 1. Call Close on the ClientConn.
+// 2. Cancel the context provided.
+// 3. Call RecvMsg until a non-nil error is returned. A protobuf-generated
+// client-streaming RPC, for instance, might use the helper function
+// CloseAndRecv (note that CloseSend does not Recv, therefore is not
+// guaranteed to release all resources).
+// 4. Receive a non-nil, non-io.EOF error from Header or SendMsg.
+//
+// If none of the above happen, a goroutine and a context will be leaked, and grpc
+// will not call the optionally-configured stats handler with a stats.End message.
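+// A hedged sketch of direct use (generated code normally does this; the
+// descriptor and method name below are hypothetical):
+//
+//	desc := &grpc.StreamDesc{StreamName: "Watch", ServerStreams: true}
+//	stream, err := conn.NewStream(ctx, desc, "/demo.Service/Watch")
+//	// SendMsg the request, CloseSend, then RecvMsg until a non-nil error;
+//	// io.EOF indicates clean completion (rule 3 above).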
+func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) {
+ // Allow the interceptor to see all applicable call options, which means those
+ // configured as defaults from the dial option as well as per-call options.
+ opts = combine(cc.dopts.callOptions, opts)
+
+ if cc.dopts.streamInt != nil {
+ return cc.dopts.streamInt(ctx, desc, cc, method, newClientStream, opts...)
+ }
+ return newClientStream(ctx, desc, cc, method, opts...)
+}
+
+// NewClientStream is a wrapper for ClientConn.NewStream.
+func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) {
+ return cc.NewStream(ctx, desc, method, opts...)
+}
+
+func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) {
+ if channelz.IsOn() {
+ cc.incrCallsStarted()
+ defer func() {
+ if err != nil {
+ cc.incrCallsFailed()
+ }
+ }()
+ }
+ c := defaultCallInfo()
+ // Provide an opportunity for the first RPC to see the first service config
+ // provided by the resolver.
+ if err := cc.waitForResolvedAddrs(ctx); err != nil {
+ return nil, err
+ }
+ mc := cc.GetMethodConfig(method)
+ if mc.WaitForReady != nil {
+ c.failFast = !*mc.WaitForReady
+ }
+
+ // Possible context leak:
+ // The cancel function for the child context we create will only be called
+ // when RecvMsg returns a non-nil error, if the ClientConn is closed, or if
+ // an error is generated by SendMsg.
+ // https://github.com/grpc/grpc-go/issues/1818.
+ var cancel context.CancelFunc
+ if mc.Timeout != nil && *mc.Timeout >= 0 {
+ ctx, cancel = context.WithTimeout(ctx, *mc.Timeout)
+ } else {
+ ctx, cancel = context.WithCancel(ctx)
+ }
+ defer func() {
+ if err != nil {
+ cancel()
+ }
+ }()
+
+ for _, o := range opts {
+ if err := o.before(c); err != nil {
+ return nil, toRPCErr(err)
+ }
+ }
+ c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize)
+ c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize)
+ if err := setCallInfoCodec(c); err != nil {
+ return nil, err
+ }
+
+ callHdr := &transport.CallHdr{
+ Host: cc.authority,
+ Method: method,
+ ContentSubtype: c.contentSubtype,
+ }
+
+ // Set our outgoing compression according to the UseCompressor CallOption, if
+ // set. In that case, also find the compressor from the encoding package.
+ // Otherwise, use the compressor configured by the WithCompressor DialOption,
+ // if set.
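+ // A hedged illustration of the precedence described above (not upstream
+ // code): the per-call option overrides the dial-time default, so with both
+ // configured as below, this call sends identity-encoded messages:
+ //
+ //	conn, _ := grpc.Dial(addr, grpc.WithCompressor(grpc.NewGZIPCompressor()))
+ //	err := conn.Invoke(ctx, method, in, out, grpc.UseCompressor("identity"))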
+ var cp Compressor
+ var comp encoding.Compressor
+ if ct := c.compressorType; ct != "" {
+ callHdr.SendCompress = ct
+ if ct != encoding.Identity {
+ comp = encoding.GetCompressor(ct)
+ if comp == nil {
+ return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct)
+ }
+ }
+ } else if cc.dopts.cp != nil {
+ callHdr.SendCompress = cc.dopts.cp.Type()
+ cp = cc.dopts.cp
+ }
+ if c.creds != nil {
+ callHdr.Creds = c.creds
+ }
+ var trInfo *traceInfo
+ if EnableTracing {
+ trInfo = &traceInfo{
+ tr: trace.New("grpc.Sent."+methodFamily(method), method),
+ firstLine: firstLine{
+ client: true,
+ },
+ }
+ if deadline, ok := ctx.Deadline(); ok {
+ trInfo.firstLine.deadline = time.Until(deadline)
+ }
+ trInfo.tr.LazyLog(&trInfo.firstLine, false)
+ ctx = trace.NewContext(ctx, trInfo.tr)
+ }
+ ctx = newContextWithRPCInfo(ctx, c.failFast, c.codec, cp, comp)
+ sh := cc.dopts.copts.StatsHandler
+ var beginTime time.Time
+ if sh != nil {
+ ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast})
+ beginTime = time.Now()
+ begin := &stats.Begin{
+ Client: true,
+ BeginTime: beginTime,
+ FailFast: c.failFast,
+ }
+ sh.HandleRPC(ctx, begin)
+ }
+
+ cs := &clientStream{
+ callHdr: callHdr,
+ ctx: ctx,
+ methodConfig: &mc,
+ opts: opts,
+ callInfo: c,
+ cc: cc,
+ desc: desc,
+ codec: c.codec,
+ cp: cp,
+ comp: comp,
+ cancel: cancel,
+ beginTime: beginTime,
+ firstAttempt: true,
+ }
+ if !cc.dopts.disableRetry {
+ cs.retryThrottler = cc.retryThrottler.Load().(*retryThrottler)
+ }
+ cs.binlog = binarylog.GetMethodLogger(method)
+
+ cs.callInfo.stream = cs
+ // Only this initial attempt has stats/tracing.
+ // TODO(dfawley): move to newAttempt when per-attempt stats are implemented.
+ if err := cs.newAttemptLocked(sh, trInfo); err != nil {
+ cs.finish(err)
+ return nil, err
+ }
+
+ op := func(a *csAttempt) error { return a.newStream() }
+ if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }); err != nil {
+ cs.finish(err)
+ return nil, err
+ }
+
+ if cs.binlog != nil {
+ md, _ := metadata.FromOutgoingContext(ctx)
+ logEntry := &binarylog.ClientHeader{
+ OnClientSide: true,
+ Header: md,
+ MethodName: method,
+ Authority: cs.cc.authority,
+ }
+ if deadline, ok := ctx.Deadline(); ok {
+ logEntry.Timeout = time.Until(deadline)
+ if logEntry.Timeout < 0 {
+ logEntry.Timeout = 0
+ }
+ }
+ cs.binlog.Log(logEntry)
+ }
+
+ if desc != unaryStreamDesc {
+ // Listen on cc and stream contexts to clean up when the user closes the
+ // ClientConn or cancels the stream context. In all other cases, an error
+ // should already be injected into the recv buffer by the transport, which
+ // the client will eventually receive, and then we will cancel the stream's
+ // context in clientStream.finish.
+ go func() {
+ select {
+ case <-cc.ctx.Done():
+ cs.finish(ErrClientConnClosing)
+ case <-ctx.Done():
+ cs.finish(toRPCErr(ctx.Err()))
+ }
+ }()
+ }
+ return cs, nil
+}
+
+// newAttemptLocked creates a new attempt with a transport.
+// If it succeeds, then it replaces clientStream's attempt with this new attempt.
+func (cs *clientStream) newAttemptLocked(sh stats.Handler, trInfo *traceInfo) (retErr error) {
+ newAttempt := &csAttempt{
+ cs: cs,
+ dc: cs.cc.dopts.dc,
+ statsHandler: sh,
+ trInfo: trInfo,
+ }
+ defer func() {
+ if retErr != nil {
+ // This attempt is not set in the clientStream, so its finish won't
+ // be called. Call it here for stats and trace in case they are not
+ // nil.
+ newAttempt.finish(retErr)
+ }
+ }()
+
+ if err := cs.ctx.Err(); err != nil {
+ return toRPCErr(err)
+ }
+ t, done, err := cs.cc.getTransport(cs.ctx, cs.callInfo.failFast, cs.callHdr.Method)
+ if err != nil {
+ return err
+ }
+ if trInfo != nil {
+ trInfo.firstLine.SetRemoteAddr(t.RemoteAddr())
+ }
+ newAttempt.t = t
+ newAttempt.done = done
+ cs.attempt = newAttempt
+ return nil
+}
+
+func (a *csAttempt) newStream() error {
+ cs := a.cs
+ cs.callHdr.PreviousAttempts = cs.numRetries
+ s, err := a.t.NewStream(cs.ctx, cs.callHdr)
+ if err != nil {
+ return toRPCErr(err)
+ }
+ cs.attempt.s = s
+ cs.attempt.p = &parser{r: s}
+ return nil
+}
+
+// clientStream implements a client-side Stream.
+type clientStream struct {
+ callHdr *transport.CallHdr
+ opts []CallOption
+ callInfo *callInfo
+ cc *ClientConn
+ desc *StreamDesc
+
+ codec baseCodec
+ cp Compressor
+ comp encoding.Compressor
+
+ cancel context.CancelFunc // cancels all attempts
+
+ sentLast bool // sent an end stream
+ beginTime time.Time
+
+ methodConfig *MethodConfig
+
+ ctx context.Context // the application's context, wrapped by stats/tracing
+
+ retryThrottler *retryThrottler // The throttler active when the RPC began.
+
+ binlog *binarylog.MethodLogger // Binary logger, can be nil.
+ // serverHeaderBinlogged is a boolean for whether the server header has been
+ // logged. The server header will be logged the first time one of the
+ // following happens: stream.Header(), stream.Recv().
+ //
+ // It's only read and used by Recv() and Header(), so it doesn't need to be
+ // synchronized.
+ serverHeaderBinlogged bool
+
+ mu sync.Mutex
+ firstAttempt bool // if true, transparent retry is valid
+ numRetries int // exclusive of transparent retry attempt(s)
+ numRetriesSincePushback int // retries since pushback; to reset backoff
+ finished bool // TODO: replace with atomic cmpxchg or sync.Once?
+ // attempt is the active client stream attempt.
+ // The only place where it is written is the newAttemptLocked method, and that method never writes nil.
+ // So, attempt can be nil only inside the newClientStream function, when the clientStream is first created.
+ // One of the first things done after the clientStream's creation is to call newAttemptLocked, which either
+ // assigns a non-nil value to the attempt or returns an error. If an error is returned from newAttemptLocked,
+ // then newClientStream calls finish on the clientStream and returns. So, the finish method is the only
+ // place where we need to check whether the attempt is nil.
+ attempt *csAttempt
+ // TODO(hedging): hedging will have multiple attempts simultaneously.
+ committed bool // active attempt committed for retry?
+ buffer []func(a *csAttempt) error // operations to replay on retry
+ bufferSize int // current size of buffer
+}
+
+// csAttempt implements a single transport stream attempt within a
+// clientStream.
+type csAttempt struct {
+ cs *clientStream
+ t transport.ClientTransport
+ s *transport.Stream
+ p *parser
+ done func(balancer.DoneInfo)
+
+ finished bool
+ dc Decompressor
+ decomp encoding.Compressor
+ decompSet bool
+
+ mu sync.Mutex // guards trInfo.tr
+ // trInfo may be nil (if EnableTracing is false).
+ // trInfo.tr is set when created (if EnableTracing is true),
+ // and cleared when the finish method is called.
+ trInfo *traceInfo + + statsHandler stats.Handler +} + +func (cs *clientStream) commitAttemptLocked() { + cs.committed = true + cs.buffer = nil +} + +func (cs *clientStream) commitAttempt() { + cs.mu.Lock() + cs.commitAttemptLocked() + cs.mu.Unlock() +} + +// shouldRetry returns nil if the RPC should be retried; otherwise it returns +// the error that should be returned by the operation. +func (cs *clientStream) shouldRetry(err error) error { + if cs.attempt.s == nil && !cs.callInfo.failFast { + // In the event of any error from NewStream (attempt.s == nil), we + // never attempted to write anything to the wire, so we can retry + // indefinitely for non-fail-fast RPCs. + return nil + } + if cs.finished || cs.committed { + // RPC is finished or committed; cannot retry. + return err + } + // Wait for the trailers. + if cs.attempt.s != nil { + <-cs.attempt.s.Done() + } + if cs.firstAttempt && (cs.attempt.s == nil || cs.attempt.s.Unprocessed()) { + // First attempt, stream unprocessed: transparently retry. + cs.firstAttempt = false + return nil + } + cs.firstAttempt = false + if cs.cc.dopts.disableRetry { + return err + } + + pushback := 0 + hasPushback := false + if cs.attempt.s != nil { + if !cs.attempt.s.TrailersOnly() { + return err + } + + // TODO(retry): Move down if the spec changes to not check server pushback + // before considering this a failure for throttling. + sps := cs.attempt.s.Trailer()["grpc-retry-pushback-ms"] + if len(sps) == 1 { + var e error + if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 { + channelz.Infof(cs.cc.channelzID, "Server retry pushback specified to abort (%q).", sps[0]) + cs.retryThrottler.throttle() // This counts as a failure for throttling. + return err + } + hasPushback = true + } else if len(sps) > 1 { + channelz.Warningf(cs.cc.channelzID, "Server retry pushback specified multiple values (%q); not retrying.", sps) + cs.retryThrottler.throttle() // This counts as a failure for throttling. + return err + } + } + + var code codes.Code + if cs.attempt.s != nil { + code = cs.attempt.s.Status().Code() + } else { + code = status.Convert(err).Code() + } + + rp := cs.methodConfig.retryPolicy + if rp == nil || !rp.retryableStatusCodes[code] { + return err + } + + // Note: the ordering here is important; we count this as a failure + // only if the code matched a retryable code. + if cs.retryThrottler.throttle() { + return err + } + if cs.numRetries+1 >= rp.maxAttempts { + return err + } + + var dur time.Duration + if hasPushback { + dur = time.Millisecond * time.Duration(pushback) + cs.numRetriesSincePushback = 0 + } else { + fact := math.Pow(rp.backoffMultiplier, float64(cs.numRetriesSincePushback)) + cur := float64(rp.initialBackoff) * fact + if max := float64(rp.maxBackoff); cur > max { + cur = max + } + dur = time.Duration(grpcrand.Int63n(int64(cur))) + cs.numRetriesSincePushback++ + } + + // TODO(dfawley): we could eagerly fail here if dur puts us past the + // deadline, but unsure if it is worth doing. + t := time.NewTimer(dur) + select { + case <-t.C: + cs.numRetries++ + return nil + case <-cs.ctx.Done(): + t.Stop() + return status.FromContextError(cs.ctx.Err()).Err() + } +} + +// Returns nil if a retry was performed and succeeded; error otherwise. 
+func (cs *clientStream) retryLocked(lastErr error) error {
+ for {
+ cs.attempt.finish(lastErr)
+ if err := cs.shouldRetry(lastErr); err != nil {
+ cs.commitAttemptLocked()
+ return err
+ }
+ if err := cs.newAttemptLocked(nil, nil); err != nil {
+ return err
+ }
+ if lastErr = cs.replayBufferLocked(); lastErr == nil {
+ return nil
+ }
+ }
+}
+
+func (cs *clientStream) Context() context.Context {
+ cs.commitAttempt()
+ // No need to lock before using attempt, since we know it is committed and
+ // cannot change.
+ return cs.attempt.s.Context()
+}
+
+func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) error {
+ cs.mu.Lock()
+ for {
+ if cs.committed {
+ cs.mu.Unlock()
+ return op(cs.attempt)
+ }
+ a := cs.attempt
+ cs.mu.Unlock()
+ err := op(a)
+ cs.mu.Lock()
+ if a != cs.attempt {
+ // We started another attempt already.
+ continue
+ }
+ if err == io.EOF {
+ <-a.s.Done()
+ }
+ if err == nil || (err == io.EOF && a.s.Status().Code() == codes.OK) {
+ onSuccess()
+ cs.mu.Unlock()
+ return err
+ }
+ if err := cs.retryLocked(err); err != nil {
+ cs.mu.Unlock()
+ return err
+ }
+ }
+}
+
+func (cs *clientStream) Header() (metadata.MD, error) {
+ var m metadata.MD
+ err := cs.withRetry(func(a *csAttempt) error {
+ var err error
+ m, err = a.s.Header()
+ return toRPCErr(err)
+ }, cs.commitAttemptLocked)
+ if err != nil {
+ cs.finish(err)
+ return nil, err
+ }
+ if cs.binlog != nil && !cs.serverHeaderBinlogged {
+ // Only log if binary log is on and header has not been logged.
+ logEntry := &binarylog.ServerHeader{
+ OnClientSide: true,
+ Header: m,
+ PeerAddr: nil,
+ }
+ if peer, ok := peer.FromContext(cs.Context()); ok {
+ logEntry.PeerAddr = peer.Addr
+ }
+ cs.binlog.Log(logEntry)
+ cs.serverHeaderBinlogged = true
+ }
+ return m, err
+}
+
+func (cs *clientStream) Trailer() metadata.MD {
+ // On RPC failure, we never need to retry, because valid usage requires
+ // that RecvMsg() has returned a non-nil error before this function is
+ // called. We would have retried earlier if necessary.
+ //
+ // Commit the attempt anyway, just in case users are not following those
+ // directions -- it will prevent races and should not meaningfully impact
+ // performance.
+ cs.commitAttempt()
+ if cs.attempt.s == nil {
+ return nil
+ }
+ return cs.attempt.s.Trailer()
+}
+
+func (cs *clientStream) replayBufferLocked() error {
+ a := cs.attempt
+ for _, f := range cs.buffer {
+ if err := f(a); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error) {
+ // Note: we still will buffer if retry is disabled (for transparent retries).
+ if cs.committed {
+ return
+ }
+ cs.bufferSize += sz
+ if cs.bufferSize > cs.callInfo.maxRetryRPCBufferSize {
+ cs.commitAttemptLocked()
+ return
+ }
+ cs.buffer = append(cs.buffer, op)
+}
+
+func (cs *clientStream) SendMsg(m interface{}) (err error) {
+ defer func() {
+ if err != nil && err != io.EOF {
+ // Call finish on the client stream for errors generated by this SendMsg
+ // call, as these indicate problems created by this client. (Transport
+ // errors are converted to an io.EOF error in csAttempt.sendMsg; the real
+ // error will be returned from RecvMsg eventually in that case, or be
+ // retried.)
+ cs.finish(err)
+ }
+ }()
+ if cs.sentLast {
+ return status.Errorf(codes.Internal, "SendMsg called after CloseSend")
+ }
+ if !cs.desc.ClientStreams {
+ cs.sentLast = true
+ }
+
+ // load hdr, payload, data
+ hdr, payload, data, err := prepareMsg(m, cs.codec, cs.cp, cs.comp)
+ if err != nil {
+ return err
+ }
+
+ // TODO(dfawley): should we be checking len(data) instead?
+ if len(payload) > *cs.callInfo.maxSendMessageSize {
+ return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.callInfo.maxSendMessageSize)
+ }
+ msgBytes := data // Store the pointer before setting to nil. For binary logging.
+ op := func(a *csAttempt) error {
+ err := a.sendMsg(m, hdr, payload, data)
+ // nil out the message and uncomp when replaying; they are only needed for
+ // stats, which is disabled for subsequent attempts.
+ m, data = nil, nil
+ return err
+ }
+ err = cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) })
+ if cs.binlog != nil && err == nil {
+ cs.binlog.Log(&binarylog.ClientMessage{
+ OnClientSide: true,
+ Message: msgBytes,
+ })
+ }
+ return
+}
+
+func (cs *clientStream) RecvMsg(m interface{}) error {
+ if cs.binlog != nil && !cs.serverHeaderBinlogged {
+ // Call Header() to log the header in the binary log if it has not been
+ // logged already.
+ cs.Header()
+ }
+ var recvInfo *payloadInfo
+ if cs.binlog != nil {
+ recvInfo = &payloadInfo{}
+ }
+ err := cs.withRetry(func(a *csAttempt) error {
+ return a.recvMsg(m, recvInfo)
+ }, cs.commitAttemptLocked)
+ if cs.binlog != nil && err == nil {
+ cs.binlog.Log(&binarylog.ServerMessage{
+ OnClientSide: true,
+ Message: recvInfo.uncompressedBytes,
+ })
+ }
+ if err != nil || !cs.desc.ServerStreams {
+ // err != nil or non-server-streaming indicates end of stream.
+ cs.finish(err)
+
+ if cs.binlog != nil {
+ // finish will not log Trailer. Log Trailer here.
+ logEntry := &binarylog.ServerTrailer{
+ OnClientSide: true,
+ Trailer: cs.Trailer(),
+ Err: err,
+ }
+ if logEntry.Err == io.EOF {
+ logEntry.Err = nil
+ }
+ if peer, ok := peer.FromContext(cs.Context()); ok {
+ logEntry.PeerAddr = peer.Addr
+ }
+ cs.binlog.Log(logEntry)
+ }
+ }
+ return err
+}
+
+func (cs *clientStream) CloseSend() error {
+ if cs.sentLast {
+ // TODO: return an error and finish the stream instead, due to API misuse?
+ return nil
+ }
+ cs.sentLast = true
+ op := func(a *csAttempt) error {
+ a.t.Write(a.s, nil, nil, &transport.Options{Last: true})
+ // Always return nil; io.EOF is the only error that might make sense
+ // instead, but there is no need to signal the client to call RecvMsg
+ // as the only use left for the stream after CloseSend is to call
+ // RecvMsg. This also matches historical behavior.
+ return nil
+ }
+ cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) })
+ if cs.binlog != nil {
+ cs.binlog.Log(&binarylog.ClientHalfClose{
+ OnClientSide: true,
+ })
+ }
+ // We never return an error here; see the comment inside op above for the
+ // rationale.
+ return nil
+}
+
+func (cs *clientStream) finish(err error) {
+ if err == io.EOF {
+ // Ending a stream with EOF indicates a success.
+ err = nil
+ }
+ cs.mu.Lock()
+ if cs.finished {
+ cs.mu.Unlock()
+ return
+ }
+ cs.finished = true
+ cs.commitAttemptLocked()
+ cs.mu.Unlock()
+ // For binary logging: only log cancel in finish (could be caused by RPC ctx
+ // canceled or ClientConn closed). Trailer will be logged in RecvMsg.
+ //
+ // Only one of cancel or trailer needs to be logged. In the cases where
+ // users don't call RecvMsg, users must have already canceled the RPC.
+ if cs.binlog != nil && status.Code(err) == codes.Canceled { + cs.binlog.Log(&binarylog.Cancel{ + OnClientSide: true, + }) + } + if err == nil { + cs.retryThrottler.successfulRPC() + } + if channelz.IsOn() { + if err != nil { + cs.cc.incrCallsFailed() + } else { + cs.cc.incrCallsSucceeded() + } + } + if cs.attempt != nil { + cs.attempt.finish(err) + // after functions all rely upon having a stream. + if cs.attempt.s != nil { + for _, o := range cs.opts { + o.after(cs.callInfo) + } + } + } + cs.cancel() +} + +func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error { + cs := a.cs + if a.trInfo != nil { + a.mu.Lock() + if a.trInfo.tr != nil { + a.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) + } + a.mu.Unlock() + } + if err := a.t.Write(a.s, hdr, payld, &transport.Options{Last: !cs.desc.ClientStreams}); err != nil { + if !cs.desc.ClientStreams { + // For non-client-streaming RPCs, we return nil instead of EOF on error + // because the generated code requires it. finish is not called; RecvMsg() + // will call it with the stream's status independently. + return nil + } + return io.EOF + } + if a.statsHandler != nil { + a.statsHandler.HandleRPC(cs.ctx, outPayload(true, m, data, payld, time.Now())) + } + if channelz.IsOn() { + a.t.IncrMsgSent() + } + return nil +} + +func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { + cs := a.cs + if a.statsHandler != nil && payInfo == nil { + payInfo = &payloadInfo{} + } + + if !a.decompSet { + // Block until we receive headers containing received message encoding. + if ct := a.s.RecvCompress(); ct != "" && ct != encoding.Identity { + if a.dc == nil || a.dc.Type() != ct { + // No configured decompressor, or it does not match the incoming + // message encoding; attempt to find a registered compressor that does. + a.dc = nil + a.decomp = encoding.GetCompressor(ct) + } + } else { + // No compression is used; disable our decompressor. + a.dc = nil + } + // Only initialize this state once per stream. + a.decompSet = true + } + err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decomp) + if err != nil { + if err == io.EOF { + if statusErr := a.s.Status().Err(); statusErr != nil { + return statusErr + } + return io.EOF // indicates successful end of stream. + } + return toRPCErr(err) + } + if a.trInfo != nil { + a.mu.Lock() + if a.trInfo.tr != nil { + a.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) + } + a.mu.Unlock() + } + if a.statsHandler != nil { + a.statsHandler.HandleRPC(cs.ctx, &stats.InPayload{ + Client: true, + RecvTime: time.Now(), + Payload: m, + // TODO truncate large payload. + Data: payInfo.uncompressedBytes, + WireLength: payInfo.wireLength, + Length: len(payInfo.uncompressedBytes), + }) + } + if channelz.IsOn() { + a.t.IncrMsgRecv() + } + if cs.desc.ServerStreams { + // Subsequent messages should be received by subsequent RecvMsg calls. + return nil + } + // Special handling for non-server-stream rpcs. + // This recv expects EOF or errors, so we don't collect inPayload. 
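+ // If this second recv succeeds, the server sent more than one response
+ // message on a non-server-streaming RPC; the code below reports that as
+ // a protocol violation.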
+ err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp)
+ if err == nil {
+ return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
+ }
+ if err == io.EOF {
+ return a.s.Status().Err() // non-server streaming Recv returns nil on success
+ }
+ return toRPCErr(err)
+}
+
+func (a *csAttempt) finish(err error) {
+ a.mu.Lock()
+ if a.finished {
+ a.mu.Unlock()
+ return
+ }
+ a.finished = true
+ if err == io.EOF {
+ // Ending a stream with EOF indicates a success.
+ err = nil
+ }
+ var tr metadata.MD
+ if a.s != nil {
+ a.t.CloseStream(a.s, err)
+ tr = a.s.Trailer()
+ }
+
+ if a.done != nil {
+ br := false
+ if a.s != nil {
+ br = a.s.BytesReceived()
+ }
+ a.done(balancer.DoneInfo{
+ Err: err,
+ Trailer: tr,
+ BytesSent: a.s != nil,
+ BytesReceived: br,
+ ServerLoad: balancerload.Parse(tr),
+ })
+ }
+ if a.statsHandler != nil {
+ end := &stats.End{
+ Client: true,
+ BeginTime: a.cs.beginTime,
+ EndTime: time.Now(),
+ Trailer: tr,
+ Error: err,
+ }
+ a.statsHandler.HandleRPC(a.cs.ctx, end)
+ }
+ if a.trInfo != nil && a.trInfo.tr != nil {
+ if err == nil {
+ a.trInfo.tr.LazyPrintf("RPC: [OK]")
+ } else {
+ a.trInfo.tr.LazyPrintf("RPC: [%v]", err)
+ a.trInfo.tr.SetError()
+ }
+ a.trInfo.tr.Finish()
+ a.trInfo.tr = nil
+ }
+ a.mu.Unlock()
+}
+
+// newNonRetryClientStream creates a ClientStream with the specified transport,
+// on the given addrConn.
+//
+// It's expected that the given transport is either the same one in addrConn, or
+// is already closed. To avoid a race, the transport is specified separately
+// instead of using ac.transport.
+//
+// Main differences between this and ClientConn.NewStream:
+// - no retry
+// - no service config (or wait for service config)
+// - no tracing or stats
+func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method string, t transport.ClientTransport, ac *addrConn, opts ...CallOption) (_ ClientStream, err error) {
+ if t == nil {
+ // TODO: return RPC error here?
+ return nil, errors.New("transport provided is nil")
+ }
+ // defaultCallInfo contains unnecessary info (i.e. failfast, maxRetryRPCBufferSize), so we just initialize an empty struct.
+ c := &callInfo{}
+
+ // Possible context leak:
+ // The cancel function for the child context we create will only be called
+ // when RecvMsg returns a non-nil error, if the ClientConn is closed, or if
+ // an error is generated by SendMsg.
+ // https://github.com/grpc/grpc-go/issues/1818.
+ ctx, cancel := context.WithCancel(ctx)
+ defer func() {
+ if err != nil {
+ cancel()
+ }
+ }()
+
+ for _, o := range opts {
+ if err := o.before(c); err != nil {
+ return nil, toRPCErr(err)
+ }
+ }
+ c.maxReceiveMessageSize = getMaxSize(nil, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize)
+ c.maxSendMessageSize = getMaxSize(nil, c.maxSendMessageSize, defaultServerMaxSendMessageSize)
+ if err := setCallInfoCodec(c); err != nil {
+ return nil, err
+ }
+
+ callHdr := &transport.CallHdr{
+ Host: ac.cc.authority,
+ Method: method,
+ ContentSubtype: c.contentSubtype,
+ }
+
+ // Set our outgoing compression according to the UseCompressor CallOption, if
+ // set. In that case, also find the compressor from the encoding package.
+ // Otherwise, use the compressor configured by the WithCompressor DialOption,
+ // if set.
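+ // Note: the identity encoding is special-cased below; it may be set as
+ // SendCompress, but no Compressor has to be registered for it.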
+ var cp Compressor + var comp encoding.Compressor + if ct := c.compressorType; ct != "" { + callHdr.SendCompress = ct + if ct != encoding.Identity { + comp = encoding.GetCompressor(ct) + if comp == nil { + return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct) + } + } + } else if ac.cc.dopts.cp != nil { + callHdr.SendCompress = ac.cc.dopts.cp.Type() + cp = ac.cc.dopts.cp + } + if c.creds != nil { + callHdr.Creds = c.creds + } + + // Use a special addrConnStream to avoid retry. + as := &addrConnStream{ + callHdr: callHdr, + ac: ac, + ctx: ctx, + cancel: cancel, + opts: opts, + callInfo: c, + desc: desc, + codec: c.codec, + cp: cp, + comp: comp, + t: t, + } + + as.callInfo.stream = as + s, err := as.t.NewStream(as.ctx, as.callHdr) + if err != nil { + err = toRPCErr(err) + return nil, err + } + as.s = s + as.p = &parser{r: s} + ac.incrCallsStarted() + if desc != unaryStreamDesc { + // Listen on cc and stream contexts to cleanup when the user closes the + // ClientConn or cancels the stream context. In all other cases, an error + // should already be injected into the recv buffer by the transport, which + // the client will eventually receive, and then we will cancel the stream's + // context in clientStream.finish. + go func() { + select { + case <-ac.ctx.Done(): + as.finish(status.Error(codes.Canceled, "grpc: the SubConn is closing")) + case <-ctx.Done(): + as.finish(toRPCErr(ctx.Err())) + } + }() + } + return as, nil +} + +type addrConnStream struct { + s *transport.Stream + ac *addrConn + callHdr *transport.CallHdr + cancel context.CancelFunc + opts []CallOption + callInfo *callInfo + t transport.ClientTransport + ctx context.Context + sentLast bool + desc *StreamDesc + codec baseCodec + cp Compressor + comp encoding.Compressor + decompSet bool + dc Decompressor + decomp encoding.Compressor + p *parser + mu sync.Mutex + finished bool +} + +func (as *addrConnStream) Header() (metadata.MD, error) { + m, err := as.s.Header() + if err != nil { + as.finish(toRPCErr(err)) + } + return m, err +} + +func (as *addrConnStream) Trailer() metadata.MD { + return as.s.Trailer() +} + +func (as *addrConnStream) CloseSend() error { + if as.sentLast { + // TODO: return an error and finish the stream instead, due to API misuse? + return nil + } + as.sentLast = true + + as.t.Write(as.s, nil, nil, &transport.Options{Last: true}) + // Always return nil; io.EOF is the only error that might make sense + // instead, but there is no need to signal the client to call RecvMsg + // as the only use left for the stream after CloseSend is to call + // RecvMsg. This also matches historical behavior. + return nil +} + +func (as *addrConnStream) Context() context.Context { + return as.s.Context() +} + +func (as *addrConnStream) SendMsg(m interface{}) (err error) { + defer func() { + if err != nil && err != io.EOF { + // Call finish on the client stream for errors generated by this SendMsg + // call, as these indicate problems created by this client. (Transport + // errors are converted to an io.EOF error in csAttempt.sendMsg; the real + // error will be returned from RecvMsg eventually in that case, or be + // retried.) 
+ as.finish(err)
+ }
+ }()
+ if as.sentLast {
+ return status.Errorf(codes.Internal, "SendMsg called after CloseSend")
+ }
+ if !as.desc.ClientStreams {
+ as.sentLast = true
+ }
+
+ // load hdr, payload, data
+ hdr, payld, _, err := prepareMsg(m, as.codec, as.cp, as.comp)
+ if err != nil {
+ return err
+ }
+
+ // TODO(dfawley): should we be checking len(data) instead?
+ if len(payld) > *as.callInfo.maxSendMessageSize {
+ return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payld), *as.callInfo.maxSendMessageSize)
+ }
+
+ if err := as.t.Write(as.s, hdr, payld, &transport.Options{Last: !as.desc.ClientStreams}); err != nil {
+ if !as.desc.ClientStreams {
+ // For non-client-streaming RPCs, we return nil instead of EOF on error
+ // because the generated code requires it. finish is not called; RecvMsg()
+ // will call it with the stream's status independently.
+ return nil
+ }
+ return io.EOF
+ }
+
+ if channelz.IsOn() {
+ as.t.IncrMsgSent()
+ }
+ return nil
+}
+
+func (as *addrConnStream) RecvMsg(m interface{}) (err error) {
+ defer func() {
+ if err != nil || !as.desc.ServerStreams {
+ // err != nil or non-server-streaming indicates end of stream.
+ as.finish(err)
+ }
+ }()
+
+ if !as.decompSet {
+ // Block until we receive headers containing received message encoding.
+ if ct := as.s.RecvCompress(); ct != "" && ct != encoding.Identity {
+ if as.dc == nil || as.dc.Type() != ct {
+ // No configured decompressor, or it does not match the incoming
+ // message encoding; attempt to find a registered compressor that does.
+ as.dc = nil
+ as.decomp = encoding.GetCompressor(ct)
+ }
+ } else {
+ // No compression is used; disable our decompressor.
+ as.dc = nil
+ }
+ // Only initialize this state once per stream.
+ as.decompSet = true
+ }
+ err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp)
+ if err != nil {
+ if err == io.EOF {
+ if statusErr := as.s.Status().Err(); statusErr != nil {
+ return statusErr
+ }
+ return io.EOF // indicates successful end of stream.
+ }
+ return toRPCErr(err)
+ }
+
+ if channelz.IsOn() {
+ as.t.IncrMsgRecv()
+ }
+ if as.desc.ServerStreams {
+ // Subsequent messages should be received by subsequent RecvMsg calls.
+ return nil
+ }
+
+ // Special handling for non-server-stream rpcs.
+ // This recv expects EOF or errors, so we don't collect inPayload.
+ err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp)
+ if err == nil {
+ return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
+ }
+ if err == io.EOF {
+ return as.s.Status().Err() // non-server streaming Recv returns nil on success
+ }
+ return toRPCErr(err)
+}
+
+func (as *addrConnStream) finish(err error) {
+ as.mu.Lock()
+ if as.finished {
+ as.mu.Unlock()
+ return
+ }
+ as.finished = true
+ if err == io.EOF {
+ // Ending a stream with EOF indicates a success.
+ err = nil
+ }
+ if as.s != nil {
+ as.t.CloseStream(as.s, err)
+ }
+
+ if err != nil {
+ as.ac.incrCallsFailed()
+ } else {
+ as.ac.incrCallsSucceeded()
+ }
+ as.cancel()
+ as.mu.Unlock()
+}
+
+// ServerStream defines the server-side behavior of a streaming RPC.
+//
+// All errors returned from ServerStream methods are compatible with the
+// status package.
+type ServerStream interface {
+ // SetHeader sets the header metadata. It may be called multiple times.
+ // When called multiple times, all the provided metadata will be merged.
+ // All the metadata will be sent out when one of the following happens:
+ // - ServerStream.SendHeader() is called;
+ // - The first response is sent out;
+ // - An RPC status is sent out (error or success).
+ SetHeader(metadata.MD) error
+ // SendHeader sends the header metadata.
+ // The provided md and headers set by SetHeader() will be sent.
+ // It fails if called multiple times.
+ SendHeader(metadata.MD) error
+ // SetTrailer sets the trailer metadata which will be sent with the RPC status.
+ // When called more than once, all the provided metadata will be merged.
+ SetTrailer(metadata.MD)
+ // Context returns the context for this stream.
+ Context() context.Context
+ // SendMsg sends a message. On error, SendMsg aborts the stream and the
+ // error is returned directly.
+ //
+ // SendMsg blocks until:
+ // - There is sufficient flow control to schedule m with the transport, or
+ // - The stream is done, or
+ // - The stream breaks.
+ //
+ // SendMsg does not wait until the message is received by the client. An
+ // untimely stream closure may result in lost messages.
+ //
+ // It is safe to have a goroutine calling SendMsg and another goroutine
+ // calling RecvMsg on the same stream at the same time, but it is not safe
+ // to call SendMsg on the same stream in different goroutines.
+ SendMsg(m interface{}) error
+ // RecvMsg blocks until it receives a message into m or the stream is
+ // done. It returns io.EOF when the client has performed a CloseSend. On
+ // any non-EOF error, the stream is aborted and the error contains the
+ // RPC status.
+ //
+ // It is safe to have a goroutine calling SendMsg and another goroutine
+ // calling RecvMsg on the same stream at the same time, but it is not
+ // safe to call RecvMsg on the same stream in different goroutines.
+ RecvMsg(m interface{}) error
+}
+
+// serverStream implements a server side Stream.
+type serverStream struct {
+ ctx context.Context
+ t transport.ServerTransport
+ s *transport.Stream
+ p *parser
+ codec baseCodec
+
+ cp Compressor
+ dc Decompressor
+ comp encoding.Compressor
+ decomp encoding.Compressor
+
+ maxReceiveMessageSize int
+ maxSendMessageSize int
+ trInfo *traceInfo
+
+ statsHandler stats.Handler
+
+ binlog *binarylog.MethodLogger
+ // serverHeaderBinlogged indicates whether the server header has been
+ // logged. It is set when one of the following happens: stream.SendHeader(),
+ // stream.Send().
+ //
+ // It's only checked in send and sendHeader, so it doesn't need to be
+ // synchronized.
+ serverHeaderBinlogged bool
+
+ mu sync.Mutex // protects trInfo.tr after the service handler runs.
+}
+
+func (ss *serverStream) Context() context.Context {
+ return ss.ctx
+}
+
+func (ss *serverStream) SetHeader(md metadata.MD) error {
+ if md.Len() == 0 {
+ return nil
+ }
+ return ss.s.SetHeader(md)
+}
+
+func (ss *serverStream) SendHeader(md metadata.MD) error {
+ err := ss.t.WriteHeader(ss.s, md)
+ if ss.binlog != nil && !ss.serverHeaderBinlogged {
+ h, _ := ss.s.Header()
+ ss.binlog.Log(&binarylog.ServerHeader{
+ Header: h,
+ })
+ ss.serverHeaderBinlogged = true
+ }
+ return err
+}
+
+func (ss *serverStream) SetTrailer(md metadata.MD) {
+ if md.Len() == 0 {
+ return
+ }
+ ss.s.SetTrailer(md)
+}
+
+func (ss *serverStream) SendMsg(m interface{}) (err error) {
+ defer func() {
+ if ss.trInfo != nil {
+ ss.mu.Lock()
+ if ss.trInfo.tr != nil {
+ if err == nil {
+ ss.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true)
+ } else {
+ ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
+ ss.trInfo.tr.SetError()
+ }
+ }
+ ss.mu.Unlock()
+ }
+ if err != nil && err != io.EOF {
+ st, _ := status.FromError(toRPCErr(err))
+ ss.t.WriteStatus(ss.s, st)
+ // A non-user-specified status was sent out. This should be an error
+ // case (e.g. a server-side Cancel).
+ //
+ // This is not handled specifically now. The user will return a final
+ // status from the service handler; we will log that error instead.
+ // This behavior is similar to an interceptor.
+ }
+ if channelz.IsOn() && err == nil {
+ ss.t.IncrMsgSent()
+ }
+ }()
+
+ // load hdr, payload, data
+ hdr, payload, data, err := prepareMsg(m, ss.codec, ss.cp, ss.comp)
+ if err != nil {
+ return err
+ }
+
+ // TODO(dfawley): should we be checking len(data) instead?
+ if len(payload) > ss.maxSendMessageSize {
+ return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), ss.maxSendMessageSize)
+ }
+ if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil {
+ return toRPCErr(err)
+ }
+ if ss.binlog != nil {
+ if !ss.serverHeaderBinlogged {
+ h, _ := ss.s.Header()
+ ss.binlog.Log(&binarylog.ServerHeader{
+ Header: h,
+ })
+ ss.serverHeaderBinlogged = true
+ }
+ ss.binlog.Log(&binarylog.ServerMessage{
+ Message: data,
+ })
+ }
+ if ss.statsHandler != nil {
+ ss.statsHandler.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now()))
+ }
+ return nil
+}
+
+func (ss *serverStream) RecvMsg(m interface{}) (err error) {
+ defer func() {
+ if ss.trInfo != nil {
+ ss.mu.Lock()
+ if ss.trInfo.tr != nil {
+ if err == nil {
+ ss.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true)
+ } else if err != io.EOF {
+ ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
+ ss.trInfo.tr.SetError()
+ }
+ }
+ ss.mu.Unlock()
+ }
+ if err != nil && err != io.EOF {
+ st, _ := status.FromError(toRPCErr(err))
+ ss.t.WriteStatus(ss.s, st)
+ // A non-user-specified status was sent out. This should be an error
+ // case (e.g. a server-side Cancel).
+ //
+ // This is not handled specifically now. The user will return a final
+ // status from the service handler; we will log that error instead.
+ // This behavior is similar to an interceptor.
+ } + if channelz.IsOn() && err == nil { + ss.t.IncrMsgRecv() + } + }() + var payInfo *payloadInfo + if ss.statsHandler != nil || ss.binlog != nil { + payInfo = &payloadInfo{} + } + if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp); err != nil { + if err == io.EOF { + if ss.binlog != nil { + ss.binlog.Log(&binarylog.ClientHalfClose{}) + } + return err + } + if err == io.ErrUnexpectedEOF { + err = status.Errorf(codes.Internal, io.ErrUnexpectedEOF.Error()) + } + return toRPCErr(err) + } + if ss.statsHandler != nil { + ss.statsHandler.HandleRPC(ss.s.Context(), &stats.InPayload{ + RecvTime: time.Now(), + Payload: m, + // TODO truncate large payload. + Data: payInfo.uncompressedBytes, + WireLength: payInfo.wireLength, + Length: len(payInfo.uncompressedBytes), + }) + } + if ss.binlog != nil { + ss.binlog.Log(&binarylog.ClientMessage{ + Message: payInfo.uncompressedBytes, + }) + } + return nil +} + +// MethodFromServerStream returns the method string for the input stream. +// The returned string is in the format of "/service/method". +func MethodFromServerStream(stream ServerStream) (string, bool) { + return Method(stream.Context()) +} + +// prepareMsg returns the hdr, payload and data +// using the compressors passed or using the +// passed preparedmsg +func prepareMsg(m interface{}, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) { + if preparedMsg, ok := m.(*PreparedMsg); ok { + return preparedMsg.hdr, preparedMsg.payload, preparedMsg.encodedData, nil + } + // The input interface is not a prepared msg. + // Marshal and Compress the data at this point + data, err = encode(codec, m) + if err != nil { + return nil, nil, nil, err + } + compData, err := compress(data, cp, comp) + if err != nil { + return nil, nil, nil, err + } + hdr, payload = msgHeader(data, compData) + return hdr, payload, data, nil +} diff --git a/vendor/google.golang.org/grpc/tap/tap.go b/vendor/google.golang.org/grpc/tap/tap.go new file mode 100644 index 00000000..584360f6 --- /dev/null +++ b/vendor/google.golang.org/grpc/tap/tap.go @@ -0,0 +1,51 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package tap defines the function handles which are executed on the transport +// layer of gRPC-Go and related information. Everything here is EXPERIMENTAL. +package tap + +import ( + "context" +) + +// Info defines the relevant information needed by the handles. +type Info struct { + // FullMethodName is the string of grpc method (in the format of + // /package.service/method). + FullMethodName string + // TODO: More to be added. +} + +// ServerInHandle defines the function which runs before a new stream is created +// on the server side. If it returns a non-nil error, the stream will not be +// created and a RST_STREAM will be sent back to the client with REFUSED_STREAM. 
+// The client will receive an RPC error "code = Unavailable, desc = stream +// terminated by RST_STREAM with error code: REFUSED_STREAM". +// +// It's intended to be used in situations where you don't want to waste the +// resources to accept the new stream (e.g. rate-limiting). And the content of +// the error will be ignored and won't be sent back to the client. For other +// general usages, please use interceptors. +// +// Note that it is executed in the per-connection I/O goroutine(s) instead of +// per-RPC goroutine. Therefore, users should NOT have any +// blocking/time-consuming work in this handle. Otherwise all the RPCs would +// slow down. Also, for the same reason, this handle won't be called +// concurrently by gRPC. +type ServerInHandle func(ctx context.Context, info *Info) (context.Context, error) diff --git a/vendor/google.golang.org/grpc/trace.go b/vendor/google.golang.org/grpc/trace.go new file mode 100644 index 00000000..07a2d26b --- /dev/null +++ b/vendor/google.golang.org/grpc/trace.go @@ -0,0 +1,123 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "bytes" + "fmt" + "io" + "net" + "strings" + "sync" + "time" + + "golang.org/x/net/trace" +) + +// EnableTracing controls whether to trace RPCs using the golang.org/x/net/trace package. +// This should only be set before any RPCs are sent or received by this program. +var EnableTracing bool + +// methodFamily returns the trace family for the given method. +// It turns "/pkg.Service/GetFoo" into "pkg.Service". +func methodFamily(m string) string { + m = strings.TrimPrefix(m, "/") // remove leading slash + if i := strings.Index(m, "/"); i >= 0 { + m = m[:i] // remove everything from second slash + } + return m +} + +// traceInfo contains tracing information for an RPC. +type traceInfo struct { + tr trace.Trace + firstLine firstLine +} + +// firstLine is the first line of an RPC trace. +// It may be mutated after construction; remoteAddr specifically may change +// during client-side use. +type firstLine struct { + mu sync.Mutex + client bool // whether this is a client (outgoing) RPC + remoteAddr net.Addr + deadline time.Duration // may be zero +} + +func (f *firstLine) SetRemoteAddr(addr net.Addr) { + f.mu.Lock() + f.remoteAddr = addr + f.mu.Unlock() +} + +func (f *firstLine) String() string { + f.mu.Lock() + defer f.mu.Unlock() + + var line bytes.Buffer + io.WriteString(&line, "RPC: ") + if f.client { + io.WriteString(&line, "to") + } else { + io.WriteString(&line, "from") + } + fmt.Fprintf(&line, " %v deadline:", f.remoteAddr) + if f.deadline != 0 { + fmt.Fprint(&line, f.deadline) + } else { + io.WriteString(&line, "none") + } + return line.String() +} + +const truncateSize = 100 + +func truncate(x string, l int) string { + if l > len(x) { + return x + } + return x[:l] +} + +// payload represents an RPC request or response payload. +type payload struct { + sent bool // whether this is an outgoing payload + msg interface{} // e.g. 
a proto.Message + // TODO(dsymonds): add stringifying info to codec, and limit how much we hold here? +} + +func (p payload) String() string { + if p.sent { + return truncate(fmt.Sprintf("sent: %v", p.msg), truncateSize) + } + return truncate(fmt.Sprintf("recv: %v", p.msg), truncateSize) +} + +type fmtStringer struct { + format string + a []interface{} +} + +func (f *fmtStringer) String() string { + return fmt.Sprintf(f.format, f.a...) +} + +type stringer string + +func (s stringer) String() string { return string(s) } diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go new file mode 100644 index 00000000..ca5d55fd --- /dev/null +++ b/vendor/google.golang.org/grpc/version.go @@ -0,0 +1,22 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +// Version is the current grpc version. +const Version = "1.29.1" diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh new file mode 100644 index 00000000..e12024fb --- /dev/null +++ b/vendor/google.golang.org/grpc/vet.sh @@ -0,0 +1,163 @@ +#!/bin/bash + +set -ex # Exit on error; debugging enabled. +set -o pipefail # Fail a pipe if any sub-command fails. + +# not makes sure the command passed to it does not exit with a return code of 0. +not() { + # This is required instead of the earlier (! $COMMAND) because subshells and + # pipefail don't work the same on Darwin as in Linux. + ! "$@" +} + +die() { + echo "$@" >&2 + exit 1 +} + +fail_on_output() { + tee /dev/stderr | not read +} + +# Check to make sure it's safe to modify the user's git repo. +git status --porcelain | fail_on_output + +# Undo any edits made by this script. +cleanup() { + git reset --hard HEAD +} +trap cleanup EXIT + +PATH="${GOPATH}/bin:${GOROOT}/bin:${PATH}" + +if [[ "$1" = "-install" ]]; then + # Check for module support + if go help mod >& /dev/null; then + # Install the pinned versions as defined in module tools. + pushd ./test/tools + go install \ + golang.org/x/lint/golint \ + golang.org/x/tools/cmd/goimports \ + honnef.co/go/tools/cmd/staticcheck \ + github.com/client9/misspell/cmd/misspell \ + github.com/golang/protobuf/protoc-gen-go + popd + else + # Ye olde `go get` incantation. + # Note: this gets the latest version of all tools (vs. the pinned versions + # with Go modules). 
+ go get -u \ + golang.org/x/lint/golint \ + golang.org/x/tools/cmd/goimports \ + honnef.co/go/tools/cmd/staticcheck \ + github.com/client9/misspell/cmd/misspell \ + github.com/golang/protobuf/protoc-gen-go + fi + if [[ -z "${VET_SKIP_PROTO}" ]]; then + if [[ "${TRAVIS}" = "true" ]]; then + PROTOBUF_VERSION=3.3.0 + PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip + pushd /home/travis + wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME} + unzip ${PROTOC_FILENAME} + bin/protoc --version + popd + elif not which protoc > /dev/null; then + die "Please install protoc into your path" + fi + fi + exit 0 +elif [[ "$#" -ne 0 ]]; then + die "Unknown argument(s): $*" +fi + +# - Ensure all source files contain a copyright message. +not git grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)\|DO NOT EDIT" -- '*.go' + +# - Make sure all tests in grpc and grpc/test use leakcheck via Teardown. +not grep 'func Test[^(]' *_test.go +not grep 'func Test[^(]' test/*.go + +# - Do not import x/net/context. +not git grep -l 'x/net/context' -- "*.go" + +# - Do not import math/rand for real library code. Use internal/grpcrand for +# thread safety. +git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^stress\|grpcrand\|^benchmark\|wrr_test' + +# - Ensure all ptypes proto packages are renamed when importing. +not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go" + +# - Check imports that are illegal in appengine (until Go 1.11). +# TODO: Remove when we drop Go 1.10 support +go list -f {{.Dir}} ./... | xargs go run test/go_vet/vet.go + +# - gofmt, goimports, golint (with exceptions for generated code), go vet. +gofmt -s -d -l . 2>&1 | fail_on_output +goimports -l . 2>&1 | not grep -vE "(_mock|\.pb)\.go" +golint ./... 2>&1 | not grep -vE "(_mock|\.pb)\.go:" +go vet -all ./... + +misspell -error . + +# - Check that generated proto files are up to date. +if [[ -z "${VET_SKIP_PROTO}" ]]; then + PATH="/home/travis/bin:${PATH}" make proto && \ + git status --porcelain 2>&1 | fail_on_output || \ + (git status; git --no-pager diff; exit 1) +fi + +# - Check that our module is tidy. +if go help mod >& /dev/null; then + go mod tidy && \ + git status --porcelain 2>&1 | fail_on_output || \ + (git status; git --no-pager diff; exit 1) +fi + +# - Collection of static analysis checks +# +# TODO(dfawley): don't use deprecated functions in examples or first-party +# plugins. +SC_OUT="$(mktemp)" +staticcheck -go 1.9 -checks 'inherit,-ST1015' ./... > "${SC_OUT}" || true +# Error if anything other than deprecation warnings are printed. +not grep -v "is deprecated:.*SA1019" "${SC_OUT}" +# Only ignore the following deprecated types/fields/functions. 
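+# (grep -Fv prints any staticcheck line that matches none of the fixed
+# strings below; wrapping it in "not" fails the script when such a line
+# exists, i.e. when a non-allowlisted deprecation warning appears.)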
+not grep -Fv '.HandleResolvedAddrs +.HandleSubConnStateChange +.HeaderMap +.NewAddress +.NewServiceConfig +.Metadata is deprecated: use Attributes +.Type is deprecated: use Attributes +.UpdateBalancerState +balancer.Picker +grpc.CallCustomCodec +grpc.Code +grpc.Compressor +grpc.Decompressor +grpc.MaxMsgSize +grpc.MethodConfig +grpc.NewGZIPCompressor +grpc.NewGZIPDecompressor +grpc.RPCCompressor +grpc.RPCDecompressor +grpc.RoundRobin +grpc.ServiceConfig +grpc.WithBalancer +grpc.WithBalancerName +grpc.WithCompressor +grpc.WithDecompressor +grpc.WithDialer +grpc.WithMaxMsgSize +grpc.WithServiceConfig +grpc.WithTimeout +http.CloseNotifier +info.SecurityVersion +naming.Resolver +naming.Update +naming.Watcher +resolver.Backend +resolver.GRPCLB' "${SC_OUT}" + +echo SUCCESS diff --git a/vendor/modules.txt b/vendor/modules.txt index 8ede9853..71662633 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -3,6 +3,8 @@ github.com/Azure/go-ansiterm github.com/Azure/go-ansiterm/winterm # github.com/BurntSushi/toml v0.3.1 github.com/BurntSushi/toml +# github.com/DataDog/zstd v1.4.4 +## explicit # github.com/Masterminds/goutils v1.1.0 github.com/Masterminds/goutils # github.com/Masterminds/semver/v3 v3.1.0 @@ -32,12 +34,14 @@ github.com/Microsoft/hcsshim/internal/vmcompute github.com/Microsoft/hcsshim/internal/wclayer github.com/Microsoft/hcsshim/osversion # github.com/Sabayon/pkgs-checker v0.7.2 +## explicit github.com/Sabayon/pkgs-checker/pkg/gentoo # github.com/apex/log v1.1.1 github.com/apex/log # github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 github.com/asaskevich/govalidator # github.com/asdine/storm v0.0.0-20190418133842-e0f77eada154 +## explicit github.com/asdine/storm github.com/asdine/storm/codec github.com/asdine/storm/codec/json @@ -45,35 +49,94 @@ github.com/asdine/storm/index github.com/asdine/storm/internal github.com/asdine/storm/q # github.com/briandowns/spinner v1.7.0 +## explicit github.com/briandowns/spinner # github.com/cavaliercoder/grab v1.0.1-0.20201108051000-98a5bfe305ec +## explicit github.com/cavaliercoder/grab github.com/cavaliercoder/grab/bps # github.com/chuckpreslar/emission v0.0.0-20170206194824-a7ddd980baf9 github.com/chuckpreslar/emission # github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0 github.com/codegangsta/inject -# github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f +# github.com/containerd/cgroups v0.0.0-20200217135630-d732e370d46d github.com/containerd/cgroups/stats/v1 -# github.com/containerd/containerd v1.3.4 +# github.com/containerd/console v0.0.0-20191219165238-8375c3424e4d +github.com/containerd/console +# github.com/containerd/containerd v1.4.1-0.20201117152358-0edc412565dc => github.com/containerd/containerd v1.3.1-0.20200227195959-4d242818bf55 +## explicit +github.com/containerd/containerd/api/services/content/v1 github.com/containerd/containerd/archive +github.com/containerd/containerd/archive/compression +github.com/containerd/containerd/containers +github.com/containerd/containerd/content +github.com/containerd/containerd/content/local +github.com/containerd/containerd/content/proxy +github.com/containerd/containerd/contrib/seccomp +github.com/containerd/containerd/diff +github.com/containerd/containerd/diff/apply +github.com/containerd/containerd/diff/walking github.com/containerd/containerd/errdefs +github.com/containerd/containerd/events +github.com/containerd/containerd/events/exchange +github.com/containerd/containerd/filters +github.com/containerd/containerd/gc 
+github.com/containerd/containerd/identifiers +github.com/containerd/containerd/images +github.com/containerd/containerd/images/archive +github.com/containerd/containerd/labels +github.com/containerd/containerd/leases github.com/containerd/containerd/log +github.com/containerd/containerd/metadata +github.com/containerd/containerd/metadata/boltutil +github.com/containerd/containerd/mount +github.com/containerd/containerd/namespaces +github.com/containerd/containerd/oci +github.com/containerd/containerd/platforms +github.com/containerd/containerd/plugin +github.com/containerd/containerd/reference +github.com/containerd/containerd/reference/docker +github.com/containerd/containerd/remotes +github.com/containerd/containerd/remotes/docker +github.com/containerd/containerd/remotes/docker/schema1 +github.com/containerd/containerd/rootfs +github.com/containerd/containerd/services/content/contentserver +github.com/containerd/containerd/snapshots +github.com/containerd/containerd/snapshots/native +github.com/containerd/containerd/snapshots/overlay +github.com/containerd/containerd/snapshots/storage github.com/containerd/containerd/sys +github.com/containerd/containerd/version # github.com/containerd/continuity v0.0.0-20200228182428-0f16d7a0959c github.com/containerd/continuity/fs github.com/containerd/continuity/syscallx github.com/containerd/continuity/sysx +# github.com/containerd/go-cni v0.0.0-20200107172653-c154a49e2c75 +github.com/containerd/go-cni +# github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328 +github.com/containerd/go-runc +# github.com/containerd/ttrpc v0.0.0-20200121165050-0be804eadb15 +github.com/containerd/ttrpc +# github.com/containerd/typeurl v0.0.0-20200205145503-b45ef1f1f737 +github.com/containerd/typeurl +# github.com/containernetworking/cni v0.7.1 +github.com/containernetworking/cni/libcni +github.com/containernetworking/cni/pkg/invoke +github.com/containernetworking/cni/pkg/types +github.com/containernetworking/cni/pkg/types/020 +github.com/containernetworking/cni/pkg/types/current +github.com/containernetworking/cni/pkg/version # github.com/cpuguy83/go-md2man/v2 v2.0.0 github.com/cpuguy83/go-md2man/v2/md2man # github.com/crillab/gophersat v1.3.2-0.20201023142334-3fc2ac466765 +## explicit github.com/crillab/gophersat/bf github.com/crillab/gophersat/solver # github.com/cyphar/filepath-securejoin v0.2.2 github.com/cyphar/filepath-securejoin # github.com/davecgh/go-spew v1.1.1 github.com/davecgh/go-spew/spew -# github.com/docker/cli v0.0.0-20200130152716-5d0cf8839492 +# github.com/docker/cli v0.0.0-20200227165822-2298e6a3fe24 github.com/docker/cli/cli/config github.com/docker/cli/cli/config/configfile github.com/docker/cli/cli/config/credentials @@ -87,7 +150,8 @@ github.com/docker/distribution/manifest/schema2 github.com/docker/distribution/reference github.com/docker/distribution/registry/api/errcode github.com/docker/distribution/registry/client/auth/challenge -# github.com/docker/docker v17.12.0-ce-rc1.0.20200417035958-130b0bc6032c+incompatible => github.com/Luet-lab/moby v17.12.0-ce-rc1.0.20200605210607-749178b8f80d+incompatible +# github.com/docker/docker v20.10.0-beta1.0.20201110211921-af34b94a78a1+incompatible => github.com/Luet-lab/moby v17.12.0-ce-rc1.0.20200605210607-749178b8f80d+incompatible +## explicit github.com/docker/docker/api/types/blkiodev github.com/docker/docker/api/types/container github.com/docker/docker/api/types/filters @@ -98,15 +162,20 @@ github.com/docker/docker/api/types/strslice github.com/docker/docker/api/types/swarm 
github.com/docker/docker/api/types/swarm/runtime github.com/docker/docker/api/types/versions +github.com/docker/docker/builder/dockerignore github.com/docker/docker/errdefs github.com/docker/docker/pkg/archive +github.com/docker/docker/pkg/chrootarchive github.com/docker/docker/pkg/fileutils github.com/docker/docker/pkg/homedir github.com/docker/docker/pkg/idtools github.com/docker/docker/pkg/ioutils github.com/docker/docker/pkg/jsonmessage +github.com/docker/docker/pkg/locker github.com/docker/docker/pkg/longpath github.com/docker/docker/pkg/pools +github.com/docker/docker/pkg/reexec +github.com/docker/docker/pkg/signal github.com/docker/docker/pkg/stdcopy github.com/docker/docker/pkg/system github.com/docker/docker/pkg/term @@ -116,19 +185,33 @@ github.com/docker/docker-credential-helpers/client github.com/docker/docker-credential-helpers/credentials # github.com/docker/go-connections v0.4.0 github.com/docker/go-connections/nat +# github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c +github.com/docker/go-events # github.com/docker/go-units v0.4.0 +## explicit github.com/docker/go-units +# github.com/docker/libnetwork v0.8.0-dev.2.0.20200226230617-d8334ccdb9be +github.com/docker/libnetwork/resolvconf +github.com/docker/libnetwork/resolvconf/dns +github.com/docker/libnetwork/types # github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 github.com/docker/libtrust # github.com/ecooper/qlearning v0.0.0-20160612200101-3075011a69fd +## explicit github.com/ecooper/qlearning # github.com/fatih/color v1.7.0 github.com/fatih/color # github.com/fsnotify/fsnotify v1.4.9 github.com/fsnotify/fsnotify # github.com/fsouza/go-dockerclient v1.6.4 +## explicit github.com/fsouza/go-dockerclient +# github.com/genuinetools/img v0.5.11 +## explicit +github.com/genuinetools/img/client +github.com/genuinetools/img/types # github.com/ghodss/yaml v1.0.0 +## explicit github.com/ghodss/yaml # github.com/go-openapi/errors v0.19.2 github.com/go-openapi/errors @@ -145,11 +228,16 @@ github.com/gobwas/glob/syntax/ast github.com/gobwas/glob/syntax/lexer github.com/gobwas/glob/util/runes github.com/gobwas/glob/util/strings +# github.com/gofrs/flock v0.7.1 +github.com/gofrs/flock +# github.com/gogo/googleapis v1.3.2 +github.com/gogo/googleapis/google/rpc # github.com/gogo/protobuf v1.3.1 github.com/gogo/protobuf/gogoproto github.com/gogo/protobuf/proto github.com/gogo/protobuf/protoc-gen-gogo/descriptor github.com/gogo/protobuf/sortkeys +github.com/gogo/protobuf/types # github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e github.com/golang/groupcache/lru # github.com/golang/protobuf v1.4.2 @@ -159,6 +247,7 @@ github.com/golang/protobuf/ptypes/any github.com/golang/protobuf/ptypes/duration github.com/golang/protobuf/ptypes/timestamp # github.com/google/go-containerregistry v0.2.1 +## explicit github.com/google/go-containerregistry/pkg/authn github.com/google/go-containerregistry/pkg/crane github.com/google/go-containerregistry/pkg/internal/legacy @@ -183,18 +272,28 @@ github.com/google/go-containerregistry/pkg/v1/types github.com/google/go-containerregistry/pkg/v1/v1util # github.com/google/gofuzz v1.1.0 github.com/google/gofuzz +# github.com/google/shlex v0.0.0-20150127133951-6f45313302b9 +github.com/google/shlex # github.com/google/uuid v1.1.1 github.com/google/uuid # github.com/googleapis/gnostic v0.2.2 github.com/googleapis/gnostic/OpenAPIv2 github.com/googleapis/gnostic/compiler github.com/googleapis/gnostic/extensions +# github.com/grpc-ecosystem/grpc-opentracing 
v0.0.0-20180507213350-8e809c8a8645
+github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc
 # github.com/hashicorp/errwrap v1.0.0
 github.com/hashicorp/errwrap
+# github.com/hashicorp/go-immutable-radix v1.0.0 => github.com/tonistiigi/go-immutable-radix v0.0.0-20170803185627-826af9ccf0fe
+github.com/hashicorp/go-immutable-radix
 # github.com/hashicorp/go-multierror v1.0.0
+## explicit
 github.com/hashicorp/go-multierror
 # github.com/hashicorp/go-version v1.2.0
+## explicit
 github.com/hashicorp/go-version
+# github.com/hashicorp/golang-lru v0.5.3
+github.com/hashicorp/golang-lru/simplelru
 # github.com/hashicorp/hcl v1.0.0
 github.com/hashicorp/hcl
 github.com/hashicorp/hcl/hcl/ast
@@ -214,17 +313,23 @@ github.com/huandu/xstrings
 github.com/imdario/mergo
 # github.com/inconshreveable/mousetrap v1.0.0
 github.com/inconshreveable/mousetrap
+# github.com/ishidawataru/sctp v0.0.0-20191218070446-00ab2ac2db07
+github.com/ishidawataru/sctp
 # github.com/jedib0t/go-pretty v4.3.0+incompatible
+## explicit
 github.com/jedib0t/go-pretty/table
 github.com/jedib0t/go-pretty/text
 # github.com/jedib0t/go-pretty/v6 v6.0.5
+## explicit
 github.com/jedib0t/go-pretty/v6/list
 github.com/jedib0t/go-pretty/v6/text
 # github.com/jinzhu/copier v0.0.0-20180308034124-7e38e58719c3
+## explicit
 github.com/jinzhu/copier
 # github.com/json-iterator/go v1.1.9
 github.com/json-iterator/go
 # github.com/klauspost/compress v1.8.3
+## explicit
 github.com/klauspost/compress/flate
 github.com/klauspost/compress/fse
 github.com/klauspost/compress/huff0
@@ -234,18 +339,23 @@ github.com/klauspost/compress/zstd/internal/xxhash
 # github.com/klauspost/cpuid v1.2.1
 github.com/klauspost/cpuid
 # github.com/klauspost/pgzip v1.2.1
+## explicit
 github.com/klauspost/pgzip
 # github.com/knqyf263/go-deb-version v0.0.0-20190517075300-09fca494f03d
+## explicit
 github.com/knqyf263/go-deb-version
 # github.com/konsorten/go-windows-terminal-sequences v1.0.3
 github.com/konsorten/go-windows-terminal-sequences
 # github.com/kyokomi/emoji v2.1.0+incompatible
+## explicit
 github.com/kyokomi/emoji
 # github.com/logrusorgru/aurora v0.0.0-20190417123914-21d75270181e
+## explicit
 github.com/logrusorgru/aurora
 # github.com/magiconair/properties v1.8.1
 github.com/magiconair/properties
 # github.com/marcsauter/single v0.0.0-20181104081128-f8bf46f26ec0
+## explicit
 github.com/marcsauter/single
 # github.com/mattn/go-colorable v0.1.2
 github.com/mattn/go-colorable
@@ -257,13 +367,114 @@ github.com/mattn/go-runewidth
 github.com/mitchellh/colorstring
 # github.com/mitchellh/copystructure v1.0.0
 github.com/mitchellh/copystructure
+# github.com/mitchellh/hashstructure v1.0.0
+github.com/mitchellh/hashstructure
 # github.com/mitchellh/mapstructure v1.1.2
 github.com/mitchellh/mapstructure
 # github.com/mitchellh/reflectwalk v1.0.0
 github.com/mitchellh/reflectwalk
-# github.com/moby/sys/mount v0.1.1-0.20200320164225-6154f11e6840
+# github.com/moby/buildkit v0.7.2
+## explicit
+github.com/moby/buildkit/api/services/control
+github.com/moby/buildkit/api/types
+github.com/moby/buildkit/cache
+github.com/moby/buildkit/cache/blobs
+github.com/moby/buildkit/cache/contenthash
+github.com/moby/buildkit/cache/metadata
+github.com/moby/buildkit/cache/remotecache
+github.com/moby/buildkit/cache/remotecache/inline
+github.com/moby/buildkit/cache/remotecache/local
+github.com/moby/buildkit/cache/remotecache/registry
+github.com/moby/buildkit/cache/remotecache/v1
+github.com/moby/buildkit/cache/util
+github.com/moby/buildkit/client
+github.com/moby/buildkit/client/buildid
+github.com/moby/buildkit/client/connhelper
+github.com/moby/buildkit/client/llb
+github.com/moby/buildkit/client/llb/imagemetaresolver
+github.com/moby/buildkit/client/ociindex
+github.com/moby/buildkit/cmd/buildkitd/config
+github.com/moby/buildkit/control
+github.com/moby/buildkit/control/gateway
+github.com/moby/buildkit/executor
+github.com/moby/buildkit/executor/oci
+github.com/moby/buildkit/executor/runcexecutor
+github.com/moby/buildkit/exporter
+github.com/moby/buildkit/exporter/containerimage
+github.com/moby/buildkit/exporter/containerimage/exptypes
+github.com/moby/buildkit/exporter/local
+github.com/moby/buildkit/exporter/oci
+github.com/moby/buildkit/exporter/tar
+github.com/moby/buildkit/frontend
+github.com/moby/buildkit/frontend/dockerfile/builder
+github.com/moby/buildkit/frontend/dockerfile/command
+github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb
+github.com/moby/buildkit/frontend/dockerfile/instructions
+github.com/moby/buildkit/frontend/dockerfile/parser
+github.com/moby/buildkit/frontend/dockerfile/shell
+github.com/moby/buildkit/frontend/gateway
+github.com/moby/buildkit/frontend/gateway/client
+github.com/moby/buildkit/frontend/gateway/forwarder
+github.com/moby/buildkit/frontend/gateway/grpcclient
+github.com/moby/buildkit/frontend/gateway/pb
+github.com/moby/buildkit/identity
+github.com/moby/buildkit/session
+github.com/moby/buildkit/session/auth
+github.com/moby/buildkit/session/auth/authprovider
+github.com/moby/buildkit/session/content
+github.com/moby/buildkit/session/filesync
+github.com/moby/buildkit/session/grpchijack
+github.com/moby/buildkit/session/secrets
+github.com/moby/buildkit/session/sshforward
+github.com/moby/buildkit/session/testutil
+github.com/moby/buildkit/session/upload
+github.com/moby/buildkit/snapshot
+github.com/moby/buildkit/snapshot/containerd
+github.com/moby/buildkit/snapshot/imagerefchecker
+github.com/moby/buildkit/solver
+github.com/moby/buildkit/solver/bboltcachestorage
+github.com/moby/buildkit/solver/internal/pipe
+github.com/moby/buildkit/solver/llbsolver
+github.com/moby/buildkit/solver/llbsolver/file
+github.com/moby/buildkit/solver/llbsolver/ops
+github.com/moby/buildkit/solver/llbsolver/ops/fileoptypes
+github.com/moby/buildkit/solver/pb
+github.com/moby/buildkit/source
+github.com/moby/buildkit/source/containerimage
+github.com/moby/buildkit/source/git
+github.com/moby/buildkit/source/http
+github.com/moby/buildkit/source/local
+github.com/moby/buildkit/util/apicaps
+github.com/moby/buildkit/util/apicaps/pb
+github.com/moby/buildkit/util/appcontext
+github.com/moby/buildkit/util/appdefaults
+github.com/moby/buildkit/util/binfmt_misc
+github.com/moby/buildkit/util/cond
+github.com/moby/buildkit/util/contentutil
+github.com/moby/buildkit/util/entitlements
+github.com/moby/buildkit/util/entitlements/security
+github.com/moby/buildkit/util/flightcontrol
+github.com/moby/buildkit/util/imageutil
+github.com/moby/buildkit/util/leaseutil
+github.com/moby/buildkit/util/network
+github.com/moby/buildkit/util/network/cniprovider
+github.com/moby/buildkit/util/network/netproviders
+github.com/moby/buildkit/util/progress
+github.com/moby/buildkit/util/progress/logs
+github.com/moby/buildkit/util/pull
+github.com/moby/buildkit/util/push
+github.com/moby/buildkit/util/resolver
+github.com/moby/buildkit/util/rootless/specconv
+github.com/moby/buildkit/util/system
+github.com/moby/buildkit/util/throttle
+github.com/moby/buildkit/util/tracing
+github.com/moby/buildkit/util/winlayers
+github.com/moby/buildkit/worker
+github.com/moby/buildkit/worker/base
+# github.com/moby/sys/mount v0.2.0
+## explicit
 github.com/moby/sys/mount
-# github.com/moby/sys/mountinfo v0.1.0
+# github.com/moby/sys/mountinfo v0.4.0
 github.com/moby/sys/mountinfo
 # github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd
 github.com/modern-go/concurrent
@@ -272,12 +483,16 @@ github.com/modern-go/reflect2
 # github.com/morikuni/aec v1.0.0
 github.com/morikuni/aec
 # github.com/mudler/cobra-extensions v0.0.0-20200612154940-31a47105fe3d
+## explicit
 github.com/mudler/cobra-extensions
 # github.com/mudler/docker-companion v0.4.6-0.20200418093252-41846f112d87
+## explicit
 github.com/mudler/docker-companion/api
 # github.com/mudler/go-pluggable v0.0.0-20201113184918-d36448fc8f82
+## explicit
 github.com/mudler/go-pluggable
 # github.com/mudler/topsort v0.0.0-20201103161459-db5c7901c290
+## explicit
 github.com/mudler/topsort
 # github.com/nxadm/tail v1.4.4
 github.com/nxadm/tail
@@ -286,6 +501,7 @@ github.com/nxadm/tail/util
 github.com/nxadm/tail/watch
 github.com/nxadm/tail/winfile
 # github.com/onsi/ginkgo v1.14.2
+## explicit
 github.com/onsi/ginkgo
 github.com/onsi/ginkgo/config
 github.com/onsi/ginkgo/internal/codelocation
@@ -306,6 +522,7 @@ github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable
 github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty
 github.com/onsi/ginkgo/types
 # github.com/onsi/gomega v1.10.3
+## explicit
 github.com/onsi/gomega
 github.com/onsi/gomega/format
 github.com/onsi/gomega/internal/assertion
@@ -336,30 +553,42 @@ github.com/openSUSE/umoci/third_party/user
 # github.com/opencontainers/go-digest v1.0.0
 github.com/opencontainers/go-digest
 # github.com/opencontainers/image-spec v1.0.1
+github.com/opencontainers/image-spec/identity
 github.com/opencontainers/image-spec/specs-go
 github.com/opencontainers/image-spec/specs-go/v1
-# github.com/opencontainers/runc v0.1.1
+# github.com/opencontainers/runc v1.0.0-rc9.0.20200221051241-688cf6d43cc4 => github.com/opencontainers/runc v1.0.0-rc9.0.20200221051241-688cf6d43cc4
 github.com/opencontainers/runc/libcontainer/system
 github.com/opencontainers/runc/libcontainer/user
 # github.com/opencontainers/runtime-spec v1.0.1
 github.com/opencontainers/runtime-spec/specs-go
+# github.com/opentracing-contrib/go-stdlib v0.0.0-20180702182724-07a764486eb1
+github.com/opentracing-contrib/go-stdlib/nethttp
+# github.com/opentracing/opentracing-go v1.1.0
+github.com/opentracing/opentracing-go
+github.com/opentracing/opentracing-go/ext
+github.com/opentracing/opentracing-go/log
 # github.com/otiai10/copy v1.2.1-0.20200916181228-26f84a0b1578
+## explicit
 github.com/otiai10/copy
-# github.com/pelletier/go-toml v1.6.0
+# github.com/pelletier/go-toml v1.2.0
 github.com/pelletier/go-toml
 # github.com/philopon/go-toposort v0.0.0-20170620085441-9be86dbd762f
+## explicit
 github.com/philopon/go-toposort
 # github.com/pkg/errors v0.9.1
+## explicit
 github.com/pkg/errors
 # github.com/rootless-containers/proto v0.1.0
 github.com/rootless-containers/proto/go-proto
 # github.com/russross/blackfriday/v2 v2.0.1
 github.com/russross/blackfriday/v2
 # github.com/schollz/progressbar/v3 v3.7.1
+## explicit
 github.com/schollz/progressbar/v3
 # github.com/shurcooL/sanitized_anchor_name v1.0.0
 github.com/shurcooL/sanitized_anchor_name
 # github.com/sirupsen/logrus v1.6.0
+## explicit
 github.com/sirupsen/logrus
 # github.com/spf13/afero v1.2.2
 github.com/spf13/afero
@@ -367,16 +596,24 @@ github.com/spf13/afero/mem
 # github.com/spf13/cast v1.3.1
 github.com/spf13/cast
 # github.com/spf13/cobra v1.0.0
+## explicit
 github.com/spf13/cobra
 # github.com/spf13/jwalterweatherman v1.1.0
 github.com/spf13/jwalterweatherman
 # github.com/spf13/pflag v1.0.5
 github.com/spf13/pflag
-# github.com/spf13/viper v1.6.3
+# github.com/spf13/viper v1.7.0
+## explicit
 github.com/spf13/viper
 # github.com/subosito/gotenv v1.2.0
 github.com/subosito/gotenv
-# github.com/urfave/cli v1.22.1
+# github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2
+github.com/syndtr/gocapability/capability
+# github.com/tonistiigi/fsutil v0.0.0-20200326231323-c2c7d7b0e144
+github.com/tonistiigi/fsutil
+github.com/tonistiigi/fsutil/copy
+github.com/tonistiigi/fsutil/types
+# github.com/urfave/cli v1.22.2
 github.com/urfave/cli
 # github.com/vbatts/go-mtree v0.4.4
 github.com/vbatts/go-mtree
@@ -388,7 +625,8 @@ github.com/xeipuuv/gojsonpointer
 github.com/xeipuuv/gojsonreference
 # github.com/xeipuuv/gojsonschema v1.2.0
 github.com/xeipuuv/gojsonschema
-# go.etcd.io/bbolt v1.3.4
+# go.etcd.io/bbolt v1.3.5
+## explicit
 go.etcd.io/bbolt
 # go.mongodb.org/mongo-driver v1.1.2
 go.mongodb.org/mongo-driver/bson
@@ -404,12 +642,15 @@ go.opencensus.io/trace
 go.opencensus.io/trace/internal
 go.opencensus.io/trace/tracestate
 # go.uber.org/atomic v1.5.1
+## explicit
 go.uber.org/atomic
 # go.uber.org/multierr v1.4.0
+## explicit
 go.uber.org/multierr
 # go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee
 go.uber.org/tools/update-license
 # go.uber.org/zap v1.13.0
+## explicit
 go.uber.org/zap
 go.uber.org/zap/buffer
 go.uber.org/zap/internal/bufferpool
@@ -438,11 +679,15 @@ golang.org/x/net/http/httpguts
 golang.org/x/net/http2
 golang.org/x/net/http2/hpack
 golang.org/x/net/idna
+golang.org/x/net/internal/timeseries
+golang.org/x/net/trace
 # golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d
 golang.org/x/oauth2
 golang.org/x/oauth2/internal
 # golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208
+## explicit
 golang.org/x/sync/errgroup
+golang.org/x/sync/semaphore
 # golang.org/x/sys v0.0.0-20201113135734-0a15ea8d9b02
 golang.org/x/sys/internal/unsafeheader
 golang.org/x/sys/unix
@@ -507,9 +752,47 @@ google.golang.org/appengine/urlfetch
 # google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece
 google.golang.org/genproto/googleapis/rpc/status
 # google.golang.org/grpc v1.29.1
+google.golang.org/grpc
+google.golang.org/grpc/attributes
+google.golang.org/grpc/backoff
+google.golang.org/grpc/balancer
+google.golang.org/grpc/balancer/base
+google.golang.org/grpc/balancer/roundrobin
+google.golang.org/grpc/binarylog/grpc_binarylog_v1
 google.golang.org/grpc/codes
+google.golang.org/grpc/connectivity
+google.golang.org/grpc/credentials
+google.golang.org/grpc/credentials/internal
+google.golang.org/grpc/encoding
+google.golang.org/grpc/encoding/proto
+google.golang.org/grpc/grpclog
+google.golang.org/grpc/health
+google.golang.org/grpc/health/grpc_health_v1
+google.golang.org/grpc/internal
+google.golang.org/grpc/internal/backoff
+google.golang.org/grpc/internal/balancerload
+google.golang.org/grpc/internal/binarylog
+google.golang.org/grpc/internal/buffer
+google.golang.org/grpc/internal/channelz
+google.golang.org/grpc/internal/envconfig
+google.golang.org/grpc/internal/grpclog
+google.golang.org/grpc/internal/grpcrand
+google.golang.org/grpc/internal/grpcsync
+google.golang.org/grpc/internal/grpcutil
+google.golang.org/grpc/internal/resolver/dns
+google.golang.org/grpc/internal/resolver/passthrough
 google.golang.org/grpc/internal/status
+google.golang.org/grpc/internal/syscall
+google.golang.org/grpc/internal/transport
+google.golang.org/grpc/keepalive
+google.golang.org/grpc/metadata
+google.golang.org/grpc/naming
+google.golang.org/grpc/peer
+google.golang.org/grpc/resolver
+google.golang.org/grpc/serviceconfig
+google.golang.org/grpc/stats
 google.golang.org/grpc/status
+google.golang.org/grpc/tap
 # google.golang.org/protobuf v1.24.0
 google.golang.org/protobuf/encoding/prototext
 google.golang.org/protobuf/encoding/protowire
@@ -548,8 +831,10 @@ gopkg.in/ini.v1
 # gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7
 gopkg.in/tomb.v1
 # gopkg.in/yaml.v2 v2.3.0
+## explicit
 gopkg.in/yaml.v2
 # helm.sh/helm/v3 v3.3.4
+## explicit
 helm.sh/helm/v3/internal/ignore
 helm.sh/helm/v3/internal/sympath
 helm.sh/helm/v3/internal/version
@@ -697,3 +982,8 @@ k8s.io/utils/pointer
 sigs.k8s.io/structured-merge-diff/v3/value
 # sigs.k8s.io/yaml v1.2.0
 sigs.k8s.io/yaml
+# github.com/docker/docker => github.com/Luet-lab/moby v17.12.0-ce-rc1.0.20200605210607-749178b8f80d+incompatible
+# github.com/containerd/containerd => github.com/containerd/containerd v1.3.1-0.20200227195959-4d242818bf55
+# github.com/hashicorp/go-immutable-radix => github.com/tonistiigi/go-immutable-radix v0.0.0-20170803185627-826af9ccf0fe
+# github.com/jaguilar/vt100 => github.com/tonistiigi/vt100 v0.0.0-20190402012908-ad4c4a574305
+# github.com/opencontainers/runc => github.com/opencontainers/runc v1.0.0-rc9.0.20200221051241-688cf6d43cc4