From 48f1dd042f20c20ce1f51b9b6dc8e7275670283c Mon Sep 17 00:00:00 2001
From: Ian Campbell
Date: Thu, 6 Jul 2017 17:51:43 +0100
Subject: [PATCH 1/4] Build for Darwin and Windows in CI

Signed-off-by: Ian Campbell
---
 circle.yml | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/circle.yml b/circle.yml
index 695a61395..93d250722 100644
--- a/circle.yml
+++ b/circle.yml
@@ -8,4 +8,6 @@ jobs:
     steps:
       - checkout
       - run: go get github.com/golang/lint/golint
-      - run: cd $GOPATH/src/github.com/moby/tool && make test
\ No newline at end of file
+      - run: cd $GOPATH/src/github.com/moby/tool && make test
+      - run: cd $GOPATH/src/github.com/moby/tool && make clean && make GOOS=darwin
+      - run: cd $GOPATH/src/github.com/moby/tool && make clean && make GOOS=windows

From 5e75f8ca8b3b5c87e43826cecacf7c3f48b43bc4 Mon Sep 17 00:00:00 2001
From: Ian Campbell
Date: Thu, 6 Jul 2017 18:36:23 +0100
Subject: [PATCH 2/4] Only run `go test` and `go vet` if not cross building
 (i.e. ${GOOS} is unset)

Both targets try to run things on the build host and hence cannot be used
when cross building.

Signed-off-by: Ian Campbell
---
 Makefile | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/Makefile b/Makefile
index 50c65fb3a..b7cc10892 100644
--- a/Makefile
+++ b/Makefile
@@ -15,10 +15,12 @@ lint:
 	@test -z "$(shell find . -type f -name "*.go" -not -path "./vendor/*" -not -name "*.pb.*" -exec golint {} \; | tee /dev/stderr)"
 	# gofmt
 	@test -z "$$(gofmt -s -l .| grep -v .pb. | grep -v vendor/ | tee /dev/stderr)"
+ifeq ($(GOOS),)
 	# govet
 	@test -z "$$(go tool vet -printf=false . 2>&1 | grep -v vendor/ | tee /dev/stderr)"
 	# go test
 	@go test github.com/moby/tool/src/moby
+endif
 
 test: moby
 	./moby build -output tar test/test.yml

From 3c5fad45392cc8b1310b0b5d85610906d9cf9d52 Mon Sep 17 00:00:00 2001
From: Ian Campbell
Date: Mon, 10 Jul 2017 14:38:21 +0100
Subject: [PATCH 3/4] Remove dependency on docker/docker/cli

We were pulling in this whole stack of packages just for
`trust.ReleasesRole`. Define it locally instead.
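For context, the local replacement is tiny. A minimal sketch of what "define it
locally" amounts to, mirroring the hunk added to src/moby/trust.go later in this
patch (the notary tuf/data import is already present in that file):

    package moby

    import (
    	"path"

    	"github.com/docker/notary/tuf/data"
    )

    // ReleasesRole stands in for docker/docker/cli/trust.ReleasesRole:
    // the delegation role named "releases" under the canonical targets role.
    var ReleasesRole = path.Join(data.CanonicalTargetsRole, "releases")

With that single variable in place, the entire vendored docker/docker/cli tree
and its transitive dependencies (cobra, pflag, go-ansiterm, ...) can be dropped,
which is what the large deletion below does.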
Signed-off-by: Ian Campbell --- src/moby/trust.go | 11 +- vendor.conf | 6 - vendor/github.com/Azure/go-ansiterm/LICENSE | 21 - vendor/github.com/Azure/go-ansiterm/README.md | 9 - .../github.com/Azure/go-ansiterm/constants.go | 188 --- .../github.com/Azure/go-ansiterm/context.go | 7 - .../Azure/go-ansiterm/csi_entry_state.go | 49 - .../Azure/go-ansiterm/csi_param_state.go | 38 - .../go-ansiterm/escape_intermediate_state.go | 36 - .../Azure/go-ansiterm/escape_state.go | 47 - .../Azure/go-ansiterm/event_handler.go | 90 -- .../Azure/go-ansiterm/ground_state.go | 24 - .../Azure/go-ansiterm/osc_string_state.go | 31 - vendor/github.com/Azure/go-ansiterm/parser.go | 136 -- .../go-ansiterm/parser_action_helpers.go | 103 -- .../Azure/go-ansiterm/parser_actions.go | 122 -- vendor/github.com/Azure/go-ansiterm/states.go | 71 - .../github.com/Azure/go-ansiterm/utilities.go | 21 - .../Azure/go-ansiterm/winterm/ansi.go | 182 --- .../Azure/go-ansiterm/winterm/api.go | 322 ---- .../go-ansiterm/winterm/attr_translation.go | 100 -- .../go-ansiterm/winterm/cursor_helpers.go | 101 -- .../go-ansiterm/winterm/erase_helpers.go | 84 - .../go-ansiterm/winterm/scroll_helper.go | 118 -- .../Azure/go-ansiterm/winterm/utilities.go | 9 - .../go-ansiterm/winterm/win_event_handler.go | 726 --------- vendor/github.com/Nvveen/Gotty/LICENSE | 26 - vendor/github.com/Nvveen/Gotty/README | 5 - vendor/github.com/Nvveen/Gotty/attributes.go | 514 ------- vendor/github.com/Nvveen/Gotty/gotty.go | 238 --- vendor/github.com/Nvveen/Gotty/parser.go | 362 ----- vendor/github.com/Nvveen/Gotty/types.go | 23 - .../client/client.go | 87 -- .../client/command.go | 37 - .../credentials/credentials.go | 150 -- .../credentials/error.go | 37 - .../credentials/helper.go | 14 - .../docker/docker/cli/command/cli.go | 310 ---- .../docker/docker/cli/command/events_utils.go | 49 - .../docker/docker/cli/command/in.go | 56 - .../docker/docker/cli/command/out.go | 50 - .../docker/docker/cli/command/registry.go | 187 --- .../docker/docker/cli/command/stream.go | 34 - .../docker/docker/cli/command/trust.go | 43 - .../docker/docker/cli/command/utils.go | 119 -- .../docker/docker/cli/config/config.go | 120 -- .../docker/cli/config/configfile/file.go | 189 --- .../cli/config/credentials/credentials.go | 17 - .../cli/config/credentials/default_store.go | 22 - .../credentials/default_store_darwin.go | 3 - .../config/credentials/default_store_linux.go | 3 - .../credentials/default_store_unsupported.go | 5 - .../credentials/default_store_windows.go | 3 - .../cli/config/credentials/file_store.go | 53 - .../cli/config/credentials/native_store.go | 144 -- .../docker/docker/cli/flags/client.go | 13 - .../docker/docker/cli/flags/common.go | 120 -- .../docker/docker/cli/trust/trust.go | 232 --- .../docker/docker/dockerversion/useragent.go | 76 - .../docker/dockerversion/version_lib.go | 16 - vendor/github.com/docker/docker/opts/env.go | 46 - vendor/github.com/docker/docker/opts/hosts.go | 165 -- .../docker/docker/opts/hosts_unix.go | 8 - .../docker/docker/opts/hosts_windows.go | 6 - vendor/github.com/docker/docker/opts/ip.go | 47 - vendor/github.com/docker/docker/opts/mount.go | 173 --- vendor/github.com/docker/docker/opts/opts.go | 488 ------ .../docker/docker/opts/opts_unix.go | 6 - .../docker/docker/opts/opts_windows.go | 56 - vendor/github.com/docker/docker/opts/port.go | 162 -- .../docker/docker/opts/quotedstring.go | 37 - .../github.com/docker/docker/opts/runtime.go | 79 - .../github.com/docker/docker/opts/secret.go | 103 -- 
.../docker/docker/opts/throttledevice.go | 111 -- .../github.com/docker/docker/opts/ulimit.go | 57 - .../docker/docker/opts/weightdevice.go | 89 -- .../docker/docker/pkg/homedir/homedir.go | 39 - .../docker/pkg/homedir/homedir_linux.go | 23 - .../docker/pkg/homedir/homedir_others.go | 13 - .../docker/docker/pkg/httputils/httputils.go | 56 - .../docker/docker/pkg/httputils/mimetype.go | 29 - .../pkg/httputils/resumablerequestreader.go | 96 -- .../docker/docker/pkg/idtools/idtools.go | 197 --- .../docker/docker/pkg/idtools/idtools_unix.go | 204 --- .../docker/pkg/idtools/idtools_windows.go | 25 - .../docker/pkg/idtools/usergroupadd_linux.go | 164 -- .../pkg/idtools/usergroupadd_unsupported.go | 12 - .../docker/docker/pkg/idtools/utils_unix.go | 32 - .../docker/docker/pkg/jsonlog/jsonlog.go | 42 - .../docker/pkg/jsonlog/jsonlog_marshalling.go | 178 --- .../docker/docker/pkg/jsonlog/jsonlogbytes.go | 122 -- .../docker/pkg/jsonlog/time_marshalling.go | 27 - .../docker/pkg/jsonmessage/jsonmessage.go | 297 ---- .../docker/pkg/parsers/kernel/kernel.go | 74 - .../pkg/parsers/kernel/kernel_darwin.go | 56 - .../docker/pkg/parsers/kernel/kernel_unix.go | 45 - .../pkg/parsers/kernel/kernel_windows.go | 69 - .../docker/pkg/parsers/kernel/uname_linux.go | 19 - .../pkg/parsers/kernel/uname_solaris.go | 14 - .../pkg/parsers/kernel/uname_unsupported.go | 18 - .../docker/docker/pkg/random/random.go | 71 - .../docker/docker/pkg/stringid/README.md | 1 - .../docker/docker/pkg/stringid/stringid.go | 81 - .../docker/pkg/tarsum/builder_context.go | 21 - .../docker/docker/pkg/tarsum/fileinfosums.go | 126 -- .../docker/docker/pkg/tarsum/tarsum.go | 295 ---- .../docker/docker/pkg/tarsum/versioning.go | 158 -- .../docker/docker/pkg/tarsum/writercloser.go | 22 - .../docker/docker/pkg/term/ascii.go | 66 - .../docker/docker/pkg/term/tc_linux_cgo.go | 50 - .../docker/docker/pkg/term/tc_other.go | 20 - .../docker/docker/pkg/term/tc_solaris_cgo.go | 63 - .../github.com/docker/docker/pkg/term/term.go | 123 -- .../docker/docker/pkg/term/term_solaris.go | 41 - .../docker/docker/pkg/term/term_unix.go | 29 - .../docker/docker/pkg/term/term_windows.go | 233 --- .../docker/docker/pkg/term/termios_darwin.go | 69 - .../docker/docker/pkg/term/termios_freebsd.go | 69 - .../docker/docker/pkg/term/termios_linux.go | 47 - .../docker/docker/pkg/term/termios_openbsd.go | 69 - .../docker/pkg/term/windows/ansi_reader.go | 263 ---- .../docker/pkg/term/windows/ansi_writer.go | 64 - .../docker/docker/pkg/term/windows/console.go | 35 - .../docker/docker/pkg/term/windows/windows.go | 33 - .../docker/docker/pkg/useragent/README.md | 1 - .../docker/docker/pkg/useragent/useragent.go | 55 - .../github.com/docker/docker/registry/auth.go | 303 ---- .../docker/docker/registry/config.go | 389 ----- .../docker/docker/registry/config_unix.go | 25 - .../docker/docker/registry/config_windows.go | 25 - .../docker/docker/registry/endpoint_v1.go | 198 --- .../docker/docker/registry/registry.go | 191 --- .../docker/docker/registry/service.go | 313 ---- .../docker/docker/registry/service_v1.go | 40 - .../docker/docker/registry/service_v2.go | 78 - .../docker/docker/registry/session.go | 783 ---------- .../docker/docker/registry/types.go | 70 - .../docker/notary/passphrase/passphrase.go | 205 --- .../inconshreveable/mousetrap/LICENSE | 13 - .../inconshreveable/mousetrap/README.md | 23 - .../inconshreveable/mousetrap/trap_others.go | 15 - .../inconshreveable/mousetrap/trap_windows.go | 98 -- .../mousetrap/trap_windows_1.4.go | 46 - 
vendor/github.com/mattn/go-shellwords/LICENSE | 21 - .../github.com/mattn/go-shellwords/README.md | 47 - .../mattn/go-shellwords/shellwords.go | 145 -- .../mattn/go-shellwords/util_posix.go | 19 - .../mattn/go-shellwords/util_windows.go | 17 - .../runc/libcontainer/user/lookup.go | 111 -- .../runc/libcontainer/user/lookup_unix.go | 30 - .../libcontainer/user/lookup_unsupported.go | 21 - .../runc/libcontainer/user/user.go | 441 ------ vendor/github.com/spf13/cobra/LICENSE.txt | 174 --- vendor/github.com/spf13/cobra/README.md | 928 ----------- vendor/github.com/spf13/cobra/args.go | 98 -- .../spf13/cobra/bash_completions.go | 630 -------- vendor/github.com/spf13/cobra/cobra.go | 175 --- vendor/github.com/spf13/cobra/command.go | 1359 ----------------- .../github.com/spf13/cobra/command_notwin.go | 5 - vendor/github.com/spf13/cobra/command_win.go | 26 - vendor/github.com/spf13/pflag/LICENSE | 28 - vendor/github.com/spf13/pflag/README.md | 296 ---- vendor/github.com/spf13/pflag/bool.go | 94 -- vendor/github.com/spf13/pflag/bool_slice.go | 147 -- vendor/github.com/spf13/pflag/count.go | 96 -- vendor/github.com/spf13/pflag/duration.go | 86 -- vendor/github.com/spf13/pflag/flag.go | 1107 -------------- vendor/github.com/spf13/pflag/float32.go | 88 -- vendor/github.com/spf13/pflag/float64.go | 84 - vendor/github.com/spf13/pflag/golangflag.go | 101 -- vendor/github.com/spf13/pflag/int.go | 84 - vendor/github.com/spf13/pflag/int32.go | 88 -- vendor/github.com/spf13/pflag/int64.go | 84 - vendor/github.com/spf13/pflag/int8.go | 88 -- vendor/github.com/spf13/pflag/int_slice.go | 128 -- vendor/github.com/spf13/pflag/ip.go | 94 -- vendor/github.com/spf13/pflag/ip_slice.go | 148 -- vendor/github.com/spf13/pflag/ipmask.go | 122 -- vendor/github.com/spf13/pflag/ipnet.go | 98 -- vendor/github.com/spf13/pflag/string.go | 80 - vendor/github.com/spf13/pflag/string_array.go | 103 -- vendor/github.com/spf13/pflag/string_slice.go | 129 -- vendor/github.com/spf13/pflag/uint.go | 88 -- vendor/github.com/spf13/pflag/uint16.go | 88 -- vendor/github.com/spf13/pflag/uint32.go | 88 -- vendor/github.com/spf13/pflag/uint64.go | 88 -- vendor/github.com/spf13/pflag/uint8.go | 88 -- vendor/github.com/spf13/pflag/uint_slice.go | 126 -- 188 files changed, 8 insertions(+), 22591 deletions(-) delete mode 100644 vendor/github.com/Azure/go-ansiterm/LICENSE delete mode 100644 vendor/github.com/Azure/go-ansiterm/README.md delete mode 100644 vendor/github.com/Azure/go-ansiterm/constants.go delete mode 100644 vendor/github.com/Azure/go-ansiterm/context.go delete mode 100644 vendor/github.com/Azure/go-ansiterm/csi_entry_state.go delete mode 100644 vendor/github.com/Azure/go-ansiterm/csi_param_state.go delete mode 100644 vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go delete mode 100644 vendor/github.com/Azure/go-ansiterm/escape_state.go delete mode 100644 vendor/github.com/Azure/go-ansiterm/event_handler.go delete mode 100644 vendor/github.com/Azure/go-ansiterm/ground_state.go delete mode 100644 vendor/github.com/Azure/go-ansiterm/osc_string_state.go delete mode 100644 vendor/github.com/Azure/go-ansiterm/parser.go delete mode 100644 vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go delete mode 100644 vendor/github.com/Azure/go-ansiterm/parser_actions.go delete mode 100644 vendor/github.com/Azure/go-ansiterm/states.go delete mode 100644 vendor/github.com/Azure/go-ansiterm/utilities.go delete mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/ansi.go delete mode 100644 
vendor/github.com/Azure/go-ansiterm/winterm/api.go delete mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go delete mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go delete mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go delete mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go delete mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/utilities.go delete mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go delete mode 100644 vendor/github.com/Nvveen/Gotty/LICENSE delete mode 100644 vendor/github.com/Nvveen/Gotty/README delete mode 100644 vendor/github.com/Nvveen/Gotty/attributes.go delete mode 100644 vendor/github.com/Nvveen/Gotty/gotty.go delete mode 100644 vendor/github.com/Nvveen/Gotty/parser.go delete mode 100644 vendor/github.com/Nvveen/Gotty/types.go delete mode 100644 vendor/github.com/docker/docker-credential-helpers/client/client.go delete mode 100644 vendor/github.com/docker/docker-credential-helpers/client/command.go delete mode 100644 vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go delete mode 100644 vendor/github.com/docker/docker-credential-helpers/credentials/error.go delete mode 100644 vendor/github.com/docker/docker-credential-helpers/credentials/helper.go delete mode 100644 vendor/github.com/docker/docker/cli/command/cli.go delete mode 100644 vendor/github.com/docker/docker/cli/command/events_utils.go delete mode 100644 vendor/github.com/docker/docker/cli/command/in.go delete mode 100644 vendor/github.com/docker/docker/cli/command/out.go delete mode 100644 vendor/github.com/docker/docker/cli/command/registry.go delete mode 100644 vendor/github.com/docker/docker/cli/command/stream.go delete mode 100644 vendor/github.com/docker/docker/cli/command/trust.go delete mode 100644 vendor/github.com/docker/docker/cli/command/utils.go delete mode 100644 vendor/github.com/docker/docker/cli/config/config.go delete mode 100644 vendor/github.com/docker/docker/cli/config/configfile/file.go delete mode 100644 vendor/github.com/docker/docker/cli/config/credentials/credentials.go delete mode 100644 vendor/github.com/docker/docker/cli/config/credentials/default_store.go delete mode 100644 vendor/github.com/docker/docker/cli/config/credentials/default_store_darwin.go delete mode 100644 vendor/github.com/docker/docker/cli/config/credentials/default_store_linux.go delete mode 100644 vendor/github.com/docker/docker/cli/config/credentials/default_store_unsupported.go delete mode 100644 vendor/github.com/docker/docker/cli/config/credentials/default_store_windows.go delete mode 100644 vendor/github.com/docker/docker/cli/config/credentials/file_store.go delete mode 100644 vendor/github.com/docker/docker/cli/config/credentials/native_store.go delete mode 100644 vendor/github.com/docker/docker/cli/flags/client.go delete mode 100644 vendor/github.com/docker/docker/cli/flags/common.go delete mode 100644 vendor/github.com/docker/docker/cli/trust/trust.go delete mode 100644 vendor/github.com/docker/docker/dockerversion/useragent.go delete mode 100644 vendor/github.com/docker/docker/dockerversion/version_lib.go delete mode 100644 vendor/github.com/docker/docker/opts/env.go delete mode 100644 vendor/github.com/docker/docker/opts/hosts.go delete mode 100644 vendor/github.com/docker/docker/opts/hosts_unix.go delete mode 100644 vendor/github.com/docker/docker/opts/hosts_windows.go delete mode 100644 vendor/github.com/docker/docker/opts/ip.go delete mode 
100644 vendor/github.com/docker/docker/opts/mount.go delete mode 100644 vendor/github.com/docker/docker/opts/opts.go delete mode 100644 vendor/github.com/docker/docker/opts/opts_unix.go delete mode 100644 vendor/github.com/docker/docker/opts/opts_windows.go delete mode 100644 vendor/github.com/docker/docker/opts/port.go delete mode 100644 vendor/github.com/docker/docker/opts/quotedstring.go delete mode 100644 vendor/github.com/docker/docker/opts/runtime.go delete mode 100644 vendor/github.com/docker/docker/opts/secret.go delete mode 100644 vendor/github.com/docker/docker/opts/throttledevice.go delete mode 100644 vendor/github.com/docker/docker/opts/ulimit.go delete mode 100644 vendor/github.com/docker/docker/opts/weightdevice.go delete mode 100644 vendor/github.com/docker/docker/pkg/homedir/homedir.go delete mode 100644 vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go delete mode 100644 vendor/github.com/docker/docker/pkg/homedir/homedir_others.go delete mode 100644 vendor/github.com/docker/docker/pkg/httputils/httputils.go delete mode 100644 vendor/github.com/docker/docker/pkg/httputils/mimetype.go delete mode 100644 vendor/github.com/docker/docker/pkg/httputils/resumablerequestreader.go delete mode 100644 vendor/github.com/docker/docker/pkg/idtools/idtools.go delete mode 100644 vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go delete mode 100644 vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go delete mode 100644 vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go delete mode 100644 vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go delete mode 100644 vendor/github.com/docker/docker/pkg/idtools/utils_unix.go delete mode 100644 vendor/github.com/docker/docker/pkg/jsonlog/jsonlog.go delete mode 100644 vendor/github.com/docker/docker/pkg/jsonlog/jsonlog_marshalling.go delete mode 100644 vendor/github.com/docker/docker/pkg/jsonlog/jsonlogbytes.go delete mode 100644 vendor/github.com/docker/docker/pkg/jsonlog/time_marshalling.go delete mode 100644 vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go delete mode 100644 vendor/github.com/docker/docker/pkg/parsers/kernel/kernel.go delete mode 100644 vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_darwin.go delete mode 100644 vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_unix.go delete mode 100644 vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go delete mode 100644 vendor/github.com/docker/docker/pkg/parsers/kernel/uname_linux.go delete mode 100644 vendor/github.com/docker/docker/pkg/parsers/kernel/uname_solaris.go delete mode 100644 vendor/github.com/docker/docker/pkg/parsers/kernel/uname_unsupported.go delete mode 100644 vendor/github.com/docker/docker/pkg/random/random.go delete mode 100644 vendor/github.com/docker/docker/pkg/stringid/README.md delete mode 100644 vendor/github.com/docker/docker/pkg/stringid/stringid.go delete mode 100644 vendor/github.com/docker/docker/pkg/tarsum/builder_context.go delete mode 100644 vendor/github.com/docker/docker/pkg/tarsum/fileinfosums.go delete mode 100644 vendor/github.com/docker/docker/pkg/tarsum/tarsum.go delete mode 100644 vendor/github.com/docker/docker/pkg/tarsum/versioning.go delete mode 100644 vendor/github.com/docker/docker/pkg/tarsum/writercloser.go delete mode 100644 vendor/github.com/docker/docker/pkg/term/ascii.go delete mode 100644 vendor/github.com/docker/docker/pkg/term/tc_linux_cgo.go delete mode 100644 vendor/github.com/docker/docker/pkg/term/tc_other.go delete mode 
100644 vendor/github.com/docker/docker/pkg/term/tc_solaris_cgo.go delete mode 100644 vendor/github.com/docker/docker/pkg/term/term.go delete mode 100644 vendor/github.com/docker/docker/pkg/term/term_solaris.go delete mode 100644 vendor/github.com/docker/docker/pkg/term/term_unix.go delete mode 100644 vendor/github.com/docker/docker/pkg/term/term_windows.go delete mode 100644 vendor/github.com/docker/docker/pkg/term/termios_darwin.go delete mode 100644 vendor/github.com/docker/docker/pkg/term/termios_freebsd.go delete mode 100644 vendor/github.com/docker/docker/pkg/term/termios_linux.go delete mode 100644 vendor/github.com/docker/docker/pkg/term/termios_openbsd.go delete mode 100644 vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go delete mode 100644 vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go delete mode 100644 vendor/github.com/docker/docker/pkg/term/windows/console.go delete mode 100644 vendor/github.com/docker/docker/pkg/term/windows/windows.go delete mode 100644 vendor/github.com/docker/docker/pkg/useragent/README.md delete mode 100644 vendor/github.com/docker/docker/pkg/useragent/useragent.go delete mode 100644 vendor/github.com/docker/docker/registry/auth.go delete mode 100644 vendor/github.com/docker/docker/registry/config.go delete mode 100644 vendor/github.com/docker/docker/registry/config_unix.go delete mode 100644 vendor/github.com/docker/docker/registry/config_windows.go delete mode 100644 vendor/github.com/docker/docker/registry/endpoint_v1.go delete mode 100644 vendor/github.com/docker/docker/registry/registry.go delete mode 100644 vendor/github.com/docker/docker/registry/service.go delete mode 100644 vendor/github.com/docker/docker/registry/service_v1.go delete mode 100644 vendor/github.com/docker/docker/registry/service_v2.go delete mode 100644 vendor/github.com/docker/docker/registry/session.go delete mode 100644 vendor/github.com/docker/docker/registry/types.go delete mode 100644 vendor/github.com/docker/notary/passphrase/passphrase.go delete mode 100644 vendor/github.com/inconshreveable/mousetrap/LICENSE delete mode 100644 vendor/github.com/inconshreveable/mousetrap/README.md delete mode 100644 vendor/github.com/inconshreveable/mousetrap/trap_others.go delete mode 100644 vendor/github.com/inconshreveable/mousetrap/trap_windows.go delete mode 100644 vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go delete mode 100644 vendor/github.com/mattn/go-shellwords/LICENSE delete mode 100644 vendor/github.com/mattn/go-shellwords/README.md delete mode 100644 vendor/github.com/mattn/go-shellwords/shellwords.go delete mode 100644 vendor/github.com/mattn/go-shellwords/util_posix.go delete mode 100644 vendor/github.com/mattn/go-shellwords/util_windows.go delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/user/lookup.go delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unsupported.go delete mode 100644 vendor/github.com/opencontainers/runc/libcontainer/user/user.go delete mode 100644 vendor/github.com/spf13/cobra/LICENSE.txt delete mode 100644 vendor/github.com/spf13/cobra/README.md delete mode 100644 vendor/github.com/spf13/cobra/args.go delete mode 100644 vendor/github.com/spf13/cobra/bash_completions.go delete mode 100644 vendor/github.com/spf13/cobra/cobra.go delete mode 100644 vendor/github.com/spf13/cobra/command.go delete mode 100644 vendor/github.com/spf13/cobra/command_notwin.go delete mode 
100644 vendor/github.com/spf13/cobra/command_win.go
 delete mode 100644 vendor/github.com/spf13/pflag/LICENSE
 delete mode 100644 vendor/github.com/spf13/pflag/README.md
 delete mode 100644 vendor/github.com/spf13/pflag/bool.go
 delete mode 100644 vendor/github.com/spf13/pflag/bool_slice.go
 delete mode 100644 vendor/github.com/spf13/pflag/count.go
 delete mode 100644 vendor/github.com/spf13/pflag/duration.go
 delete mode 100644 vendor/github.com/spf13/pflag/flag.go
 delete mode 100644 vendor/github.com/spf13/pflag/float32.go
 delete mode 100644 vendor/github.com/spf13/pflag/float64.go
 delete mode 100644 vendor/github.com/spf13/pflag/golangflag.go
 delete mode 100644 vendor/github.com/spf13/pflag/int.go
 delete mode 100644 vendor/github.com/spf13/pflag/int32.go
 delete mode 100644 vendor/github.com/spf13/pflag/int64.go
 delete mode 100644 vendor/github.com/spf13/pflag/int8.go
 delete mode 100644 vendor/github.com/spf13/pflag/int_slice.go
 delete mode 100644 vendor/github.com/spf13/pflag/ip.go
 delete mode 100644 vendor/github.com/spf13/pflag/ip_slice.go
 delete mode 100644 vendor/github.com/spf13/pflag/ipmask.go
 delete mode 100644 vendor/github.com/spf13/pflag/ipnet.go
 delete mode 100644 vendor/github.com/spf13/pflag/string.go
 delete mode 100644 vendor/github.com/spf13/pflag/string_array.go
 delete mode 100644 vendor/github.com/spf13/pflag/string_slice.go
 delete mode 100644 vendor/github.com/spf13/pflag/uint.go
 delete mode 100644 vendor/github.com/spf13/pflag/uint16.go
 delete mode 100644 vendor/github.com/spf13/pflag/uint32.go
 delete mode 100644 vendor/github.com/spf13/pflag/uint64.go
 delete mode 100644 vendor/github.com/spf13/pflag/uint8.go
 delete mode 100644 vendor/github.com/spf13/pflag/uint_slice.go

diff --git a/src/moby/trust.go b/src/moby/trust.go
index b5de5b9a3..57ad3331f 100644
--- a/src/moby/trust.go
+++ b/src/moby/trust.go
@@ -10,6 +10,7 @@ import (
 	"net"
 	"net/http"
 	"net/url"
+	"path"
 	"path/filepath"
 	"strings"
 	"time"
@@ -19,13 +20,17 @@ import (
 	"github.com/docker/distribution/registry/client/auth"
 	"github.com/docker/distribution/registry/client/auth/challenge"
 	"github.com/docker/distribution/registry/client/transport"
-	"github.com/docker/docker/cli/trust"
 	notaryClient "github.com/docker/notary/client"
 	"github.com/docker/notary/trustpinning"
 	"github.com/docker/notary/tuf/data"
 	"github.com/opencontainers/go-digest"
 )
 
+var (
+	// ReleasesRole is the role named "releases"
+	ReleasesRole = path.Join(data.CanonicalTargetsRole, "releases")
+)
+
 // TrustedReference parses an image string, and does a notary lookup to verify and retrieve the signed digest reference
 func TrustedReference(image string) (reference.Reference, error) {
 	ref, err := reference.ParseAnyReference(image)
@@ -75,13 +80,13 @@ func TrustedReference(image string) (reference.Reference, error) {
 	if err != nil {
 		return nil, err
 	}
-	target, err := nRepo.GetTargetByName(targetName, trust.ReleasesRole, data.CanonicalTargetsRole)
+	target, err := nRepo.GetTargetByName(targetName, ReleasesRole, data.CanonicalTargetsRole)
 	if err != nil {
 		return nil, err
 	}
 	// Only get the tag if it's in the top level targets role or the releases delegation role
 	// ignore it if it's in any other delegation roles
-	if target.Role != trust.ReleasesRole && target.Role != data.CanonicalTargetsRole {
+	if target.Role != ReleasesRole && target.Role != data.CanonicalTargetsRole {
 		return nil, errors.New("not signed in valid role")
 	}
 
diff --git a/vendor.conf b/vendor.conf
index 487af2e3a..33f8b6eb9 100644
--- a/vendor.conf
+++ b/vendor.conf
@@ -9,14 +9,10 @@
github.com/docker/go-units 651fc226e7441360384da338d0fd37f2440ffbe3 github.com/docker/libtrust 9cbd2a1374f46905c68a4eb3694a130610adc62a github.com/docker/notary v0.4.2 github.com/gorilla/mux 4c1c3952b7d9d0a061a3fa7b36fd373ba0398ebc -github.com/mattn/go-shellwords 02e3cf038dcea8290e44424da473dd12be796a8a -github.com/Nvveen/Gotty cd527374f1e5bff4938207604a14f2e38a9cf512 github.com/opencontainers/go-digest a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb github.com/opencontainers/runc 429a5387123625040bacfbb60d96b1cbd02293ab github.com/opencontainers/runtime-spec 198f23f827eea397d4331d7eb048d9d4c7ff7bee github.com/pkg/errors ff09b135c25aae272398c51a07235b90a75aa4f0 -github.com/spf13/cobra v1.5.1 https://github.com/dnephin/cobra.git -github.com/spf13/pflag c990990ab4981d84da820b7b00c85139ca150b5f github.com/surma/gocpio fcb68777e7dc4ea43ffce871b552c0d073c17495 github.com/xeipuuv/gojsonpointer 6fe8760cad3569743d51ddbb243b26f8456742dc github.com/xeipuuv/gojsonreference e02fc20de94c78484cd5ffb007f8af96be030a45 @@ -25,6 +21,4 @@ golang.org/x/crypto 573951cbe80bb6352881271bb276f48749eab6f4 golang.org/x/net a6577fac2d73be281a500b310739095313165611 golang.org/x/sys fb4cac33e3196ff7f507ab9b2d2a44b0142f5b5a gopkg.in/yaml.v2 a3f3340b5840cee44f372bddb5880fcbc419b46a -github.com/Azure/go-ansiterm 388960b655244e76e24c75f48631564eaefade62 github.com/Microsoft/go-winio v0.4.0 -github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 diff --git a/vendor/github.com/Azure/go-ansiterm/LICENSE b/vendor/github.com/Azure/go-ansiterm/LICENSE deleted file mode 100644 index e3d9a64d1..000000000 --- a/vendor/github.com/Azure/go-ansiterm/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Microsoft Corporation - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/Azure/go-ansiterm/README.md b/vendor/github.com/Azure/go-ansiterm/README.md deleted file mode 100644 index e25e38210..000000000 --- a/vendor/github.com/Azure/go-ansiterm/README.md +++ /dev/null @@ -1,9 +0,0 @@ -# go-ansiterm - -This is a cross platform Ansi Terminal Emulation library. It reads a stream of Ansi characters and produces the appropriate function calls. The results of the function calls are platform dependent. - -For example the parser might receive "ESC, [, A" as a stream of three characters. This is the code for Cursor Up (http://www.vt100.net/docs/vt510-rm/CUU). The parser then calls the cursor up function (CUU()) on an event handler. 
The event handler determines what platform specific work must be done to cause the cursor to move up one position. - -The parser (parser.go) is a partial implementation of this state machine (http://vt100.net/emu/vt500_parser.png). There are also two event handler implementations, one for tests (test_event_handler.go) to validate that the expected events are being produced and called, the other is a Windows implementation (winterm/win_event_handler.go). - -See parser_test.go for examples exercising the state machine and generating appropriate function calls. diff --git a/vendor/github.com/Azure/go-ansiterm/constants.go b/vendor/github.com/Azure/go-ansiterm/constants.go deleted file mode 100644 index 96504a33b..000000000 --- a/vendor/github.com/Azure/go-ansiterm/constants.go +++ /dev/null @@ -1,188 +0,0 @@ -package ansiterm - -const LogEnv = "DEBUG_TERMINAL" - -// ANSI constants -// References: -// -- http://www.ecma-international.org/publications/standards/Ecma-048.htm -// -- http://man7.org/linux/man-pages/man4/console_codes.4.html -// -- http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html -// -- http://en.wikipedia.org/wiki/ANSI_escape_code -// -- http://vt100.net/emu/dec_ansi_parser -// -- http://vt100.net/emu/vt500_parser.svg -// -- http://invisible-island.net/xterm/ctlseqs/ctlseqs.html -// -- http://www.inwap.com/pdp10/ansicode.txt -const ( - // ECMA-48 Set Graphics Rendition - // Note: - // -- Constants leading with an underscore (e.g., _ANSI_xxx) are unsupported or reserved - // -- Fonts could possibly be supported via SetCurrentConsoleFontEx - // -- Windows does not expose the per-window cursor (i.e., caret) blink times - ANSI_SGR_RESET = 0 - ANSI_SGR_BOLD = 1 - ANSI_SGR_DIM = 2 - _ANSI_SGR_ITALIC = 3 - ANSI_SGR_UNDERLINE = 4 - _ANSI_SGR_BLINKSLOW = 5 - _ANSI_SGR_BLINKFAST = 6 - ANSI_SGR_REVERSE = 7 - _ANSI_SGR_INVISIBLE = 8 - _ANSI_SGR_LINETHROUGH = 9 - _ANSI_SGR_FONT_00 = 10 - _ANSI_SGR_FONT_01 = 11 - _ANSI_SGR_FONT_02 = 12 - _ANSI_SGR_FONT_03 = 13 - _ANSI_SGR_FONT_04 = 14 - _ANSI_SGR_FONT_05 = 15 - _ANSI_SGR_FONT_06 = 16 - _ANSI_SGR_FONT_07 = 17 - _ANSI_SGR_FONT_08 = 18 - _ANSI_SGR_FONT_09 = 19 - _ANSI_SGR_FONT_10 = 20 - _ANSI_SGR_DOUBLEUNDERLINE = 21 - ANSI_SGR_BOLD_DIM_OFF = 22 - _ANSI_SGR_ITALIC_OFF = 23 - ANSI_SGR_UNDERLINE_OFF = 24 - _ANSI_SGR_BLINK_OFF = 25 - _ANSI_SGR_RESERVED_00 = 26 - ANSI_SGR_REVERSE_OFF = 27 - _ANSI_SGR_INVISIBLE_OFF = 28 - _ANSI_SGR_LINETHROUGH_OFF = 29 - ANSI_SGR_FOREGROUND_BLACK = 30 - ANSI_SGR_FOREGROUND_RED = 31 - ANSI_SGR_FOREGROUND_GREEN = 32 - ANSI_SGR_FOREGROUND_YELLOW = 33 - ANSI_SGR_FOREGROUND_BLUE = 34 - ANSI_SGR_FOREGROUND_MAGENTA = 35 - ANSI_SGR_FOREGROUND_CYAN = 36 - ANSI_SGR_FOREGROUND_WHITE = 37 - _ANSI_SGR_RESERVED_01 = 38 - ANSI_SGR_FOREGROUND_DEFAULT = 39 - ANSI_SGR_BACKGROUND_BLACK = 40 - ANSI_SGR_BACKGROUND_RED = 41 - ANSI_SGR_BACKGROUND_GREEN = 42 - ANSI_SGR_BACKGROUND_YELLOW = 43 - ANSI_SGR_BACKGROUND_BLUE = 44 - ANSI_SGR_BACKGROUND_MAGENTA = 45 - ANSI_SGR_BACKGROUND_CYAN = 46 - ANSI_SGR_BACKGROUND_WHITE = 47 - _ANSI_SGR_RESERVED_02 = 48 - ANSI_SGR_BACKGROUND_DEFAULT = 49 - // 50 - 65: Unsupported - - ANSI_MAX_CMD_LENGTH = 4096 - - MAX_INPUT_EVENTS = 128 - DEFAULT_WIDTH = 80 - DEFAULT_HEIGHT = 24 - - ANSI_BEL = 0x07 - ANSI_BACKSPACE = 0x08 - ANSI_TAB = 0x09 - ANSI_LINE_FEED = 0x0A - ANSI_VERTICAL_TAB = 0x0B - ANSI_FORM_FEED = 0x0C - ANSI_CARRIAGE_RETURN = 0x0D - ANSI_ESCAPE_PRIMARY = 0x1B - ANSI_ESCAPE_SECONDARY = 0x5B - ANSI_OSC_STRING_ENTRY = 0x5D - ANSI_COMMAND_FIRST = 0x40 - ANSI_COMMAND_LAST = 0x7E - 
DCS_ENTRY = 0x90 - CSI_ENTRY = 0x9B - OSC_STRING = 0x9D - ANSI_PARAMETER_SEP = ";" - ANSI_CMD_G0 = '(' - ANSI_CMD_G1 = ')' - ANSI_CMD_G2 = '*' - ANSI_CMD_G3 = '+' - ANSI_CMD_DECPNM = '>' - ANSI_CMD_DECPAM = '=' - ANSI_CMD_OSC = ']' - ANSI_CMD_STR_TERM = '\\' - - KEY_CONTROL_PARAM_2 = ";2" - KEY_CONTROL_PARAM_3 = ";3" - KEY_CONTROL_PARAM_4 = ";4" - KEY_CONTROL_PARAM_5 = ";5" - KEY_CONTROL_PARAM_6 = ";6" - KEY_CONTROL_PARAM_7 = ";7" - KEY_CONTROL_PARAM_8 = ";8" - KEY_ESC_CSI = "\x1B[" - KEY_ESC_N = "\x1BN" - KEY_ESC_O = "\x1BO" - - FILL_CHARACTER = ' ' -) - -func getByteRange(start byte, end byte) []byte { - bytes := make([]byte, 0, 32) - for i := start; i <= end; i++ { - bytes = append(bytes, byte(i)) - } - - return bytes -} - -var toGroundBytes = getToGroundBytes() -var executors = getExecuteBytes() - -// SPACE 20+A0 hex Always and everywhere a blank space -// Intermediate 20-2F hex !"#$%&'()*+,-./ -var intermeds = getByteRange(0x20, 0x2F) - -// Parameters 30-3F hex 0123456789:;<=>? -// CSI Parameters 30-39, 3B hex 0123456789; -var csiParams = getByteRange(0x30, 0x3F) - -var csiCollectables = append(getByteRange(0x30, 0x39), getByteRange(0x3B, 0x3F)...) - -// Uppercase 40-5F hex @ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_ -var upperCase = getByteRange(0x40, 0x5F) - -// Lowercase 60-7E hex `abcdefghijlkmnopqrstuvwxyz{|}~ -var lowerCase = getByteRange(0x60, 0x7E) - -// Alphabetics 40-7E hex (all of upper and lower case) -var alphabetics = append(upperCase, lowerCase...) - -var printables = getByteRange(0x20, 0x7F) - -var escapeIntermediateToGroundBytes = getByteRange(0x30, 0x7E) -var escapeToGroundBytes = getEscapeToGroundBytes() - -// See http://www.vt100.net/emu/vt500_parser.png for description of the complex -// byte ranges below - -func getEscapeToGroundBytes() []byte { - escapeToGroundBytes := getByteRange(0x30, 0x4F) - escapeToGroundBytes = append(escapeToGroundBytes, getByteRange(0x51, 0x57)...) - escapeToGroundBytes = append(escapeToGroundBytes, 0x59) - escapeToGroundBytes = append(escapeToGroundBytes, 0x5A) - escapeToGroundBytes = append(escapeToGroundBytes, 0x5C) - escapeToGroundBytes = append(escapeToGroundBytes, getByteRange(0x60, 0x7E)...) - return escapeToGroundBytes -} - -func getExecuteBytes() []byte { - executeBytes := getByteRange(0x00, 0x17) - executeBytes = append(executeBytes, 0x19) - executeBytes = append(executeBytes, getByteRange(0x1C, 0x1F)...) - return executeBytes -} - -func getToGroundBytes() []byte { - groundBytes := []byte{0x18} - groundBytes = append(groundBytes, 0x1A) - groundBytes = append(groundBytes, getByteRange(0x80, 0x8F)...) - groundBytes = append(groundBytes, getByteRange(0x91, 0x97)...) 
- groundBytes = append(groundBytes, 0x99) - groundBytes = append(groundBytes, 0x9A) - groundBytes = append(groundBytes, 0x9C) - return groundBytes -} - -// Delete 7F hex Always and everywhere ignored -// C1 Control 80-9F hex 32 additional control characters -// G1 Displayable A1-FE hex 94 additional displayable characters -// Special A0+FF hex Same as SPACE and DELETE diff --git a/vendor/github.com/Azure/go-ansiterm/context.go b/vendor/github.com/Azure/go-ansiterm/context.go deleted file mode 100644 index 8d66e777c..000000000 --- a/vendor/github.com/Azure/go-ansiterm/context.go +++ /dev/null @@ -1,7 +0,0 @@ -package ansiterm - -type ansiContext struct { - currentChar byte - paramBuffer []byte - interBuffer []byte -} diff --git a/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go b/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go deleted file mode 100644 index 1bd6057da..000000000 --- a/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go +++ /dev/null @@ -1,49 +0,0 @@ -package ansiterm - -type csiEntryState struct { - baseState -} - -func (csiState csiEntryState) Handle(b byte) (s state, e error) { - logger.Infof("CsiEntry::Handle %#x", b) - - nextState, err := csiState.baseState.Handle(b) - if nextState != nil || err != nil { - return nextState, err - } - - switch { - case sliceContains(alphabetics, b): - return csiState.parser.ground, nil - case sliceContains(csiCollectables, b): - return csiState.parser.csiParam, nil - case sliceContains(executors, b): - return csiState, csiState.parser.execute() - } - - return csiState, nil -} - -func (csiState csiEntryState) Transition(s state) error { - logger.Infof("CsiEntry::Transition %s --> %s", csiState.Name(), s.Name()) - csiState.baseState.Transition(s) - - switch s { - case csiState.parser.ground: - return csiState.parser.csiDispatch() - case csiState.parser.csiParam: - switch { - case sliceContains(csiParams, csiState.parser.context.currentChar): - csiState.parser.collectParam() - case sliceContains(intermeds, csiState.parser.context.currentChar): - csiState.parser.collectInter() - } - } - - return nil -} - -func (csiState csiEntryState) Enter() error { - csiState.parser.clear() - return nil -} diff --git a/vendor/github.com/Azure/go-ansiterm/csi_param_state.go b/vendor/github.com/Azure/go-ansiterm/csi_param_state.go deleted file mode 100644 index 4be35c5fd..000000000 --- a/vendor/github.com/Azure/go-ansiterm/csi_param_state.go +++ /dev/null @@ -1,38 +0,0 @@ -package ansiterm - -type csiParamState struct { - baseState -} - -func (csiState csiParamState) Handle(b byte) (s state, e error) { - logger.Infof("CsiParam::Handle %#x", b) - - nextState, err := csiState.baseState.Handle(b) - if nextState != nil || err != nil { - return nextState, err - } - - switch { - case sliceContains(alphabetics, b): - return csiState.parser.ground, nil - case sliceContains(csiCollectables, b): - csiState.parser.collectParam() - return csiState, nil - case sliceContains(executors, b): - return csiState, csiState.parser.execute() - } - - return csiState, nil -} - -func (csiState csiParamState) Transition(s state) error { - logger.Infof("CsiParam::Transition %s --> %s", csiState.Name(), s.Name()) - csiState.baseState.Transition(s) - - switch s { - case csiState.parser.ground: - return csiState.parser.csiDispatch() - } - - return nil -} diff --git a/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go b/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go deleted file mode 100644 index 2189eb6b6..000000000 --- 
a/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go +++ /dev/null @@ -1,36 +0,0 @@ -package ansiterm - -type escapeIntermediateState struct { - baseState -} - -func (escState escapeIntermediateState) Handle(b byte) (s state, e error) { - logger.Infof("escapeIntermediateState::Handle %#x", b) - nextState, err := escState.baseState.Handle(b) - if nextState != nil || err != nil { - return nextState, err - } - - switch { - case sliceContains(intermeds, b): - return escState, escState.parser.collectInter() - case sliceContains(executors, b): - return escState, escState.parser.execute() - case sliceContains(escapeIntermediateToGroundBytes, b): - return escState.parser.ground, nil - } - - return escState, nil -} - -func (escState escapeIntermediateState) Transition(s state) error { - logger.Infof("escapeIntermediateState::Transition %s --> %s", escState.Name(), s.Name()) - escState.baseState.Transition(s) - - switch s { - case escState.parser.ground: - return escState.parser.escDispatch() - } - - return nil -} diff --git a/vendor/github.com/Azure/go-ansiterm/escape_state.go b/vendor/github.com/Azure/go-ansiterm/escape_state.go deleted file mode 100644 index 7b1b9ad3f..000000000 --- a/vendor/github.com/Azure/go-ansiterm/escape_state.go +++ /dev/null @@ -1,47 +0,0 @@ -package ansiterm - -type escapeState struct { - baseState -} - -func (escState escapeState) Handle(b byte) (s state, e error) { - logger.Infof("escapeState::Handle %#x", b) - nextState, err := escState.baseState.Handle(b) - if nextState != nil || err != nil { - return nextState, err - } - - switch { - case b == ANSI_ESCAPE_SECONDARY: - return escState.parser.csiEntry, nil - case b == ANSI_OSC_STRING_ENTRY: - return escState.parser.oscString, nil - case sliceContains(executors, b): - return escState, escState.parser.execute() - case sliceContains(escapeToGroundBytes, b): - return escState.parser.ground, nil - case sliceContains(intermeds, b): - return escState.parser.escapeIntermediate, nil - } - - return escState, nil -} - -func (escState escapeState) Transition(s state) error { - logger.Infof("Escape::Transition %s --> %s", escState.Name(), s.Name()) - escState.baseState.Transition(s) - - switch s { - case escState.parser.ground: - return escState.parser.escDispatch() - case escState.parser.escapeIntermediate: - return escState.parser.collectInter() - } - - return nil -} - -func (escState escapeState) Enter() error { - escState.parser.clear() - return nil -} diff --git a/vendor/github.com/Azure/go-ansiterm/event_handler.go b/vendor/github.com/Azure/go-ansiterm/event_handler.go deleted file mode 100644 index 98087b38c..000000000 --- a/vendor/github.com/Azure/go-ansiterm/event_handler.go +++ /dev/null @@ -1,90 +0,0 @@ -package ansiterm - -type AnsiEventHandler interface { - // Print - Print(b byte) error - - // Execute C0 commands - Execute(b byte) error - - // CUrsor Up - CUU(int) error - - // CUrsor Down - CUD(int) error - - // CUrsor Forward - CUF(int) error - - // CUrsor Backward - CUB(int) error - - // Cursor to Next Line - CNL(int) error - - // Cursor to Previous Line - CPL(int) error - - // Cursor Horizontal position Absolute - CHA(int) error - - // Vertical line Position Absolute - VPA(int) error - - // CUrsor Position - CUP(int, int) error - - // Horizontal and Vertical Position (depends on PUM) - HVP(int, int) error - - // Text Cursor Enable Mode - DECTCEM(bool) error - - // Origin Mode - DECOM(bool) error - - // 132 Column Mode - DECCOLM(bool) error - - // Erase in Display - ED(int) error - - // Erase in Line 
- EL(int) error - - // Insert Line - IL(int) error - - // Delete Line - DL(int) error - - // Insert Character - ICH(int) error - - // Delete Character - DCH(int) error - - // Set Graphics Rendition - SGR([]int) error - - // Pan Down - SU(int) error - - // Pan Up - SD(int) error - - // Device Attributes - DA([]string) error - - // Set Top and Bottom Margins - DECSTBM(int, int) error - - // Index - IND() error - - // Reverse Index - RI() error - - // Flush updates from previous commands - Flush() error -} diff --git a/vendor/github.com/Azure/go-ansiterm/ground_state.go b/vendor/github.com/Azure/go-ansiterm/ground_state.go deleted file mode 100644 index 52451e946..000000000 --- a/vendor/github.com/Azure/go-ansiterm/ground_state.go +++ /dev/null @@ -1,24 +0,0 @@ -package ansiterm - -type groundState struct { - baseState -} - -func (gs groundState) Handle(b byte) (s state, e error) { - gs.parser.context.currentChar = b - - nextState, err := gs.baseState.Handle(b) - if nextState != nil || err != nil { - return nextState, err - } - - switch { - case sliceContains(printables, b): - return gs, gs.parser.print() - - case sliceContains(executors, b): - return gs, gs.parser.execute() - } - - return gs, nil -} diff --git a/vendor/github.com/Azure/go-ansiterm/osc_string_state.go b/vendor/github.com/Azure/go-ansiterm/osc_string_state.go deleted file mode 100644 index 24062d420..000000000 --- a/vendor/github.com/Azure/go-ansiterm/osc_string_state.go +++ /dev/null @@ -1,31 +0,0 @@ -package ansiterm - -type oscStringState struct { - baseState -} - -func (oscState oscStringState) Handle(b byte) (s state, e error) { - logger.Infof("OscString::Handle %#x", b) - nextState, err := oscState.baseState.Handle(b) - if nextState != nil || err != nil { - return nextState, err - } - - switch { - case isOscStringTerminator(b): - return oscState.parser.ground, nil - } - - return oscState, nil -} - -// See below for OSC string terminators for linux -// http://man7.org/linux/man-pages/man4/console_codes.4.html -func isOscStringTerminator(b byte) bool { - - if b == ANSI_BEL || b == 0x5C { - return true - } - - return false -} diff --git a/vendor/github.com/Azure/go-ansiterm/parser.go b/vendor/github.com/Azure/go-ansiterm/parser.go deleted file mode 100644 index 169f68dbe..000000000 --- a/vendor/github.com/Azure/go-ansiterm/parser.go +++ /dev/null @@ -1,136 +0,0 @@ -package ansiterm - -import ( - "errors" - "io/ioutil" - "os" - - "github.com/Sirupsen/logrus" -) - -var logger *logrus.Logger - -type AnsiParser struct { - currState state - eventHandler AnsiEventHandler - context *ansiContext - csiEntry state - csiParam state - dcsEntry state - escape state - escapeIntermediate state - error state - ground state - oscString state - stateMap []state -} - -func CreateParser(initialState string, evtHandler AnsiEventHandler) *AnsiParser { - logFile := ioutil.Discard - - if isDebugEnv := os.Getenv(LogEnv); isDebugEnv == "1" { - logFile, _ = os.Create("ansiParser.log") - } - - logger = &logrus.Logger{ - Out: logFile, - Formatter: new(logrus.TextFormatter), - Level: logrus.InfoLevel, - } - - parser := &AnsiParser{ - eventHandler: evtHandler, - context: &ansiContext{}, - } - - parser.csiEntry = csiEntryState{baseState{name: "CsiEntry", parser: parser}} - parser.csiParam = csiParamState{baseState{name: "CsiParam", parser: parser}} - parser.dcsEntry = dcsEntryState{baseState{name: "DcsEntry", parser: parser}} - parser.escape = escapeState{baseState{name: "Escape", parser: parser}} - parser.escapeIntermediate = 
escapeIntermediateState{baseState{name: "EscapeIntermediate", parser: parser}} - parser.error = errorState{baseState{name: "Error", parser: parser}} - parser.ground = groundState{baseState{name: "Ground", parser: parser}} - parser.oscString = oscStringState{baseState{name: "OscString", parser: parser}} - - parser.stateMap = []state{ - parser.csiEntry, - parser.csiParam, - parser.dcsEntry, - parser.escape, - parser.escapeIntermediate, - parser.error, - parser.ground, - parser.oscString, - } - - parser.currState = getState(initialState, parser.stateMap) - - logger.Infof("CreateParser: parser %p", parser) - return parser -} - -func getState(name string, states []state) state { - for _, el := range states { - if el.Name() == name { - return el - } - } - - return nil -} - -func (ap *AnsiParser) Parse(bytes []byte) (int, error) { - for i, b := range bytes { - if err := ap.handle(b); err != nil { - return i, err - } - } - - return len(bytes), ap.eventHandler.Flush() -} - -func (ap *AnsiParser) handle(b byte) error { - ap.context.currentChar = b - newState, err := ap.currState.Handle(b) - if err != nil { - return err - } - - if newState == nil { - logger.Warning("newState is nil") - return errors.New("New state of 'nil' is invalid.") - } - - if newState != ap.currState { - if err := ap.changeState(newState); err != nil { - return err - } - } - - return nil -} - -func (ap *AnsiParser) changeState(newState state) error { - logger.Infof("ChangeState %s --> %s", ap.currState.Name(), newState.Name()) - - // Exit old state - if err := ap.currState.Exit(); err != nil { - logger.Infof("Exit state '%s' failed with : '%v'", ap.currState.Name(), err) - return err - } - - // Perform transition action - if err := ap.currState.Transition(newState); err != nil { - logger.Infof("Transition from '%s' to '%s' failed with: '%v'", ap.currState.Name(), newState.Name, err) - return err - } - - // Enter new state - if err := newState.Enter(); err != nil { - logger.Infof("Enter state '%s' failed with: '%v'", newState.Name(), err) - return err - } - - ap.currState = newState - return nil -} diff --git a/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go b/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go deleted file mode 100644 index 8b69a67a5..000000000 --- a/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go +++ /dev/null @@ -1,103 +0,0 @@ -package ansiterm - -import ( - "strconv" -) - -func parseParams(bytes []byte) ([]string, error) { - paramBuff := make([]byte, 0, 0) - params := []string{} - - for _, v := range bytes { - if v == ';' { - if len(paramBuff) > 0 { - // Completed parameter, append it to the list - s := string(paramBuff) - params = append(params, s) - paramBuff = make([]byte, 0, 0) - } - } else { - paramBuff = append(paramBuff, v) - } - } - - // Last parameter may not be terminated with ';' - if len(paramBuff) > 0 { - s := string(paramBuff) - params = append(params, s) - } - - logger.Infof("Parsed params: %v with length: %d", params, len(params)) - return params, nil -} - -func parseCmd(context ansiContext) (string, error) { - return string(context.currentChar), nil -} - -func getInt(params []string, dflt int) int { - i := getInts(params, 1, dflt)[0] - logger.Infof("getInt: %v", i) - return i -} - -func getInts(params []string, minCount int, dflt int) []int { - ints := []int{} - - for _, v := range params { - i, _ := strconv.Atoi(v) - // Zero is mapped to the default value in VT100. 
- if i == 0 { - i = dflt - } - ints = append(ints, i) - } - - if len(ints) < minCount { - remaining := minCount - len(ints) - for i := 0; i < remaining; i++ { - ints = append(ints, dflt) - } - } - - logger.Infof("getInts: %v", ints) - - return ints -} - -func (ap *AnsiParser) modeDispatch(param string, set bool) error { - switch param { - case "?3": - return ap.eventHandler.DECCOLM(set) - case "?6": - return ap.eventHandler.DECOM(set) - case "?25": - return ap.eventHandler.DECTCEM(set) - } - return nil -} - -func (ap *AnsiParser) hDispatch(params []string) error { - if len(params) == 1 { - return ap.modeDispatch(params[0], true) - } - - return nil -} - -func (ap *AnsiParser) lDispatch(params []string) error { - if len(params) == 1 { - return ap.modeDispatch(params[0], false) - } - - return nil -} - -func getEraseParam(params []string) int { - param := getInt(params, 0) - if param < 0 || 3 < param { - param = 0 - } - - return param -} diff --git a/vendor/github.com/Azure/go-ansiterm/parser_actions.go b/vendor/github.com/Azure/go-ansiterm/parser_actions.go deleted file mode 100644 index 58750a2d2..000000000 --- a/vendor/github.com/Azure/go-ansiterm/parser_actions.go +++ /dev/null @@ -1,122 +0,0 @@ -package ansiterm - -import ( - "fmt" -) - -func (ap *AnsiParser) collectParam() error { - currChar := ap.context.currentChar - logger.Infof("collectParam %#x", currChar) - ap.context.paramBuffer = append(ap.context.paramBuffer, currChar) - return nil -} - -func (ap *AnsiParser) collectInter() error { - currChar := ap.context.currentChar - logger.Infof("collectInter %#x", currChar) - ap.context.paramBuffer = append(ap.context.interBuffer, currChar) - return nil -} - -func (ap *AnsiParser) escDispatch() error { - cmd, _ := parseCmd(*ap.context) - intermeds := ap.context.interBuffer - logger.Infof("escDispatch currentChar: %#x", ap.context.currentChar) - logger.Infof("escDispatch: %v(%v)", cmd, intermeds) - - switch cmd { - case "D": // IND - return ap.eventHandler.IND() - case "E": // NEL, equivalent to CRLF - err := ap.eventHandler.Execute(ANSI_CARRIAGE_RETURN) - if err == nil { - err = ap.eventHandler.Execute(ANSI_LINE_FEED) - } - return err - case "M": // RI - return ap.eventHandler.RI() - } - - return nil -} - -func (ap *AnsiParser) csiDispatch() error { - cmd, _ := parseCmd(*ap.context) - params, _ := parseParams(ap.context.paramBuffer) - - logger.Infof("csiDispatch: %v(%v)", cmd, params) - - switch cmd { - case "@": - return ap.eventHandler.ICH(getInt(params, 1)) - case "A": - return ap.eventHandler.CUU(getInt(params, 1)) - case "B": - return ap.eventHandler.CUD(getInt(params, 1)) - case "C": - return ap.eventHandler.CUF(getInt(params, 1)) - case "D": - return ap.eventHandler.CUB(getInt(params, 1)) - case "E": - return ap.eventHandler.CNL(getInt(params, 1)) - case "F": - return ap.eventHandler.CPL(getInt(params, 1)) - case "G": - return ap.eventHandler.CHA(getInt(params, 1)) - case "H": - ints := getInts(params, 2, 1) - x, y := ints[0], ints[1] - return ap.eventHandler.CUP(x, y) - case "J": - param := getEraseParam(params) - return ap.eventHandler.ED(param) - case "K": - param := getEraseParam(params) - return ap.eventHandler.EL(param) - case "L": - return ap.eventHandler.IL(getInt(params, 1)) - case "M": - return ap.eventHandler.DL(getInt(params, 1)) - case "P": - return ap.eventHandler.DCH(getInt(params, 1)) - case "S": - return ap.eventHandler.SU(getInt(params, 1)) - case "T": - return ap.eventHandler.SD(getInt(params, 1)) - case "c": - return ap.eventHandler.DA(params) - case "d": - return 
ap.eventHandler.VPA(getInt(params, 1)) - case "f": - ints := getInts(params, 2, 1) - x, y := ints[0], ints[1] - return ap.eventHandler.HVP(x, y) - case "h": - return ap.hDispatch(params) - case "l": - return ap.lDispatch(params) - case "m": - return ap.eventHandler.SGR(getInts(params, 1, 0)) - case "r": - ints := getInts(params, 2, 1) - top, bottom := ints[0], ints[1] - return ap.eventHandler.DECSTBM(top, bottom) - default: - logger.Errorf(fmt.Sprintf("Unsupported CSI command: '%s', with full context: %v", cmd, ap.context)) - return nil - } - -} - -func (ap *AnsiParser) print() error { - return ap.eventHandler.Print(ap.context.currentChar) -} - -func (ap *AnsiParser) clear() error { - ap.context = &ansiContext{} - return nil -} - -func (ap *AnsiParser) execute() error { - return ap.eventHandler.Execute(ap.context.currentChar) -} diff --git a/vendor/github.com/Azure/go-ansiterm/states.go b/vendor/github.com/Azure/go-ansiterm/states.go deleted file mode 100644 index f2ea1fcd1..000000000 --- a/vendor/github.com/Azure/go-ansiterm/states.go +++ /dev/null @@ -1,71 +0,0 @@ -package ansiterm - -type stateID int - -type state interface { - Enter() error - Exit() error - Handle(byte) (state, error) - Name() string - Transition(state) error -} - -type baseState struct { - name string - parser *AnsiParser -} - -func (base baseState) Enter() error { - return nil -} - -func (base baseState) Exit() error { - return nil -} - -func (base baseState) Handle(b byte) (s state, e error) { - - switch { - case b == CSI_ENTRY: - return base.parser.csiEntry, nil - case b == DCS_ENTRY: - return base.parser.dcsEntry, nil - case b == ANSI_ESCAPE_PRIMARY: - return base.parser.escape, nil - case b == OSC_STRING: - return base.parser.oscString, nil - case sliceContains(toGroundBytes, b): - return base.parser.ground, nil - } - - return nil, nil -} - -func (base baseState) Name() string { - return base.name -} - -func (base baseState) Transition(s state) error { - if s == base.parser.ground { - execBytes := []byte{0x18} - execBytes = append(execBytes, 0x1A) - execBytes = append(execBytes, getByteRange(0x80, 0x8F)...) - execBytes = append(execBytes, getByteRange(0x91, 0x97)...) - execBytes = append(execBytes, 0x99) - execBytes = append(execBytes, 0x9A) - - if sliceContains(execBytes, base.parser.context.currentChar) { - return base.parser.execute() - } - } - - return nil -} - -type dcsEntryState struct { - baseState -} - -type errorState struct { - baseState -} diff --git a/vendor/github.com/Azure/go-ansiterm/utilities.go b/vendor/github.com/Azure/go-ansiterm/utilities.go deleted file mode 100644 index 392114493..000000000 --- a/vendor/github.com/Azure/go-ansiterm/utilities.go +++ /dev/null @@ -1,21 +0,0 @@ -package ansiterm - -import ( - "strconv" -) - -func sliceContains(bytes []byte, b byte) bool { - for _, v := range bytes { - if v == b { - return true - } - } - - return false -} - -func convertBytesToInteger(bytes []byte) int { - s := string(bytes) - i, _ := strconv.Atoi(s) - return i -} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go b/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go deleted file mode 100644 index daf2f0696..000000000 --- a/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go +++ /dev/null @@ -1,182 +0,0 @@ -// +build windows - -package winterm - -import ( - "fmt" - "os" - "strconv" - "strings" - "syscall" - - "github.com/Azure/go-ansiterm" -) - -// Windows keyboard constants -// See https://msdn.microsoft.com/en-us/library/windows/desktop/dd375731(v=vs.85).aspx. 
-const ( - VK_PRIOR = 0x21 // PAGE UP key - VK_NEXT = 0x22 // PAGE DOWN key - VK_END = 0x23 // END key - VK_HOME = 0x24 // HOME key - VK_LEFT = 0x25 // LEFT ARROW key - VK_UP = 0x26 // UP ARROW key - VK_RIGHT = 0x27 // RIGHT ARROW key - VK_DOWN = 0x28 // DOWN ARROW key - VK_SELECT = 0x29 // SELECT key - VK_PRINT = 0x2A // PRINT key - VK_EXECUTE = 0x2B // EXECUTE key - VK_SNAPSHOT = 0x2C // PRINT SCREEN key - VK_INSERT = 0x2D // INS key - VK_DELETE = 0x2E // DEL key - VK_HELP = 0x2F // HELP key - VK_F1 = 0x70 // F1 key - VK_F2 = 0x71 // F2 key - VK_F3 = 0x72 // F3 key - VK_F4 = 0x73 // F4 key - VK_F5 = 0x74 // F5 key - VK_F6 = 0x75 // F6 key - VK_F7 = 0x76 // F7 key - VK_F8 = 0x77 // F8 key - VK_F9 = 0x78 // F9 key - VK_F10 = 0x79 // F10 key - VK_F11 = 0x7A // F11 key - VK_F12 = 0x7B // F12 key - - RIGHT_ALT_PRESSED = 0x0001 - LEFT_ALT_PRESSED = 0x0002 - RIGHT_CTRL_PRESSED = 0x0004 - LEFT_CTRL_PRESSED = 0x0008 - SHIFT_PRESSED = 0x0010 - NUMLOCK_ON = 0x0020 - SCROLLLOCK_ON = 0x0040 - CAPSLOCK_ON = 0x0080 - ENHANCED_KEY = 0x0100 -) - -type ansiCommand struct { - CommandBytes []byte - Command string - Parameters []string - IsSpecial bool -} - -func newAnsiCommand(command []byte) *ansiCommand { - - if isCharacterSelectionCmdChar(command[1]) { - // Is Character Set Selection commands - return &ansiCommand{ - CommandBytes: command, - Command: string(command), - IsSpecial: true, - } - } - - // last char is command character - lastCharIndex := len(command) - 1 - - ac := &ansiCommand{ - CommandBytes: command, - Command: string(command[lastCharIndex]), - IsSpecial: false, - } - - // more than a single escape - if lastCharIndex != 0 { - start := 1 - // skip if double char escape sequence - if command[0] == ansiterm.ANSI_ESCAPE_PRIMARY && command[1] == ansiterm.ANSI_ESCAPE_SECONDARY { - start++ - } - // convert this to GetNextParam method - ac.Parameters = strings.Split(string(command[start:lastCharIndex]), ansiterm.ANSI_PARAMETER_SEP) - } - - return ac -} - -func (ac *ansiCommand) paramAsSHORT(index int, defaultValue int16) int16 { - if index < 0 || index >= len(ac.Parameters) { - return defaultValue - } - - param, err := strconv.ParseInt(ac.Parameters[index], 10, 16) - if err != nil { - return defaultValue - } - - return int16(param) -} - -func (ac *ansiCommand) String() string { - return fmt.Sprintf("0x%v \"%v\" (\"%v\")", - bytesToHex(ac.CommandBytes), - ac.Command, - strings.Join(ac.Parameters, "\",\"")) -} - -// isAnsiCommandChar returns true if the passed byte falls within the range of ANSI commands. -// See http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html. 
-func isAnsiCommandChar(b byte) bool { - switch { - case ansiterm.ANSI_COMMAND_FIRST <= b && b <= ansiterm.ANSI_COMMAND_LAST && b != ansiterm.ANSI_ESCAPE_SECONDARY: - return true - case b == ansiterm.ANSI_CMD_G1 || b == ansiterm.ANSI_CMD_OSC || b == ansiterm.ANSI_CMD_DECPAM || b == ansiterm.ANSI_CMD_DECPNM: - // non-CSI escape sequence terminator - return true - case b == ansiterm.ANSI_CMD_STR_TERM || b == ansiterm.ANSI_BEL: - // String escape sequence terminator - return true - } - return false -} - -func isXtermOscSequence(command []byte, current byte) bool { - return (len(command) >= 2 && command[0] == ansiterm.ANSI_ESCAPE_PRIMARY && command[1] == ansiterm.ANSI_CMD_OSC && current != ansiterm.ANSI_BEL) -} - -func isCharacterSelectionCmdChar(b byte) bool { - return (b == ansiterm.ANSI_CMD_G0 || b == ansiterm.ANSI_CMD_G1 || b == ansiterm.ANSI_CMD_G2 || b == ansiterm.ANSI_CMD_G3) -} - -// bytesToHex converts a slice of bytes to a human-readable string. -func bytesToHex(b []byte) string { - hex := make([]string, len(b)) - for i, ch := range b { - hex[i] = fmt.Sprintf("%X", ch) - } - return strings.Join(hex, "") -} - -// ensureInRange adjusts the passed value, if necessary, to ensure it is within -// the passed min / max range. -func ensureInRange(n int16, min int16, max int16) int16 { - if n < min { - return min - } else if n > max { - return max - } else { - return n - } -} - -func GetStdFile(nFile int) (*os.File, uintptr) { - var file *os.File - switch nFile { - case syscall.STD_INPUT_HANDLE: - file = os.Stdin - case syscall.STD_OUTPUT_HANDLE: - file = os.Stdout - case syscall.STD_ERROR_HANDLE: - file = os.Stderr - default: - panic(fmt.Errorf("Invalid standard handle identifier: %v", nFile)) - } - - fd, err := syscall.GetStdHandle(nFile) - if err != nil { - panic(fmt.Errorf("Invalid standard handle indentifier: %v -- %v", nFile, err)) - } - - return file, uintptr(fd) -} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/api.go b/vendor/github.com/Azure/go-ansiterm/winterm/api.go deleted file mode 100644 index 462d92f8e..000000000 --- a/vendor/github.com/Azure/go-ansiterm/winterm/api.go +++ /dev/null @@ -1,322 +0,0 @@ -// +build windows - -package winterm - -import ( - "fmt" - "syscall" - "unsafe" -) - -//=========================================================================================================== -// IMPORTANT NOTE: -// -// The methods below make extensive use of the "unsafe" package to obtain the required pointers. -// Beginning in Go 1.3, the garbage collector may release local variables (e.g., incoming arguments, stack -// variables) the pointers reference *before* the API completes. -// -// As a result, in those cases, the code must hint that the variables remain in active by invoking the -// dummy method "use" (see below). Newer versions of Go are planned to change the mechanism to no longer -// require unsafe pointers. -// -// If you add or modify methods, ENSURE protection of local variables through the "use" builtin to inform -// the garbage collector the variables remain in use if: -// -// -- The value is not a pointer (e.g., int32, struct) -// -- The value is not referenced by the method after passing the pointer to Windows -// -// See http://golang.org/doc/go1.3. 
-//=========================================================================================================== - -var ( - kernel32DLL = syscall.NewLazyDLL("kernel32.dll") - - getConsoleCursorInfoProc = kernel32DLL.NewProc("GetConsoleCursorInfo") - setConsoleCursorInfoProc = kernel32DLL.NewProc("SetConsoleCursorInfo") - setConsoleCursorPositionProc = kernel32DLL.NewProc("SetConsoleCursorPosition") - setConsoleModeProc = kernel32DLL.NewProc("SetConsoleMode") - getConsoleScreenBufferInfoProc = kernel32DLL.NewProc("GetConsoleScreenBufferInfo") - setConsoleScreenBufferSizeProc = kernel32DLL.NewProc("SetConsoleScreenBufferSize") - scrollConsoleScreenBufferProc = kernel32DLL.NewProc("ScrollConsoleScreenBufferA") - setConsoleTextAttributeProc = kernel32DLL.NewProc("SetConsoleTextAttribute") - setConsoleWindowInfoProc = kernel32DLL.NewProc("SetConsoleWindowInfo") - writeConsoleOutputProc = kernel32DLL.NewProc("WriteConsoleOutputW") - readConsoleInputProc = kernel32DLL.NewProc("ReadConsoleInputW") - waitForSingleObjectProc = kernel32DLL.NewProc("WaitForSingleObject") -) - -// Windows Console constants -const ( - // Console modes - // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx. - ENABLE_PROCESSED_INPUT = 0x0001 - ENABLE_LINE_INPUT = 0x0002 - ENABLE_ECHO_INPUT = 0x0004 - ENABLE_WINDOW_INPUT = 0x0008 - ENABLE_MOUSE_INPUT = 0x0010 - ENABLE_INSERT_MODE = 0x0020 - ENABLE_QUICK_EDIT_MODE = 0x0040 - ENABLE_EXTENDED_FLAGS = 0x0080 - - ENABLE_PROCESSED_OUTPUT = 0x0001 - ENABLE_WRAP_AT_EOL_OUTPUT = 0x0002 - - // Character attributes - // Note: - // -- The attributes are combined to produce various colors (e.g., Blue + Green will create Cyan). - // Clearing all foreground or background colors results in black; setting all creates white. - // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682088(v=vs.85).aspx#_win32_character_attributes. - FOREGROUND_BLUE uint16 = 0x0001 - FOREGROUND_GREEN uint16 = 0x0002 - FOREGROUND_RED uint16 = 0x0004 - FOREGROUND_INTENSITY uint16 = 0x0008 - FOREGROUND_MASK uint16 = 0x000F - - BACKGROUND_BLUE uint16 = 0x0010 - BACKGROUND_GREEN uint16 = 0x0020 - BACKGROUND_RED uint16 = 0x0040 - BACKGROUND_INTENSITY uint16 = 0x0080 - BACKGROUND_MASK uint16 = 0x00F0 - - COMMON_LVB_MASK uint16 = 0xFF00 - COMMON_LVB_REVERSE_VIDEO uint16 = 0x4000 - COMMON_LVB_UNDERSCORE uint16 = 0x8000 - - // Input event types - // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683499(v=vs.85).aspx. 
- KEY_EVENT = 0x0001 - MOUSE_EVENT = 0x0002 - WINDOW_BUFFER_SIZE_EVENT = 0x0004 - MENU_EVENT = 0x0008 - FOCUS_EVENT = 0x0010 - - // WaitForSingleObject return codes - WAIT_ABANDONED = 0x00000080 - WAIT_FAILED = 0xFFFFFFFF - WAIT_SIGNALED = 0x0000000 - WAIT_TIMEOUT = 0x00000102 - - // WaitForSingleObject wait duration - WAIT_INFINITE = 0xFFFFFFFF - WAIT_ONE_SECOND = 1000 - WAIT_HALF_SECOND = 500 - WAIT_QUARTER_SECOND = 250 -) - -// Windows API Console types -// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682101(v=vs.85).aspx for Console specific types (e.g., COORD) -// -- See https://msdn.microsoft.com/en-us/library/aa296569(v=vs.60).aspx for comments on alignment -type ( - CHAR_INFO struct { - UnicodeChar uint16 - Attributes uint16 - } - - CONSOLE_CURSOR_INFO struct { - Size uint32 - Visible int32 - } - - CONSOLE_SCREEN_BUFFER_INFO struct { - Size COORD - CursorPosition COORD - Attributes uint16 - Window SMALL_RECT - MaximumWindowSize COORD - } - - COORD struct { - X int16 - Y int16 - } - - SMALL_RECT struct { - Left int16 - Top int16 - Right int16 - Bottom int16 - } - - // INPUT_RECORD is a C/C++ union of which KEY_EVENT_RECORD is one case, it is also the largest - // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683499(v=vs.85).aspx. - INPUT_RECORD struct { - EventType uint16 - KeyEvent KEY_EVENT_RECORD - } - - KEY_EVENT_RECORD struct { - KeyDown int32 - RepeatCount uint16 - VirtualKeyCode uint16 - VirtualScanCode uint16 - UnicodeChar uint16 - ControlKeyState uint32 - } - - WINDOW_BUFFER_SIZE struct { - Size COORD - } -) - -// boolToBOOL converts a Go bool into a Windows int32. -func boolToBOOL(f bool) int32 { - if f { - return int32(1) - } else { - return int32(0) - } -} - -// GetConsoleCursorInfo retrieves information about the size and visiblity of the console cursor. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683163(v=vs.85).aspx. -func GetConsoleCursorInfo(handle uintptr, cursorInfo *CONSOLE_CURSOR_INFO) error { - r1, r2, err := getConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(cursorInfo)), 0) - return checkError(r1, r2, err) -} - -// SetConsoleCursorInfo sets the size and visiblity of the console cursor. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686019(v=vs.85).aspx. -func SetConsoleCursorInfo(handle uintptr, cursorInfo *CONSOLE_CURSOR_INFO) error { - r1, r2, err := setConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(cursorInfo)), 0) - return checkError(r1, r2, err) -} - -// SetConsoleCursorPosition location of the console cursor. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686025(v=vs.85).aspx. -func SetConsoleCursorPosition(handle uintptr, coord COORD) error { - r1, r2, err := setConsoleCursorPositionProc.Call(handle, coordToPointer(coord)) - use(coord) - return checkError(r1, r2, err) -} - -// GetConsoleMode gets the console mode for given file descriptor -// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx. -func GetConsoleMode(handle uintptr) (mode uint32, err error) { - err = syscall.GetConsoleMode(syscall.Handle(handle), &mode) - return mode, err -} - -// SetConsoleMode sets the console mode for given file descriptor -// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx. 
-func SetConsoleMode(handle uintptr, mode uint32) error { - r1, r2, err := setConsoleModeProc.Call(handle, uintptr(mode), 0) - use(mode) - return checkError(r1, r2, err) -} - -// GetConsoleScreenBufferInfo retrieves information about the specified console screen buffer. -// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms683171(v=vs.85).aspx. -func GetConsoleScreenBufferInfo(handle uintptr) (*CONSOLE_SCREEN_BUFFER_INFO, error) { - info := CONSOLE_SCREEN_BUFFER_INFO{} - err := checkError(getConsoleScreenBufferInfoProc.Call(handle, uintptr(unsafe.Pointer(&info)), 0)) - if err != nil { - return nil, err - } - return &info, nil -} - -func ScrollConsoleScreenBuffer(handle uintptr, scrollRect SMALL_RECT, clipRect SMALL_RECT, destOrigin COORD, char CHAR_INFO) error { - r1, r2, err := scrollConsoleScreenBufferProc.Call(handle, uintptr(unsafe.Pointer(&scrollRect)), uintptr(unsafe.Pointer(&clipRect)), coordToPointer(destOrigin), uintptr(unsafe.Pointer(&char))) - use(scrollRect) - use(clipRect) - use(destOrigin) - use(char) - return checkError(r1, r2, err) -} - -// SetConsoleScreenBufferSize sets the size of the console screen buffer. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686044(v=vs.85).aspx. -func SetConsoleScreenBufferSize(handle uintptr, coord COORD) error { - r1, r2, err := setConsoleScreenBufferSizeProc.Call(handle, coordToPointer(coord)) - use(coord) - return checkError(r1, r2, err) -} - -// SetConsoleTextAttribute sets the attributes of characters written to the -// console screen buffer by the WriteFile or WriteConsole function. -// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686047(v=vs.85).aspx. -func SetConsoleTextAttribute(handle uintptr, attribute uint16) error { - r1, r2, err := setConsoleTextAttributeProc.Call(handle, uintptr(attribute), 0) - use(attribute) - return checkError(r1, r2, err) -} - -// SetConsoleWindowInfo sets the size and position of the console screen buffer's window. -// Note that the size and location must be within and no larger than the backing console screen buffer. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686125(v=vs.85).aspx. -func SetConsoleWindowInfo(handle uintptr, isAbsolute bool, rect SMALL_RECT) error { - r1, r2, err := setConsoleWindowInfoProc.Call(handle, uintptr(boolToBOOL(isAbsolute)), uintptr(unsafe.Pointer(&rect))) - use(isAbsolute) - use(rect) - return checkError(r1, r2, err) -} - -// WriteConsoleOutput writes the CHAR_INFOs from the provided buffer to the active console buffer. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms687404(v=vs.85).aspx. -func WriteConsoleOutput(handle uintptr, buffer []CHAR_INFO, bufferSize COORD, bufferCoord COORD, writeRegion *SMALL_RECT) error { - r1, r2, err := writeConsoleOutputProc.Call(handle, uintptr(unsafe.Pointer(&buffer[0])), coordToPointer(bufferSize), coordToPointer(bufferCoord), uintptr(unsafe.Pointer(writeRegion))) - use(buffer) - use(bufferSize) - use(bufferCoord) - return checkError(r1, r2, err) -} - -// ReadConsoleInput reads (and removes) data from the console input buffer. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms684961(v=vs.85).aspx. 
-func ReadConsoleInput(handle uintptr, buffer []INPUT_RECORD, count *uint32) error { - r1, r2, err := readConsoleInputProc.Call(handle, uintptr(unsafe.Pointer(&buffer[0])), uintptr(len(buffer)), uintptr(unsafe.Pointer(count))) - use(buffer) - return checkError(r1, r2, err) -} - -// WaitForSingleObject waits for the passed handle to be signaled. -// It returns true if the handle was signaled; false otherwise. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms687032(v=vs.85).aspx. -func WaitForSingleObject(handle uintptr, msWait uint32) (bool, error) { - r1, _, err := waitForSingleObjectProc.Call(handle, uintptr(uint32(msWait))) - switch r1 { - case WAIT_ABANDONED, WAIT_TIMEOUT: - return false, nil - case WAIT_SIGNALED: - return true, nil - } - use(msWait) - return false, err -} - -// String helpers -func (info CONSOLE_SCREEN_BUFFER_INFO) String() string { - return fmt.Sprintf("Size(%v) Cursor(%v) Window(%v) Max(%v)", info.Size, info.CursorPosition, info.Window, info.MaximumWindowSize) -} - -func (coord COORD) String() string { - return fmt.Sprintf("%v,%v", coord.X, coord.Y) -} - -func (rect SMALL_RECT) String() string { - return fmt.Sprintf("(%v,%v),(%v,%v)", rect.Left, rect.Top, rect.Right, rect.Bottom) -} - -// checkError evaluates the results of a Windows API call and returns the error if it failed. -func checkError(r1, r2 uintptr, err error) error { - // Windows APIs return non-zero to indicate success - if r1 != 0 { - return nil - } - - // Return the error if provided, otherwise default to EINVAL - if err != nil { - return err - } - return syscall.EINVAL -} - -// coordToPointer converts a COORD into a uintptr (by fooling the type system). -func coordToPointer(c COORD) uintptr { - // Note: This code assumes the two SHORTs are correctly laid out; the "cast" to uint32 is just to get a pointer to pass. - return uintptr(*((*uint32)(unsafe.Pointer(&c)))) -} - -// use is a no-op, but the compiler cannot see that it is. -// Calling use(p) ensures that p is kept live until that point. -func use(p interface{}) {} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go b/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go deleted file mode 100644 index cbec8f728..000000000 --- a/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go +++ /dev/null @@ -1,100 +0,0 @@ -// +build windows - -package winterm - -import "github.com/Azure/go-ansiterm" - -const ( - FOREGROUND_COLOR_MASK = FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE - BACKGROUND_COLOR_MASK = BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE -) - -// collectAnsiIntoWindowsAttributes modifies the passed Windows text mode flags to reflect the -// request represented by the passed ANSI mode. 
-func collectAnsiIntoWindowsAttributes(windowsMode uint16, inverted bool, baseMode uint16, ansiMode int16) (uint16, bool) { - switch ansiMode { - - // Mode styles - case ansiterm.ANSI_SGR_BOLD: - windowsMode = windowsMode | FOREGROUND_INTENSITY - - case ansiterm.ANSI_SGR_DIM, ansiterm.ANSI_SGR_BOLD_DIM_OFF: - windowsMode &^= FOREGROUND_INTENSITY - - case ansiterm.ANSI_SGR_UNDERLINE: - windowsMode = windowsMode | COMMON_LVB_UNDERSCORE - - case ansiterm.ANSI_SGR_REVERSE: - inverted = true - - case ansiterm.ANSI_SGR_REVERSE_OFF: - inverted = false - - case ansiterm.ANSI_SGR_UNDERLINE_OFF: - windowsMode &^= COMMON_LVB_UNDERSCORE - - // Foreground colors - case ansiterm.ANSI_SGR_FOREGROUND_DEFAULT: - windowsMode = (windowsMode &^ FOREGROUND_MASK) | (baseMode & FOREGROUND_MASK) - - case ansiterm.ANSI_SGR_FOREGROUND_BLACK: - windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) - - case ansiterm.ANSI_SGR_FOREGROUND_RED: - windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED - - case ansiterm.ANSI_SGR_FOREGROUND_GREEN: - windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_GREEN - - case ansiterm.ANSI_SGR_FOREGROUND_YELLOW: - windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_GREEN - - case ansiterm.ANSI_SGR_FOREGROUND_BLUE: - windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_BLUE - - case ansiterm.ANSI_SGR_FOREGROUND_MAGENTA: - windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_BLUE - - case ansiterm.ANSI_SGR_FOREGROUND_CYAN: - windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_GREEN | FOREGROUND_BLUE - - case ansiterm.ANSI_SGR_FOREGROUND_WHITE: - windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE - - // Background colors - case ansiterm.ANSI_SGR_BACKGROUND_DEFAULT: - // Black with no intensity - windowsMode = (windowsMode &^ BACKGROUND_MASK) | (baseMode & BACKGROUND_MASK) - - case ansiterm.ANSI_SGR_BACKGROUND_BLACK: - windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) - - case ansiterm.ANSI_SGR_BACKGROUND_RED: - windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED - - case ansiterm.ANSI_SGR_BACKGROUND_GREEN: - windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_GREEN - - case ansiterm.ANSI_SGR_BACKGROUND_YELLOW: - windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_GREEN - - case ansiterm.ANSI_SGR_BACKGROUND_BLUE: - windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_BLUE - - case ansiterm.ANSI_SGR_BACKGROUND_MAGENTA: - windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_BLUE - - case ansiterm.ANSI_SGR_BACKGROUND_CYAN: - windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_GREEN | BACKGROUND_BLUE - - case ansiterm.ANSI_SGR_BACKGROUND_WHITE: - windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE - } - - return windowsMode, inverted -} - -// invertAttributes inverts the foreground and background colors of a Windows attributes value -func invertAttributes(windowsMode uint16) uint16 { - return (COMMON_LVB_MASK & windowsMode) | ((FOREGROUND_MASK & windowsMode) << 4) | ((BACKGROUND_MASK & windowsMode) >> 4) -} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go b/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go deleted file mode 100644 index f015723ad..000000000 --- 
a/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go +++ /dev/null @@ -1,101 +0,0 @@ -// +build windows - -package winterm - -const ( - horizontal = iota - vertical -) - -func (h *windowsAnsiEventHandler) getCursorWindow(info *CONSOLE_SCREEN_BUFFER_INFO) SMALL_RECT { - if h.originMode { - sr := h.effectiveSr(info.Window) - return SMALL_RECT{ - Top: sr.top, - Bottom: sr.bottom, - Left: 0, - Right: info.Size.X - 1, - } - } else { - return SMALL_RECT{ - Top: info.Window.Top, - Bottom: info.Window.Bottom, - Left: 0, - Right: info.Size.X - 1, - } - } -} - -// setCursorPosition sets the cursor to the specified position, bounded to the screen size -func (h *windowsAnsiEventHandler) setCursorPosition(position COORD, window SMALL_RECT) error { - position.X = ensureInRange(position.X, window.Left, window.Right) - position.Y = ensureInRange(position.Y, window.Top, window.Bottom) - err := SetConsoleCursorPosition(h.fd, position) - if err != nil { - return err - } - logger.Infof("Cursor position set: (%d, %d)", position.X, position.Y) - return err -} - -func (h *windowsAnsiEventHandler) moveCursorVertical(param int) error { - return h.moveCursor(vertical, param) -} - -func (h *windowsAnsiEventHandler) moveCursorHorizontal(param int) error { - return h.moveCursor(horizontal, param) -} - -func (h *windowsAnsiEventHandler) moveCursor(moveMode int, param int) error { - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - position := info.CursorPosition - switch moveMode { - case horizontal: - position.X += int16(param) - case vertical: - position.Y += int16(param) - } - - if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil { - return err - } - - return nil -} - -func (h *windowsAnsiEventHandler) moveCursorLine(param int) error { - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - position := info.CursorPosition - position.X = 0 - position.Y += int16(param) - - if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil { - return err - } - - return nil -} - -func (h *windowsAnsiEventHandler) moveCursorColumn(param int) error { - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - position := info.CursorPosition - position.X = int16(param) - 1 - - if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil { - return err - } - - return nil -} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go b/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go deleted file mode 100644 index 244b5fa25..000000000 --- a/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go +++ /dev/null @@ -1,84 +0,0 @@ -// +build windows - -package winterm - -import "github.com/Azure/go-ansiterm" - -func (h *windowsAnsiEventHandler) clearRange(attributes uint16, fromCoord COORD, toCoord COORD) error { - // Ignore an invalid (negative area) request - if toCoord.Y < fromCoord.Y { - return nil - } - - var err error - - var coordStart = COORD{} - var coordEnd = COORD{} - - xCurrent, yCurrent := fromCoord.X, fromCoord.Y - xEnd, yEnd := toCoord.X, toCoord.Y - - // Clear any partial initial line - if xCurrent > 0 { - coordStart.X, coordStart.Y = xCurrent, yCurrent - coordEnd.X, coordEnd.Y = xEnd, yCurrent - - err = h.clearRect(attributes, coordStart, coordEnd) - if err != nil { - return err - } - - xCurrent = 0 - yCurrent += 1 - } - - // Clear intervening rectangular section - if yCurrent < yEnd { - coordStart.X, coordStart.Y = xCurrent, 
yCurrent - coordEnd.X, coordEnd.Y = xEnd, yEnd-1 - - err = h.clearRect(attributes, coordStart, coordEnd) - if err != nil { - return err - } - - xCurrent = 0 - yCurrent = yEnd - } - - // Clear remaining partial ending line - coordStart.X, coordStart.Y = xCurrent, yCurrent - coordEnd.X, coordEnd.Y = xEnd, yEnd - - err = h.clearRect(attributes, coordStart, coordEnd) - if err != nil { - return err - } - - return nil -} - -func (h *windowsAnsiEventHandler) clearRect(attributes uint16, fromCoord COORD, toCoord COORD) error { - region := SMALL_RECT{Top: fromCoord.Y, Left: fromCoord.X, Bottom: toCoord.Y, Right: toCoord.X} - width := toCoord.X - fromCoord.X + 1 - height := toCoord.Y - fromCoord.Y + 1 - size := uint32(width) * uint32(height) - - if size <= 0 { - return nil - } - - buffer := make([]CHAR_INFO, size) - - char := CHAR_INFO{ansiterm.FILL_CHARACTER, attributes} - for i := 0; i < int(size); i++ { - buffer[i] = char - } - - err := WriteConsoleOutput(h.fd, buffer, COORD{X: width, Y: height}, COORD{X: 0, Y: 0}, ®ion) - if err != nil { - return err - } - - return nil -} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go b/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go deleted file mode 100644 index 706d27057..000000000 --- a/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go +++ /dev/null @@ -1,118 +0,0 @@ -// +build windows - -package winterm - -// effectiveSr gets the current effective scroll region in buffer coordinates -func (h *windowsAnsiEventHandler) effectiveSr(window SMALL_RECT) scrollRegion { - top := addInRange(window.Top, h.sr.top, window.Top, window.Bottom) - bottom := addInRange(window.Top, h.sr.bottom, window.Top, window.Bottom) - if top >= bottom { - top = window.Top - bottom = window.Bottom - } - return scrollRegion{top: top, bottom: bottom} -} - -func (h *windowsAnsiEventHandler) scrollUp(param int) error { - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - sr := h.effectiveSr(info.Window) - return h.scroll(param, sr, info) -} - -func (h *windowsAnsiEventHandler) scrollDown(param int) error { - return h.scrollUp(-param) -} - -func (h *windowsAnsiEventHandler) deleteLines(param int) error { - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - start := info.CursorPosition.Y - sr := h.effectiveSr(info.Window) - // Lines cannot be inserted or deleted outside the scrolling region. - if start >= sr.top && start <= sr.bottom { - sr.top = start - return h.scroll(param, sr, info) - } else { - return nil - } -} - -func (h *windowsAnsiEventHandler) insertLines(param int) error { - return h.deleteLines(-param) -} - -// scroll scrolls the provided scroll region by param lines. The scroll region is in buffer coordinates. 
-func (h *windowsAnsiEventHandler) scroll(param int, sr scrollRegion, info *CONSOLE_SCREEN_BUFFER_INFO) error { - logger.Infof("scroll: scrollTop: %d, scrollBottom: %d", sr.top, sr.bottom) - logger.Infof("scroll: windowTop: %d, windowBottom: %d", info.Window.Top, info.Window.Bottom) - - // Copy from and clip to the scroll region (full buffer width) - scrollRect := SMALL_RECT{ - Top: sr.top, - Bottom: sr.bottom, - Left: 0, - Right: info.Size.X - 1, - } - - // Origin to which area should be copied - destOrigin := COORD{ - X: 0, - Y: sr.top - int16(param), - } - - char := CHAR_INFO{ - UnicodeChar: ' ', - Attributes: h.attributes, - } - - if err := ScrollConsoleScreenBuffer(h.fd, scrollRect, scrollRect, destOrigin, char); err != nil { - return err - } - return nil -} - -func (h *windowsAnsiEventHandler) deleteCharacters(param int) error { - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - return h.scrollLine(param, info.CursorPosition, info) -} - -func (h *windowsAnsiEventHandler) insertCharacters(param int) error { - return h.deleteCharacters(-param) -} - -// scrollLine scrolls a line horizontally starting at the provided position by a number of columns. -func (h *windowsAnsiEventHandler) scrollLine(columns int, position COORD, info *CONSOLE_SCREEN_BUFFER_INFO) error { - // Copy from and clip to the scroll region (full buffer width) - scrollRect := SMALL_RECT{ - Top: position.Y, - Bottom: position.Y, - Left: position.X, - Right: info.Size.X - 1, - } - - // Origin to which area should be copied - destOrigin := COORD{ - X: position.X - int16(columns), - Y: position.Y, - } - - char := CHAR_INFO{ - UnicodeChar: ' ', - Attributes: h.attributes, - } - - if err := ScrollConsoleScreenBuffer(h.fd, scrollRect, scrollRect, destOrigin, char); err != nil { - return err - } - return nil -} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go b/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go deleted file mode 100644 index afa7635d7..000000000 --- a/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build windows - -package winterm - -// AddInRange increments a value by the passed quantity while ensuring the values -// always remain within the supplied min / max range. 
-func addInRange(n int16, increment int16, min int16, max int16) int16 { - return ensureInRange(n+increment, min, max) -} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go b/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go deleted file mode 100644 index 4d858ed61..000000000 --- a/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go +++ /dev/null @@ -1,726 +0,0 @@ -// +build windows - -package winterm - -import ( - "bytes" - "io/ioutil" - "os" - "strconv" - - "github.com/Azure/go-ansiterm" - "github.com/Sirupsen/logrus" -) - -var logger *logrus.Logger - -type windowsAnsiEventHandler struct { - fd uintptr - file *os.File - infoReset *CONSOLE_SCREEN_BUFFER_INFO - sr scrollRegion - buffer bytes.Buffer - attributes uint16 - inverted bool - wrapNext bool - drewMarginByte bool - originMode bool - marginByte byte - curInfo *CONSOLE_SCREEN_BUFFER_INFO - curPos COORD -} - -func CreateWinEventHandler(fd uintptr, file *os.File) ansiterm.AnsiEventHandler { - logFile := ioutil.Discard - - if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" { - logFile, _ = os.Create("winEventHandler.log") - } - - logger = &logrus.Logger{ - Out: logFile, - Formatter: new(logrus.TextFormatter), - Level: logrus.DebugLevel, - } - - infoReset, err := GetConsoleScreenBufferInfo(fd) - if err != nil { - return nil - } - - return &windowsAnsiEventHandler{ - fd: fd, - file: file, - infoReset: infoReset, - attributes: infoReset.Attributes, - } -} - -type scrollRegion struct { - top int16 - bottom int16 -} - -// simulateLF simulates a LF or CR+LF by scrolling if necessary to handle the -// current cursor position and scroll region settings, in which case it returns -// true. If no special handling is necessary, then it does nothing and returns -// false. -// -// In the false case, the caller should ensure that a carriage return -// and line feed are inserted or that the text is otherwise wrapped. -func (h *windowsAnsiEventHandler) simulateLF(includeCR bool) (bool, error) { - if h.wrapNext { - if err := h.Flush(); err != nil { - return false, err - } - h.clearWrap() - } - pos, info, err := h.getCurrentInfo() - if err != nil { - return false, err - } - sr := h.effectiveSr(info.Window) - if pos.Y == sr.bottom { - // Scrolling is necessary. Let Windows automatically scroll if the scrolling region - // is the full window. - if sr.top == info.Window.Top && sr.bottom == info.Window.Bottom { - if includeCR { - pos.X = 0 - h.updatePos(pos) - } - return false, nil - } - - // A custom scroll region is active. Scroll the window manually to simulate - // the LF. - if err := h.Flush(); err != nil { - return false, err - } - logger.Info("Simulating LF inside scroll region") - if err := h.scrollUp(1); err != nil { - return false, err - } - if includeCR { - pos.X = 0 - if err := SetConsoleCursorPosition(h.fd, pos); err != nil { - return false, err - } - } - return true, nil - - } else if pos.Y < info.Window.Bottom { - // Let Windows handle the LF. - pos.Y++ - if includeCR { - pos.X = 0 - } - h.updatePos(pos) - return false, nil - } else { - // The cursor is at the bottom of the screen but outside the scroll - // region. Skip the LF. - logger.Info("Simulating LF outside scroll region") - if includeCR { - if err := h.Flush(); err != nil { - return false, err - } - pos.X = 0 - if err := SetConsoleCursorPosition(h.fd, pos); err != nil { - return false, err - } - } - return true, nil - } -} - -// executeLF executes a LF without a CR. 
-func (h *windowsAnsiEventHandler) executeLF() error { - handled, err := h.simulateLF(false) - if err != nil { - return err - } - if !handled { - // Windows LF will reset the cursor column position. Write the LF - // and restore the cursor position. - pos, _, err := h.getCurrentInfo() - if err != nil { - return err - } - h.buffer.WriteByte(ansiterm.ANSI_LINE_FEED) - if pos.X != 0 { - if err := h.Flush(); err != nil { - return err - } - logger.Info("Resetting cursor position for LF without CR") - if err := SetConsoleCursorPosition(h.fd, pos); err != nil { - return err - } - } - } - return nil -} - -func (h *windowsAnsiEventHandler) Print(b byte) error { - if h.wrapNext { - h.buffer.WriteByte(h.marginByte) - h.clearWrap() - if _, err := h.simulateLF(true); err != nil { - return err - } - } - pos, info, err := h.getCurrentInfo() - if err != nil { - return err - } - if pos.X == info.Size.X-1 { - h.wrapNext = true - h.marginByte = b - } else { - pos.X++ - h.updatePos(pos) - h.buffer.WriteByte(b) - } - return nil -} - -func (h *windowsAnsiEventHandler) Execute(b byte) error { - switch b { - case ansiterm.ANSI_TAB: - logger.Info("Execute(TAB)") - // Move to the next tab stop, but preserve auto-wrap if already set. - if !h.wrapNext { - pos, info, err := h.getCurrentInfo() - if err != nil { - return err - } - pos.X = (pos.X + 8) - pos.X%8 - if pos.X >= info.Size.X { - pos.X = info.Size.X - 1 - } - if err := h.Flush(); err != nil { - return err - } - if err := SetConsoleCursorPosition(h.fd, pos); err != nil { - return err - } - } - return nil - - case ansiterm.ANSI_BEL: - h.buffer.WriteByte(ansiterm.ANSI_BEL) - return nil - - case ansiterm.ANSI_BACKSPACE: - if h.wrapNext { - if err := h.Flush(); err != nil { - return err - } - h.clearWrap() - } - pos, _, err := h.getCurrentInfo() - if err != nil { - return err - } - if pos.X > 0 { - pos.X-- - h.updatePos(pos) - h.buffer.WriteByte(ansiterm.ANSI_BACKSPACE) - } - return nil - - case ansiterm.ANSI_VERTICAL_TAB, ansiterm.ANSI_FORM_FEED: - // Treat as true LF. - return h.executeLF() - - case ansiterm.ANSI_LINE_FEED: - // Simulate a CR and LF for now since there is no way in go-ansiterm - // to tell if the LF should include CR (and more things break when it's - // missing than when it's incorrectly added). 
- handled, err := h.simulateLF(true) - if handled || err != nil { - return err - } - return h.buffer.WriteByte(ansiterm.ANSI_LINE_FEED) - - case ansiterm.ANSI_CARRIAGE_RETURN: - if h.wrapNext { - if err := h.Flush(); err != nil { - return err - } - h.clearWrap() - } - pos, _, err := h.getCurrentInfo() - if err != nil { - return err - } - if pos.X != 0 { - pos.X = 0 - h.updatePos(pos) - h.buffer.WriteByte(ansiterm.ANSI_CARRIAGE_RETURN) - } - return nil - - default: - return nil - } -} - -func (h *windowsAnsiEventHandler) CUU(param int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("CUU: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.moveCursorVertical(-param) -} - -func (h *windowsAnsiEventHandler) CUD(param int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("CUD: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.moveCursorVertical(param) -} - -func (h *windowsAnsiEventHandler) CUF(param int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("CUF: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.moveCursorHorizontal(param) -} - -func (h *windowsAnsiEventHandler) CUB(param int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("CUB: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.moveCursorHorizontal(-param) -} - -func (h *windowsAnsiEventHandler) CNL(param int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("CNL: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.moveCursorLine(param) -} - -func (h *windowsAnsiEventHandler) CPL(param int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("CPL: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.moveCursorLine(-param) -} - -func (h *windowsAnsiEventHandler) CHA(param int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("CHA: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.moveCursorColumn(param) -} - -func (h *windowsAnsiEventHandler) VPA(param int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("VPA: [[%d]]", param) - h.clearWrap() - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - window := h.getCursorWindow(info) - position := info.CursorPosition - position.Y = window.Top + int16(param) - 1 - return h.setCursorPosition(position, window) -} - -func (h *windowsAnsiEventHandler) CUP(row int, col int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("CUP: [[%d %d]]", row, col) - h.clearWrap() - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - window := h.getCursorWindow(info) - position := COORD{window.Left + int16(col) - 1, window.Top + int16(row) - 1} - return h.setCursorPosition(position, window) -} - -func (h *windowsAnsiEventHandler) HVP(row int, col int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("HVP: [[%d %d]]", row, col) - h.clearWrap() - return h.CUP(row, col) -} - -func (h *windowsAnsiEventHandler) DECTCEM(visible bool) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("DECTCEM: [%v]", []string{strconv.FormatBool(visible)}) - h.clearWrap() - return nil -} - -func (h *windowsAnsiEventHandler) DECOM(enable bool) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("DECOM: [%v]", []string{strconv.FormatBool(enable)}) - 
h.clearWrap() - h.originMode = enable - return h.CUP(1, 1) -} - -func (h *windowsAnsiEventHandler) DECCOLM(use132 bool) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("DECCOLM: [%v]", []string{strconv.FormatBool(use132)}) - h.clearWrap() - if err := h.ED(2); err != nil { - return err - } - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - targetWidth := int16(80) - if use132 { - targetWidth = 132 - } - if info.Size.X < targetWidth { - if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil { - logger.Info("set buffer failed:", err) - return err - } - } - window := info.Window - window.Left = 0 - window.Right = targetWidth - 1 - if err := SetConsoleWindowInfo(h.fd, true, window); err != nil { - logger.Info("set window failed:", err) - return err - } - if info.Size.X > targetWidth { - if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil { - logger.Info("set buffer failed:", err) - return err - } - } - return SetConsoleCursorPosition(h.fd, COORD{0, 0}) -} - -func (h *windowsAnsiEventHandler) ED(param int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("ED: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - - // [J -- Erases from the cursor to the end of the screen, including the cursor position. - // [1J -- Erases from the beginning of the screen to the cursor, including the cursor position. - // [2J -- Erases the complete display. The cursor does not move. - // Notes: - // -- Clearing the entire buffer, versus just the Window, works best for Windows Consoles - - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - var start COORD - var end COORD - - switch param { - case 0: - start = info.CursorPosition - end = COORD{info.Size.X - 1, info.Size.Y - 1} - - case 1: - start = COORD{0, 0} - end = info.CursorPosition - - case 2: - start = COORD{0, 0} - end = COORD{info.Size.X - 1, info.Size.Y - 1} - } - - err = h.clearRange(h.attributes, start, end) - if err != nil { - return err - } - - // If the whole buffer was cleared, move the window to the top while preserving - // the window-relative cursor position. - if param == 2 { - pos := info.CursorPosition - window := info.Window - pos.Y -= window.Top - window.Bottom -= window.Top - window.Top = 0 - if err := SetConsoleCursorPosition(h.fd, pos); err != nil { - return err - } - if err := SetConsoleWindowInfo(h.fd, true, window); err != nil { - return err - } - } - - return nil -} - -func (h *windowsAnsiEventHandler) EL(param int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("EL: [%v]", strconv.Itoa(param)) - h.clearWrap() - - // [K -- Erases from the cursor to the end of the line, including the cursor position. - // [1K -- Erases from the beginning of the line to the cursor, including the cursor position. - // [2K -- Erases the complete line. 
- - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - var start COORD - var end COORD - - switch param { - case 0: - start = info.CursorPosition - end = COORD{info.Size.X, info.CursorPosition.Y} - - case 1: - start = COORD{0, info.CursorPosition.Y} - end = info.CursorPosition - - case 2: - start = COORD{0, info.CursorPosition.Y} - end = COORD{info.Size.X, info.CursorPosition.Y} - } - - err = h.clearRange(h.attributes, start, end) - if err != nil { - return err - } - - return nil -} - -func (h *windowsAnsiEventHandler) IL(param int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("IL: [%v]", strconv.Itoa(param)) - h.clearWrap() - return h.insertLines(param) -} - -func (h *windowsAnsiEventHandler) DL(param int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("DL: [%v]", strconv.Itoa(param)) - h.clearWrap() - return h.deleteLines(param) -} - -func (h *windowsAnsiEventHandler) ICH(param int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("ICH: [%v]", strconv.Itoa(param)) - h.clearWrap() - return h.insertCharacters(param) -} - -func (h *windowsAnsiEventHandler) DCH(param int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("DCH: [%v]", strconv.Itoa(param)) - h.clearWrap() - return h.deleteCharacters(param) -} - -func (h *windowsAnsiEventHandler) SGR(params []int) error { - if err := h.Flush(); err != nil { - return err - } - strings := []string{} - for _, v := range params { - strings = append(strings, strconv.Itoa(v)) - } - - logger.Infof("SGR: [%v]", strings) - - if len(params) <= 0 { - h.attributes = h.infoReset.Attributes - h.inverted = false - } else { - for _, attr := range params { - - if attr == ansiterm.ANSI_SGR_RESET { - h.attributes = h.infoReset.Attributes - h.inverted = false - continue - } - - h.attributes, h.inverted = collectAnsiIntoWindowsAttributes(h.attributes, h.inverted, h.infoReset.Attributes, int16(attr)) - } - } - - attributes := h.attributes - if h.inverted { - attributes = invertAttributes(attributes) - } - err := SetConsoleTextAttribute(h.fd, attributes) - if err != nil { - return err - } - - return nil -} - -func (h *windowsAnsiEventHandler) SU(param int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("SU: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.scrollUp(param) -} - -func (h *windowsAnsiEventHandler) SD(param int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("SD: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.scrollDown(param) -} - -func (h *windowsAnsiEventHandler) DA(params []string) error { - logger.Infof("DA: [%v]", params) - // DA cannot be implemented because it must send data on the VT100 input stream, - // which is not available to go-ansiterm. - return nil -} - -func (h *windowsAnsiEventHandler) DECSTBM(top int, bottom int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("DECSTBM: [%d, %d]", top, bottom) - - // Windows is 0 indexed, Linux is 1 indexed - h.sr.top = int16(top - 1) - h.sr.bottom = int16(bottom - 1) - - // This command also moves the cursor to the origin. 
- h.clearWrap() - return h.CUP(1, 1) -} - -func (h *windowsAnsiEventHandler) RI() error { - if err := h.Flush(); err != nil { - return err - } - logger.Info("RI: []") - h.clearWrap() - - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - sr := h.effectiveSr(info.Window) - if info.CursorPosition.Y == sr.top { - return h.scrollDown(1) - } - - return h.moveCursorVertical(-1) -} - -func (h *windowsAnsiEventHandler) IND() error { - logger.Info("IND: []") - return h.executeLF() -} - -func (h *windowsAnsiEventHandler) Flush() error { - h.curInfo = nil - if h.buffer.Len() > 0 { - logger.Infof("Flush: [%s]", h.buffer.Bytes()) - if _, err := h.buffer.WriteTo(h.file); err != nil { - return err - } - } - - if h.wrapNext && !h.drewMarginByte { - logger.Infof("Flush: drawing margin byte '%c'", h.marginByte) - - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - charInfo := []CHAR_INFO{{UnicodeChar: uint16(h.marginByte), Attributes: info.Attributes}} - size := COORD{1, 1} - position := COORD{0, 0} - region := SMALL_RECT{Left: info.CursorPosition.X, Top: info.CursorPosition.Y, Right: info.CursorPosition.X, Bottom: info.CursorPosition.Y} - if err := WriteConsoleOutput(h.fd, charInfo, size, position, ®ion); err != nil { - return err - } - h.drewMarginByte = true - } - return nil -} - -// cacheConsoleInfo ensures that the current console screen information has been queried -// since the last call to Flush(). It must be called before accessing h.curInfo or h.curPos. -func (h *windowsAnsiEventHandler) getCurrentInfo() (COORD, *CONSOLE_SCREEN_BUFFER_INFO, error) { - if h.curInfo == nil { - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return COORD{}, nil, err - } - h.curInfo = info - h.curPos = info.CursorPosition - } - return h.curPos, h.curInfo, nil -} - -func (h *windowsAnsiEventHandler) updatePos(pos COORD) { - if h.curInfo == nil { - panic("failed to call getCurrentInfo before calling updatePos") - } - h.curPos = pos -} - -// clearWrap clears the state where the cursor is in the margin -// waiting for the next character before wrapping the line. This must -// be done before most operations that act on the cursor. -func (h *windowsAnsiEventHandler) clearWrap() { - h.wrapNext = false - h.drewMarginByte = false -} diff --git a/vendor/github.com/Nvveen/Gotty/LICENSE b/vendor/github.com/Nvveen/Gotty/LICENSE deleted file mode 100644 index 0b71c9736..000000000 --- a/vendor/github.com/Nvveen/Gotty/LICENSE +++ /dev/null @@ -1,26 +0,0 @@ -Copyright (c) 2012, Neal van Veen (nealvanveen@gmail.com) -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. -2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -The views and conclusions contained in the software and documentation are those -of the authors and should not be interpreted as representing official policies, -either expressed or implied, of the FreeBSD Project. diff --git a/vendor/github.com/Nvveen/Gotty/README b/vendor/github.com/Nvveen/Gotty/README deleted file mode 100644 index a6b0d9a8f..000000000 --- a/vendor/github.com/Nvveen/Gotty/README +++ /dev/null @@ -1,5 +0,0 @@ -Gotty is a library written in Go that determines and reads termcap database -files to produce an interface for interacting with the capabilities of a -terminal. -See the godoc documentation or the source code for more information about -function usage. diff --git a/vendor/github.com/Nvveen/Gotty/attributes.go b/vendor/github.com/Nvveen/Gotty/attributes.go deleted file mode 100644 index a4c005fae..000000000 --- a/vendor/github.com/Nvveen/Gotty/attributes.go +++ /dev/null @@ -1,514 +0,0 @@ -// Copyright 2012 Neal van Veen. All rights reserved. -// Usage of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package gotty - -// Boolean capabilities -var BoolAttr = [...]string{ - "auto_left_margin", "bw", - "auto_right_margin", "am", - "no_esc_ctlc", "xsb", - "ceol_standout_glitch", "xhp", - "eat_newline_glitch", "xenl", - "erase_overstrike", "eo", - "generic_type", "gn", - "hard_copy", "hc", - "has_meta_key", "km", - "has_status_line", "hs", - "insert_null_glitch", "in", - "memory_above", "da", - "memory_below", "db", - "move_insert_mode", "mir", - "move_standout_mode", "msgr", - "over_strike", "os", - "status_line_esc_ok", "eslok", - "dest_tabs_magic_smso", "xt", - "tilde_glitch", "hz", - "transparent_underline", "ul", - "xon_xoff", "nxon", - "needs_xon_xoff", "nxon", - "prtr_silent", "mc5i", - "hard_cursor", "chts", - "non_rev_rmcup", "nrrmc", - "no_pad_char", "npc", - "non_dest_scroll_region", "ndscr", - "can_change", "ccc", - "back_color_erase", "bce", - "hue_lightness_saturation", "hls", - "col_addr_glitch", "xhpa", - "cr_cancels_micro_mode", "crxm", - "has_print_wheel", "daisy", - "row_addr_glitch", "xvpa", - "semi_auto_right_margin", "sam", - "cpi_changes_res", "cpix", - "lpi_changes_res", "lpix", - "backspaces_with_bs", "", - "crt_no_scrolling", "", - "no_correctly_working_cr", "", - "gnu_has_meta_key", "", - "linefeed_is_newline", "", - "has_hardware_tabs", "", - "return_does_clr_eol", "", -} - -// Numerical capabilities -var NumAttr = [...]string{ - "columns", "cols", - "init_tabs", "it", - "lines", "lines", - "lines_of_memory", "lm", - "magic_cookie_glitch", "xmc", - "padding_baud_rate", "pb", - "virtual_terminal", "vt", - "width_status_line", "wsl", - "num_labels", "nlab", - "label_height", "lh", - "label_width", "lw", - "max_attributes", "ma", - "maximum_windows", "wnum", - "max_colors", "colors", - "max_pairs", "pairs", - "no_color_video", "ncv", - "buffer_capacity", "bufsz", - "dot_vert_spacing", "spinv", - "dot_horz_spacing", "spinh", - "max_micro_address", "maddr", - "max_micro_jump", "mjump", 
- "micro_col_size", "mcs", - "micro_line_size", "mls", - "number_of_pins", "npins", - "output_res_char", "orc", - "output_res_line", "orl", - "output_res_horz_inch", "orhi", - "output_res_vert_inch", "orvi", - "print_rate", "cps", - "wide_char_size", "widcs", - "buttons", "btns", - "bit_image_entwining", "bitwin", - "bit_image_type", "bitype", - "magic_cookie_glitch_ul", "", - "carriage_return_delay", "", - "new_line_delay", "", - "backspace_delay", "", - "horizontal_tab_delay", "", - "number_of_function_keys", "", -} - -// String capabilities -var StrAttr = [...]string{ - "back_tab", "cbt", - "bell", "bel", - "carriage_return", "cr", - "change_scroll_region", "csr", - "clear_all_tabs", "tbc", - "clear_screen", "clear", - "clr_eol", "el", - "clr_eos", "ed", - "column_address", "hpa", - "command_character", "cmdch", - "cursor_address", "cup", - "cursor_down", "cud1", - "cursor_home", "home", - "cursor_invisible", "civis", - "cursor_left", "cub1", - "cursor_mem_address", "mrcup", - "cursor_normal", "cnorm", - "cursor_right", "cuf1", - "cursor_to_ll", "ll", - "cursor_up", "cuu1", - "cursor_visible", "cvvis", - "delete_character", "dch1", - "delete_line", "dl1", - "dis_status_line", "dsl", - "down_half_line", "hd", - "enter_alt_charset_mode", "smacs", - "enter_blink_mode", "blink", - "enter_bold_mode", "bold", - "enter_ca_mode", "smcup", - "enter_delete_mode", "smdc", - "enter_dim_mode", "dim", - "enter_insert_mode", "smir", - "enter_secure_mode", "invis", - "enter_protected_mode", "prot", - "enter_reverse_mode", "rev", - "enter_standout_mode", "smso", - "enter_underline_mode", "smul", - "erase_chars", "ech", - "exit_alt_charset_mode", "rmacs", - "exit_attribute_mode", "sgr0", - "exit_ca_mode", "rmcup", - "exit_delete_mode", "rmdc", - "exit_insert_mode", "rmir", - "exit_standout_mode", "rmso", - "exit_underline_mode", "rmul", - "flash_screen", "flash", - "form_feed", "ff", - "from_status_line", "fsl", - "init_1string", "is1", - "init_2string", "is2", - "init_3string", "is3", - "init_file", "if", - "insert_character", "ich1", - "insert_line", "il1", - "insert_padding", "ip", - "key_backspace", "kbs", - "key_catab", "ktbc", - "key_clear", "kclr", - "key_ctab", "kctab", - "key_dc", "kdch1", - "key_dl", "kdl1", - "key_down", "kcud1", - "key_eic", "krmir", - "key_eol", "kel", - "key_eos", "ked", - "key_f0", "kf0", - "key_f1", "kf1", - "key_f10", "kf10", - "key_f2", "kf2", - "key_f3", "kf3", - "key_f4", "kf4", - "key_f5", "kf5", - "key_f6", "kf6", - "key_f7", "kf7", - "key_f8", "kf8", - "key_f9", "kf9", - "key_home", "khome", - "key_ic", "kich1", - "key_il", "kil1", - "key_left", "kcub1", - "key_ll", "kll", - "key_npage", "knp", - "key_ppage", "kpp", - "key_right", "kcuf1", - "key_sf", "kind", - "key_sr", "kri", - "key_stab", "khts", - "key_up", "kcuu1", - "keypad_local", "rmkx", - "keypad_xmit", "smkx", - "lab_f0", "lf0", - "lab_f1", "lf1", - "lab_f10", "lf10", - "lab_f2", "lf2", - "lab_f3", "lf3", - "lab_f4", "lf4", - "lab_f5", "lf5", - "lab_f6", "lf6", - "lab_f7", "lf7", - "lab_f8", "lf8", - "lab_f9", "lf9", - "meta_off", "rmm", - "meta_on", "smm", - "newline", "_glitch", - "pad_char", "npc", - "parm_dch", "dch", - "parm_delete_line", "dl", - "parm_down_cursor", "cud", - "parm_ich", "ich", - "parm_index", "indn", - "parm_insert_line", "il", - "parm_left_cursor", "cub", - "parm_right_cursor", "cuf", - "parm_rindex", "rin", - "parm_up_cursor", "cuu", - "pkey_key", "pfkey", - "pkey_local", "pfloc", - "pkey_xmit", "pfx", - "print_screen", "mc0", - "prtr_off", "mc4", - "prtr_on", "mc5", - 
"repeat_char", "rep", - "reset_1string", "rs1", - "reset_2string", "rs2", - "reset_3string", "rs3", - "reset_file", "rf", - "restore_cursor", "rc", - "row_address", "mvpa", - "save_cursor", "row_address", - "scroll_forward", "ind", - "scroll_reverse", "ri", - "set_attributes", "sgr", - "set_tab", "hts", - "set_window", "wind", - "tab", "s_magic_smso", - "to_status_line", "tsl", - "underline_char", "uc", - "up_half_line", "hu", - "init_prog", "iprog", - "key_a1", "ka1", - "key_a3", "ka3", - "key_b2", "kb2", - "key_c1", "kc1", - "key_c3", "kc3", - "prtr_non", "mc5p", - "char_padding", "rmp", - "acs_chars", "acsc", - "plab_norm", "pln", - "key_btab", "kcbt", - "enter_xon_mode", "smxon", - "exit_xon_mode", "rmxon", - "enter_am_mode", "smam", - "exit_am_mode", "rmam", - "xon_character", "xonc", - "xoff_character", "xoffc", - "ena_acs", "enacs", - "label_on", "smln", - "label_off", "rmln", - "key_beg", "kbeg", - "key_cancel", "kcan", - "key_close", "kclo", - "key_command", "kcmd", - "key_copy", "kcpy", - "key_create", "kcrt", - "key_end", "kend", - "key_enter", "kent", - "key_exit", "kext", - "key_find", "kfnd", - "key_help", "khlp", - "key_mark", "kmrk", - "key_message", "kmsg", - "key_move", "kmov", - "key_next", "knxt", - "key_open", "kopn", - "key_options", "kopt", - "key_previous", "kprv", - "key_print", "kprt", - "key_redo", "krdo", - "key_reference", "kref", - "key_refresh", "krfr", - "key_replace", "krpl", - "key_restart", "krst", - "key_resume", "kres", - "key_save", "ksav", - "key_suspend", "kspd", - "key_undo", "kund", - "key_sbeg", "kBEG", - "key_scancel", "kCAN", - "key_scommand", "kCMD", - "key_scopy", "kCPY", - "key_screate", "kCRT", - "key_sdc", "kDC", - "key_sdl", "kDL", - "key_select", "kslt", - "key_send", "kEND", - "key_seol", "kEOL", - "key_sexit", "kEXT", - "key_sfind", "kFND", - "key_shelp", "kHLP", - "key_shome", "kHOM", - "key_sic", "kIC", - "key_sleft", "kLFT", - "key_smessage", "kMSG", - "key_smove", "kMOV", - "key_snext", "kNXT", - "key_soptions", "kOPT", - "key_sprevious", "kPRV", - "key_sprint", "kPRT", - "key_sredo", "kRDO", - "key_sreplace", "kRPL", - "key_sright", "kRIT", - "key_srsume", "kRES", - "key_ssave", "kSAV", - "key_ssuspend", "kSPD", - "key_sundo", "kUND", - "req_for_input", "rfi", - "key_f11", "kf11", - "key_f12", "kf12", - "key_f13", "kf13", - "key_f14", "kf14", - "key_f15", "kf15", - "key_f16", "kf16", - "key_f17", "kf17", - "key_f18", "kf18", - "key_f19", "kf19", - "key_f20", "kf20", - "key_f21", "kf21", - "key_f22", "kf22", - "key_f23", "kf23", - "key_f24", "kf24", - "key_f25", "kf25", - "key_f26", "kf26", - "key_f27", "kf27", - "key_f28", "kf28", - "key_f29", "kf29", - "key_f30", "kf30", - "key_f31", "kf31", - "key_f32", "kf32", - "key_f33", "kf33", - "key_f34", "kf34", - "key_f35", "kf35", - "key_f36", "kf36", - "key_f37", "kf37", - "key_f38", "kf38", - "key_f39", "kf39", - "key_f40", "kf40", - "key_f41", "kf41", - "key_f42", "kf42", - "key_f43", "kf43", - "key_f44", "kf44", - "key_f45", "kf45", - "key_f46", "kf46", - "key_f47", "kf47", - "key_f48", "kf48", - "key_f49", "kf49", - "key_f50", "kf50", - "key_f51", "kf51", - "key_f52", "kf52", - "key_f53", "kf53", - "key_f54", "kf54", - "key_f55", "kf55", - "key_f56", "kf56", - "key_f57", "kf57", - "key_f58", "kf58", - "key_f59", "kf59", - "key_f60", "kf60", - "key_f61", "kf61", - "key_f62", "kf62", - "key_f63", "kf63", - "clr_bol", "el1", - "clear_margins", "mgc", - "set_left_margin", "smgl", - "set_right_margin", "smgr", - "label_format", "fln", - "set_clock", "sclk", - "display_clock", "dclk", - 
"remove_clock", "rmclk", - "create_window", "cwin", - "goto_window", "wingo", - "hangup", "hup", - "dial_phone", "dial", - "quick_dial", "qdial", - "tone", "tone", - "pulse", "pulse", - "flash_hook", "hook", - "fixed_pause", "pause", - "wait_tone", "wait", - "user0", "u0", - "user1", "u1", - "user2", "u2", - "user3", "u3", - "user4", "u4", - "user5", "u5", - "user6", "u6", - "user7", "u7", - "user8", "u8", - "user9", "u9", - "orig_pair", "op", - "orig_colors", "oc", - "initialize_color", "initc", - "initialize_pair", "initp", - "set_color_pair", "scp", - "set_foreground", "setf", - "set_background", "setb", - "change_char_pitch", "cpi", - "change_line_pitch", "lpi", - "change_res_horz", "chr", - "change_res_vert", "cvr", - "define_char", "defc", - "enter_doublewide_mode", "swidm", - "enter_draft_quality", "sdrfq", - "enter_italics_mode", "sitm", - "enter_leftward_mode", "slm", - "enter_micro_mode", "smicm", - "enter_near_letter_quality", "snlq", - "enter_normal_quality", "snrmq", - "enter_shadow_mode", "sshm", - "enter_subscript_mode", "ssubm", - "enter_superscript_mode", "ssupm", - "enter_upward_mode", "sum", - "exit_doublewide_mode", "rwidm", - "exit_italics_mode", "ritm", - "exit_leftward_mode", "rlm", - "exit_micro_mode", "rmicm", - "exit_shadow_mode", "rshm", - "exit_subscript_mode", "rsubm", - "exit_superscript_mode", "rsupm", - "exit_upward_mode", "rum", - "micro_column_address", "mhpa", - "micro_down", "mcud1", - "micro_left", "mcub1", - "micro_right", "mcuf1", - "micro_row_address", "mvpa", - "micro_up", "mcuu1", - "order_of_pins", "porder", - "parm_down_micro", "mcud", - "parm_left_micro", "mcub", - "parm_right_micro", "mcuf", - "parm_up_micro", "mcuu", - "select_char_set", "scs", - "set_bottom_margin", "smgb", - "set_bottom_margin_parm", "smgbp", - "set_left_margin_parm", "smglp", - "set_right_margin_parm", "smgrp", - "set_top_margin", "smgt", - "set_top_margin_parm", "smgtp", - "start_bit_image", "sbim", - "start_char_set_def", "scsd", - "stop_bit_image", "rbim", - "stop_char_set_def", "rcsd", - "subscript_characters", "subcs", - "superscript_characters", "supcs", - "these_cause_cr", "docr", - "zero_motion", "zerom", - "char_set_names", "csnm", - "key_mouse", "kmous", - "mouse_info", "minfo", - "req_mouse_pos", "reqmp", - "get_mouse", "getm", - "set_a_foreground", "setaf", - "set_a_background", "setab", - "pkey_plab", "pfxl", - "device_type", "devt", - "code_set_init", "csin", - "set0_des_seq", "s0ds", - "set1_des_seq", "s1ds", - "set2_des_seq", "s2ds", - "set3_des_seq", "s3ds", - "set_lr_margin", "smglr", - "set_tb_margin", "smgtb", - "bit_image_repeat", "birep", - "bit_image_newline", "binel", - "bit_image_carriage_return", "bicr", - "color_names", "colornm", - "define_bit_image_region", "defbi", - "end_bit_image_region", "endbi", - "set_color_band", "setcolor", - "set_page_length", "slines", - "display_pc_char", "dispc", - "enter_pc_charset_mode", "smpch", - "exit_pc_charset_mode", "rmpch", - "enter_scancode_mode", "smsc", - "exit_scancode_mode", "rmsc", - "pc_term_options", "pctrm", - "scancode_escape", "scesc", - "alt_scancode_esc", "scesa", - "enter_horizontal_hl_mode", "ehhlm", - "enter_left_hl_mode", "elhlm", - "enter_low_hl_mode", "elohlm", - "enter_right_hl_mode", "erhlm", - "enter_top_hl_mode", "ethlm", - "enter_vertical_hl_mode", "evhlm", - "set_a_attributes", "sgr1", - "set_pglen_inch", "slength", - "termcap_init2", "", - "termcap_reset", "", - "linefeed_if_not_lf", "", - "backspace_if_not_bs", "", - "other_non_function_keys", "", - "arrow_key_map", "", - 
"acs_ulcorner", "", - "acs_llcorner", "", - "acs_urcorner", "", - "acs_lrcorner", "", - "acs_ltee", "", - "acs_rtee", "", - "acs_btee", "", - "acs_ttee", "", - "acs_hline", "", - "acs_vline", "", - "acs_plus", "", - "memory_lock", "", - "memory_unlock", "", - "box_chars_1", "", -} diff --git a/vendor/github.com/Nvveen/Gotty/gotty.go b/vendor/github.com/Nvveen/Gotty/gotty.go deleted file mode 100644 index 093cbf37e..000000000 --- a/vendor/github.com/Nvveen/Gotty/gotty.go +++ /dev/null @@ -1,238 +0,0 @@ -// Copyright 2012 Neal van Veen. All rights reserved. -// Usage of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Gotty is a Go-package for reading and parsing the terminfo database -package gotty - -// TODO add more concurrency to name lookup, look for more opportunities. - -import ( - "encoding/binary" - "errors" - "fmt" - "os" - "reflect" - "strings" - "sync" -) - -// Open a terminfo file by the name given and construct a TermInfo object. -// If something went wrong reading the terminfo database file, an error is -// returned. -func OpenTermInfo(termName string) (*TermInfo, error) { - var term *TermInfo - var err error - // Find the environment variables - termloc := os.Getenv("TERMINFO") - if len(termloc) == 0 { - // Search like ncurses - locations := []string{os.Getenv("HOME") + "/.terminfo/", "/etc/terminfo/", - "/lib/terminfo/", "/usr/share/terminfo/"} - var path string - for _, str := range locations { - // Construct path - path = str + string(termName[0]) + "/" + termName - // Check if path can be opened - file, _ := os.Open(path) - if file != nil { - // Path can open, fall out and use current path - file.Close() - break - } - } - if len(path) > 0 { - term, err = readTermInfo(path) - } else { - err = errors.New(fmt.Sprintf("No terminfo file(-location) found")) - } - } - return term, err -} - -// Open a terminfo file from the environment variable containing the current -// terminal name and construct a TermInfo object. If something went wrong -// reading the terminfo database file, an error is returned. -func OpenTermInfoEnv() (*TermInfo, error) { - termenv := os.Getenv("TERM") - return OpenTermInfo(termenv) -} - -// Return an attribute by the name attr provided. If none can be found, -// an error is returned. -func (term *TermInfo) GetAttribute(attr string) (stacker, error) { - // Channel to store the main value in. - var value stacker - // Add a blocking WaitGroup - var block sync.WaitGroup - // Keep track of variable being written. - written := false - // Function to put into goroutine. - f := func(ats interface{}) { - var ok bool - var v stacker - // Switch on type of map to use and assign value to it. - switch reflect.TypeOf(ats).Elem().Kind() { - case reflect.Bool: - v, ok = ats.(map[string]bool)[attr] - case reflect.Int16: - v, ok = ats.(map[string]int16)[attr] - case reflect.String: - v, ok = ats.(map[string]string)[attr] - } - // If ok, a value is found, so we can write. - if ok { - value = v - written = true - } - // Goroutine is done - block.Done() - } - block.Add(3) - // Go for all 3 attribute lists. - go f(term.boolAttributes) - go f(term.numAttributes) - go f(term.strAttributes) - // Wait until every goroutine is done. - block.Wait() - // If a value has been written, return it. - if written { - return value, nil - } - // Otherwise, error. - return nil, fmt.Errorf("Erorr finding attribute") -} - -// Return an attribute by the name attr provided. If none can be found, -// an error is returned. 
A name is first converted to its termcap value. -func (term *TermInfo) GetAttributeName(name string) (stacker, error) { - tc := GetTermcapName(name) - return term.GetAttribute(tc) -} - -// A utility function that finds and returns the termcap equivalent of a -// variable name. -func GetTermcapName(name string) string { - // Termcap name - var tc string - // Blocking group - var wait sync.WaitGroup - // Function to put into a goroutine - f := func(attrs []string) { - // Find the string corresponding to the name - for i, s := range attrs { - if s == name { - tc = attrs[i+1] - } - } - // Goroutine is finished - wait.Done() - } - wait.Add(3) - // Go for all 3 attribute lists - go f(BoolAttr[:]) - go f(NumAttr[:]) - go f(StrAttr[:]) - // Wait until every goroutine is done - wait.Wait() - // Return the termcap name - return tc -} - -// This function takes a path to a terminfo file and reads it in binary -// form to construct the actual TermInfo file. -func readTermInfo(path string) (*TermInfo, error) { - // Open the terminfo file - file, err := os.Open(path) - defer file.Close() - if err != nil { - return nil, err - } - - // magic, nameSize, boolSize, nrSNum, nrOffsetsStr, strSize - // Header is composed of the magic 0432 octal number, size of the name - // section, size of the boolean section, the amount of number values, - // the number of offsets of strings, and the size of the string section. - var header [6]int16 - // Byte array is used to read in byte values - var byteArray []byte - // Short array is used to read in short values - var shArray []int16 - // TermInfo object to store values - var term TermInfo - - // Read in the header - err = binary.Read(file, binary.LittleEndian, &header) - if err != nil { - return nil, err - } - // If magic number isn't there or isn't correct, we have the wrong filetype - if header[0] != 0432 { - return nil, errors.New(fmt.Sprintf("Wrong filetype")) - } - - // Read in the names - byteArray = make([]byte, header[1]) - err = binary.Read(file, binary.LittleEndian, &byteArray) - if err != nil { - return nil, err - } - term.Names = strings.Split(string(byteArray), "|") - - // Read in the booleans - byteArray = make([]byte, header[2]) - err = binary.Read(file, binary.LittleEndian, &byteArray) - if err != nil { - return nil, err - } - term.boolAttributes = make(map[string]bool) - for i, b := range byteArray { - if b == 1 { - term.boolAttributes[BoolAttr[i*2+1]] = true - } - } - // If the number of bytes read is not even, a byte for alignment is added - if len(byteArray)%2 != 0 { - err = binary.Read(file, binary.LittleEndian, make([]byte, 1)) - if err != nil { - return nil, err - } - } - - // Read in shorts - shArray = make([]int16, header[3]) - err = binary.Read(file, binary.LittleEndian, &shArray) - if err != nil { - return nil, err - } - term.numAttributes = make(map[string]int16) - for i, n := range shArray { - if n != 0377 && n > -1 { - term.numAttributes[NumAttr[i*2+1]] = n - } - } - - // Read the offsets into the short array - shArray = make([]int16, header[4]) - err = binary.Read(file, binary.LittleEndian, &shArray) - if err != nil { - return nil, err - } - // Read the actual strings in the byte array - byteArray = make([]byte, header[5]) - err = binary.Read(file, binary.LittleEndian, &byteArray) - if err != nil { - return nil, err - } - term.strAttributes = make(map[string]string) - // We get an offset, and then iterate until the string is null-terminated - for i, offset := range shArray { - if offset > -1 { - r := offset - for ; byteArray[r] != 0; r++ { - 
} - term.strAttributes[StrAttr[i*2+1]] = string(byteArray[offset:r]) - } - } - return &term, nil -} diff --git a/vendor/github.com/Nvveen/Gotty/parser.go b/vendor/github.com/Nvveen/Gotty/parser.go deleted file mode 100644 index a9d5d23c5..000000000 --- a/vendor/github.com/Nvveen/Gotty/parser.go +++ /dev/null @@ -1,362 +0,0 @@ -// Copyright 2012 Neal van Veen. All rights reserved. -// Usage of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package gotty - -import ( - "bytes" - "errors" - "fmt" - "regexp" - "strconv" - "strings" -) - -var exp = [...]string{ - "%%", - "%c", - "%s", - "%p(\\d)", - "%P([A-z])", - "%g([A-z])", - "%'(.)'", - "%{([0-9]+)}", - "%l", - "%\\+|%-|%\\*|%/|%m", - "%&|%\\||%\\^", - "%=|%>|%<", - "%A|%O", - "%!|%~", - "%i", - "%(:[\\ #\\-\\+]{0,4})?(\\d+\\.\\d+|\\d+)?[doxXs]", - "%\\?(.*?);", -} - -var regex *regexp.Regexp -var staticVar map[byte]stacker - -// Parses the attribute that is received with name attr and parameters params. -func (term *TermInfo) Parse(attr string, params ...interface{}) (string, error) { - // Get the attribute name first. - iface, err := term.GetAttribute(attr) - str, ok := iface.(string) - if err != nil { - return "", err - } - if !ok { - return str, errors.New("Only string capabilities can be parsed.") - } - // Construct the hidden parser struct so we can use a recursive stack based - // parser. - ps := &parser{} - // Dynamic variables only exist in this context. - ps.dynamicVar = make(map[byte]stacker, 26) - ps.parameters = make([]stacker, len(params)) - // Convert the parameters to insert them into the parser struct. - for i, x := range params { - ps.parameters[i] = x - } - // Recursively walk and return. - result, err := ps.walk(str) - return result, err -} - -// Parses the attribute that is received with name attr and parameters params. -// Only works on full name of a capability that is given, which it uses to -// search for the termcap name. -func (term *TermInfo) ParseName(attr string, params ...interface{}) (string, error) { - tc := GetTermcapName(attr) - return term.Parse(tc, params) -} - -// Identify each token in a stack based manner and do the actual parsing. -func (ps *parser) walk(attr string) (string, error) { - // We use a buffer to get the modified string. - var buf bytes.Buffer - // Next, find and identify all tokens by their indices and strings. - tokens := regex.FindAllStringSubmatch(attr, -1) - if len(tokens) == 0 { - return attr, nil - } - indices := regex.FindAllStringIndex(attr, -1) - q := 0 // q counts the matches of one token - // Iterate through the string per character. - for i := 0; i < len(attr); i++ { - // If the current position is an identified token, execute the following - // steps. - if q < len(indices) && i >= indices[q][0] && i < indices[q][1] { - // Switch on token. - switch { - case tokens[q][0][:2] == "%%": - // Literal percentage character. - buf.WriteByte('%') - case tokens[q][0][:2] == "%c": - // Pop a character. - c, err := ps.st.pop() - if err != nil { - return buf.String(), err - } - buf.WriteByte(c.(byte)) - case tokens[q][0][:2] == "%s": - // Pop a string. - str, err := ps.st.pop() - if err != nil { - return buf.String(), err - } - if _, ok := str.(string); !ok { - return buf.String(), errors.New("Stack head is not a string") - } - buf.WriteString(str.(string)) - case tokens[q][0][:2] == "%p": - // Push a parameter on the stack. 
- index, err := strconv.ParseInt(tokens[q][1], 10, 8) - index-- - if err != nil { - return buf.String(), err - } - if int(index) >= len(ps.parameters) { - return buf.String(), errors.New("Parameters index out of bound") - } - ps.st.push(ps.parameters[index]) - case tokens[q][0][:2] == "%P": - // Pop a variable from the stack as a dynamic or static variable. - val, err := ps.st.pop() - if err != nil { - return buf.String(), err - } - index := tokens[q][2] - if len(index) > 1 { - errorStr := fmt.Sprintf("%s is not a valid dynamic variables index", - index) - return buf.String(), errors.New(errorStr) - } - // Specify either dynamic or static. - if index[0] >= 'a' && index[0] <= 'z' { - ps.dynamicVar[index[0]] = val - } else if index[0] >= 'A' && index[0] <= 'Z' { - staticVar[index[0]] = val - } - case tokens[q][0][:2] == "%g": - // Push a variable from the stack as a dynamic or static variable. - index := tokens[q][3] - if len(index) > 1 { - errorStr := fmt.Sprintf("%s is not a valid static variables index", - index) - return buf.String(), errors.New(errorStr) - } - var val stacker - if index[0] >= 'a' && index[0] <= 'z' { - val = ps.dynamicVar[index[0]] - } else if index[0] >= 'A' && index[0] <= 'Z' { - val = staticVar[index[0]] - } - ps.st.push(val) - case tokens[q][0][:2] == "%'": - // Push a character constant. - con := tokens[q][4] - if len(con) > 1 { - errorStr := fmt.Sprintf("%s is not a valid character constant", con) - return buf.String(), errors.New(errorStr) - } - ps.st.push(con[0]) - case tokens[q][0][:2] == "%{": - // Push an integer constant. - con, err := strconv.ParseInt(tokens[q][5], 10, 32) - if err != nil { - return buf.String(), err - } - ps.st.push(con) - case tokens[q][0][:2] == "%l": - // Push the length of the string that is popped from the stack. - popStr, err := ps.st.pop() - if err != nil { - return buf.String(), err - } - if _, ok := popStr.(string); !ok { - errStr := fmt.Sprintf("Stack head is not a string") - return buf.String(), errors.New(errStr) - } - ps.st.push(len(popStr.(string))) - case tokens[q][0][:2] == "%?": - // If-then-else construct. First, the whole string is identified and - // then inside this substring, we can specify which parts to switch on. - ifReg, _ := regexp.Compile("%\\?(.*)%t(.*)%e(.*);|%\\?(.*)%t(.*);") - ifTokens := ifReg.FindStringSubmatch(tokens[q][0]) - var ( - ifStr string - err error - ) - // Parse the if-part to determine if-else. - if len(ifTokens[1]) > 0 { - ifStr, err = ps.walk(ifTokens[1]) - } else { // else - ifStr, err = ps.walk(ifTokens[4]) - } - // Return any errors - if err != nil { - return buf.String(), err - } else if len(ifStr) > 0 { - // Self-defined limitation, not sure if this is correct, but didn't - // seem like it. - return buf.String(), errors.New("If-clause cannot print statements") - } - var thenStr string - // Pop the first value that is set by parsing the if-clause. - choose, err := ps.st.pop() - if err != nil { - return buf.String(), err - } - // Switch to if or else. - if choose.(int) == 0 && len(ifTokens[1]) > 0 { - thenStr, err = ps.walk(ifTokens[3]) - } else if choose.(int) != 0 { - if len(ifTokens[1]) > 0 { - thenStr, err = ps.walk(ifTokens[2]) - } else { - thenStr, err = ps.walk(ifTokens[5]) - } - } - if err != nil { - return buf.String(), err - } - buf.WriteString(thenStr) - case tokens[q][0][len(tokens[q][0])-1] == 'd': // Fallthrough for printing - fallthrough - case tokens[q][0][len(tokens[q][0])-1] == 'o': // digits. 
- fallthrough - case tokens[q][0][len(tokens[q][0])-1] == 'x': - fallthrough - case tokens[q][0][len(tokens[q][0])-1] == 'X': - fallthrough - case tokens[q][0][len(tokens[q][0])-1] == 's': - token := tokens[q][0] - // Remove the : that comes before a flag. - if token[1] == ':' { - token = token[:1] + token[2:] - } - digit, err := ps.st.pop() - if err != nil { - return buf.String(), err - } - // The rest is determined like the normal formatted prints. - digitStr := fmt.Sprintf(token, digit.(int)) - buf.WriteString(digitStr) - case tokens[q][0][:2] == "%i": - // Increment the parameters by one. - if len(ps.parameters) < 2 { - return buf.String(), errors.New("Not enough parameters to increment.") - } - val1, val2 := ps.parameters[0].(int), ps.parameters[1].(int) - val1++ - val2++ - ps.parameters[0], ps.parameters[1] = val1, val2 - default: - // The rest of the tokens is a special case, where two values are - // popped and then operated on by the token that comes after them. - op1, err := ps.st.pop() - if err != nil { - return buf.String(), err - } - op2, err := ps.st.pop() - if err != nil { - return buf.String(), err - } - var result stacker - switch tokens[q][0][:2] { - case "%+": - // Addition - result = op2.(int) + op1.(int) - case "%-": - // Subtraction - result = op2.(int) - op1.(int) - case "%*": - // Multiplication - result = op2.(int) * op1.(int) - case "%/": - // Division - result = op2.(int) / op1.(int) - case "%m": - // Modulo - result = op2.(int) % op1.(int) - case "%&": - // Bitwise AND - result = op2.(int) & op1.(int) - case "%|": - // Bitwise OR - result = op2.(int) | op1.(int) - case "%^": - // Bitwise XOR - result = op2.(int) ^ op1.(int) - case "%=": - // Equals - result = op2 == op1 - case "%>": - // Greater-than - result = op2.(int) > op1.(int) - case "%<": - // Lesser-than - result = op2.(int) < op1.(int) - case "%A": - // Logical AND - result = op2.(bool) && op1.(bool) - case "%O": - // Logical OR - result = op2.(bool) || op1.(bool) - case "%!": - // Logical complement - result = !op1.(bool) - case "%~": - // Bitwise complement - result = ^(op1.(int)) - } - ps.st.push(result) - } - - i = indices[q][1] - 1 - q++ - } else { - // We are not "inside" a token, so just skip until the end or the next - // token, and add all characters to the buffer. - j := i - if q != len(indices) { - for !(j >= indices[q][0] && j < indices[q][1]) { - j++ - } - } else { - j = len(attr) - } - buf.WriteString(string(attr[i:j])) - i = j - } - } - // Return the buffer as a string. - return buf.String(), nil -} - -// Push a stacker-value onto the stack. -func (st *stack) push(s stacker) { - *st = append(*st, s) -} - -// Pop a stacker-value from the stack. -func (st *stack) pop() (stacker, error) { - if len(*st) == 0 { - return nil, errors.New("Stack is empty.") - } - newStack := make(stack, len(*st)-1) - val := (*st)[len(*st)-1] - copy(newStack, (*st)[:len(*st)-1]) - *st = newStack - return val, nil -} - -// Initialize regexes and the static vars (that don't get changed between -// calls. -func init() { - // Initialize the main regex. - expStr := strings.Join(exp[:], "|") - regex, _ = regexp.Compile(expStr) - // Initialize the static variables. - staticVar = make(map[byte]stacker, 26) -} diff --git a/vendor/github.com/Nvveen/Gotty/types.go b/vendor/github.com/Nvveen/Gotty/types.go deleted file mode 100644 index 9bcc65e9b..000000000 --- a/vendor/github.com/Nvveen/Gotty/types.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2012 Neal van Veen. All rights reserved. 
-// Usage of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -package gotty - -type TermInfo struct { - boolAttributes map[string]bool - numAttributes map[string]int16 - strAttributes map[string]string - // The various names of the TermInfo file. - Names []string -} - -type stacker interface { -} -type stack []stacker - -type parser struct { - st stack - parameters []stacker - dynamicVar map[byte]stacker -} diff --git a/vendor/github.com/docker/docker-credential-helpers/client/client.go b/vendor/github.com/docker/docker-credential-helpers/client/client.go deleted file mode 100644 index d0813965b..000000000 --- a/vendor/github.com/docker/docker-credential-helpers/client/client.go +++ /dev/null @@ -1,87 +0,0 @@ -package client - -import ( - "bytes" - "encoding/json" - "fmt" - "strings" - - "github.com/docker/docker-credential-helpers/credentials" -) - -// Store uses an external program to save credentials. -func Store(program ProgramFunc, credentials *credentials.Credentials) error { - cmd := program("store") - - buffer := new(bytes.Buffer) - if err := json.NewEncoder(buffer).Encode(credentials); err != nil { - return err - } - cmd.Input(buffer) - - out, err := cmd.Output() - if err != nil { - t := strings.TrimSpace(string(out)) - return fmt.Errorf("error storing credentials - err: %v, out: `%s`", err, t) - } - - return nil -} - -// Get executes an external program to get the credentials from a native store. -func Get(program ProgramFunc, serverURL string) (*credentials.Credentials, error) { - cmd := program("get") - cmd.Input(strings.NewReader(serverURL)) - - out, err := cmd.Output() - if err != nil { - t := strings.TrimSpace(string(out)) - - if credentials.IsErrCredentialsNotFoundMessage(t) { - return nil, credentials.NewErrCredentialsNotFound() - } - - return nil, fmt.Errorf("error getting credentials - err: %v, out: `%s`", err, t) - } - - resp := &credentials.Credentials{ - ServerURL: serverURL, - } - - if err := json.NewDecoder(bytes.NewReader(out)).Decode(resp); err != nil { - return nil, err - } - - return resp, nil -} - -// Erase executes a program to remove the server credentials from the native store. -func Erase(program ProgramFunc, serverURL string) error { - cmd := program("erase") - cmd.Input(strings.NewReader(serverURL)) - out, err := cmd.Output() - if err != nil { - t := strings.TrimSpace(string(out)) - return fmt.Errorf("error erasing credentials - err: %v, out: `%s`", err, t) - } - - return nil -} - -// List executes a program to list server credentials in the native store. -func List(program ProgramFunc) (map[string]string, error) { - cmd := program("list") - cmd.Input(strings.NewReader("unused")) - out, err := cmd.Output() - if err != nil { - t := strings.TrimSpace(string(out)) - return nil, fmt.Errorf("error listing credentials - err: %v, out: `%s`", err, t) - } - - var resp map[string]string - if err = json.NewDecoder(bytes.NewReader(out)).Decode(&resp); err != nil { - return nil, err - } - - return resp, nil -} diff --git a/vendor/github.com/docker/docker-credential-helpers/client/command.go b/vendor/github.com/docker/docker-credential-helpers/client/command.go deleted file mode 100644 index 8983da694..000000000 --- a/vendor/github.com/docker/docker-credential-helpers/client/command.go +++ /dev/null @@ -1,37 +0,0 @@ -package client - -import ( - "io" - "os/exec" -) - -// Program is an interface to execute external programs. 
-type Program interface { - Output() ([]byte, error) - Input(in io.Reader) -} - -// ProgramFunc is a type of function that initializes programs based on arguments. -type ProgramFunc func(args ...string) Program - -// NewShellProgramFunc creates programs that are executed in a Shell. -func NewShellProgramFunc(name string) ProgramFunc { - return func(args ...string) Program { - return &Shell{cmd: exec.Command(name, args...)} - } -} - -// Shell invokes shell commands to talk with a remote credentials helper. -type Shell struct { - cmd *exec.Cmd -} - -// Output returns responses from the remote credentials helper. -func (s *Shell) Output() ([]byte, error) { - return s.cmd.Output() -} - -// Input sets the input to send to a remote credentials helper. -func (s *Shell) Input(in io.Reader) { - s.cmd.Stdin = in -} diff --git a/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go b/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go deleted file mode 100644 index cfe0cb134..000000000 --- a/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go +++ /dev/null @@ -1,150 +0,0 @@ -package credentials - -import ( - "bufio" - "bytes" - "encoding/json" - "fmt" - "io" - "os" - "strings" -) - -// Credentials holds the information shared between docker and the credentials store. -type Credentials struct { - ServerURL string - Username string - Secret string -} - -// Docker credentials should be labeled as such in credentials stores that allow labelling. -// That label allows to filter out non-Docker credentials too at lookup/search in macOS keychain, -// Windows credentials manager and Linux libsecret. Default value is "Docker Credentials" -var CredsLabel = "Docker Credentials" - -func SetCredsLabel(label string) { - CredsLabel = label -} - -// Serve initializes the credentials helper and parses the action argument. -// This function is designed to be called from a command line interface. -// It uses os.Args[1] as the key for the action. -// It uses os.Stdin as input and os.Stdout as output. -// This function terminates the program with os.Exit(1) if there is an error. -func Serve(helper Helper) { - var err error - if len(os.Args) != 2 { - err = fmt.Errorf("Usage: %s ", os.Args[0]) - } - - if err == nil { - err = HandleCommand(helper, os.Args[1], os.Stdin, os.Stdout) - } - - if err != nil { - fmt.Fprintf(os.Stdout, "%v\n", err) - os.Exit(1) - } -} - -// HandleCommand uses a helper and a key to run a credential action. -func HandleCommand(helper Helper, key string, in io.Reader, out io.Writer) error { - switch key { - case "store": - return Store(helper, in) - case "get": - return Get(helper, in, out) - case "erase": - return Erase(helper, in) - case "list": - return List(helper, out) - } - return fmt.Errorf("Unknown credential action `%s`", key) -} - -// Store uses a helper and an input reader to save credentials. -// The reader must contain the JSON serialization of a Credentials struct. -func Store(helper Helper, reader io.Reader) error { - scanner := bufio.NewScanner(reader) - - buffer := new(bytes.Buffer) - for scanner.Scan() { - buffer.Write(scanner.Bytes()) - } - - if err := scanner.Err(); err != nil && err != io.EOF { - return err - } - - var creds Credentials - if err := json.NewDecoder(buffer).Decode(&creds); err != nil { - return err - } - - return helper.Add(&creds) -} - -// Get retrieves the credentials for a given server url. -// The reader must contain the server URL to search. 
-// The writer is used to write the JSON serialization of the credentials. -func Get(helper Helper, reader io.Reader, writer io.Writer) error { - scanner := bufio.NewScanner(reader) - - buffer := new(bytes.Buffer) - for scanner.Scan() { - buffer.Write(scanner.Bytes()) - } - - if err := scanner.Err(); err != nil && err != io.EOF { - return err - } - - serverURL := strings.TrimSpace(buffer.String()) - - username, secret, err := helper.Get(serverURL) - if err != nil { - return err - } - - resp := Credentials{ - Username: username, - Secret: secret, - } - - buffer.Reset() - if err := json.NewEncoder(buffer).Encode(resp); err != nil { - return err - } - - fmt.Fprint(writer, buffer.String()) - return nil -} - -// Erase removes credentials from the store. -// The reader must contain the server URL to remove. -func Erase(helper Helper, reader io.Reader) error { - scanner := bufio.NewScanner(reader) - - buffer := new(bytes.Buffer) - for scanner.Scan() { - buffer.Write(scanner.Bytes()) - } - - if err := scanner.Err(); err != nil && err != io.EOF { - return err - } - - serverURL := strings.TrimSpace(buffer.String()) - - return helper.Delete(serverURL) -} - -//List returns all the serverURLs of keys in -//the OS store as a list of strings -func List(helper Helper, writer io.Writer) error { - accts, err := helper.List() - if err != nil { - return err - } - return json.NewEncoder(writer).Encode(accts) -} diff --git a/vendor/github.com/docker/docker-credential-helpers/credentials/error.go b/vendor/github.com/docker/docker-credential-helpers/credentials/error.go deleted file mode 100644 index d24bf16f0..000000000 --- a/vendor/github.com/docker/docker-credential-helpers/credentials/error.go +++ /dev/null @@ -1,37 +0,0 @@ -package credentials - -// ErrCredentialsNotFound standarizes the not found error, so every helper returns -// the same message and docker can handle it properly. -const errCredentialsNotFoundMessage = "credentials not found in native keychain" - -// errCredentialsNotFound represents an error -// raised when credentials are not in the store. -type errCredentialsNotFound struct{} - -// Error returns the standard error message -// for when the credentials are not in the store. -func (errCredentialsNotFound) Error() string { - return errCredentialsNotFoundMessage -} - -// NewErrCredentialsNotFound creates a new error -// for when the credentials are not in the store. -func NewErrCredentialsNotFound() error { - return errCredentialsNotFound{} -} - -// IsErrCredentialsNotFound returns true if the error -// was caused by not having a set of credentials in a store. -func IsErrCredentialsNotFound(err error) bool { - _, ok := err.(errCredentialsNotFound) - return ok -} - -// IsErrCredentialsNotFoundMessage returns true if the error -// was caused by not having a set of credentials in a store. -// -// This function helps to check messages returned by an -// external program via its standard output. -func IsErrCredentialsNotFoundMessage(err string) bool { - return err == errCredentialsNotFoundMessage -} diff --git a/vendor/github.com/docker/docker-credential-helpers/credentials/helper.go b/vendor/github.com/docker/docker-credential-helpers/credentials/helper.go deleted file mode 100644 index 135acd254..000000000 --- a/vendor/github.com/docker/docker-credential-helpers/credentials/helper.go +++ /dev/null @@ -1,14 +0,0 @@ -package credentials - -// Helper is the interface a credentials store helper must implement. -type Helper interface { - // Add appends credentials to the store. 
- Add(*Credentials) error - // Delete removes credentials from the store. - Delete(serverURL string) error - // Get retrieves credentials from the store. - // It returns username and secret as strings. - Get(serverURL string) (string, string, error) - // List returns the stored serverURLs and their associated usernames. - List() (map[string]string, error) -} diff --git a/vendor/github.com/docker/docker/cli/command/cli.go b/vendor/github.com/docker/docker/cli/command/cli.go deleted file mode 100644 index 82940169e..000000000 --- a/vendor/github.com/docker/docker/cli/command/cli.go +++ /dev/null @@ -1,310 +0,0 @@ -package command - -import ( - "fmt" - "io" - "net/http" - "os" - "path/filepath" - "runtime" - - "github.com/docker/docker/api" - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/versions" - cliconfig "github.com/docker/docker/cli/config" - "github.com/docker/docker/cli/config/configfile" - "github.com/docker/docker/cli/config/credentials" - cliflags "github.com/docker/docker/cli/flags" - "github.com/docker/docker/client" - "github.com/docker/docker/dockerversion" - dopts "github.com/docker/docker/opts" - "github.com/docker/go-connections/sockets" - "github.com/docker/go-connections/tlsconfig" - "github.com/docker/notary/passphrase" - "github.com/pkg/errors" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -// Streams is an interface which exposes the standard input and output streams -type Streams interface { - In() *InStream - Out() *OutStream - Err() io.Writer -} - -// Cli represents the docker command line client. -type Cli interface { - Client() client.APIClient - Out() *OutStream - Err() io.Writer - In() *InStream - SetIn(in *InStream) - ConfigFile() *configfile.ConfigFile - CredentialsStore(serverAddress string) credentials.Store -} - -// DockerCli is an instance the docker command line client. -// Instances of the client can be returned from NewDockerCli. -type DockerCli struct { - configFile *configfile.ConfigFile - in *InStream - out *OutStream - err io.Writer - keyFile string - client client.APIClient - defaultVersion string - server ServerInfo -} - -// DefaultVersion returns api.defaultVersion or DOCKER_API_VERSION if specified. -func (cli *DockerCli) DefaultVersion() string { - return cli.defaultVersion -} - -// Client returns the APIClient -func (cli *DockerCli) Client() client.APIClient { - return cli.client -} - -// Out returns the writer used for stdout -func (cli *DockerCli) Out() *OutStream { - return cli.out -} - -// Err returns the writer used for stderr -func (cli *DockerCli) Err() io.Writer { - return cli.err -} - -// SetIn sets the reader used for stdin -func (cli *DockerCli) SetIn(in *InStream) { - cli.in = in -} - -// In returns the reader used for stdin -func (cli *DockerCli) In() *InStream { - return cli.in -} - -// ShowHelp shows the command help. -func (cli *DockerCli) ShowHelp(cmd *cobra.Command, args []string) error { - cmd.SetOutput(cli.err) - cmd.HelpFunc()(cmd, args) - return nil -} - -// ConfigFile returns the ConfigFile -func (cli *DockerCli) ConfigFile() *configfile.ConfigFile { - return cli.configFile -} - -// ServerInfo returns the server version details for the host this client is -// connected to -func (cli *DockerCli) ServerInfo() ServerInfo { - return cli.server -} - -// GetAllCredentials returns all of the credentials stored in all of the -// configured credential stores. 
-func (cli *DockerCli) GetAllCredentials() (map[string]types.AuthConfig, error) { - auths := make(map[string]types.AuthConfig) - for registry := range cli.configFile.CredentialHelpers { - helper := cli.CredentialsStore(registry) - newAuths, err := helper.GetAll() - if err != nil { - return nil, err - } - addAll(auths, newAuths) - } - defaultStore := cli.CredentialsStore("") - newAuths, err := defaultStore.GetAll() - if err != nil { - return nil, err - } - addAll(auths, newAuths) - return auths, nil -} - -func addAll(to, from map[string]types.AuthConfig) { - for reg, ac := range from { - to[reg] = ac - } -} - -// CredentialsStore returns a new credentials store based -// on the settings provided in the configuration file. Empty string returns -// the default credential store. -func (cli *DockerCli) CredentialsStore(serverAddress string) credentials.Store { - if helper := getConfiguredCredentialStore(cli.configFile, serverAddress); helper != "" { - return credentials.NewNativeStore(cli.configFile, helper) - } - return credentials.NewFileStore(cli.configFile) -} - -// getConfiguredCredentialStore returns the credential helper configured for the -// given registry, the default credsStore, or the empty string if neither are -// configured. -func getConfiguredCredentialStore(c *configfile.ConfigFile, serverAddress string) string { - if c.CredentialHelpers != nil && serverAddress != "" { - if helper, exists := c.CredentialHelpers[serverAddress]; exists { - return helper - } - } - return c.CredentialsStore -} - -// Initialize the dockerCli runs initialization that must happen after command -// line flags are parsed. -func (cli *DockerCli) Initialize(opts *cliflags.ClientOptions) error { - cli.configFile = LoadDefaultConfigFile(cli.err) - - var err error - cli.client, err = NewAPIClientFromFlags(opts.Common, cli.configFile) - if tlsconfig.IsErrEncryptedKey(err) { - var ( - passwd string - giveup bool - ) - passRetriever := passphrase.PromptRetrieverWithInOut(cli.In(), cli.Out(), nil) - - for attempts := 0; tlsconfig.IsErrEncryptedKey(err); attempts++ { - // some code and comments borrowed from notary/trustmanager/keystore.go - passwd, giveup, err = passRetriever("private", "encrypted TLS private", false, attempts) - // Check if the passphrase retriever got an error or if it is telling us to give up - if giveup || err != nil { - return errors.Wrap(err, "private key is encrypted, but could not get passphrase") - } - - opts.Common.TLSOptions.Passphrase = passwd - cli.client, err = NewAPIClientFromFlags(opts.Common, cli.configFile) - } - } - - if err != nil { - return err - } - - cli.defaultVersion = cli.client.ClientVersion() - - if opts.Common.TrustKey == "" { - cli.keyFile = filepath.Join(cliconfig.Dir(), cliflags.DefaultTrustKeyFile) - } else { - cli.keyFile = opts.Common.TrustKey - } - - if ping, err := cli.client.Ping(context.Background()); err == nil { - cli.server = ServerInfo{ - HasExperimental: ping.Experimental, - OSType: ping.OSType, - } - - // since the new header was added in 1.25, assume server is 1.24 if header is not present. 
- if ping.APIVersion == "" { - ping.APIVersion = "1.24" - } - - // if server version is lower than the current cli, downgrade - if versions.LessThan(ping.APIVersion, cli.client.ClientVersion()) { - cli.client.UpdateClientVersion(ping.APIVersion) - } - } - - return nil -} - -// ServerInfo stores details about the supported features and platform of the -// server -type ServerInfo struct { - HasExperimental bool - OSType string -} - -// NewDockerCli returns a DockerCli instance with IO output and error streams set by in, out and err. -func NewDockerCli(in io.ReadCloser, out, err io.Writer) *DockerCli { - return &DockerCli{in: NewInStream(in), out: NewOutStream(out), err: err} -} - -// LoadDefaultConfigFile attempts to load the default config file and returns -// an initialized ConfigFile struct if none is found. -func LoadDefaultConfigFile(err io.Writer) *configfile.ConfigFile { - configFile, e := cliconfig.Load(cliconfig.Dir()) - if e != nil { - fmt.Fprintf(err, "WARNING: Error loading config file:%v\n", e) - } - if !configFile.ContainsAuth() { - credentials.DetectDefaultStore(configFile) - } - return configFile -} - -// NewAPIClientFromFlags creates a new APIClient from command line flags -func NewAPIClientFromFlags(opts *cliflags.CommonOptions, configFile *configfile.ConfigFile) (client.APIClient, error) { - host, err := getServerHost(opts.Hosts, opts.TLSOptions) - if err != nil { - return &client.Client{}, err - } - - customHeaders := configFile.HTTPHeaders - if customHeaders == nil { - customHeaders = map[string]string{} - } - customHeaders["User-Agent"] = UserAgent() - - verStr := api.DefaultVersion - if tmpStr := os.Getenv("DOCKER_API_VERSION"); tmpStr != "" { - verStr = tmpStr - } - - httpClient, err := newHTTPClient(host, opts.TLSOptions) - if err != nil { - return &client.Client{}, err - } - - return client.NewClient(host, verStr, httpClient, customHeaders) -} - -func getServerHost(hosts []string, tlsOptions *tlsconfig.Options) (host string, err error) { - switch len(hosts) { - case 0: - host = os.Getenv("DOCKER_HOST") - case 1: - host = hosts[0] - default: - return "", errors.New("Please specify only one -H") - } - - host, err = dopts.ParseHost(tlsOptions != nil, host) - return -} - -func newHTTPClient(host string, tlsOptions *tlsconfig.Options) (*http.Client, error) { - if tlsOptions == nil { - // let the api client configure the default transport. 
- return nil, nil - } - opts := *tlsOptions - opts.ExclusiveRootPools = true - config, err := tlsconfig.Client(opts) - if err != nil { - return nil, err - } - tr := &http.Transport{ - TLSClientConfig: config, - } - proto, addr, _, err := client.ParseHost(host) - if err != nil { - return nil, err - } - - sockets.ConfigureTransport(tr, proto, addr) - - return &http.Client{ - Transport: tr, - }, nil -} - -// UserAgent returns the user agent string used for making API requests -func UserAgent() string { - return "Docker-Client/" + dockerversion.Version + " (" + runtime.GOOS + ")" -} diff --git a/vendor/github.com/docker/docker/cli/command/events_utils.go b/vendor/github.com/docker/docker/cli/command/events_utils.go deleted file mode 100644 index e710c9757..000000000 --- a/vendor/github.com/docker/docker/cli/command/events_utils.go +++ /dev/null @@ -1,49 +0,0 @@ -package command - -import ( - "sync" - - "github.com/Sirupsen/logrus" - eventtypes "github.com/docker/docker/api/types/events" -) - -type eventProcessor func(eventtypes.Message, error) error - -// EventHandler is abstract interface for user to customize -// own handle functions of each type of events -type EventHandler interface { - Handle(action string, h func(eventtypes.Message)) - Watch(c <-chan eventtypes.Message) -} - -// InitEventHandler initializes and returns an EventHandler -func InitEventHandler() EventHandler { - return &eventHandler{handlers: make(map[string]func(eventtypes.Message))} -} - -type eventHandler struct { - handlers map[string]func(eventtypes.Message) - mu sync.Mutex -} - -func (w *eventHandler) Handle(action string, h func(eventtypes.Message)) { - w.mu.Lock() - w.handlers[action] = h - w.mu.Unlock() -} - -// Watch ranges over the passed in event chan and processes the events based on the -// handlers created for a given action. -// To stop watching, close the event chan. -func (w *eventHandler) Watch(c <-chan eventtypes.Message) { - for e := range c { - w.mu.Lock() - h, exists := w.handlers[e.Action] - w.mu.Unlock() - if !exists { - continue - } - logrus.Debugf("event handler: received event: %v", e) - go h(e) - } -} diff --git a/vendor/github.com/docker/docker/cli/command/in.go b/vendor/github.com/docker/docker/cli/command/in.go deleted file mode 100644 index 54855c6dc..000000000 --- a/vendor/github.com/docker/docker/cli/command/in.go +++ /dev/null @@ -1,56 +0,0 @@ -package command - -import ( - "errors" - "io" - "os" - "runtime" - - "github.com/docker/docker/pkg/term" -) - -// InStream is an input stream used by the DockerCli to read user input -type InStream struct { - CommonStream - in io.ReadCloser -} - -func (i *InStream) Read(p []byte) (int, error) { - return i.in.Read(p) -} - -// Close implements the Closer interface -func (i *InStream) Close() error { - return i.in.Close() -} - -// SetRawTerminal sets raw mode on the input terminal -func (i *InStream) SetRawTerminal() (err error) { - if os.Getenv("NORAW") != "" || !i.CommonStream.isTerminal { - return nil - } - i.CommonStream.state, err = term.SetRawTerminal(i.CommonStream.fd) - return err -} - -// CheckTty checks if we are trying to attach to a container tty -// from a non-tty client input stream, and if so, returns an error. -func (i *InStream) CheckTty(attachStdin, ttyMode bool) error { - // In order to attach to a container tty, input stream for the client must - // be a tty itself: redirecting or piping the client standard input is - // incompatible with `docker run -t`, `docker exec -t` or `docker attach`. 
- if ttyMode && attachStdin && !i.isTerminal { - eText := "the input device is not a TTY" - if runtime.GOOS == "windows" { - return errors.New(eText + ". If you are using mintty, try prefixing the command with 'winpty'") - } - return errors.New(eText) - } - return nil -} - -// NewInStream returns a new InStream object from a ReadCloser -func NewInStream(in io.ReadCloser) *InStream { - fd, isTerminal := term.GetFdInfo(in) - return &InStream{CommonStream: CommonStream{fd: fd, isTerminal: isTerminal}, in: in} -} diff --git a/vendor/github.com/docker/docker/cli/command/out.go b/vendor/github.com/docker/docker/cli/command/out.go deleted file mode 100644 index 27b44c235..000000000 --- a/vendor/github.com/docker/docker/cli/command/out.go +++ /dev/null @@ -1,50 +0,0 @@ -package command - -import ( - "io" - "os" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/pkg/term" -) - -// OutStream is an output stream used by the DockerCli to write normal program -// output. -type OutStream struct { - CommonStream - out io.Writer -} - -func (o *OutStream) Write(p []byte) (int, error) { - return o.out.Write(p) -} - -// SetRawTerminal sets raw mode on the input terminal -func (o *OutStream) SetRawTerminal() (err error) { - if os.Getenv("NORAW") != "" || !o.CommonStream.isTerminal { - return nil - } - o.CommonStream.state, err = term.SetRawTerminalOutput(o.CommonStream.fd) - return err -} - -// GetTtySize returns the height and width in characters of the tty -func (o *OutStream) GetTtySize() (uint, uint) { - if !o.isTerminal { - return 0, 0 - } - ws, err := term.GetWinsize(o.fd) - if err != nil { - logrus.Debugf("Error getting size: %s", err) - if ws == nil { - return 0, 0 - } - } - return uint(ws.Height), uint(ws.Width) -} - -// NewOutStream returns a new OutStream object from a Writer -func NewOutStream(out io.Writer) *OutStream { - fd, isTerminal := term.GetFdInfo(out) - return &OutStream{CommonStream: CommonStream{fd: fd, isTerminal: isTerminal}, out: out} -} diff --git a/vendor/github.com/docker/docker/cli/command/registry.go b/vendor/github.com/docker/docker/cli/command/registry.go deleted file mode 100644 index 884fa6ec4..000000000 --- a/vendor/github.com/docker/docker/cli/command/registry.go +++ /dev/null @@ -1,187 +0,0 @@ -package command - -import ( - "bufio" - "encoding/base64" - "encoding/json" - "fmt" - "io" - "os" - "runtime" - "strings" - - "golang.org/x/net/context" - - "github.com/docker/distribution/reference" - "github.com/docker/docker/api/types" - registrytypes "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/pkg/term" - "github.com/docker/docker/registry" - "github.com/pkg/errors" -) - -// ElectAuthServer returns the default registry to use (by asking the daemon) -func ElectAuthServer(ctx context.Context, cli Cli) string { - // The daemon `/info` endpoint informs us of the default registry being - // used. This is essential in cross-platforms environment, where for - // example a Linux client might be interacting with a Windows daemon, hence - // the default registry URL might be Windows specific. - serverAddress := registry.IndexServer - if info, err := cli.Client().Info(ctx); err != nil { - fmt.Fprintf(cli.Out(), "Warning: failed to get default registry endpoint from daemon (%v). 
Using system default: %s\n", err, serverAddress) - } else { - serverAddress = info.IndexServerAddress - } - return serverAddress -} - -// EncodeAuthToBase64 serializes the auth configuration as JSON base64 payload -func EncodeAuthToBase64(authConfig types.AuthConfig) (string, error) { - buf, err := json.Marshal(authConfig) - if err != nil { - return "", err - } - return base64.URLEncoding.EncodeToString(buf), nil -} - -// RegistryAuthenticationPrivilegedFunc returns a RequestPrivilegeFunc from the specified registry index info -// for the given command. -func RegistryAuthenticationPrivilegedFunc(cli Cli, index *registrytypes.IndexInfo, cmdName string) types.RequestPrivilegeFunc { - return func() (string, error) { - fmt.Fprintf(cli.Out(), "\nPlease login prior to %s:\n", cmdName) - indexServer := registry.GetAuthConfigKey(index) - isDefaultRegistry := indexServer == ElectAuthServer(context.Background(), cli) - authConfig, err := ConfigureAuth(cli, "", "", indexServer, isDefaultRegistry) - if err != nil { - return "", err - } - return EncodeAuthToBase64(authConfig) - } -} - -// ResolveAuthConfig is like registry.ResolveAuthConfig, but if using the -// default index, it uses the default index name for the daemon's platform, -// not the client's platform. -func ResolveAuthConfig(ctx context.Context, cli Cli, index *registrytypes.IndexInfo) types.AuthConfig { - configKey := index.Name - if index.Official { - configKey = ElectAuthServer(ctx, cli) - } - - a, _ := cli.CredentialsStore(configKey).Get(configKey) - return a -} - -// ConfigureAuth returns an AuthConfig from the specified user, password and server. -func ConfigureAuth(cli Cli, flUser, flPassword, serverAddress string, isDefaultRegistry bool) (types.AuthConfig, error) { - // On Windows, force the use of the regular OS stdin stream. Fixes #14336/#14210 - if runtime.GOOS == "windows" { - cli.SetIn(NewInStream(os.Stdin)) - } - - if !isDefaultRegistry { - serverAddress = registry.ConvertToHostname(serverAddress) - } - - authconfig, err := cli.CredentialsStore(serverAddress).Get(serverAddress) - if err != nil { - return authconfig, err - } - - // Some links documenting this: - // - https://code.google.com/archive/p/mintty/issues/56 - // - https://github.com/docker/docker/issues/15272 - // - https://mintty.github.io/ (compatibility) - // Linux will hit this if you attempt `cat | docker login`, and Windows - // will hit this if you attempt docker login from mintty where stdin - // is a pipe, not a character based console. - if flPassword == "" && !cli.In().IsTerminal() { - return authconfig, errors.Errorf("Error: Cannot perform an interactive login from a non TTY device") - } - - authconfig.Username = strings.TrimSpace(authconfig.Username) - - if flUser = strings.TrimSpace(flUser); flUser == "" { - if isDefaultRegistry { - // if this is a default registry (docker hub), then display the following message. - fmt.Fprintln(cli.Out(), "Login with your Docker ID to push and pull images from Docker Hub. 
If you don't have a Docker ID, head over to https://hub.docker.com to create one.") - } - promptWithDefault(cli.Out(), "Username", authconfig.Username) - flUser = readInput(cli.In(), cli.Out()) - flUser = strings.TrimSpace(flUser) - if flUser == "" { - flUser = authconfig.Username - } - } - if flUser == "" { - return authconfig, errors.Errorf("Error: Non-null Username Required") - } - if flPassword == "" { - oldState, err := term.SaveState(cli.In().FD()) - if err != nil { - return authconfig, err - } - fmt.Fprintf(cli.Out(), "Password: ") - term.DisableEcho(cli.In().FD(), oldState) - - flPassword = readInput(cli.In(), cli.Out()) - fmt.Fprint(cli.Out(), "\n") - - term.RestoreTerminal(cli.In().FD(), oldState) - if flPassword == "" { - return authconfig, errors.Errorf("Error: Password Required") - } - } - - authconfig.Username = flUser - authconfig.Password = flPassword - authconfig.ServerAddress = serverAddress - authconfig.IdentityToken = "" - - return authconfig, nil -} - -func readInput(in io.Reader, out io.Writer) string { - reader := bufio.NewReader(in) - line, _, err := reader.ReadLine() - if err != nil { - fmt.Fprintln(out, err.Error()) - os.Exit(1) - } - return string(line) -} - -func promptWithDefault(out io.Writer, prompt string, configDefault string) { - if configDefault == "" { - fmt.Fprintf(out, "%s: ", prompt) - } else { - fmt.Fprintf(out, "%s (%s): ", prompt, configDefault) - } -} - -// RetrieveAuthTokenFromImage retrieves an encoded auth token given a complete image -func RetrieveAuthTokenFromImage(ctx context.Context, cli Cli, image string) (string, error) { - // Retrieve encoded auth token from the image reference - authConfig, err := resolveAuthConfigFromImage(ctx, cli, image) - if err != nil { - return "", err - } - encodedAuth, err := EncodeAuthToBase64(authConfig) - if err != nil { - return "", err - } - return encodedAuth, nil -} - -// resolveAuthConfigFromImage retrieves that AuthConfig using the image string -func resolveAuthConfigFromImage(ctx context.Context, cli Cli, image string) (types.AuthConfig, error) { - registryRef, err := reference.ParseNormalizedNamed(image) - if err != nil { - return types.AuthConfig{}, err - } - repoInfo, err := registry.ParseRepositoryInfo(registryRef) - if err != nil { - return types.AuthConfig{}, err - } - return ResolveAuthConfig(ctx, cli, repoInfo.Index), nil -} diff --git a/vendor/github.com/docker/docker/cli/command/stream.go b/vendor/github.com/docker/docker/cli/command/stream.go deleted file mode 100644 index 71a43fa2e..000000000 --- a/vendor/github.com/docker/docker/cli/command/stream.go +++ /dev/null @@ -1,34 +0,0 @@ -package command - -import ( - "github.com/docker/docker/pkg/term" -) - -// CommonStream is an input stream used by the DockerCli to read user input -type CommonStream struct { - fd uintptr - isTerminal bool - state *term.State -} - -// FD returns the file descriptor number for this stream -func (s *CommonStream) FD() uintptr { - return s.fd -} - -// IsTerminal returns true if this stream is connected to a terminal -func (s *CommonStream) IsTerminal() bool { - return s.isTerminal -} - -// RestoreTerminal restores normal mode to the terminal -func (s *CommonStream) RestoreTerminal() { - if s.state != nil { - term.RestoreTerminal(s.fd, s.state) - } -} - -// SetIsTerminal sets the boolean used for isTerminal -func (s *CommonStream) SetIsTerminal(isTerminal bool) { - s.isTerminal = isTerminal -} diff --git a/vendor/github.com/docker/docker/cli/command/trust.go b/vendor/github.com/docker/docker/cli/command/trust.go 
deleted file mode 100644 index c0742bc5b..000000000 --- a/vendor/github.com/docker/docker/cli/command/trust.go +++ /dev/null @@ -1,43 +0,0 @@ -package command - -import ( - "os" - "strconv" - - "github.com/spf13/pflag" -) - -var ( - // TODO: make this not global - untrusted bool -) - -// AddTrustVerificationFlags adds content trust flags to the provided flagset -func AddTrustVerificationFlags(fs *pflag.FlagSet) { - trusted := getDefaultTrustState() - fs.BoolVar(&untrusted, "disable-content-trust", !trusted, "Skip image verification") -} - -// AddTrustSigningFlags adds "signing" flags to the provided flagset -func AddTrustSigningFlags(fs *pflag.FlagSet) { - trusted := getDefaultTrustState() - fs.BoolVar(&untrusted, "disable-content-trust", !trusted, "Skip image signing") -} - -// getDefaultTrustState returns true if content trust is enabled through the $DOCKER_CONTENT_TRUST environment variable. -func getDefaultTrustState() bool { - var trusted bool - if e := os.Getenv("DOCKER_CONTENT_TRUST"); e != "" { - if t, err := strconv.ParseBool(e); t || err != nil { - // treat any other value as true - trusted = true - } - } - return trusted -} - -// IsTrusted returns true if content trust is enabled, either through the $DOCKER_CONTENT_TRUST environment variable, -// or through `--disabled-content-trust=false` on a command. -func IsTrusted() bool { - return !untrusted -} diff --git a/vendor/github.com/docker/docker/cli/command/utils.go b/vendor/github.com/docker/docker/cli/command/utils.go deleted file mode 100644 index 853fe11c7..000000000 --- a/vendor/github.com/docker/docker/cli/command/utils.go +++ /dev/null @@ -1,119 +0,0 @@ -package command - -import ( - "bufio" - "fmt" - "io" - "os" - "path/filepath" - "runtime" - "strings" - - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/pkg/system" -) - -// CopyToFile writes the content of the reader to the specified file -func CopyToFile(outfile string, r io.Reader) error { - // We use sequential file access here to avoid depleting the standby list - // on Windows. On Linux, this is a call directly to ioutil.TempFile - tmpFile, err := system.TempFileSequential(filepath.Dir(outfile), ".docker_temp_") - if err != nil { - return err - } - - tmpPath := tmpFile.Name() - - _, err = io.Copy(tmpFile, r) - tmpFile.Close() - - if err != nil { - os.Remove(tmpPath) - return err - } - - if err = os.Rename(tmpPath, outfile); err != nil { - os.Remove(tmpPath) - return err - } - - return nil -} - -// capitalizeFirst capitalizes the first character of string -func capitalizeFirst(s string) string { - switch l := len(s); l { - case 0: - return s - case 1: - return strings.ToLower(s) - default: - return strings.ToUpper(string(s[0])) + strings.ToLower(s[1:]) - } -} - -// PrettyPrint outputs arbitrary data for human formatted output by uppercasing the first letter. -func PrettyPrint(i interface{}) string { - switch t := i.(type) { - case nil: - return "None" - case string: - return capitalizeFirst(t) - default: - return capitalizeFirst(fmt.Sprintf("%s", t)) - } -} - -// PromptForConfirmation requests and checks confirmation from user. -// This will display the provided message followed by ' [y/N] '. If -// the user input 'y' or 'Y' it returns true other false. If no -// message is provided "Are you sure you want to proceed? [y/N] " -// will be used instead. -func PromptForConfirmation(ins *InStream, outs *OutStream, message string) bool { - if message == "" { - message = "Are you sure you want to proceed?" 
- } - message += " [y/N] " - - fmt.Fprintf(outs, message) - - // On Windows, force the use of the regular OS stdin stream. - if runtime.GOOS == "windows" { - ins = NewInStream(os.Stdin) - } - - reader := bufio.NewReader(ins) - answer, _, _ := reader.ReadLine() - return strings.ToLower(string(answer)) == "y" -} - -// PruneFilters returns consolidated prune filters obtained from config.json and cli -func PruneFilters(dockerCli Cli, pruneFilters filters.Args) filters.Args { - if dockerCli.ConfigFile() == nil { - return pruneFilters - } - for _, f := range dockerCli.ConfigFile().PruneFilters { - parts := strings.SplitN(f, "=", 2) - if len(parts) != 2 { - continue - } - if parts[0] == "label" { - // CLI label filter supersede config.json. - // If CLI label filter conflict with config.json, - // skip adding label! filter in config.json. - if pruneFilters.Include("label!") && pruneFilters.ExactMatch("label!", parts[1]) { - continue - } - } else if parts[0] == "label!" { - // CLI label! filter supersede config.json. - // If CLI label! filter conflict with config.json, - // skip adding label filter in config.json. - if pruneFilters.Include("label") && pruneFilters.ExactMatch("label", parts[1]) { - continue - } - } - pruneFilters.Add(parts[0], parts[1]) - } - - return pruneFilters -} diff --git a/vendor/github.com/docker/docker/cli/config/config.go b/vendor/github.com/docker/docker/cli/config/config.go deleted file mode 100644 index 9b21a2c90..000000000 --- a/vendor/github.com/docker/docker/cli/config/config.go +++ /dev/null @@ -1,120 +0,0 @@ -package config - -import ( - "io" - "os" - "path/filepath" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/cli/config/configfile" - "github.com/docker/docker/pkg/homedir" - "github.com/pkg/errors" -) - -const ( - // ConfigFileName is the name of config file - ConfigFileName = "config.json" - configFileDir = ".docker" - oldConfigfile = ".dockercfg" -) - -var ( - configDir = os.Getenv("DOCKER_CONFIG") -) - -func init() { - if configDir == "" { - configDir = filepath.Join(homedir.Get(), configFileDir) - } -} - -// Dir returns the directory the configuration file is stored in -func Dir() string { - return configDir -} - -// SetDir sets the directory the configuration file is stored in -func SetDir(dir string) { - configDir = dir -} - -// NewConfigFile initializes an empty configuration file for the given filename 'fn' -func NewConfigFile(fn string) *configfile.ConfigFile { - return &configfile.ConfigFile{ - AuthConfigs: make(map[string]types.AuthConfig), - HTTPHeaders: make(map[string]string), - Filename: fn, - } -} - -// LegacyLoadFromReader is a convenience function that creates a ConfigFile object from -// a non-nested reader -func LegacyLoadFromReader(configData io.Reader) (*configfile.ConfigFile, error) { - configFile := configfile.ConfigFile{ - AuthConfigs: make(map[string]types.AuthConfig), - } - err := configFile.LegacyLoadFromReader(configData) - return &configFile, err -} - -// LoadFromReader is a convenience function that creates a ConfigFile object from -// a reader -func LoadFromReader(configData io.Reader) (*configfile.ConfigFile, error) { - configFile := configfile.ConfigFile{ - AuthConfigs: make(map[string]types.AuthConfig), - } - err := configFile.LoadFromReader(configData) - return &configFile, err -} - -// Load reads the configuration files in the given directory, and sets up -// the auth config information and returns values. 
-// FIXME: use the internal golang config parser -func Load(configDir string) (*configfile.ConfigFile, error) { - if configDir == "" { - configDir = Dir() - } - - configFile := configfile.ConfigFile{ - AuthConfigs: make(map[string]types.AuthConfig), - Filename: filepath.Join(configDir, ConfigFileName), - } - - // Try happy path first - latest config file - if _, err := os.Stat(configFile.Filename); err == nil { - file, err := os.Open(configFile.Filename) - if err != nil { - return &configFile, errors.Errorf("%s - %v", configFile.Filename, err) - } - defer file.Close() - err = configFile.LoadFromReader(file) - if err != nil { - err = errors.Errorf("%s - %v", configFile.Filename, err) - } - return &configFile, err - } else if !os.IsNotExist(err) { - // if file is there but we can't stat it for any reason other - // than it doesn't exist then stop - return &configFile, errors.Errorf("%s - %v", configFile.Filename, err) - } - - // Can't find latest config file so check for the old one - confFile := filepath.Join(homedir.Get(), oldConfigfile) - if _, err := os.Stat(confFile); err != nil { - return &configFile, nil //missing file is not an error - } - file, err := os.Open(confFile) - if err != nil { - return &configFile, errors.Errorf("%s - %v", confFile, err) - } - defer file.Close() - err = configFile.LegacyLoadFromReader(file) - if err != nil { - return &configFile, errors.Errorf("%s - %v", confFile, err) - } - - if configFile.HTTPHeaders == nil { - configFile.HTTPHeaders = map[string]string{} - } - return &configFile, nil -} diff --git a/vendor/github.com/docker/docker/cli/config/configfile/file.go b/vendor/github.com/docker/docker/cli/config/configfile/file.go deleted file mode 100644 index 44bf71f38..000000000 --- a/vendor/github.com/docker/docker/cli/config/configfile/file.go +++ /dev/null @@ -1,189 +0,0 @@ -package configfile - -import ( - "encoding/base64" - "encoding/json" - "io" - "io/ioutil" - "os" - "path/filepath" - "strings" - - "github.com/docker/docker/api/types" - "github.com/pkg/errors" -) - -const ( - // This constant is only used for really old config files when the - // URL wasn't saved as part of the config file and it was just - // assumed to be this value. 
- defaultIndexserver = "https://index.docker.io/v1/" -) - -// ConfigFile ~/.docker/config.json file info -type ConfigFile struct { - AuthConfigs map[string]types.AuthConfig `json:"auths"` - HTTPHeaders map[string]string `json:"HttpHeaders,omitempty"` - PsFormat string `json:"psFormat,omitempty"` - ImagesFormat string `json:"imagesFormat,omitempty"` - NetworksFormat string `json:"networksFormat,omitempty"` - PluginsFormat string `json:"pluginsFormat,omitempty"` - VolumesFormat string `json:"volumesFormat,omitempty"` - StatsFormat string `json:"statsFormat,omitempty"` - DetachKeys string `json:"detachKeys,omitempty"` - CredentialsStore string `json:"credsStore,omitempty"` - CredentialHelpers map[string]string `json:"credHelpers,omitempty"` - Filename string `json:"-"` // Note: for internal use only - ServiceInspectFormat string `json:"serviceInspectFormat,omitempty"` - ServicesFormat string `json:"servicesFormat,omitempty"` - TasksFormat string `json:"tasksFormat,omitempty"` - SecretFormat string `json:"secretFormat,omitempty"` - NodesFormat string `json:"nodesFormat,omitempty"` - PruneFilters []string `json:"pruneFilters,omitempty"` -} - -// LegacyLoadFromReader reads the non-nested configuration data given and sets up the -// auth config information with given directory and populates the receiver object -func (configFile *ConfigFile) LegacyLoadFromReader(configData io.Reader) error { - b, err := ioutil.ReadAll(configData) - if err != nil { - return err - } - - if err := json.Unmarshal(b, &configFile.AuthConfigs); err != nil { - arr := strings.Split(string(b), "\n") - if len(arr) < 2 { - return errors.Errorf("The Auth config file is empty") - } - authConfig := types.AuthConfig{} - origAuth := strings.Split(arr[0], " = ") - if len(origAuth) != 2 { - return errors.Errorf("Invalid Auth config file") - } - authConfig.Username, authConfig.Password, err = decodeAuth(origAuth[1]) - if err != nil { - return err - } - authConfig.ServerAddress = defaultIndexserver - configFile.AuthConfigs[defaultIndexserver] = authConfig - } else { - for k, authConfig := range configFile.AuthConfigs { - authConfig.Username, authConfig.Password, err = decodeAuth(authConfig.Auth) - if err != nil { - return err - } - authConfig.Auth = "" - authConfig.ServerAddress = k - configFile.AuthConfigs[k] = authConfig - } - } - return nil -} - -// LoadFromReader reads the configuration data given and sets up the auth config -// information with given directory and populates the receiver object -func (configFile *ConfigFile) LoadFromReader(configData io.Reader) error { - if err := json.NewDecoder(configData).Decode(&configFile); err != nil { - return err - } - var err error - for addr, ac := range configFile.AuthConfigs { - ac.Username, ac.Password, err = decodeAuth(ac.Auth) - if err != nil { - return err - } - ac.Auth = "" - ac.ServerAddress = addr - configFile.AuthConfigs[addr] = ac - } - return nil -} - -// ContainsAuth returns whether there is authentication configured -// in this file or not. 
-func (configFile *ConfigFile) ContainsAuth() bool { - return configFile.CredentialsStore != "" || - len(configFile.CredentialHelpers) > 0 || - len(configFile.AuthConfigs) > 0 -} - -// SaveToWriter encodes and writes out all the authorization information to -// the given writer -func (configFile *ConfigFile) SaveToWriter(writer io.Writer) error { - // Encode sensitive data into a new/temp struct - tmpAuthConfigs := make(map[string]types.AuthConfig, len(configFile.AuthConfigs)) - for k, authConfig := range configFile.AuthConfigs { - authCopy := authConfig - // encode and save the authstring, while blanking out the original fields - authCopy.Auth = encodeAuth(&authCopy) - authCopy.Username = "" - authCopy.Password = "" - authCopy.ServerAddress = "" - tmpAuthConfigs[k] = authCopy - } - - saveAuthConfigs := configFile.AuthConfigs - configFile.AuthConfigs = tmpAuthConfigs - defer func() { configFile.AuthConfigs = saveAuthConfigs }() - - data, err := json.MarshalIndent(configFile, "", "\t") - if err != nil { - return err - } - _, err = writer.Write(data) - return err -} - -// Save encodes and writes out all the authorization information -func (configFile *ConfigFile) Save() error { - if configFile.Filename == "" { - return errors.Errorf("Can't save config with empty filename") - } - - if err := os.MkdirAll(filepath.Dir(configFile.Filename), 0700); err != nil { - return err - } - f, err := os.OpenFile(configFile.Filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) - if err != nil { - return err - } - defer f.Close() - return configFile.SaveToWriter(f) -} - -// encodeAuth creates a base64 encoded string to containing authorization information -func encodeAuth(authConfig *types.AuthConfig) string { - if authConfig.Username == "" && authConfig.Password == "" { - return "" - } - - authStr := authConfig.Username + ":" + authConfig.Password - msg := []byte(authStr) - encoded := make([]byte, base64.StdEncoding.EncodedLen(len(msg))) - base64.StdEncoding.Encode(encoded, msg) - return string(encoded) -} - -// decodeAuth decodes a base64 encoded string and returns username and password -func decodeAuth(authStr string) (string, string, error) { - if authStr == "" { - return "", "", nil - } - - decLen := base64.StdEncoding.DecodedLen(len(authStr)) - decoded := make([]byte, decLen) - authByte := []byte(authStr) - n, err := base64.StdEncoding.Decode(decoded, authByte) - if err != nil { - return "", "", err - } - if n > decLen { - return "", "", errors.Errorf("Something went wrong decoding auth config") - } - arr := strings.SplitN(string(decoded), ":", 2) - if len(arr) != 2 { - return "", "", errors.Errorf("Invalid auth configuration file") - } - password := strings.Trim(arr[1], "\x00") - return arr[0], password, nil -} diff --git a/vendor/github.com/docker/docker/cli/config/credentials/credentials.go b/vendor/github.com/docker/docker/cli/config/credentials/credentials.go deleted file mode 100644 index ca874cac5..000000000 --- a/vendor/github.com/docker/docker/cli/config/credentials/credentials.go +++ /dev/null @@ -1,17 +0,0 @@ -package credentials - -import ( - "github.com/docker/docker/api/types" -) - -// Store is the interface that any credentials store must implement. -type Store interface { - // Erase removes credentials from the store for a given server. - Erase(serverAddress string) error - // Get retrieves credentials from the store for a given server. - Get(serverAddress string) (types.AuthConfig, error) - // GetAll retrieves all the credentials from the store. 
- GetAll() (map[string]types.AuthConfig, error) - // Store saves credentials in the store. - Store(authConfig types.AuthConfig) error -} diff --git a/vendor/github.com/docker/docker/cli/config/credentials/default_store.go b/vendor/github.com/docker/docker/cli/config/credentials/default_store.go deleted file mode 100644 index 263a4ea87..000000000 --- a/vendor/github.com/docker/docker/cli/config/credentials/default_store.go +++ /dev/null @@ -1,22 +0,0 @@ -package credentials - -import ( - "os/exec" - - "github.com/docker/docker/cli/config/configfile" -) - -// DetectDefaultStore sets the default credentials store -// if the host includes the default store helper program. -func DetectDefaultStore(c *configfile.ConfigFile) { - if c.CredentialsStore != "" { - // user defined - return - } - - if defaultCredentialsStore != "" { - if _, err := exec.LookPath(remoteCredentialsPrefix + defaultCredentialsStore); err == nil { - c.CredentialsStore = defaultCredentialsStore - } - } -} diff --git a/vendor/github.com/docker/docker/cli/config/credentials/default_store_darwin.go b/vendor/github.com/docker/docker/cli/config/credentials/default_store_darwin.go deleted file mode 100644 index 63e8ed401..000000000 --- a/vendor/github.com/docker/docker/cli/config/credentials/default_store_darwin.go +++ /dev/null @@ -1,3 +0,0 @@ -package credentials - -const defaultCredentialsStore = "osxkeychain" diff --git a/vendor/github.com/docker/docker/cli/config/credentials/default_store_linux.go b/vendor/github.com/docker/docker/cli/config/credentials/default_store_linux.go deleted file mode 100644 index 864c540f6..000000000 --- a/vendor/github.com/docker/docker/cli/config/credentials/default_store_linux.go +++ /dev/null @@ -1,3 +0,0 @@ -package credentials - -const defaultCredentialsStore = "secretservice" diff --git a/vendor/github.com/docker/docker/cli/config/credentials/default_store_unsupported.go b/vendor/github.com/docker/docker/cli/config/credentials/default_store_unsupported.go deleted file mode 100644 index 519ef53dc..000000000 --- a/vendor/github.com/docker/docker/cli/config/credentials/default_store_unsupported.go +++ /dev/null @@ -1,5 +0,0 @@ -// +build !windows,!darwin,!linux - -package credentials - -const defaultCredentialsStore = "" diff --git a/vendor/github.com/docker/docker/cli/config/credentials/default_store_windows.go b/vendor/github.com/docker/docker/cli/config/credentials/default_store_windows.go deleted file mode 100644 index fb6a9745c..000000000 --- a/vendor/github.com/docker/docker/cli/config/credentials/default_store_windows.go +++ /dev/null @@ -1,3 +0,0 @@ -package credentials - -const defaultCredentialsStore = "wincred" diff --git a/vendor/github.com/docker/docker/cli/config/credentials/file_store.go b/vendor/github.com/docker/docker/cli/config/credentials/file_store.go deleted file mode 100644 index 3cab4a448..000000000 --- a/vendor/github.com/docker/docker/cli/config/credentials/file_store.go +++ /dev/null @@ -1,53 +0,0 @@ -package credentials - -import ( - "github.com/docker/docker/api/types" - "github.com/docker/docker/cli/config/configfile" - "github.com/docker/docker/registry" -) - -// fileStore implements a credentials store using -// the docker configuration file to keep the credentials in plain text. -type fileStore struct { - file *configfile.ConfigFile -} - -// NewFileStore creates a new file credentials store. -func NewFileStore(file *configfile.ConfigFile) Store { - return &fileStore{ - file: file, - } -} - -// Erase removes the given credentials from the file store. 
-func (c *fileStore) Erase(serverAddress string) error { - delete(c.file.AuthConfigs, serverAddress) - return c.file.Save() -} - -// Get retrieves credentials for a specific server from the file store. -func (c *fileStore) Get(serverAddress string) (types.AuthConfig, error) { - authConfig, ok := c.file.AuthConfigs[serverAddress] - if !ok { - // Maybe they have a legacy config file, we will iterate the keys converting - // them to the new format and testing - for r, ac := range c.file.AuthConfigs { - if serverAddress == registry.ConvertToHostname(r) { - return ac, nil - } - } - - authConfig = types.AuthConfig{} - } - return authConfig, nil -} - -func (c *fileStore) GetAll() (map[string]types.AuthConfig, error) { - return c.file.AuthConfigs, nil -} - -// Store saves the given credentials in the file store. -func (c *fileStore) Store(authConfig types.AuthConfig) error { - c.file.AuthConfigs[authConfig.ServerAddress] = authConfig - return c.file.Save() -} diff --git a/vendor/github.com/docker/docker/cli/config/credentials/native_store.go b/vendor/github.com/docker/docker/cli/config/credentials/native_store.go deleted file mode 100644 index 68a87e8c6..000000000 --- a/vendor/github.com/docker/docker/cli/config/credentials/native_store.go +++ /dev/null @@ -1,144 +0,0 @@ -package credentials - -import ( - "github.com/docker/docker-credential-helpers/client" - "github.com/docker/docker-credential-helpers/credentials" - "github.com/docker/docker/api/types" - "github.com/docker/docker/cli/config/configfile" -) - -const ( - remoteCredentialsPrefix = "docker-credential-" - tokenUsername = "" -) - -// nativeStore implements a credentials store -// using native keychain to keep credentials secure. -// It piggybacks into a file store to keep users' emails. -type nativeStore struct { - programFunc client.ProgramFunc - fileStore Store -} - -// NewNativeStore creates a new native store that -// uses a remote helper program to manage credentials. -func NewNativeStore(file *configfile.ConfigFile, helperSuffix string) Store { - name := remoteCredentialsPrefix + helperSuffix - return &nativeStore{ - programFunc: client.NewShellProgramFunc(name), - fileStore: NewFileStore(file), - } -} - -// Erase removes the given credentials from the native store. -func (c *nativeStore) Erase(serverAddress string) error { - if err := client.Erase(c.programFunc, serverAddress); err != nil { - return err - } - - // Fallback to plain text store to remove email - return c.fileStore.Erase(serverAddress) -} - -// Get retrieves credentials for a specific server from the native store. -func (c *nativeStore) Get(serverAddress string) (types.AuthConfig, error) { - // load user email if it exist or an empty auth config. - auth, _ := c.fileStore.Get(serverAddress) - - creds, err := c.getCredentialsFromStore(serverAddress) - if err != nil { - return auth, err - } - auth.Username = creds.Username - auth.IdentityToken = creds.IdentityToken - auth.Password = creds.Password - - return auth, nil -} - -// GetAll retrieves all the credentials from the native store. -func (c *nativeStore) GetAll() (map[string]types.AuthConfig, error) { - auths, err := c.listCredentialsInStore() - if err != nil { - return nil, err - } - - // Emails are only stored in the file store. - // This call can be safely eliminated when emails are removed. 
- fileConfigs, _ := c.fileStore.GetAll() - - authConfigs := make(map[string]types.AuthConfig) - for registry := range auths { - creds, err := c.getCredentialsFromStore(registry) - if err != nil { - return nil, err - } - ac, _ := fileConfigs[registry] // might contain Email - ac.Username = creds.Username - ac.Password = creds.Password - ac.IdentityToken = creds.IdentityToken - authConfigs[registry] = ac - } - - return authConfigs, nil -} - -// Store saves the given credentials in the file store. -func (c *nativeStore) Store(authConfig types.AuthConfig) error { - if err := c.storeCredentialsInStore(authConfig); err != nil { - return err - } - authConfig.Username = "" - authConfig.Password = "" - authConfig.IdentityToken = "" - - // Fallback to old credential in plain text to save only the email - return c.fileStore.Store(authConfig) -} - -// storeCredentialsInStore executes the command to store the credentials in the native store. -func (c *nativeStore) storeCredentialsInStore(config types.AuthConfig) error { - creds := &credentials.Credentials{ - ServerURL: config.ServerAddress, - Username: config.Username, - Secret: config.Password, - } - - if config.IdentityToken != "" { - creds.Username = tokenUsername - creds.Secret = config.IdentityToken - } - - return client.Store(c.programFunc, creds) -} - -// getCredentialsFromStore executes the command to get the credentials from the native store. -func (c *nativeStore) getCredentialsFromStore(serverAddress string) (types.AuthConfig, error) { - var ret types.AuthConfig - - creds, err := client.Get(c.programFunc, serverAddress) - if err != nil { - if credentials.IsErrCredentialsNotFound(err) { - // do not return an error if the credentials are not - // in the keychain. Let docker ask for new credentials. - return ret, nil - } - return ret, err - } - - if creds.Username == tokenUsername { - ret.IdentityToken = creds.Secret - } else { - ret.Password = creds.Secret - ret.Username = creds.Username - } - - ret.ServerAddress = serverAddress - return ret, nil -} - -// listCredentialsInStore returns a listing of stored credentials as a map of -// URL -> username. 
-func (c *nativeStore) listCredentialsInStore() (map[string]string, error) { - return client.List(c.programFunc) -} diff --git a/vendor/github.com/docker/docker/cli/flags/client.go b/vendor/github.com/docker/docker/cli/flags/client.go deleted file mode 100644 index 9b6940f6b..000000000 --- a/vendor/github.com/docker/docker/cli/flags/client.go +++ /dev/null @@ -1,13 +0,0 @@ -package flags - -// ClientOptions are the options used to configure the client cli -type ClientOptions struct { - Common *CommonOptions - ConfigDir string - Version bool -} - -// NewClientOptions returns a new ClientOptions -func NewClientOptions() *ClientOptions { - return &ClientOptions{Common: NewCommonOptions()} -} diff --git a/vendor/github.com/docker/docker/cli/flags/common.go b/vendor/github.com/docker/docker/cli/flags/common.go deleted file mode 100644 index 3c9d8fa6e..000000000 --- a/vendor/github.com/docker/docker/cli/flags/common.go +++ /dev/null @@ -1,120 +0,0 @@ -package flags - -import ( - "fmt" - "os" - "path/filepath" - - "github.com/Sirupsen/logrus" - cliconfig "github.com/docker/docker/cli/config" - "github.com/docker/docker/opts" - "github.com/docker/go-connections/tlsconfig" - "github.com/spf13/pflag" -) - -const ( - // DefaultTrustKeyFile is the default filename for the trust key - DefaultTrustKeyFile = "key.json" - // DefaultCaFile is the default filename for the CA pem file - DefaultCaFile = "ca.pem" - // DefaultKeyFile is the default filename for the key pem file - DefaultKeyFile = "key.pem" - // DefaultCertFile is the default filename for the cert pem file - DefaultCertFile = "cert.pem" - // FlagTLSVerify is the flag name for the TLS verification option - FlagTLSVerify = "tlsverify" -) - -var ( - dockerCertPath = os.Getenv("DOCKER_CERT_PATH") - dockerTLSVerify = os.Getenv("DOCKER_TLS_VERIFY") != "" -) - -// CommonOptions are options common to both the client and the daemon. 
-type CommonOptions struct { - Debug bool - Hosts []string - LogLevel string - TLS bool - TLSVerify bool - TLSOptions *tlsconfig.Options - TrustKey string -} - -// NewCommonOptions returns a new CommonOptions -func NewCommonOptions() *CommonOptions { - return &CommonOptions{} -} - -// InstallFlags adds flags for the common options on the FlagSet -func (commonOpts *CommonOptions) InstallFlags(flags *pflag.FlagSet) { - if dockerCertPath == "" { - dockerCertPath = cliconfig.Dir() - } - - flags.BoolVarP(&commonOpts.Debug, "debug", "D", false, "Enable debug mode") - flags.StringVarP(&commonOpts.LogLevel, "log-level", "l", "info", `Set the logging level ("debug"|"info"|"warn"|"error"|"fatal")`) - flags.BoolVar(&commonOpts.TLS, "tls", false, "Use TLS; implied by --tlsverify") - flags.BoolVar(&commonOpts.TLSVerify, FlagTLSVerify, dockerTLSVerify, "Use TLS and verify the remote") - - // TODO use flag flags.String("identity"}, "i", "", "Path to libtrust key file") - - commonOpts.TLSOptions = &tlsconfig.Options{ - CAFile: filepath.Join(dockerCertPath, DefaultCaFile), - CertFile: filepath.Join(dockerCertPath, DefaultCertFile), - KeyFile: filepath.Join(dockerCertPath, DefaultKeyFile), - } - tlsOptions := commonOpts.TLSOptions - flags.Var(opts.NewQuotedString(&tlsOptions.CAFile), "tlscacert", "Trust certs signed only by this CA") - flags.Var(opts.NewQuotedString(&tlsOptions.CertFile), "tlscert", "Path to TLS certificate file") - flags.Var(opts.NewQuotedString(&tlsOptions.KeyFile), "tlskey", "Path to TLS key file") - - hostOpt := opts.NewNamedListOptsRef("hosts", &commonOpts.Hosts, opts.ValidateHost) - flags.VarP(hostOpt, "host", "H", "Daemon socket(s) to connect to") -} - -// SetDefaultOptions sets default values for options after flag parsing is -// complete -func (commonOpts *CommonOptions) SetDefaultOptions(flags *pflag.FlagSet) { - // Regardless of whether the user sets it to true or false, if they - // specify --tlsverify at all then we need to turn on TLS - // TLSVerify can be true even if not set due to DOCKER_TLS_VERIFY env var, so we need - // to check that here as well - if flags.Changed(FlagTLSVerify) || commonOpts.TLSVerify { - commonOpts.TLS = true - } - - if !commonOpts.TLS { - commonOpts.TLSOptions = nil - } else { - tlsOptions := commonOpts.TLSOptions - tlsOptions.InsecureSkipVerify = !commonOpts.TLSVerify - - // Reset CertFile and KeyFile to empty string if the user did not specify - // the respective flags and the respective default files were not found. 
- if !flags.Changed("tlscert") { - if _, err := os.Stat(tlsOptions.CertFile); os.IsNotExist(err) { - tlsOptions.CertFile = "" - } - } - if !flags.Changed("tlskey") { - if _, err := os.Stat(tlsOptions.KeyFile); os.IsNotExist(err) { - tlsOptions.KeyFile = "" - } - } - } -} - -// SetLogLevel sets the logrus logging level -func SetLogLevel(logLevel string) { - if logLevel != "" { - lvl, err := logrus.ParseLevel(logLevel) - if err != nil { - fmt.Fprintf(os.Stderr, "Unable to parse logging level: %s\n", logLevel) - os.Exit(1) - } - logrus.SetLevel(lvl) - } else { - logrus.SetLevel(logrus.InfoLevel) - } -} diff --git a/vendor/github.com/docker/docker/cli/trust/trust.go b/vendor/github.com/docker/docker/cli/trust/trust.go deleted file mode 100644 index 3c75e485c..000000000 --- a/vendor/github.com/docker/docker/cli/trust/trust.go +++ /dev/null @@ -1,232 +0,0 @@ -package trust - -import ( - "encoding/json" - "net" - "net/http" - "net/url" - "os" - "path" - "path/filepath" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/registry/client/auth" - "github.com/docker/distribution/registry/client/auth/challenge" - "github.com/docker/distribution/registry/client/transport" - "github.com/docker/docker/api/types" - registrytypes "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/cli/command" - cliconfig "github.com/docker/docker/cli/config" - "github.com/docker/docker/registry" - "github.com/docker/go-connections/tlsconfig" - "github.com/docker/notary" - "github.com/docker/notary/client" - "github.com/docker/notary/passphrase" - "github.com/docker/notary/storage" - "github.com/docker/notary/trustmanager" - "github.com/docker/notary/trustpinning" - "github.com/docker/notary/tuf/data" - "github.com/docker/notary/tuf/signed" - "github.com/pkg/errors" -) - -var ( - // ReleasesRole is the role named "releases" - ReleasesRole = path.Join(data.CanonicalTargetsRole, "releases") -) - -func trustDirectory() string { - return filepath.Join(cliconfig.Dir(), "trust") -} - -// certificateDirectory returns the directory containing -// TLS certificates for the given server. An error is -// returned if there was an error parsing the server string. -func certificateDirectory(server string) (string, error) { - u, err := url.Parse(server) - if err != nil { - return "", err - } - - return filepath.Join(cliconfig.Dir(), "tls", u.Host), nil -} - -// Server returns the base URL for the trust server. -func Server(index *registrytypes.IndexInfo) (string, error) { - if s := os.Getenv("DOCKER_CONTENT_TRUST_SERVER"); s != "" { - urlObj, err := url.Parse(s) - if err != nil || urlObj.Scheme != "https" { - return "", errors.Errorf("valid https URL required for trust server, got %s", s) - } - - return s, nil - } - if index.Official { - return registry.NotaryServer, nil - } - return "https://" + index.Name, nil -} - -type simpleCredentialStore struct { - auth types.AuthConfig -} - -func (scs simpleCredentialStore) Basic(u *url.URL) (string, string) { - return scs.auth.Username, scs.auth.Password -} - -func (scs simpleCredentialStore) RefreshToken(u *url.URL, service string) string { - return scs.auth.IdentityToken -} - -func (scs simpleCredentialStore) SetRefreshToken(*url.URL, string, string) { -} - -// GetNotaryRepository returns a NotaryRepository which stores all the -// information needed to operate on a notary repository. -// It creates an HTTP transport providing authentication support. 
-func GetNotaryRepository(streams command.Streams, repoInfo *registry.RepositoryInfo, authConfig types.AuthConfig, actions ...string) (*client.NotaryRepository, error) { - server, err := Server(repoInfo.Index) - if err != nil { - return nil, err - } - - var cfg = tlsconfig.ClientDefault() - cfg.InsecureSkipVerify = !repoInfo.Index.Secure - - // Get certificate base directory - certDir, err := certificateDirectory(server) - if err != nil { - return nil, err - } - logrus.Debugf("reading certificate directory: %s", certDir) - - if err := registry.ReadCertsDirectory(cfg, certDir); err != nil { - return nil, err - } - - base := &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - DualStack: true, - }).Dial, - TLSHandshakeTimeout: 10 * time.Second, - TLSClientConfig: cfg, - DisableKeepAlives: true, - } - - // Skip configuration headers since request is not going to Docker daemon - modifiers := registry.DockerHeaders(command.UserAgent(), http.Header{}) - authTransport := transport.NewTransport(base, modifiers...) - pingClient := &http.Client{ - Transport: authTransport, - Timeout: 5 * time.Second, - } - endpointStr := server + "/v2/" - req, err := http.NewRequest("GET", endpointStr, nil) - if err != nil { - return nil, err - } - - challengeManager := challenge.NewSimpleManager() - - resp, err := pingClient.Do(req) - if err != nil { - // Ignore error on ping to operate in offline mode - logrus.Debugf("Error pinging notary server %q: %s", endpointStr, err) - } else { - defer resp.Body.Close() - - // Add response to the challenge manager to parse out - // authentication header and register authentication method - if err := challengeManager.AddResponse(resp); err != nil { - return nil, err - } - } - - scope := auth.RepositoryScope{ - Repository: repoInfo.Name.Name(), - Actions: actions, - Class: repoInfo.Class, - } - creds := simpleCredentialStore{auth: authConfig} - tokenHandlerOptions := auth.TokenHandlerOptions{ - Transport: authTransport, - Credentials: creds, - Scopes: []auth.Scope{scope}, - ClientID: registry.AuthClientID, - } - tokenHandler := auth.NewTokenHandlerWithOptions(tokenHandlerOptions) - basicHandler := auth.NewBasicHandler(creds) - modifiers = append(modifiers, transport.RequestModifier(auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler))) - tr := transport.NewTransport(base, modifiers...) 
- - return client.NewNotaryRepository( - trustDirectory(), - repoInfo.Name.Name(), - server, - tr, - getPassphraseRetriever(streams), - trustpinning.TrustPinConfig{}) -} - -func getPassphraseRetriever(streams command.Streams) notary.PassRetriever { - aliasMap := map[string]string{ - "root": "root", - "snapshot": "repository", - "targets": "repository", - "default": "repository", - } - baseRetriever := passphrase.PromptRetrieverWithInOut(streams.In(), streams.Out(), aliasMap) - env := map[string]string{ - "root": os.Getenv("DOCKER_CONTENT_TRUST_ROOT_PASSPHRASE"), - "snapshot": os.Getenv("DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE"), - "targets": os.Getenv("DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE"), - "default": os.Getenv("DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE"), - } - - return func(keyName string, alias string, createNew bool, numAttempts int) (string, bool, error) { - if v := env[alias]; v != "" { - return v, numAttempts > 1, nil - } - // For non-root roles, we can also try the "default" alias if it is specified - if v := env["default"]; v != "" && alias != data.CanonicalRootRole { - return v, numAttempts > 1, nil - } - return baseRetriever(keyName, alias, createNew, numAttempts) - } -} - -// NotaryError formats an error message received from the notary service -func NotaryError(repoName string, err error) error { - switch err.(type) { - case *json.SyntaxError: - logrus.Debugf("Notary syntax error: %s", err) - return errors.Errorf("Error: no trust data available for remote repository %s. Try running notary server and setting DOCKER_CONTENT_TRUST_SERVER to its HTTPS address?", repoName) - case signed.ErrExpired: - return errors.Errorf("Error: remote repository %s out-of-date: %v", repoName, err) - case trustmanager.ErrKeyNotFound: - return errors.Errorf("Error: signing keys for remote repository %s not found: %v", repoName, err) - case storage.NetworkError: - return errors.Errorf("Error: error contacting notary server: %v", err) - case storage.ErrMetaNotFound: - return errors.Errorf("Error: trust data missing for remote repository %s or remote repository not found: %v", repoName, err) - case trustpinning.ErrRootRotationFail, trustpinning.ErrValidationFail, signed.ErrInvalidKeyType: - return errors.Errorf("Warning: potential malicious behavior - trust data mismatch for remote repository %s: %v", repoName, err) - case signed.ErrNoKeys: - return errors.Errorf("Error: could not find signing keys for remote repository %s, or could not decrypt signing key: %v", repoName, err) - case signed.ErrLowVersion: - return errors.Errorf("Warning: potential malicious behavior - trust data version is lower than expected for remote repository %s: %v", repoName, err) - case signed.ErrRoleThreshold: - return errors.Errorf("Warning: potential malicious behavior - trust data has insufficient signatures for remote repository %s: %v", repoName, err) - case client.ErrRepositoryNotExist: - return errors.Errorf("Error: remote trust data does not exist for %s: %v", repoName, err) - case signed.ErrInsufficientSignatures: - return errors.Errorf("Error: could not produce valid signature for %s. 
If Yubikey was used, was touch input provided?: %v", repoName, err) - } - - return err -} diff --git a/vendor/github.com/docker/docker/dockerversion/useragent.go b/vendor/github.com/docker/docker/dockerversion/useragent.go deleted file mode 100644 index 53632cbc3..000000000 --- a/vendor/github.com/docker/docker/dockerversion/useragent.go +++ /dev/null @@ -1,76 +0,0 @@ -package dockerversion - -import ( - "fmt" - "runtime" - - "github.com/docker/docker/pkg/parsers/kernel" - "github.com/docker/docker/pkg/useragent" - "golang.org/x/net/context" -) - -// UAStringKey is used as key type for user-agent string in net/context struct -const UAStringKey = "upstream-user-agent" - -// DockerUserAgent is the User-Agent the Docker client uses to identify itself. -// In accordance with RFC 7231 (5.5.3) is of the form: -// [docker client's UA] UpstreamClient([upstream client's UA]) -func DockerUserAgent(ctx context.Context) string { - httpVersion := make([]useragent.VersionInfo, 0, 6) - httpVersion = append(httpVersion, useragent.VersionInfo{Name: "docker", Version: Version}) - httpVersion = append(httpVersion, useragent.VersionInfo{Name: "go", Version: runtime.Version()}) - httpVersion = append(httpVersion, useragent.VersionInfo{Name: "git-commit", Version: GitCommit}) - if kernelVersion, err := kernel.GetKernelVersion(); err == nil { - httpVersion = append(httpVersion, useragent.VersionInfo{Name: "kernel", Version: kernelVersion.String()}) - } - httpVersion = append(httpVersion, useragent.VersionInfo{Name: "os", Version: runtime.GOOS}) - httpVersion = append(httpVersion, useragent.VersionInfo{Name: "arch", Version: runtime.GOARCH}) - - dockerUA := useragent.AppendVersions("", httpVersion...) - upstreamUA := getUserAgentFromContext(ctx) - if len(upstreamUA) > 0 { - ret := insertUpstreamUserAgent(upstreamUA, dockerUA) - return ret - } - return dockerUA -} - -// getUserAgentFromContext returns the previously saved user-agent context stored in ctx, if one exists -func getUserAgentFromContext(ctx context.Context) string { - var upstreamUA string - if ctx != nil { - var ki interface{} = ctx.Value(UAStringKey) - if ki != nil { - upstreamUA = ctx.Value(UAStringKey).(string) - } - } - return upstreamUA -} - -// escapeStr returns s with every rune in charsToEscape escaped by a backslash -func escapeStr(s string, charsToEscape string) string { - var ret string - for _, currRune := range s { - appended := false - for _, escapeableRune := range charsToEscape { - if currRune == escapeableRune { - ret += `\` + string(currRune) - appended = true - break - } - } - if !appended { - ret += string(currRune) - } - } - return ret -} - -// insertUpstreamUserAgent adds the upstream client useragent to create a user-agent -// string of the form: -// $dockerUA UpstreamClient($upstreamUA) -func insertUpstreamUserAgent(upstreamUA string, dockerUA string) string { - charsToEscape := `();\` - upstreamUAEscaped := escapeStr(upstreamUA, charsToEscape) - return fmt.Sprintf("%s UpstreamClient(%s)", dockerUA, upstreamUAEscaped) -} diff --git a/vendor/github.com/docker/docker/dockerversion/version_lib.go b/vendor/github.com/docker/docker/dockerversion/version_lib.go deleted file mode 100644 index 33f77d3ce..000000000 --- a/vendor/github.com/docker/docker/dockerversion/version_lib.go +++ /dev/null @@ -1,16 +0,0 @@ -// +build !autogen - -// Package dockerversion is auto-generated at build-time -package dockerversion - -// Default build-time variable for library-import. -// This file is overridden on build with build-time informations. 
-const ( - GitCommit string = "library-import" - Version string = "library-import" - BuildTime string = "library-import" - IAmStatic string = "library-import" - ContainerdCommitID string = "library-import" - RuncCommitID string = "library-import" - InitCommitID string = "library-import" -) diff --git a/vendor/github.com/docker/docker/opts/env.go b/vendor/github.com/docker/docker/opts/env.go deleted file mode 100644 index e6ddd7330..000000000 --- a/vendor/github.com/docker/docker/opts/env.go +++ /dev/null @@ -1,46 +0,0 @@ -package opts - -import ( - "fmt" - "os" - "runtime" - "strings" -) - -// ValidateEnv validates an environment variable and returns it. -// If no value is specified, it returns the current value using os.Getenv. -// -// As on ParseEnvFile and related to #16585, environment variable names -// are not validate what so ever, it's up to application inside docker -// to validate them or not. -// -// The only validation here is to check if name is empty, per #25099 -func ValidateEnv(val string) (string, error) { - arr := strings.Split(val, "=") - if arr[0] == "" { - return "", fmt.Errorf("invalid environment variable: %s", val) - } - if len(arr) > 1 { - return val, nil - } - if !doesEnvExist(val) { - return val, nil - } - return fmt.Sprintf("%s=%s", val, os.Getenv(val)), nil -} - -func doesEnvExist(name string) bool { - for _, entry := range os.Environ() { - parts := strings.SplitN(entry, "=", 2) - if runtime.GOOS == "windows" { - // Environment variable are case-insensitive on Windows. PaTh, path and PATH are equivalent. - if strings.EqualFold(parts[0], name) { - return true - } - } - if parts[0] == name { - return true - } - } - return false -} diff --git a/vendor/github.com/docker/docker/opts/hosts.go b/vendor/github.com/docker/docker/opts/hosts.go deleted file mode 100644 index 594cccf2f..000000000 --- a/vendor/github.com/docker/docker/opts/hosts.go +++ /dev/null @@ -1,165 +0,0 @@ -package opts - -import ( - "fmt" - "net" - "net/url" - "strconv" - "strings" -) - -var ( - // DefaultHTTPPort Default HTTP Port used if only the protocol is provided to -H flag e.g. dockerd -H tcp:// - // These are the IANA registered port numbers for use with Docker - // see http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=docker - DefaultHTTPPort = 2375 // Default HTTP Port - // DefaultTLSHTTPPort Default HTTP Port used when TLS enabled - DefaultTLSHTTPPort = 2376 // Default TLS encrypted HTTP Port - // DefaultUnixSocket Path for the unix socket. - // Docker daemon by default always listens on the default unix socket - DefaultUnixSocket = "/var/run/docker.sock" - // DefaultTCPHost constant defines the default host string used by docker on Windows - DefaultTCPHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultHTTPPort) - // DefaultTLSHost constant defines the default host string used by docker for TLS sockets - DefaultTLSHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultTLSHTTPPort) - // DefaultNamedPipe defines the default named pipe used by docker on Windows - DefaultNamedPipe = `//./pipe/docker_engine` -) - -// ValidateHost validates that the specified string is a valid host and returns it. 
-func ValidateHost(val string) (string, error) { - host := strings.TrimSpace(val) - // The empty string means default and is not handled by parseDockerDaemonHost - if host != "" { - _, err := parseDockerDaemonHost(host) - if err != nil { - return val, err - } - } - // Note: unlike most flag validators, we don't return the mutated value here - // we need to know what the user entered later (using ParseHost) to adjust for TLS - return val, nil -} - -// ParseHost and set defaults for a Daemon host string -func ParseHost(defaultToTLS bool, val string) (string, error) { - host := strings.TrimSpace(val) - if host == "" { - if defaultToTLS { - host = DefaultTLSHost - } else { - host = DefaultHost - } - } else { - var err error - host, err = parseDockerDaemonHost(host) - if err != nil { - return val, err - } - } - return host, nil -} - -// parseDockerDaemonHost parses the specified address and returns an address that will be used as the host. -// Depending of the address specified, this may return one of the global Default* strings defined in hosts.go. -func parseDockerDaemonHost(addr string) (string, error) { - addrParts := strings.SplitN(addr, "://", 2) - if len(addrParts) == 1 && addrParts[0] != "" { - addrParts = []string{"tcp", addrParts[0]} - } - - switch addrParts[0] { - case "tcp": - return ParseTCPAddr(addrParts[1], DefaultTCPHost) - case "unix": - return parseSimpleProtoAddr("unix", addrParts[1], DefaultUnixSocket) - case "npipe": - return parseSimpleProtoAddr("npipe", addrParts[1], DefaultNamedPipe) - case "fd": - return addr, nil - default: - return "", fmt.Errorf("Invalid bind address format: %s", addr) - } -} - -// parseSimpleProtoAddr parses and validates that the specified address is a valid -// socket address for simple protocols like unix and npipe. It returns a formatted -// socket address, either using the address parsed from addr, or the contents of -// defaultAddr if addr is a blank string. -func parseSimpleProtoAddr(proto, addr, defaultAddr string) (string, error) { - addr = strings.TrimPrefix(addr, proto+"://") - if strings.Contains(addr, "://") { - return "", fmt.Errorf("Invalid proto, expected %s: %s", proto, addr) - } - if addr == "" { - addr = defaultAddr - } - return fmt.Sprintf("%s://%s", proto, addr), nil -} - -// ParseTCPAddr parses and validates that the specified address is a valid TCP -// address. It returns a formatted TCP address, either using the address parsed -// from tryAddr, or the contents of defaultAddr if tryAddr is a blank string. -// tryAddr is expected to have already been Trim()'d -// defaultAddr must be in the full `tcp://host:port` form -func ParseTCPAddr(tryAddr string, defaultAddr string) (string, error) { - if tryAddr == "" || tryAddr == "tcp://" { - return defaultAddr, nil - } - addr := strings.TrimPrefix(tryAddr, "tcp://") - if strings.Contains(addr, "://") || addr == "" { - return "", fmt.Errorf("Invalid proto, expected tcp: %s", tryAddr) - } - - defaultAddr = strings.TrimPrefix(defaultAddr, "tcp://") - defaultHost, defaultPort, err := net.SplitHostPort(defaultAddr) - if err != nil { - return "", err - } - // url.Parse fails for trailing colon on IPv6 brackets on Go 1.5, but - // not 1.4. See https://github.com/golang/go/issues/12200 and - // https://github.com/golang/go/issues/6530. 
- if strings.HasSuffix(addr, "]:") { - addr += defaultPort - } - - u, err := url.Parse("tcp://" + addr) - if err != nil { - return "", err - } - host, port, err := net.SplitHostPort(u.Host) - if err != nil { - // try port addition once - host, port, err = net.SplitHostPort(net.JoinHostPort(u.Host, defaultPort)) - } - if err != nil { - return "", fmt.Errorf("Invalid bind address format: %s", tryAddr) - } - - if host == "" { - host = defaultHost - } - if port == "" { - port = defaultPort - } - p, err := strconv.Atoi(port) - if err != nil && p == 0 { - return "", fmt.Errorf("Invalid bind address format: %s", tryAddr) - } - - return fmt.Sprintf("tcp://%s%s", net.JoinHostPort(host, port), u.Path), nil -} - -// ValidateExtraHost validates that the specified string is a valid extrahost and returns it. -// ExtraHost is in the form of name:ip where the ip has to be a valid ip (IPv4 or IPv6). -func ValidateExtraHost(val string) (string, error) { - // allow for IPv6 addresses in extra hosts by only splitting on first ":" - arr := strings.SplitN(val, ":", 2) - if len(arr) != 2 || len(arr[0]) == 0 { - return "", fmt.Errorf("bad format for add-host: %q", val) - } - if _, err := ValidateIPAddress(arr[1]); err != nil { - return "", fmt.Errorf("invalid IP address in add-host: %q", arr[1]) - } - return val, nil -} diff --git a/vendor/github.com/docker/docker/opts/hosts_unix.go b/vendor/github.com/docker/docker/opts/hosts_unix.go deleted file mode 100644 index 611407a9d..000000000 --- a/vendor/github.com/docker/docker/opts/hosts_unix.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !windows - -package opts - -import "fmt" - -// DefaultHost constant defines the default host string used by docker on other hosts than Windows -var DefaultHost = fmt.Sprintf("unix://%s", DefaultUnixSocket) diff --git a/vendor/github.com/docker/docker/opts/hosts_windows.go b/vendor/github.com/docker/docker/opts/hosts_windows.go deleted file mode 100644 index 7c239e00f..000000000 --- a/vendor/github.com/docker/docker/opts/hosts_windows.go +++ /dev/null @@ -1,6 +0,0 @@ -// +build windows - -package opts - -// DefaultHost constant defines the default host string used by docker on Windows -var DefaultHost = "npipe://" + DefaultNamedPipe diff --git a/vendor/github.com/docker/docker/opts/ip.go b/vendor/github.com/docker/docker/opts/ip.go deleted file mode 100644 index fb03b5011..000000000 --- a/vendor/github.com/docker/docker/opts/ip.go +++ /dev/null @@ -1,47 +0,0 @@ -package opts - -import ( - "fmt" - "net" -) - -// IPOpt holds an IP. It is used to store values from CLI flags. -type IPOpt struct { - *net.IP -} - -// NewIPOpt creates a new IPOpt from a reference net.IP and a -// string representation of an IP. If the string is not a valid -// IP it will fallback to the specified reference. -func NewIPOpt(ref *net.IP, defaultVal string) *IPOpt { - o := &IPOpt{ - IP: ref, - } - o.Set(defaultVal) - return o -} - -// Set sets an IPv4 or IPv6 address from a given string. If the given -// string is not parseable as an IP address it returns an error. -func (o *IPOpt) Set(val string) error { - ip := net.ParseIP(val) - if ip == nil { - return fmt.Errorf("%s is not an ip address", val) - } - *o.IP = ip - return nil -} - -// String returns the IP address stored in the IPOpt. If stored IP is a -// nil pointer, it returns an empty string. 
-func (o *IPOpt) String() string { - if *o.IP == nil { - return "" - } - return o.IP.String() -} - -// Type returns the type of the option -func (o *IPOpt) Type() string { - return "ip" -} diff --git a/vendor/github.com/docker/docker/opts/mount.go b/vendor/github.com/docker/docker/opts/mount.go deleted file mode 100644 index d4ccf838d..000000000 --- a/vendor/github.com/docker/docker/opts/mount.go +++ /dev/null @@ -1,173 +0,0 @@ -package opts - -import ( - "encoding/csv" - "fmt" - "os" - "strconv" - "strings" - - mounttypes "github.com/docker/docker/api/types/mount" - "github.com/docker/go-units" -) - -// MountOpt is a Value type for parsing mounts -type MountOpt struct { - values []mounttypes.Mount -} - -// Set a new mount value -func (m *MountOpt) Set(value string) error { - csvReader := csv.NewReader(strings.NewReader(value)) - fields, err := csvReader.Read() - if err != nil { - return err - } - - mount := mounttypes.Mount{} - - volumeOptions := func() *mounttypes.VolumeOptions { - if mount.VolumeOptions == nil { - mount.VolumeOptions = &mounttypes.VolumeOptions{ - Labels: make(map[string]string), - } - } - if mount.VolumeOptions.DriverConfig == nil { - mount.VolumeOptions.DriverConfig = &mounttypes.Driver{} - } - return mount.VolumeOptions - } - - bindOptions := func() *mounttypes.BindOptions { - if mount.BindOptions == nil { - mount.BindOptions = new(mounttypes.BindOptions) - } - return mount.BindOptions - } - - tmpfsOptions := func() *mounttypes.TmpfsOptions { - if mount.TmpfsOptions == nil { - mount.TmpfsOptions = new(mounttypes.TmpfsOptions) - } - return mount.TmpfsOptions - } - - setValueOnMap := func(target map[string]string, value string) { - parts := strings.SplitN(value, "=", 2) - if len(parts) == 1 { - target[value] = "" - } else { - target[parts[0]] = parts[1] - } - } - - mount.Type = mounttypes.TypeVolume // default to volume mounts - // Set writable as the default - for _, field := range fields { - parts := strings.SplitN(field, "=", 2) - key := strings.ToLower(parts[0]) - - if len(parts) == 1 { - switch key { - case "readonly", "ro": - mount.ReadOnly = true - continue - case "volume-nocopy": - volumeOptions().NoCopy = true - continue - } - } - - if len(parts) != 2 { - return fmt.Errorf("invalid field '%s' must be a key=value pair", field) - } - - value := parts[1] - switch key { - case "type": - mount.Type = mounttypes.Type(strings.ToLower(value)) - case "source", "src": - mount.Source = value - case "target", "dst", "destination": - mount.Target = value - case "readonly", "ro": - mount.ReadOnly, err = strconv.ParseBool(value) - if err != nil { - return fmt.Errorf("invalid value for %s: %s", key, value) - } - case "consistency": - mount.Consistency = mounttypes.Consistency(strings.ToLower(value)) - case "bind-propagation": - bindOptions().Propagation = mounttypes.Propagation(strings.ToLower(value)) - case "volume-nocopy": - volumeOptions().NoCopy, err = strconv.ParseBool(value) - if err != nil { - return fmt.Errorf("invalid value for volume-nocopy: %s", value) - } - case "volume-label": - setValueOnMap(volumeOptions().Labels, value) - case "volume-driver": - volumeOptions().DriverConfig.Name = value - case "volume-opt": - if volumeOptions().DriverConfig.Options == nil { - volumeOptions().DriverConfig.Options = make(map[string]string) - } - setValueOnMap(volumeOptions().DriverConfig.Options, value) - case "tmpfs-size": - sizeBytes, err := units.RAMInBytes(value) - if err != nil { - return fmt.Errorf("invalid value for %s: %s", key, value) - } - tmpfsOptions().SizeBytes = 
sizeBytes - case "tmpfs-mode": - ui64, err := strconv.ParseUint(value, 8, 32) - if err != nil { - return fmt.Errorf("invalid value for %s: %s", key, value) - } - tmpfsOptions().Mode = os.FileMode(ui64) - default: - return fmt.Errorf("unexpected key '%s' in '%s'", key, field) - } - } - - if mount.Type == "" { - return fmt.Errorf("type is required") - } - - if mount.Target == "" { - return fmt.Errorf("target is required") - } - - if mount.VolumeOptions != nil && mount.Type != mounttypes.TypeVolume { - return fmt.Errorf("cannot mix 'volume-*' options with mount type '%s'", mount.Type) - } - if mount.BindOptions != nil && mount.Type != mounttypes.TypeBind { - return fmt.Errorf("cannot mix 'bind-*' options with mount type '%s'", mount.Type) - } - if mount.TmpfsOptions != nil && mount.Type != mounttypes.TypeTmpfs { - return fmt.Errorf("cannot mix 'tmpfs-*' options with mount type '%s'", mount.Type) - } - - m.values = append(m.values, mount) - return nil -} - -// Type returns the type of this option -func (m *MountOpt) Type() string { - return "mount" -} - -// String returns a string repr of this option -func (m *MountOpt) String() string { - mounts := []string{} - for _, mount := range m.values { - repr := fmt.Sprintf("%s %s %s", mount.Type, mount.Source, mount.Target) - mounts = append(mounts, repr) - } - return strings.Join(mounts, ", ") -} - -// Value returns the mounts -func (m *MountOpt) Value() []mounttypes.Mount { - return m.values -} diff --git a/vendor/github.com/docker/docker/opts/opts.go b/vendor/github.com/docker/docker/opts/opts.go deleted file mode 100644 index f76f30805..000000000 --- a/vendor/github.com/docker/docker/opts/opts.go +++ /dev/null @@ -1,488 +0,0 @@ -package opts - -import ( - "fmt" - "math/big" - "net" - "path" - "regexp" - "strings" - - "github.com/docker/docker/api/types/filters" - units "github.com/docker/go-units" -) - -var ( - alphaRegexp = regexp.MustCompile(`[a-zA-Z]`) - domainRegexp = regexp.MustCompile(`^(:?(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9]))(:?\.(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])))*)\.?\s*$`) -) - -// ListOpts holds a list of values and a validation function. -type ListOpts struct { - values *[]string - validator ValidatorFctType -} - -// NewListOpts creates a new ListOpts with the specified validator. -func NewListOpts(validator ValidatorFctType) ListOpts { - var values []string - return *NewListOptsRef(&values, validator) -} - -// NewListOptsRef creates a new ListOpts with the specified values and validator. -func NewListOptsRef(values *[]string, validator ValidatorFctType) *ListOpts { - return &ListOpts{ - values: values, - validator: validator, - } -} - -func (opts *ListOpts) String() string { - if len(*opts.values) == 0 { - return "" - } - return fmt.Sprintf("%v", *opts.values) -} - -// Set validates if needed the input value and adds it to the -// internal slice. -func (opts *ListOpts) Set(value string) error { - if opts.validator != nil { - v, err := opts.validator(value) - if err != nil { - return err - } - value = v - } - (*opts.values) = append((*opts.values), value) - return nil -} - -// Delete removes the specified element from the slice. -func (opts *ListOpts) Delete(key string) { - for i, k := range *opts.values { - if k == key { - (*opts.values) = append((*opts.values)[:i], (*opts.values)[i+1:]...) - return - } - } -} - -// GetMap returns the content of values in a map in order to avoid -// duplicates. 
-func (opts *ListOpts) GetMap() map[string]struct{} { - ret := make(map[string]struct{}) - for _, k := range *opts.values { - ret[k] = struct{}{} - } - return ret -} - -// GetAll returns the values of slice. -func (opts *ListOpts) GetAll() []string { - return (*opts.values) -} - -// GetAllOrEmpty returns the values of the slice -// or an empty slice when there are no values. -func (opts *ListOpts) GetAllOrEmpty() []string { - v := *opts.values - if v == nil { - return make([]string, 0) - } - return v -} - -// Get checks the existence of the specified key. -func (opts *ListOpts) Get(key string) bool { - for _, k := range *opts.values { - if k == key { - return true - } - } - return false -} - -// Len returns the amount of element in the slice. -func (opts *ListOpts) Len() int { - return len((*opts.values)) -} - -// Type returns a string name for this Option type -func (opts *ListOpts) Type() string { - return "list" -} - -// WithValidator returns the ListOpts with validator set. -func (opts *ListOpts) WithValidator(validator ValidatorFctType) *ListOpts { - opts.validator = validator - return opts -} - -// NamedOption is an interface that list and map options -// with names implement. -type NamedOption interface { - Name() string -} - -// NamedListOpts is a ListOpts with a configuration name. -// This struct is useful to keep reference to the assigned -// field name in the internal configuration struct. -type NamedListOpts struct { - name string - ListOpts -} - -var _ NamedOption = &NamedListOpts{} - -// NewNamedListOptsRef creates a reference to a new NamedListOpts struct. -func NewNamedListOptsRef(name string, values *[]string, validator ValidatorFctType) *NamedListOpts { - return &NamedListOpts{ - name: name, - ListOpts: *NewListOptsRef(values, validator), - } -} - -// Name returns the name of the NamedListOpts in the configuration. -func (o *NamedListOpts) Name() string { - return o.name -} - -// MapOpts holds a map of values and a validation function. -type MapOpts struct { - values map[string]string - validator ValidatorFctType -} - -// Set validates if needed the input value and add it to the -// internal map, by splitting on '='. -func (opts *MapOpts) Set(value string) error { - if opts.validator != nil { - v, err := opts.validator(value) - if err != nil { - return err - } - value = v - } - vals := strings.SplitN(value, "=", 2) - if len(vals) == 1 { - (opts.values)[vals[0]] = "" - } else { - (opts.values)[vals[0]] = vals[1] - } - return nil -} - -// GetAll returns the values of MapOpts as a map. -func (opts *MapOpts) GetAll() map[string]string { - return opts.values -} - -func (opts *MapOpts) String() string { - return fmt.Sprintf("%v", map[string]string((opts.values))) -} - -// Type returns a string name for this Option type -func (opts *MapOpts) Type() string { - return "map" -} - -// NewMapOpts creates a new MapOpts with the specified map of values and a validator. -func NewMapOpts(values map[string]string, validator ValidatorFctType) *MapOpts { - if values == nil { - values = make(map[string]string) - } - return &MapOpts{ - values: values, - validator: validator, - } -} - -// NamedMapOpts is a MapOpts struct with a configuration name. -// This struct is useful to keep reference to the assigned -// field name in the internal configuration struct. -type NamedMapOpts struct { - name string - MapOpts -} - -var _ NamedOption = &NamedMapOpts{} - -// NewNamedMapOpts creates a reference to a new NamedMapOpts struct. 
-func NewNamedMapOpts(name string, values map[string]string, validator ValidatorFctType) *NamedMapOpts { - return &NamedMapOpts{ - name: name, - MapOpts: *NewMapOpts(values, validator), - } -} - -// Name returns the name of the NamedMapOpts in the configuration. -func (o *NamedMapOpts) Name() string { - return o.name -} - -// ValidatorFctType defines a validator function that returns a validated string and/or an error. -type ValidatorFctType func(val string) (string, error) - -// ValidatorFctListType defines a validator function that returns a validated list of string and/or an error -type ValidatorFctListType func(val string) ([]string, error) - -// ValidateIPAddress validates an Ip address. -func ValidateIPAddress(val string) (string, error) { - var ip = net.ParseIP(strings.TrimSpace(val)) - if ip != nil { - return ip.String(), nil - } - return "", fmt.Errorf("%s is not an ip address", val) -} - -// ValidateMACAddress validates a MAC address. -func ValidateMACAddress(val string) (string, error) { - _, err := net.ParseMAC(strings.TrimSpace(val)) - if err != nil { - return "", err - } - return val, nil -} - -// ValidateDNSSearch validates domain for resolvconf search configuration. -// A zero length domain is represented by a dot (.). -func ValidateDNSSearch(val string) (string, error) { - if val = strings.Trim(val, " "); val == "." { - return val, nil - } - return validateDomain(val) -} - -func validateDomain(val string) (string, error) { - if alphaRegexp.FindString(val) == "" { - return "", fmt.Errorf("%s is not a valid domain", val) - } - ns := domainRegexp.FindSubmatch([]byte(val)) - if len(ns) > 0 && len(ns[1]) < 255 { - return string(ns[1]), nil - } - return "", fmt.Errorf("%s is not a valid domain", val) -} - -// ValidateLabel validates that the specified string is a valid label, and returns it. -// Labels are in the form on key=value. -func ValidateLabel(val string) (string, error) { - if strings.Count(val, "=") < 1 { - return "", fmt.Errorf("bad attribute format: %s", val) - } - return val, nil -} - -// ValidateSysctl validates a sysctl and returns it. 
-func ValidateSysctl(val string) (string, error) { - validSysctlMap := map[string]bool{ - "kernel.msgmax": true, - "kernel.msgmnb": true, - "kernel.msgmni": true, - "kernel.sem": true, - "kernel.shmall": true, - "kernel.shmmax": true, - "kernel.shmmni": true, - "kernel.shm_rmid_forced": true, - } - validSysctlPrefixes := []string{ - "net.", - "fs.mqueue.", - } - arr := strings.Split(val, "=") - if len(arr) < 2 { - return "", fmt.Errorf("sysctl '%s' is not whitelisted", val) - } - if validSysctlMap[arr[0]] { - return val, nil - } - - for _, vp := range validSysctlPrefixes { - if strings.HasPrefix(arr[0], vp) { - return val, nil - } - } - return "", fmt.Errorf("sysctl '%s' is not whitelisted", val) -} - -// FilterOpt is a flag type for validating filters -type FilterOpt struct { - filter filters.Args -} - -// NewFilterOpt returns a new FilterOpt -func NewFilterOpt() FilterOpt { - return FilterOpt{filter: filters.NewArgs()} -} - -func (o *FilterOpt) String() string { - repr, err := filters.ToParam(o.filter) - if err != nil { - return "invalid filters" - } - return repr -} - -// Set sets the value of the opt by parsing the command line value -func (o *FilterOpt) Set(value string) error { - var err error - o.filter, err = filters.ParseFlag(value, o.filter) - return err -} - -// Type returns the option type -func (o *FilterOpt) Type() string { - return "filter" -} - -// Value returns the value of this option -func (o *FilterOpt) Value() filters.Args { - return o.filter -} - -// NanoCPUs is a type for fixed point fractional number. -type NanoCPUs int64 - -// String returns the string format of the number -func (c *NanoCPUs) String() string { - if *c == 0 { - return "" - } - return big.NewRat(c.Value(), 1e9).FloatString(3) -} - -// Set sets the value of the NanoCPU by passing a string -func (c *NanoCPUs) Set(value string) error { - cpus, err := ParseCPUs(value) - *c = NanoCPUs(cpus) - return err -} - -// Type returns the type -func (c *NanoCPUs) Type() string { - return "decimal" -} - -// Value returns the value in int64 -func (c *NanoCPUs) Value() int64 { - return int64(*c) -} - -// ParseCPUs takes a string ratio and returns an integer value of nano cpus -func ParseCPUs(value string) (int64, error) { - cpu, ok := new(big.Rat).SetString(value) - if !ok { - return 0, fmt.Errorf("failed to parse %v as a rational number", value) - } - nano := cpu.Mul(cpu, big.NewRat(1e9, 1)) - if !nano.IsInt() { - return 0, fmt.Errorf("value is too precise") - } - return nano.Num().Int64(), nil -} - -// ParseLink parses and validates the specified string as a link format (name:alias) -func ParseLink(val string) (string, string, error) { - if val == "" { - return "", "", fmt.Errorf("empty string specified for links") - } - arr := strings.Split(val, ":") - if len(arr) > 2 { - return "", "", fmt.Errorf("bad format for links: %s", val) - } - if len(arr) == 1 { - return val, val, nil - } - // This is kept because we can actually get a HostConfig with links - // from an already created container and the format is not `foo:bar` - // but `/foo:/c1/bar` - if strings.HasPrefix(arr[0], "/") { - _, alias := path.Split(arr[1]) - return arr[0][1:], alias, nil - } - return arr[0], arr[1], nil -} - -// ValidateLink validates that the specified string has a valid link format (containerName:alias). 
-func ValidateLink(val string) (string, error) { - _, _, err := ParseLink(val) - return val, err -} - -// MemBytes is a type for human readable memory bytes (like 128M, 2g, etc) -type MemBytes int64 - -// String returns the string format of the human readable memory bytes -func (m *MemBytes) String() string { - // NOTE: In spf13/pflag/flag.go, "0" is considered as "zero value" while "0 B" is not. - // We return "0" in case value is 0 here so that the default value is hidden. - // (Sometimes "default 0 B" is actually misleading) - if m.Value() != 0 { - return units.BytesSize(float64(m.Value())) - } - return "0" -} - -// Set sets the value of the MemBytes by passing a string -func (m *MemBytes) Set(value string) error { - val, err := units.RAMInBytes(value) - *m = MemBytes(val) - return err -} - -// Type returns the type -func (m *MemBytes) Type() string { - return "bytes" -} - -// Value returns the value in int64 -func (m *MemBytes) Value() int64 { - return int64(*m) -} - -// UnmarshalJSON is the customized unmarshaler for MemBytes -func (m *MemBytes) UnmarshalJSON(s []byte) error { - if len(s) <= 2 || s[0] != '"' || s[len(s)-1] != '"' { - return fmt.Errorf("invalid size: %q", s) - } - val, err := units.RAMInBytes(string(s[1 : len(s)-1])) - *m = MemBytes(val) - return err -} - -// MemSwapBytes is a type for human readable memory bytes (like 128M, 2g, etc). -// It differs from MemBytes in that -1 is valid and the default. -type MemSwapBytes int64 - -// Set sets the value of the MemSwapBytes by passing a string -func (m *MemSwapBytes) Set(value string) error { - if value == "-1" { - *m = MemSwapBytes(-1) - return nil - } - val, err := units.RAMInBytes(value) - *m = MemSwapBytes(val) - return err -} - -// Type returns the type -func (m *MemSwapBytes) Type() string { - return "bytes" -} - -// Value returns the value in int64 -func (m *MemSwapBytes) Value() int64 { - return int64(*m) -} - -func (m *MemSwapBytes) String() string { - b := MemBytes(*m) - return b.String() -} - -// UnmarshalJSON is the customized unmarshaler for MemSwapBytes -func (m *MemSwapBytes) UnmarshalJSON(s []byte) error { - b := MemBytes(*m) - return b.UnmarshalJSON(s) -} diff --git a/vendor/github.com/docker/docker/opts/opts_unix.go b/vendor/github.com/docker/docker/opts/opts_unix.go deleted file mode 100644 index 2766a43a0..000000000 --- a/vendor/github.com/docker/docker/opts/opts_unix.go +++ /dev/null @@ -1,6 +0,0 @@ -// +build !windows - -package opts - -// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. dockerd -H tcp://:8080 -const DefaultHTTPHost = "localhost" diff --git a/vendor/github.com/docker/docker/opts/opts_windows.go b/vendor/github.com/docker/docker/opts/opts_windows.go deleted file mode 100644 index 98b7251a9..000000000 --- a/vendor/github.com/docker/docker/opts/opts_windows.go +++ /dev/null @@ -1,56 +0,0 @@ -package opts - -// TODO Windows. Identify bug in GOLang 1.5.1+ and/or Windows Server 2016 TP5. -// @jhowardmsft, @swernli. -// -// On Windows, this mitigates a problem with the default options of running -// a docker client against a local docker daemon on TP5. -// -// What was found that if the default host is "localhost", even if the client -// (and daemon as this is local) is not physically on a network, and the DNS -// cache is flushed (ipconfig /flushdns), then the client will pause for -// exactly one second when connecting to the daemon for calls. For example -// using docker run windowsservercore cmd, the CLI will send a create followed -// by an attach. 
You see the delay between the attach finishing and the attach -// being seen by the daemon. -// -// Here's some daemon debug logs with additional debug spew put in. The -// AfterWriteJSON log is the very last thing the daemon does as part of the -// create call. The POST /attach is the second CLI call. Notice the second -// time gap. -// -// time="2015-11-06T13:38:37.259627400-08:00" level=debug msg="After createRootfs" -// time="2015-11-06T13:38:37.263626300-08:00" level=debug msg="After setHostConfig" -// time="2015-11-06T13:38:37.267631200-08:00" level=debug msg="before createContainerPl...." -// time="2015-11-06T13:38:37.271629500-08:00" level=debug msg=ToDiskLocking.... -// time="2015-11-06T13:38:37.275643200-08:00" level=debug msg="loggin event...." -// time="2015-11-06T13:38:37.277627600-08:00" level=debug msg="logged event...." -// time="2015-11-06T13:38:37.279631800-08:00" level=debug msg="In defer func" -// time="2015-11-06T13:38:37.282628100-08:00" level=debug msg="After daemon.create" -// time="2015-11-06T13:38:37.286651700-08:00" level=debug msg="return 2" -// time="2015-11-06T13:38:37.289629500-08:00" level=debug msg="Returned from daemon.ContainerCreate" -// time="2015-11-06T13:38:37.311629100-08:00" level=debug msg="After WriteJSON" -// ... 1 second gap here.... -// time="2015-11-06T13:38:38.317866200-08:00" level=debug msg="Calling POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach" -// time="2015-11-06T13:38:38.326882500-08:00" level=info msg="POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach?stderr=1&stdin=1&stdout=1&stream=1" -// -// We suspect this is either a bug introduced in GOLang 1.5.1, or that a change -// in GOLang 1.5.1 (from 1.4.3) is exposing a bug in Windows. In theory, -// the Windows networking stack is supposed to resolve "localhost" internally, -// without hitting DNS, or even reading the hosts file (which is why localhost -// is commented out in the hosts file on Windows). -// -// We have validated that working around this using the actual IPv4 localhost -// address does not cause the delay. -// -// This does not occur with the docker client built with 1.4.3 on the same -// Windows build, regardless of whether the daemon is built using 1.5.1 -// or 1.4.3. It does not occur on Linux. We also verified we see the same thing -// on a cross-compiled Windows binary (from Linux). -// -// Final note: This is a mitigation, not a 'real' fix. It is still susceptible -// to the delay if a user were to do 'docker run -H=tcp://localhost:2375...' -// explicitly. - -// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. dockerd -H tcp://:8080 -const DefaultHTTPHost = "127.0.0.1" diff --git a/vendor/github.com/docker/docker/opts/port.go b/vendor/github.com/docker/docker/opts/port.go deleted file mode 100644 index 152683c98..000000000 --- a/vendor/github.com/docker/docker/opts/port.go +++ /dev/null @@ -1,162 +0,0 @@ -package opts - -import ( - "encoding/csv" - "fmt" - "regexp" - "strconv" - "strings" - - "github.com/docker/docker/api/types/swarm" - "github.com/docker/go-connections/nat" -) - -const ( - portOptTargetPort = "target" - portOptPublishedPort = "published" - portOptProtocol = "protocol" - portOptMode = "mode" -) - -// PortOpt represents a port config in swarm mode. 
-type PortOpt struct { - ports []swarm.PortConfig -} - -// Set a new port value -func (p *PortOpt) Set(value string) error { - longSyntax, err := regexp.MatchString(`\w+=\w+(,\w+=\w+)*`, value) - if err != nil { - return err - } - if longSyntax { - csvReader := csv.NewReader(strings.NewReader(value)) - fields, err := csvReader.Read() - if err != nil { - return err - } - - pConfig := swarm.PortConfig{} - for _, field := range fields { - parts := strings.SplitN(field, "=", 2) - if len(parts) != 2 { - return fmt.Errorf("invalid field %s", field) - } - - key := strings.ToLower(parts[0]) - value := strings.ToLower(parts[1]) - - switch key { - case portOptProtocol: - if value != string(swarm.PortConfigProtocolTCP) && value != string(swarm.PortConfigProtocolUDP) { - return fmt.Errorf("invalid protocol value %s", value) - } - - pConfig.Protocol = swarm.PortConfigProtocol(value) - case portOptMode: - if value != string(swarm.PortConfigPublishModeIngress) && value != string(swarm.PortConfigPublishModeHost) { - return fmt.Errorf("invalid publish mode value %s", value) - } - - pConfig.PublishMode = swarm.PortConfigPublishMode(value) - case portOptTargetPort: - tPort, err := strconv.ParseUint(value, 10, 16) - if err != nil { - return err - } - - pConfig.TargetPort = uint32(tPort) - case portOptPublishedPort: - pPort, err := strconv.ParseUint(value, 10, 16) - if err != nil { - return err - } - - pConfig.PublishedPort = uint32(pPort) - default: - return fmt.Errorf("invalid field key %s", key) - } - } - - if pConfig.TargetPort == 0 { - return fmt.Errorf("missing mandatory field %q", portOptTargetPort) - } - - if pConfig.PublishMode == "" { - pConfig.PublishMode = swarm.PortConfigPublishModeIngress - } - - if pConfig.Protocol == "" { - pConfig.Protocol = swarm.PortConfigProtocolTCP - } - - p.ports = append(p.ports, pConfig) - } else { - // short syntax - portConfigs := []swarm.PortConfig{} - ports, portBindingMap, err := nat.ParsePortSpecs([]string{value}) - if err != nil { - return err - } - for _, portBindings := range portBindingMap { - for _, portBinding := range portBindings { - if portBinding.HostIP != "" { - return fmt.Errorf("HostIP is not supported.") - } - } - } - - for port := range ports { - portConfig, err := ConvertPortToPortConfig(port, portBindingMap) - if err != nil { - return err - } - portConfigs = append(portConfigs, portConfig...) - } - p.ports = append(p.ports, portConfigs...) - } - return nil -} - -// Type returns the type of this option -func (p *PortOpt) Type() string { - return "port" -} - -// String returns a string repr of this option -func (p *PortOpt) String() string { - ports := []string{} - for _, port := range p.ports { - repr := fmt.Sprintf("%v:%v/%s/%s", port.PublishedPort, port.TargetPort, port.Protocol, port.PublishMode) - ports = append(ports, repr) - } - return strings.Join(ports, ", ") -} - -// Value returns the ports -func (p *PortOpt) Value() []swarm.PortConfig { - return p.ports -} - -// ConvertPortToPortConfig converts ports to the swarm type -func ConvertPortToPortConfig( - port nat.Port, - portBindings map[nat.Port][]nat.PortBinding, -) ([]swarm.PortConfig, error) { - ports := []swarm.PortConfig{} - - for _, binding := range portBindings[port] { - hostPort, err := strconv.ParseUint(binding.HostPort, 10, 16) - if err != nil && binding.HostPort != "" { - return nil, fmt.Errorf("invalid hostport binding (%s) for port (%s)", binding.HostPort, port.Port()) - } - ports = append(ports, swarm.PortConfig{ - //TODO Name: ? 
- Protocol: swarm.PortConfigProtocol(strings.ToLower(port.Proto())), - TargetPort: uint32(port.Int()), - PublishedPort: uint32(hostPort), - PublishMode: swarm.PortConfigPublishModeIngress, - }) - } - return ports, nil -} diff --git a/vendor/github.com/docker/docker/opts/quotedstring.go b/vendor/github.com/docker/docker/opts/quotedstring.go deleted file mode 100644 index fb1e5374b..000000000 --- a/vendor/github.com/docker/docker/opts/quotedstring.go +++ /dev/null @@ -1,37 +0,0 @@ -package opts - -// QuotedString is a string that may have extra quotes around the value. The -// quotes are stripped from the value. -type QuotedString struct { - value *string -} - -// Set sets a new value -func (s *QuotedString) Set(val string) error { - *s.value = trimQuotes(val) - return nil -} - -// Type returns the type of the value -func (s *QuotedString) Type() string { - return "string" -} - -func (s *QuotedString) String() string { - return string(*s.value) -} - -func trimQuotes(value string) string { - lastIndex := len(value) - 1 - for _, char := range []byte{'\'', '"'} { - if value[0] == char && value[lastIndex] == char { - return value[1:lastIndex] - } - } - return value -} - -// NewQuotedString returns a new quoted string option -func NewQuotedString(value *string) *QuotedString { - return &QuotedString{value: value} -} diff --git a/vendor/github.com/docker/docker/opts/runtime.go b/vendor/github.com/docker/docker/opts/runtime.go deleted file mode 100644 index 4361b3ce0..000000000 --- a/vendor/github.com/docker/docker/opts/runtime.go +++ /dev/null @@ -1,79 +0,0 @@ -package opts - -import ( - "fmt" - "strings" - - "github.com/docker/docker/api/types" -) - -// RuntimeOpt defines a map of Runtimes -type RuntimeOpt struct { - name string - stockRuntimeName string - values *map[string]types.Runtime -} - -// NewNamedRuntimeOpt creates a new RuntimeOpt -func NewNamedRuntimeOpt(name string, ref *map[string]types.Runtime, stockRuntime string) *RuntimeOpt { - if ref == nil { - ref = &map[string]types.Runtime{} - } - return &RuntimeOpt{name: name, values: ref, stockRuntimeName: stockRuntime} -} - -// Name returns the name of the NamedListOpts in the configuration. -func (o *RuntimeOpt) Name() string { - return o.name -} - -// Set validates and updates the list of Runtimes -func (o *RuntimeOpt) Set(val string) error { - parts := strings.SplitN(val, "=", 2) - if len(parts) != 2 { - return fmt.Errorf("invalid runtime argument: %s", val) - } - - parts[0] = strings.TrimSpace(parts[0]) - parts[1] = strings.TrimSpace(parts[1]) - if parts[0] == "" || parts[1] == "" { - return fmt.Errorf("invalid runtime argument: %s", val) - } - - parts[0] = strings.ToLower(parts[0]) - if parts[0] == o.stockRuntimeName { - return fmt.Errorf("runtime name '%s' is reserved", o.stockRuntimeName) - } - - if _, ok := (*o.values)[parts[0]]; ok { - return fmt.Errorf("runtime '%s' was already defined", parts[0]) - } - - (*o.values)[parts[0]] = types.Runtime{Path: parts[1]} - - return nil -} - -// String returns Runtime values as a string. 
-func (o *RuntimeOpt) String() string { - var out []string - for k := range *o.values { - out = append(out, k) - } - - return fmt.Sprintf("%v", out) -} - -// GetMap returns a map of Runtimes (name: path) -func (o *RuntimeOpt) GetMap() map[string]types.Runtime { - if o.values != nil { - return *o.values - } - - return map[string]types.Runtime{} -} - -// Type returns the type of the option -func (o *RuntimeOpt) Type() string { - return "runtime" -} diff --git a/vendor/github.com/docker/docker/opts/secret.go b/vendor/github.com/docker/docker/opts/secret.go deleted file mode 100644 index 56ed29eb5..000000000 --- a/vendor/github.com/docker/docker/opts/secret.go +++ /dev/null @@ -1,103 +0,0 @@ -package opts - -import ( - "encoding/csv" - "fmt" - "os" - "path/filepath" - "strconv" - "strings" - - swarmtypes "github.com/docker/docker/api/types/swarm" -) - -// SecretOpt is a Value type for parsing secrets -type SecretOpt struct { - values []*swarmtypes.SecretReference -} - -// Set a new secret value -func (o *SecretOpt) Set(value string) error { - csvReader := csv.NewReader(strings.NewReader(value)) - fields, err := csvReader.Read() - if err != nil { - return err - } - - options := &swarmtypes.SecretReference{ - File: &swarmtypes.SecretReferenceFileTarget{ - UID: "0", - GID: "0", - Mode: 0444, - }, - } - - // support a simple syntax of --secret foo - if len(fields) == 1 { - options.File.Name = fields[0] - options.SecretName = fields[0] - o.values = append(o.values, options) - return nil - } - - for _, field := range fields { - parts := strings.SplitN(field, "=", 2) - key := strings.ToLower(parts[0]) - - if len(parts) != 2 { - return fmt.Errorf("invalid field '%s' must be a key=value pair", field) - } - - value := parts[1] - switch key { - case "source", "src": - options.SecretName = value - case "target": - tDir, _ := filepath.Split(value) - if tDir != "" { - return fmt.Errorf("target must not be a path") - } - options.File.Name = value - case "uid": - options.File.UID = value - case "gid": - options.File.GID = value - case "mode": - m, err := strconv.ParseUint(value, 0, 32) - if err != nil { - return fmt.Errorf("invalid mode specified: %v", err) - } - - options.File.Mode = os.FileMode(m) - default: - return fmt.Errorf("invalid field in secret request: %s", key) - } - } - - if options.SecretName == "" { - return fmt.Errorf("source is required") - } - - o.values = append(o.values, options) - return nil -} - -// Type returns the type of this option -func (o *SecretOpt) Type() string { - return "secret" -} - -// String returns a string repr of this option -func (o *SecretOpt) String() string { - secrets := []string{} - for _, secret := range o.values { - repr := fmt.Sprintf("%s -> %s", secret.SecretName, secret.File.Name) - secrets = append(secrets, repr) - } - return strings.Join(secrets, ", ") -} - -// Value returns the secret requests -func (o *SecretOpt) Value() []*swarmtypes.SecretReference { - return o.values -} diff --git a/vendor/github.com/docker/docker/opts/throttledevice.go b/vendor/github.com/docker/docker/opts/throttledevice.go deleted file mode 100644 index 65dd3ebf6..000000000 --- a/vendor/github.com/docker/docker/opts/throttledevice.go +++ /dev/null @@ -1,111 +0,0 @@ -package opts - -import ( - "fmt" - "strconv" - "strings" - - "github.com/docker/docker/api/types/blkiodev" - "github.com/docker/go-units" -) - -// ValidatorThrottleFctType defines a validator function that returns a validated struct and/or an error. 
-type ValidatorThrottleFctType func(val string) (*blkiodev.ThrottleDevice, error) - -// ValidateThrottleBpsDevice validates that the specified string has a valid device-rate format. -func ValidateThrottleBpsDevice(val string) (*blkiodev.ThrottleDevice, error) { - split := strings.SplitN(val, ":", 2) - if len(split) != 2 { - return nil, fmt.Errorf("bad format: %s", val) - } - if !strings.HasPrefix(split[0], "/dev/") { - return nil, fmt.Errorf("bad format for device path: %s", val) - } - rate, err := units.RAMInBytes(split[1]) - if err != nil { - return nil, fmt.Errorf("invalid rate for device: %s. The correct format is :[]. Number must be a positive integer. Unit is optional and can be kb, mb, or gb", val) - } - if rate < 0 { - return nil, fmt.Errorf("invalid rate for device: %s. The correct format is :[]. Number must be a positive integer. Unit is optional and can be kb, mb, or gb", val) - } - - return &blkiodev.ThrottleDevice{ - Path: split[0], - Rate: uint64(rate), - }, nil -} - -// ValidateThrottleIOpsDevice validates that the specified string has a valid device-rate format. -func ValidateThrottleIOpsDevice(val string) (*blkiodev.ThrottleDevice, error) { - split := strings.SplitN(val, ":", 2) - if len(split) != 2 { - return nil, fmt.Errorf("bad format: %s", val) - } - if !strings.HasPrefix(split[0], "/dev/") { - return nil, fmt.Errorf("bad format for device path: %s", val) - } - rate, err := strconv.ParseUint(split[1], 10, 64) - if err != nil { - return nil, fmt.Errorf("invalid rate for device: %s. The correct format is :. Number must be a positive integer", val) - } - if rate < 0 { - return nil, fmt.Errorf("invalid rate for device: %s. The correct format is :. Number must be a positive integer", val) - } - - return &blkiodev.ThrottleDevice{ - Path: split[0], - Rate: uint64(rate), - }, nil -} - -// ThrottledeviceOpt defines a map of ThrottleDevices -type ThrottledeviceOpt struct { - values []*blkiodev.ThrottleDevice - validator ValidatorThrottleFctType -} - -// NewThrottledeviceOpt creates a new ThrottledeviceOpt -func NewThrottledeviceOpt(validator ValidatorThrottleFctType) ThrottledeviceOpt { - values := []*blkiodev.ThrottleDevice{} - return ThrottledeviceOpt{ - values: values, - validator: validator, - } -} - -// Set validates a ThrottleDevice and sets its name as a key in ThrottledeviceOpt -func (opt *ThrottledeviceOpt) Set(val string) error { - var value *blkiodev.ThrottleDevice - if opt.validator != nil { - v, err := opt.validator(val) - if err != nil { - return err - } - value = v - } - (opt.values) = append((opt.values), value) - return nil -} - -// String returns ThrottledeviceOpt values as a string. -func (opt *ThrottledeviceOpt) String() string { - var out []string - for _, v := range opt.values { - out = append(out, v.String()) - } - - return fmt.Sprintf("%v", out) -} - -// GetList returns a slice of pointers to ThrottleDevices. -func (opt *ThrottledeviceOpt) GetList() []*blkiodev.ThrottleDevice { - var throttledevice []*blkiodev.ThrottleDevice - throttledevice = append(throttledevice, opt.values...) 
- - return throttledevice -} - -// Type returns the option type -func (opt *ThrottledeviceOpt) Type() string { - return "list" -} diff --git a/vendor/github.com/docker/docker/opts/ulimit.go b/vendor/github.com/docker/docker/opts/ulimit.go deleted file mode 100644 index 5adfe3085..000000000 --- a/vendor/github.com/docker/docker/opts/ulimit.go +++ /dev/null @@ -1,57 +0,0 @@ -package opts - -import ( - "fmt" - - "github.com/docker/go-units" -) - -// UlimitOpt defines a map of Ulimits -type UlimitOpt struct { - values *map[string]*units.Ulimit -} - -// NewUlimitOpt creates a new UlimitOpt -func NewUlimitOpt(ref *map[string]*units.Ulimit) *UlimitOpt { - if ref == nil { - ref = &map[string]*units.Ulimit{} - } - return &UlimitOpt{ref} -} - -// Set validates a Ulimit and sets its name as a key in UlimitOpt -func (o *UlimitOpt) Set(val string) error { - l, err := units.ParseUlimit(val) - if err != nil { - return err - } - - (*o.values)[l.Name] = l - - return nil -} - -// String returns Ulimit values as a string. -func (o *UlimitOpt) String() string { - var out []string - for _, v := range *o.values { - out = append(out, v.String()) - } - - return fmt.Sprintf("%v", out) -} - -// GetList returns a slice of pointers to Ulimits. -func (o *UlimitOpt) GetList() []*units.Ulimit { - var ulimits []*units.Ulimit - for _, v := range *o.values { - ulimits = append(ulimits, v) - } - - return ulimits -} - -// Type returns the option type -func (o *UlimitOpt) Type() string { - return "ulimit" -} diff --git a/vendor/github.com/docker/docker/opts/weightdevice.go b/vendor/github.com/docker/docker/opts/weightdevice.go deleted file mode 100644 index 7e3d064f2..000000000 --- a/vendor/github.com/docker/docker/opts/weightdevice.go +++ /dev/null @@ -1,89 +0,0 @@ -package opts - -import ( - "fmt" - "strconv" - "strings" - - "github.com/docker/docker/api/types/blkiodev" -) - -// ValidatorWeightFctType defines a validator function that returns a validated struct and/or an error. -type ValidatorWeightFctType func(val string) (*blkiodev.WeightDevice, error) - -// ValidateWeightDevice validates that the specified string has a valid device-weight format. 
-func ValidateWeightDevice(val string) (*blkiodev.WeightDevice, error) { - split := strings.SplitN(val, ":", 2) - if len(split) != 2 { - return nil, fmt.Errorf("bad format: %s", val) - } - if !strings.HasPrefix(split[0], "/dev/") { - return nil, fmt.Errorf("bad format for device path: %s", val) - } - weight, err := strconv.ParseUint(split[1], 10, 0) - if err != nil { - return nil, fmt.Errorf("invalid weight for device: %s", val) - } - if weight > 0 && (weight < 10 || weight > 1000) { - return nil, fmt.Errorf("invalid weight for device: %s", val) - } - - return &blkiodev.WeightDevice{ - Path: split[0], - Weight: uint16(weight), - }, nil -} - -// WeightdeviceOpt defines a map of WeightDevices -type WeightdeviceOpt struct { - values []*blkiodev.WeightDevice - validator ValidatorWeightFctType -} - -// NewWeightdeviceOpt creates a new WeightdeviceOpt -func NewWeightdeviceOpt(validator ValidatorWeightFctType) WeightdeviceOpt { - values := []*blkiodev.WeightDevice{} - return WeightdeviceOpt{ - values: values, - validator: validator, - } -} - -// Set validates a WeightDevice and sets its name as a key in WeightdeviceOpt -func (opt *WeightdeviceOpt) Set(val string) error { - var value *blkiodev.WeightDevice - if opt.validator != nil { - v, err := opt.validator(val) - if err != nil { - return err - } - value = v - } - (opt.values) = append((opt.values), value) - return nil -} - -// String returns WeightdeviceOpt values as a string. -func (opt *WeightdeviceOpt) String() string { - var out []string - for _, v := range opt.values { - out = append(out, v.String()) - } - - return fmt.Sprintf("%v", out) -} - -// GetList returns a slice of pointers to WeightDevices. -func (opt *WeightdeviceOpt) GetList() []*blkiodev.WeightDevice { - var weightdevice []*blkiodev.WeightDevice - for _, v := range opt.values { - weightdevice = append(weightdevice, v) - } - - return weightdevice -} - -// Type returns the option type -func (opt *WeightdeviceOpt) Type() string { - return "list" -} diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir.go b/vendor/github.com/docker/docker/pkg/homedir/homedir.go deleted file mode 100644 index 8154e83f0..000000000 --- a/vendor/github.com/docker/docker/pkg/homedir/homedir.go +++ /dev/null @@ -1,39 +0,0 @@ -package homedir - -import ( - "os" - "runtime" - - "github.com/opencontainers/runc/libcontainer/user" -) - -// Key returns the env var name for the user's home dir based on -// the platform being run on -func Key() string { - if runtime.GOOS == "windows" { - return "USERPROFILE" - } - return "HOME" -} - -// Get returns the home directory of the current user with the help of -// environment variables depending on the target operating system. -// Returned path should be used with "path/filepath" to form new paths. -func Get() string { - home := os.Getenv(Key()) - if home == "" && runtime.GOOS != "windows" { - if u, err := user.CurrentUser(); err == nil { - return u.Home - } - } - return home -} - -// GetShortcutString returns the string that is shortcut to user's home directory -// in the native shell of the platform running on. 
-func GetShortcutString() string { - if runtime.GOOS == "windows" { - return "%USERPROFILE%" // be careful while using in format functions - } - return "~" -} diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go deleted file mode 100644 index 012fe52a2..000000000 --- a/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go +++ /dev/null @@ -1,23 +0,0 @@ -// +build linux - -package homedir - -import ( - "os" - - "github.com/docker/docker/pkg/idtools" -) - -// GetStatic returns the home directory for the current user without calling -// os/user.Current(). This is useful for static-linked binary on glibc-based -// system, because a call to os/user.Current() in a static binary leads to -// segfault due to a glibc issue that won't be fixed in a short term. -// (#29344, golang/go#13470, https://sourceware.org/bugzilla/show_bug.cgi?id=19341) -func GetStatic() (string, error) { - uid := os.Getuid() - usr, err := idtools.LookupUID(uid) - if err != nil { - return "", err - } - return usr.Home, nil -} diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go deleted file mode 100644 index 6b96b856f..000000000 --- a/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build !linux - -package homedir - -import ( - "errors" -) - -// GetStatic is not needed for non-linux systems. -// (Precisely, it is needed only for glibc-based linux systems.) -func GetStatic() (string, error) { - return "", errors.New("homedir.GetStatic() is not supported on this system") -} diff --git a/vendor/github.com/docker/docker/pkg/httputils/httputils.go b/vendor/github.com/docker/docker/pkg/httputils/httputils.go deleted file mode 100644 index af86835bd..000000000 --- a/vendor/github.com/docker/docker/pkg/httputils/httputils.go +++ /dev/null @@ -1,56 +0,0 @@ -package httputils - -import ( - "errors" - "fmt" - "net/http" - "regexp" - "strings" - - "github.com/docker/docker/pkg/jsonmessage" -) - -var ( - headerRegexp = regexp.MustCompile(`^(?:(.+)/(.+?))\((.+)\).*$`) - errInvalidHeader = errors.New("Bad header, should be in format `docker/version (platform)`") -) - -// Download requests a given URL and returns an io.Reader. -func Download(url string) (resp *http.Response, err error) { - if resp, err = http.Get(url); err != nil { - return nil, err - } - if resp.StatusCode >= 400 { - return nil, fmt.Errorf("Got HTTP status code >= 400: %s", resp.Status) - } - return resp, nil -} - -// NewHTTPRequestError returns a JSON response error. -func NewHTTPRequestError(msg string, res *http.Response) error { - return &jsonmessage.JSONError{ - Message: msg, - Code: res.StatusCode, - } -} - -// ServerHeader contains the server information. -type ServerHeader struct { - App string // docker - Ver string // 1.8.0-dev - OS string // windows or linux -} - -// ParseServerHeader extracts pieces from an HTTP server header -// which is in the format "docker/version (os)" e.g. docker/1.8.0-dev (windows). 
-func ParseServerHeader(hdr string) (*ServerHeader, error) { - matches := headerRegexp.FindStringSubmatch(hdr) - if len(matches) != 4 { - return nil, errInvalidHeader - } - return &ServerHeader{ - App: strings.TrimSpace(matches[1]), - Ver: strings.TrimSpace(matches[2]), - OS: strings.TrimSpace(matches[3]), - }, nil -} diff --git a/vendor/github.com/docker/docker/pkg/httputils/mimetype.go b/vendor/github.com/docker/docker/pkg/httputils/mimetype.go deleted file mode 100644 index abef9e9e8..000000000 --- a/vendor/github.com/docker/docker/pkg/httputils/mimetype.go +++ /dev/null @@ -1,29 +0,0 @@ -package httputils - -import ( - "mime" - "net/http" -) - -// MimeTypes stores the MIME content type. -var MimeTypes = struct { - TextPlain string - OctetStream string -}{"text/plain", "application/octet-stream"} - -// DetectContentType returns a best guess representation of the MIME -// content type for the bytes at c. The value detected by -// http.DetectContentType is guaranteed not be nil, defaulting to -// application/octet-stream when a better guess cannot be made. The -// result of this detection is then run through mime.ParseMediaType() -// which separates the actual MIME string from any parameters. -func DetectContentType(c []byte) (string, map[string]string, error) { - - ct := http.DetectContentType(c) - contentType, args, err := mime.ParseMediaType(ct) - if err != nil { - return "", nil, err - } - - return contentType, args, nil -} diff --git a/vendor/github.com/docker/docker/pkg/httputils/resumablerequestreader.go b/vendor/github.com/docker/docker/pkg/httputils/resumablerequestreader.go deleted file mode 100644 index de488fb53..000000000 --- a/vendor/github.com/docker/docker/pkg/httputils/resumablerequestreader.go +++ /dev/null @@ -1,96 +0,0 @@ -package httputils - -import ( - "fmt" - "io" - "net/http" - "time" - - "github.com/Sirupsen/logrus" -) - -type resumableRequestReader struct { - client *http.Client - request *http.Request - lastRange int64 - totalSize int64 - currentResponse *http.Response - failures uint32 - maxFailures uint32 - waitDuration time.Duration -} - -// ResumableRequestReader makes it possible to resume reading a request's body transparently -// maxfail is the number of times we retry to make requests again (not resumes) -// totalsize is the total length of the body; auto detect if not provided -func ResumableRequestReader(c *http.Client, r *http.Request, maxfail uint32, totalsize int64) io.ReadCloser { - return &resumableRequestReader{client: c, request: r, maxFailures: maxfail, totalSize: totalsize, waitDuration: 5 * time.Second} -} - -// ResumableRequestReaderWithInitialResponse makes it possible to resume -// reading the body of an already initiated request. 
-func ResumableRequestReaderWithInitialResponse(c *http.Client, r *http.Request, maxfail uint32, totalsize int64, initialResponse *http.Response) io.ReadCloser { - return &resumableRequestReader{client: c, request: r, maxFailures: maxfail, totalSize: totalsize, currentResponse: initialResponse, waitDuration: 5 * time.Second} -} - -func (r *resumableRequestReader) Read(p []byte) (n int, err error) { - if r.client == nil || r.request == nil { - return 0, fmt.Errorf("client and request can't be nil\n") - } - isFreshRequest := false - if r.lastRange != 0 && r.currentResponse == nil { - readRange := fmt.Sprintf("bytes=%d-%d", r.lastRange, r.totalSize) - r.request.Header.Set("Range", readRange) - time.Sleep(r.waitDuration) - } - if r.currentResponse == nil { - r.currentResponse, err = r.client.Do(r.request) - isFreshRequest = true - } - if err != nil && r.failures+1 != r.maxFailures { - r.cleanUpResponse() - r.failures++ - time.Sleep(time.Duration(r.failures) * r.waitDuration) - return 0, nil - } else if err != nil { - r.cleanUpResponse() - return 0, err - } - if r.currentResponse.StatusCode == 416 && r.lastRange == r.totalSize && r.currentResponse.ContentLength == 0 { - r.cleanUpResponse() - return 0, io.EOF - } else if r.currentResponse.StatusCode != 206 && r.lastRange != 0 && isFreshRequest { - r.cleanUpResponse() - return 0, fmt.Errorf("the server doesn't support byte ranges") - } - if r.totalSize == 0 { - r.totalSize = r.currentResponse.ContentLength - } else if r.totalSize <= 0 { - r.cleanUpResponse() - return 0, fmt.Errorf("failed to auto detect content length") - } - n, err = r.currentResponse.Body.Read(p) - r.lastRange += int64(n) - if err != nil { - r.cleanUpResponse() - } - if err != nil && err != io.EOF { - logrus.Infof("encountered error during pull and clearing it before resume: %s", err) - err = nil - } - return n, err -} - -func (r *resumableRequestReader) Close() error { - r.cleanUpResponse() - r.client = nil - r.request = nil - return nil -} - -func (r *resumableRequestReader) cleanUpResponse() { - if r.currentResponse != nil { - r.currentResponse.Body.Close() - r.currentResponse = nil - } -} diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools.go b/vendor/github.com/docker/docker/pkg/idtools/idtools.go deleted file mode 100644 index 6bca46628..000000000 --- a/vendor/github.com/docker/docker/pkg/idtools/idtools.go +++ /dev/null @@ -1,197 +0,0 @@ -package idtools - -import ( - "bufio" - "fmt" - "os" - "sort" - "strconv" - "strings" -) - -// IDMap contains a single entry for user namespace range remapping. An array -// of IDMap entries represents the structure that will be provided to the Linux -// kernel for creating a user namespace. -type IDMap struct { - ContainerID int `json:"container_id"` - HostID int `json:"host_id"` - Size int `json:"size"` -} - -type subIDRange struct { - Start int - Length int -} - -type ranges []subIDRange - -func (e ranges) Len() int { return len(e) } -func (e ranges) Swap(i, j int) { e[i], e[j] = e[j], e[i] } -func (e ranges) Less(i, j int) bool { return e[i].Start < e[j].Start } - -const ( - subuidFileName string = "/etc/subuid" - subgidFileName string = "/etc/subgid" -) - -// MkdirAllAs creates a directory (include any along the path) and then modifies -// ownership to the requested uid/gid. If the directory already exists, this -// function will still change ownership to the requested uid/gid pair. 
-func MkdirAllAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { - return mkdirAs(path, mode, ownerUID, ownerGID, true, true) -} - -// MkdirAllNewAs creates a directory (include any along the path) and then modifies -// ownership ONLY of newly created directories to the requested uid/gid. If the -// directories along the path exist, no change of ownership will be performed -func MkdirAllNewAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { - return mkdirAs(path, mode, ownerUID, ownerGID, true, false) -} - -// MkdirAs creates a directory and then modifies ownership to the requested uid/gid. -// If the directory already exists, this function still changes ownership -func MkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { - return mkdirAs(path, mode, ownerUID, ownerGID, false, true) -} - -// GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps. -// If the maps are empty, then the root uid/gid will default to "real" 0/0 -func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) { - var uid, gid int - - if uidMap != nil { - xUID, err := ToHost(0, uidMap) - if err != nil { - return -1, -1, err - } - uid = xUID - } - if gidMap != nil { - xGID, err := ToHost(0, gidMap) - if err != nil { - return -1, -1, err - } - gid = xGID - } - return uid, gid, nil -} - -// ToContainer takes an id mapping, and uses it to translate a -// host ID to the remapped ID. If no map is provided, then the translation -// assumes a 1-to-1 mapping and returns the passed in id -func ToContainer(hostID int, idMap []IDMap) (int, error) { - if idMap == nil { - return hostID, nil - } - for _, m := range idMap { - if (hostID >= m.HostID) && (hostID <= (m.HostID + m.Size - 1)) { - contID := m.ContainerID + (hostID - m.HostID) - return contID, nil - } - } - return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID) -} - -// ToHost takes an id mapping and a remapped ID, and translates the -// ID to the mapped host ID. 
If no map is provided, then the translation -// assumes a 1-to-1 mapping and returns the passed in id # -func ToHost(contID int, idMap []IDMap) (int, error) { - if idMap == nil { - return contID, nil - } - for _, m := range idMap { - if (contID >= m.ContainerID) && (contID <= (m.ContainerID + m.Size - 1)) { - hostID := m.HostID + (contID - m.ContainerID) - return hostID, nil - } - } - return -1, fmt.Errorf("Container ID %d cannot be mapped to a host ID", contID) -} - -// CreateIDMappings takes a requested user and group name and -// using the data from /etc/sub{uid,gid} ranges, creates the -// proper uid and gid remapping ranges for that user/group pair -func CreateIDMappings(username, groupname string) ([]IDMap, []IDMap, error) { - subuidRanges, err := parseSubuid(username) - if err != nil { - return nil, nil, err - } - subgidRanges, err := parseSubgid(groupname) - if err != nil { - return nil, nil, err - } - if len(subuidRanges) == 0 { - return nil, nil, fmt.Errorf("No subuid ranges found for user %q", username) - } - if len(subgidRanges) == 0 { - return nil, nil, fmt.Errorf("No subgid ranges found for group %q", groupname) - } - - return createIDMap(subuidRanges), createIDMap(subgidRanges), nil -} - -func createIDMap(subidRanges ranges) []IDMap { - idMap := []IDMap{} - - // sort the ranges by lowest ID first - sort.Sort(subidRanges) - containerID := 0 - for _, idrange := range subidRanges { - idMap = append(idMap, IDMap{ - ContainerID: containerID, - HostID: idrange.Start, - Size: idrange.Length, - }) - containerID = containerID + idrange.Length - } - return idMap -} - -func parseSubuid(username string) (ranges, error) { - return parseSubidFile(subuidFileName, username) -} - -func parseSubgid(username string) (ranges, error) { - return parseSubidFile(subgidFileName, username) -} - -// parseSubidFile will read the appropriate file (/etc/subuid or /etc/subgid) -// and return all found ranges for a specified username. 
If the special value -// "ALL" is supplied for username, then all ranges in the file will be returned -func parseSubidFile(path, username string) (ranges, error) { - var rangeList ranges - - subidFile, err := os.Open(path) - if err != nil { - return rangeList, err - } - defer subidFile.Close() - - s := bufio.NewScanner(subidFile) - for s.Scan() { - if err := s.Err(); err != nil { - return rangeList, err - } - - text := strings.TrimSpace(s.Text()) - if text == "" || strings.HasPrefix(text, "#") { - continue - } - parts := strings.Split(text, ":") - if len(parts) != 3 { - return rangeList, fmt.Errorf("Cannot parse subuid/gid information: Format not correct for %s file", path) - } - if parts[0] == username || username == "ALL" { - startid, err := strconv.Atoi(parts[1]) - if err != nil { - return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err) - } - length, err := strconv.Atoi(parts[2]) - if err != nil { - return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err) - } - rangeList = append(rangeList, subIDRange{startid, length}) - } - } - return rangeList, nil -} diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go deleted file mode 100644 index 7c7e82aee..000000000 --- a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go +++ /dev/null @@ -1,204 +0,0 @@ -// +build !windows - -package idtools - -import ( - "bytes" - "fmt" - "io" - "os" - "path/filepath" - "strings" - "sync" - - "github.com/docker/docker/pkg/system" - "github.com/opencontainers/runc/libcontainer/user" -) - -var ( - entOnce sync.Once - getentCmd string -) - -func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error { - // make an array containing the original path asked for, plus (for mkAll == true) - // all path components leading up to the complete path that don't exist before we MkdirAll - // so that we can chown all of them properly at the end. 
If chownExisting is false, we won't - // chown the full directory path if it exists - var paths []string - if _, err := os.Stat(path); err != nil && os.IsNotExist(err) { - paths = []string{path} - } else if err == nil && chownExisting { - // short-circuit--we were called with an existing directory and chown was requested - return os.Chown(path, ownerUID, ownerGID) - } else if err == nil { - // nothing to do; directory path fully exists already and chown was NOT requested - return nil - } - - if mkAll { - // walk back to "/" looking for directories which do not exist - // and add them to the paths array for chown after creation - dirPath := path - for { - dirPath = filepath.Dir(dirPath) - if dirPath == "/" { - break - } - if _, err := os.Stat(dirPath); err != nil && os.IsNotExist(err) { - paths = append(paths, dirPath) - } - } - if err := system.MkdirAll(path, mode); err != nil && !os.IsExist(err) { - return err - } - } else { - if err := os.Mkdir(path, mode); err != nil && !os.IsExist(err) { - return err - } - } - // even if it existed, we will chown the requested path + any subpaths that - // didn't exist when we called MkdirAll - for _, pathComponent := range paths { - if err := os.Chown(pathComponent, ownerUID, ownerGID); err != nil { - return err - } - } - return nil -} - -// CanAccess takes a valid (existing) directory and a uid, gid pair and determines -// if that uid, gid pair has access (execute bit) to the directory -func CanAccess(path string, uid, gid int) bool { - statInfo, err := system.Stat(path) - if err != nil { - return false - } - fileMode := os.FileMode(statInfo.Mode()) - permBits := fileMode.Perm() - return accessible(statInfo.UID() == uint32(uid), - statInfo.GID() == uint32(gid), permBits) -} - -func accessible(isOwner, isGroup bool, perms os.FileMode) bool { - if isOwner && (perms&0100 == 0100) { - return true - } - if isGroup && (perms&0010 == 0010) { - return true - } - if perms&0001 == 0001 { - return true - } - return false -} - -// LookupUser uses traditional local system files lookup (from libcontainer/user) on a username, -// followed by a call to `getent` for supporting host configured non-files passwd and group dbs -func LookupUser(username string) (user.User, error) { - // first try a local system files lookup using existing capabilities - usr, err := user.LookupUser(username) - if err == nil { - return usr, nil - } - // local files lookup failed; attempt to call `getent` to query configured passwd dbs - usr, err = getentUser(fmt.Sprintf("%s %s", "passwd", username)) - if err != nil { - return user.User{}, err - } - return usr, nil -} - -// LookupUID uses traditional local system files lookup (from libcontainer/user) on a uid, -// followed by a call to `getent` for supporting host configured non-files passwd and group dbs -func LookupUID(uid int) (user.User, error) { - // first try a local system files lookup using existing capabilities - usr, err := user.LookupUid(uid) - if err == nil { - return usr, nil - } - // local files lookup failed; attempt to call `getent` to query configured passwd dbs - return getentUser(fmt.Sprintf("%s %d", "passwd", uid)) -} - -func getentUser(args string) (user.User, error) { - reader, err := callGetent(args) - if err != nil { - return user.User{}, err - } - users, err := user.ParsePasswd(reader) - if err != nil { - return user.User{}, err - } - if len(users) == 0 { - return user.User{}, fmt.Errorf("getent failed to find passwd entry for %q", strings.Split(args, " ")[1]) - } - return users[0], nil -} - -// LookupGroup uses 
traditional local system files lookup (from libcontainer/user) on a group name, -// followed by a call to `getent` for supporting host configured non-files passwd and group dbs -func LookupGroup(groupname string) (user.Group, error) { - // first try a local system files lookup using existing capabilities - group, err := user.LookupGroup(groupname) - if err == nil { - return group, nil - } - // local files lookup failed; attempt to call `getent` to query configured group dbs - return getentGroup(fmt.Sprintf("%s %s", "group", groupname)) -} - -// LookupGID uses traditional local system files lookup (from libcontainer/user) on a group ID, -// followed by a call to `getent` for supporting host configured non-files passwd and group dbs -func LookupGID(gid int) (user.Group, error) { - // first try a local system files lookup using existing capabilities - group, err := user.LookupGid(gid) - if err == nil { - return group, nil - } - // local files lookup failed; attempt to call `getent` to query configured group dbs - return getentGroup(fmt.Sprintf("%s %d", "group", gid)) -} - -func getentGroup(args string) (user.Group, error) { - reader, err := callGetent(args) - if err != nil { - return user.Group{}, err - } - groups, err := user.ParseGroup(reader) - if err != nil { - return user.Group{}, err - } - if len(groups) == 0 { - return user.Group{}, fmt.Errorf("getent failed to find groups entry for %q", strings.Split(args, " ")[1]) - } - return groups[0], nil -} - -func callGetent(args string) (io.Reader, error) { - entOnce.Do(func() { getentCmd, _ = resolveBinary("getent") }) - // if no `getent` command on host, can't do anything else - if getentCmd == "" { - return nil, fmt.Errorf("") - } - out, err := execCmd(getentCmd, args) - if err != nil { - exitCode, errC := system.GetExitCode(err) - if errC != nil { - return nil, err - } - switch exitCode { - case 1: - return nil, fmt.Errorf("getent reported invalid parameters/database unknown") - case 2: - terms := strings.Split(args, " ") - return nil, fmt.Errorf("getent unable to find entry %q in %s database", terms[1], terms[0]) - case 3: - return nil, fmt.Errorf("getent database doesn't support enumeration") - default: - return nil, err - } - - } - return bytes.NewReader(out), nil -} diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go deleted file mode 100644 index 49f67e78c..000000000 --- a/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go +++ /dev/null @@ -1,25 +0,0 @@ -// +build windows - -package idtools - -import ( - "os" - - "github.com/docker/docker/pkg/system" -) - -// Platforms such as Windows do not support the UID/GID concept. So make this -// just a wrapper around system.MkdirAll. 
-func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error { - if err := system.MkdirAll(path, mode); err != nil && !os.IsExist(err) { - return err - } - return nil -} - -// CanAccess takes a valid (existing) directory and a uid, gid pair and determines -// if that uid, gid pair has access (execute bit) to the directory -// Windows does not require/support this function, so always return true -func CanAccess(path string, uid, gid int) bool { - return true -} diff --git a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go deleted file mode 100644 index 9da7975e2..000000000 --- a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go +++ /dev/null @@ -1,164 +0,0 @@ -package idtools - -import ( - "fmt" - "regexp" - "sort" - "strconv" - "strings" - "sync" -) - -// add a user and/or group to Linux /etc/passwd, /etc/group using standard -// Linux distribution commands: -// adduser --system --shell /bin/false --disabled-login --disabled-password --no-create-home --group -// useradd -r -s /bin/false - -var ( - once sync.Once - userCommand string - - cmdTemplates = map[string]string{ - "adduser": "--system --shell /bin/false --no-create-home --disabled-login --disabled-password --group %s", - "useradd": "-r -s /bin/false %s", - "usermod": "-%s %d-%d %s", - } - - idOutRegexp = regexp.MustCompile(`uid=([0-9]+).*gid=([0-9]+)`) - // default length for a UID/GID subordinate range - defaultRangeLen = 65536 - defaultRangeStart = 100000 - userMod = "usermod" -) - -// AddNamespaceRangesUser takes a username and uses the standard system -// utility to create a system user/group pair used to hold the -// /etc/sub{uid,gid} ranges which will be used for user namespace -// mapping ranges in containers. 
-func AddNamespaceRangesUser(name string) (int, int, error) { - if err := addUser(name); err != nil { - return -1, -1, fmt.Errorf("Error adding user %q: %v", name, err) - } - - // Query the system for the created uid and gid pair - out, err := execCmd("id", name) - if err != nil { - return -1, -1, fmt.Errorf("Error trying to find uid/gid for new user %q: %v", name, err) - } - matches := idOutRegexp.FindStringSubmatch(strings.TrimSpace(string(out))) - if len(matches) != 3 { - return -1, -1, fmt.Errorf("Can't find uid, gid from `id` output: %q", string(out)) - } - uid, err := strconv.Atoi(matches[1]) - if err != nil { - return -1, -1, fmt.Errorf("Can't convert found uid (%s) to int: %v", matches[1], err) - } - gid, err := strconv.Atoi(matches[2]) - if err != nil { - return -1, -1, fmt.Errorf("Can't convert found gid (%s) to int: %v", matches[2], err) - } - - // Now we need to create the subuid/subgid ranges for our new user/group (system users - // do not get auto-created ranges in subuid/subgid) - - if err := createSubordinateRanges(name); err != nil { - return -1, -1, fmt.Errorf("Couldn't create subordinate ID ranges: %v", err) - } - return uid, gid, nil -} - -func addUser(userName string) error { - once.Do(func() { - // set up which commands are used for adding users/groups dependent on distro - if _, err := resolveBinary("adduser"); err == nil { - userCommand = "adduser" - } else if _, err := resolveBinary("useradd"); err == nil { - userCommand = "useradd" - } - }) - if userCommand == "" { - return fmt.Errorf("Cannot add user; no useradd/adduser binary found") - } - args := fmt.Sprintf(cmdTemplates[userCommand], userName) - out, err := execCmd(userCommand, args) - if err != nil { - return fmt.Errorf("Failed to add user with error: %v; output: %q", err, string(out)) - } - return nil -} - -func createSubordinateRanges(name string) error { - - // first, we should verify that ranges weren't automatically created - // by the distro tooling - ranges, err := parseSubuid(name) - if err != nil { - return fmt.Errorf("Error while looking for subuid ranges for user %q: %v", name, err) - } - if len(ranges) == 0 { - // no UID ranges; let's create one - startID, err := findNextUIDRange() - if err != nil { - return fmt.Errorf("Can't find available subuid range: %v", err) - } - out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "v", startID, startID+defaultRangeLen-1, name)) - if err != nil { - return fmt.Errorf("Unable to add subuid range to user: %q; output: %s, err: %v", name, out, err) - } - } - - ranges, err = parseSubgid(name) - if err != nil { - return fmt.Errorf("Error while looking for subgid ranges for user %q: %v", name, err) - } - if len(ranges) == 0 { - // no GID ranges; let's create one - startID, err := findNextGIDRange() - if err != nil { - return fmt.Errorf("Can't find available subgid range: %v", err) - } - out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "w", startID, startID+defaultRangeLen-1, name)) - if err != nil { - return fmt.Errorf("Unable to add subgid range to user: %q; output: %s, err: %v", name, out, err) - } - } - return nil -} - -func findNextUIDRange() (int, error) { - ranges, err := parseSubuid("ALL") - if err != nil { - return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subuid file: %v", err) - } - sort.Sort(ranges) - return findNextRangeStart(ranges) -} - -func findNextGIDRange() (int, error) { - ranges, err := parseSubgid("ALL") - if err != nil { - return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subgid file: %v", err) - } - 
sort.Sort(ranges) - return findNextRangeStart(ranges) -} - -func findNextRangeStart(rangeList ranges) (int, error) { - startID := defaultRangeStart - for _, arange := range rangeList { - if wouldOverlap(arange, startID) { - startID = arange.Start + arange.Length - } - } - return startID, nil -} - -func wouldOverlap(arange subIDRange, ID int) bool { - low := ID - high := ID + defaultRangeLen - if (low >= arange.Start && low <= arange.Start+arange.Length) || - (high <= arange.Start+arange.Length && high >= arange.Start) { - return true - } - return false -} diff --git a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go deleted file mode 100644 index d98b354cb..000000000 --- a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build !linux - -package idtools - -import "fmt" - -// AddNamespaceRangesUser takes a name and finds an unused uid, gid pair -// and calls the appropriate helper function to add the group and then -// the user to the group in /etc/group and /etc/passwd respectively. -func AddNamespaceRangesUser(name string) (int, int, error) { - return -1, -1, fmt.Errorf("No support for adding users or groups on this OS") -} diff --git a/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go b/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go deleted file mode 100644 index 9703ecbd9..000000000 --- a/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go +++ /dev/null @@ -1,32 +0,0 @@ -// +build !windows - -package idtools - -import ( - "fmt" - "os/exec" - "path/filepath" - "strings" -) - -func resolveBinary(binname string) (string, error) { - binaryPath, err := exec.LookPath(binname) - if err != nil { - return "", err - } - resolvedPath, err := filepath.EvalSymlinks(binaryPath) - if err != nil { - return "", err - } - //only return no error if the final resolved binary basename - //matches what was searched for - if filepath.Base(resolvedPath) == binname { - return resolvedPath, nil - } - return "", fmt.Errorf("Binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath) -} - -func execCmd(cmd, args string) ([]byte, error) { - execCmd := exec.Command(cmd, strings.Split(args, " ")...) - return execCmd.CombinedOutput() -} diff --git a/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog.go b/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog.go deleted file mode 100644 index 4734c3111..000000000 --- a/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog.go +++ /dev/null @@ -1,42 +0,0 @@ -package jsonlog - -import ( - "encoding/json" - "fmt" - "time" -) - -// JSONLog represents a log message, typically a single entry from a given log stream. -// JSONLogs can be easily serialized to and from JSON and support custom formatting. -type JSONLog struct { - // Log is the log message - Log string `json:"log,omitempty"` - // Stream is the log source - Stream string `json:"stream,omitempty"` - // Created is the created timestamp of log - Created time.Time `json:"time"` - // Attrs is the list of extra attributes provided by the user - Attrs map[string]string `json:"attrs,omitempty"` -} - -// Format returns the log formatted according to format -// If format is nil, returns the log message -// If format is json, returns the log marshaled in json format -// By default, returns the log with the log time formatted according to format. 
-func (jl *JSONLog) Format(format string) (string, error) { - if format == "" { - return jl.Log, nil - } - if format == "json" { - m, err := json.Marshal(jl) - return string(m), err - } - return fmt.Sprintf("%s %s", jl.Created.Format(format), jl.Log), nil -} - -// Reset resets the log to nil. -func (jl *JSONLog) Reset() { - jl.Log = "" - jl.Stream = "" - jl.Created = time.Time{} -} diff --git a/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog_marshalling.go b/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog_marshalling.go deleted file mode 100644 index 83ce684a8..000000000 --- a/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog_marshalling.go +++ /dev/null @@ -1,178 +0,0 @@ -// This code was initially generated by ffjson -// This code was generated via the following steps: -// $ go get -u github.com/pquerna/ffjson -// $ make BIND_DIR=. shell -// $ ffjson pkg/jsonlog/jsonlog.go -// $ mv pkg/jsonglog/jsonlog_ffjson.go pkg/jsonlog/jsonlog_marshalling.go -// -// It has been modified to improve the performance of time marshalling to JSON -// and to clean it up. -// Should this code need to be regenerated when the JSONLog struct is changed, -// the relevant changes which have been made are: -// import ( -// "bytes" -//- -// "unicode/utf8" -// ) -// -// func (mj *JSONLog) MarshalJSON() ([]byte, error) { -//@@ -20,13 +16,13 @@ func (mj *JSONLog) MarshalJSON() ([]byte, error) { -// } -// return buf.Bytes(), nil -// } -//+ -// func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { -//- var err error -//- var obj []byte -//- var first bool = true -//- _ = obj -//- _ = err -//- _ = first -//+ var ( -//+ err error -//+ timestamp string -//+ first bool = true -//+ ) -// buf.WriteString(`{`) -// if len(mj.Log) != 0 { -// if first == true { -//@@ -52,11 +48,11 @@ func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { -// buf.WriteString(`,`) -// } -// buf.WriteString(`"time":`) -//- obj, err = mj.Created.MarshalJSON() -//+ timestamp, err = FastTimeMarshalJSON(mj.Created) -// if err != nil { -// return err -// } -//- buf.Write(obj) -//+ buf.WriteString(timestamp) -// buf.WriteString(`}`) -// return nil -// } -// @@ -81,9 +81,10 @@ func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { -// if len(mj.Log) != 0 { -// - if first == true { -// - first = false -// - } else { -// - buf.WriteString(`,`) -// - } -// + first = false -// buf.WriteString(`"log":`) -// ffjsonWriteJSONString(buf, mj.Log) -// } - -package jsonlog - -import ( - "bytes" - "unicode/utf8" -) - -// MarshalJSON marshals the JSONLog. -func (mj *JSONLog) MarshalJSON() ([]byte, error) { - var buf bytes.Buffer - buf.Grow(1024) - if err := mj.MarshalJSONBuf(&buf); err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -// MarshalJSONBuf marshals the JSONLog and stores the result to a bytes.Buffer. 
-func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { - var ( - err error - timestamp string - first = true - ) - buf.WriteString(`{`) - if len(mj.Log) != 0 { - first = false - buf.WriteString(`"log":`) - ffjsonWriteJSONString(buf, mj.Log) - } - if len(mj.Stream) != 0 { - if first { - first = false - } else { - buf.WriteString(`,`) - } - buf.WriteString(`"stream":`) - ffjsonWriteJSONString(buf, mj.Stream) - } - if !first { - buf.WriteString(`,`) - } - buf.WriteString(`"time":`) - timestamp, err = FastTimeMarshalJSON(mj.Created) - if err != nil { - return err - } - buf.WriteString(timestamp) - buf.WriteString(`}`) - return nil -} - -func ffjsonWriteJSONString(buf *bytes.Buffer, s string) { - const hex = "0123456789abcdef" - - buf.WriteByte('"') - start := 0 - for i := 0; i < len(s); { - if b := s[i]; b < utf8.RuneSelf { - if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' { - i++ - continue - } - if start < i { - buf.WriteString(s[start:i]) - } - switch b { - case '\\', '"': - buf.WriteByte('\\') - buf.WriteByte(b) - case '\n': - buf.WriteByte('\\') - buf.WriteByte('n') - case '\r': - buf.WriteByte('\\') - buf.WriteByte('r') - default: - - buf.WriteString(`\u00`) - buf.WriteByte(hex[b>>4]) - buf.WriteByte(hex[b&0xF]) - } - i++ - start = i - continue - } - c, size := utf8.DecodeRuneInString(s[i:]) - if c == utf8.RuneError && size == 1 { - if start < i { - buf.WriteString(s[start:i]) - } - buf.WriteString(`\ufffd`) - i += size - start = i - continue - } - - if c == '\u2028' || c == '\u2029' { - if start < i { - buf.WriteString(s[start:i]) - } - buf.WriteString(`\u202`) - buf.WriteByte(hex[c&0xF]) - i += size - start = i - continue - } - i += size - } - if start < len(s) { - buf.WriteString(s[start:]) - } - buf.WriteByte('"') -} diff --git a/vendor/github.com/docker/docker/pkg/jsonlog/jsonlogbytes.go b/vendor/github.com/docker/docker/pkg/jsonlog/jsonlogbytes.go deleted file mode 100644 index 0ba716f26..000000000 --- a/vendor/github.com/docker/docker/pkg/jsonlog/jsonlogbytes.go +++ /dev/null @@ -1,122 +0,0 @@ -package jsonlog - -import ( - "bytes" - "encoding/json" - "unicode/utf8" -) - -// JSONLogs is based on JSONLog. -// It allows marshalling JSONLog from Log as []byte -// and an already marshalled Created timestamp. -type JSONLogs struct { - Log []byte `json:"log,omitempty"` - Stream string `json:"stream,omitempty"` - Created string `json:"time"` - - // json-encoded bytes - RawAttrs json.RawMessage `json:"attrs,omitempty"` -} - -// MarshalJSONBuf is based on the same method from JSONLog -// It has been modified to take into account the necessary changes. -func (mj *JSONLogs) MarshalJSONBuf(buf *bytes.Buffer) error { - var first = true - - buf.WriteString(`{`) - if len(mj.Log) != 0 { - first = false - buf.WriteString(`"log":`) - ffjsonWriteJSONBytesAsString(buf, mj.Log) - } - if len(mj.Stream) != 0 { - if first { - first = false - } else { - buf.WriteString(`,`) - } - buf.WriteString(`"stream":`) - ffjsonWriteJSONString(buf, mj.Stream) - } - if len(mj.RawAttrs) > 0 { - if first { - first = false - } else { - buf.WriteString(`,`) - } - buf.WriteString(`"attrs":`) - buf.Write(mj.RawAttrs) - } - if !first { - buf.WriteString(`,`) - } - buf.WriteString(`"time":`) - buf.WriteString(mj.Created) - buf.WriteString(`}`) - return nil -} - -// This is based on ffjsonWriteJSONBytesAsString. It has been changed -// to accept a string passed as a slice of bytes. 
-func ffjsonWriteJSONBytesAsString(buf *bytes.Buffer, s []byte) { - const hex = "0123456789abcdef" - - buf.WriteByte('"') - start := 0 - for i := 0; i < len(s); { - if b := s[i]; b < utf8.RuneSelf { - if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' { - i++ - continue - } - if start < i { - buf.Write(s[start:i]) - } - switch b { - case '\\', '"': - buf.WriteByte('\\') - buf.WriteByte(b) - case '\n': - buf.WriteByte('\\') - buf.WriteByte('n') - case '\r': - buf.WriteByte('\\') - buf.WriteByte('r') - default: - - buf.WriteString(`\u00`) - buf.WriteByte(hex[b>>4]) - buf.WriteByte(hex[b&0xF]) - } - i++ - start = i - continue - } - c, size := utf8.DecodeRune(s[i:]) - if c == utf8.RuneError && size == 1 { - if start < i { - buf.Write(s[start:i]) - } - buf.WriteString(`\ufffd`) - i += size - start = i - continue - } - - if c == '\u2028' || c == '\u2029' { - if start < i { - buf.Write(s[start:i]) - } - buf.WriteString(`\u202`) - buf.WriteByte(hex[c&0xF]) - i += size - start = i - continue - } - i += size - } - if start < len(s) { - buf.Write(s[start:]) - } - buf.WriteByte('"') -} diff --git a/vendor/github.com/docker/docker/pkg/jsonlog/time_marshalling.go b/vendor/github.com/docker/docker/pkg/jsonlog/time_marshalling.go deleted file mode 100644 index 211733814..000000000 --- a/vendor/github.com/docker/docker/pkg/jsonlog/time_marshalling.go +++ /dev/null @@ -1,27 +0,0 @@ -// Package jsonlog provides helper functions to parse and print time (time.Time) as JSON. -package jsonlog - -import ( - "errors" - "time" -) - -const ( - // RFC3339NanoFixed is our own version of RFC339Nano because we want one - // that pads the nano seconds part with zeros to ensure - // the timestamps are aligned in the logs. - RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00" - // JSONFormat is the format used by FastMarshalJSON - JSONFormat = `"` + time.RFC3339Nano + `"` -) - -// FastTimeMarshalJSON avoids one of the extra allocations that -// time.MarshalJSON is making. -func FastTimeMarshalJSON(t time.Time) (string, error) { - if y := t.Year(); y < 0 || y >= 10000 { - // RFC 3339 is clear that years are 4 digits exactly. - // See golang.org/issue/4556#c15 for more discussion. - return "", errors.New("time.MarshalJSON: year outside of range [0,9999]") - } - return t.Format(JSONFormat), nil -} diff --git a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go deleted file mode 100644 index c3b1371cd..000000000 --- a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go +++ /dev/null @@ -1,297 +0,0 @@ -package jsonmessage - -import ( - "encoding/json" - "fmt" - "io" - "os" - "strings" - "time" - - "github.com/Nvveen/Gotty" - - "github.com/docker/docker/pkg/jsonlog" - "github.com/docker/docker/pkg/term" - "github.com/docker/go-units" -) - -// JSONError wraps a concrete Code and Message, `Code` is -// is an integer error code, `Message` is the error message. -type JSONError struct { - Code int `json:"code,omitempty"` - Message string `json:"message,omitempty"` -} - -func (e *JSONError) Error() string { - return e.Message -} - -// JSONProgress describes a Progress. terminalFd is the fd of the current terminal, -// Start is the initial value for the operation. Current is the current status and -// value of the progress made towards Total. Total is the end value describing when -// we made 100% progress for an operation. 
-type JSONProgress struct { - terminalFd uintptr - Current int64 `json:"current,omitempty"` - Total int64 `json:"total,omitempty"` - Start int64 `json:"start,omitempty"` - // If true, don't show xB/yB - HideCounts bool `json:"hidecounts,omitempty"` -} - -func (p *JSONProgress) String() string { - var ( - width = 200 - pbBox string - numbersBox string - timeLeftBox string - ) - - ws, err := term.GetWinsize(p.terminalFd) - if err == nil { - width = int(ws.Width) - } - - if p.Current <= 0 && p.Total <= 0 { - return "" - } - current := units.HumanSize(float64(p.Current)) - if p.Total <= 0 { - return fmt.Sprintf("%8v", current) - } - total := units.HumanSize(float64(p.Total)) - percentage := int(float64(p.Current)/float64(p.Total)*100) / 2 - if percentage > 50 { - percentage = 50 - } - if width > 110 { - // this number can't be negative gh#7136 - numSpaces := 0 - if 50-percentage > 0 { - numSpaces = 50 - percentage - } - pbBox = fmt.Sprintf("[%s>%s] ", strings.Repeat("=", percentage), strings.Repeat(" ", numSpaces)) - } - - if !p.HideCounts { - numbersBox = fmt.Sprintf("%8v/%v", current, total) - - if p.Current > p.Total { - // remove total display if the reported current is wonky. - numbersBox = fmt.Sprintf("%8v", current) - } - } - - if p.Current > 0 && p.Start > 0 && percentage < 50 { - fromStart := time.Now().UTC().Sub(time.Unix(p.Start, 0)) - perEntry := fromStart / time.Duration(p.Current) - left := time.Duration(p.Total-p.Current) * perEntry - left = (left / time.Second) * time.Second - - if width > 50 { - timeLeftBox = " " + left.String() - } - } - return pbBox + numbersBox + timeLeftBox -} - -// JSONMessage defines a message struct. It describes -// the created time, where it from, status, ID of the -// message. It's used for docker events. -type JSONMessage struct { - Stream string `json:"stream,omitempty"` - Status string `json:"status,omitempty"` - Progress *JSONProgress `json:"progressDetail,omitempty"` - ProgressMessage string `json:"progress,omitempty"` //deprecated - ID string `json:"id,omitempty"` - From string `json:"from,omitempty"` - Time int64 `json:"time,omitempty"` - TimeNano int64 `json:"timeNano,omitempty"` - Error *JSONError `json:"errorDetail,omitempty"` - ErrorMessage string `json:"error,omitempty"` //deprecated - // Aux contains out-of-band data, such as digests for push signing. - Aux *json.RawMessage `json:"aux,omitempty"` -} - -/* Satisfied by gotty.TermInfo as well as noTermInfo from below */ -type termInfo interface { - Parse(attr string, params ...interface{}) (string, error) -} - -type noTermInfo struct{} // canary used when no terminfo. - -func (ti *noTermInfo) Parse(attr string, params ...interface{}) (string, error) { - return "", fmt.Errorf("noTermInfo") -} - -func clearLine(out io.Writer, ti termInfo) { - // el2 (clear whole line) is not exposed by terminfo. 
- - // First clear line from beginning to cursor - if attr, err := ti.Parse("el1"); err == nil { - fmt.Fprintf(out, "%s", attr) - } else { - fmt.Fprintf(out, "\x1b[1K") - } - // Then clear line from cursor to end - if attr, err := ti.Parse("el"); err == nil { - fmt.Fprintf(out, "%s", attr) - } else { - fmt.Fprintf(out, "\x1b[K") - } -} - -func cursorUp(out io.Writer, ti termInfo, l int) { - if l == 0 { // Should never be the case, but be tolerant - return - } - if attr, err := ti.Parse("cuu", l); err == nil { - fmt.Fprintf(out, "%s", attr) - } else { - fmt.Fprintf(out, "\x1b[%dA", l) - } -} - -func cursorDown(out io.Writer, ti termInfo, l int) { - if l == 0 { // Should never be the case, but be tolerant - return - } - if attr, err := ti.Parse("cud", l); err == nil { - fmt.Fprintf(out, "%s", attr) - } else { - fmt.Fprintf(out, "\x1b[%dB", l) - } -} - -// Display displays the JSONMessage to `out`. `termInfo` is non-nil if `out` -// is a terminal. If this is the case, it will erase the entire current line -// when displaying the progressbar. -func (jm *JSONMessage) Display(out io.Writer, termInfo termInfo) error { - if jm.Error != nil { - if jm.Error.Code == 401 { - return fmt.Errorf("Authentication is required.") - } - return jm.Error - } - var endl string - if termInfo != nil && jm.Stream == "" && jm.Progress != nil { - clearLine(out, termInfo) - endl = "\r" - fmt.Fprintf(out, endl) - } else if jm.Progress != nil && jm.Progress.String() != "" { //disable progressbar in non-terminal - return nil - } - if jm.TimeNano != 0 { - fmt.Fprintf(out, "%s ", time.Unix(0, jm.TimeNano).Format(jsonlog.RFC3339NanoFixed)) - } else if jm.Time != 0 { - fmt.Fprintf(out, "%s ", time.Unix(jm.Time, 0).Format(jsonlog.RFC3339NanoFixed)) - } - if jm.ID != "" { - fmt.Fprintf(out, "%s: ", jm.ID) - } - if jm.From != "" { - fmt.Fprintf(out, "(from %s) ", jm.From) - } - if jm.Progress != nil && termInfo != nil { - fmt.Fprintf(out, "%s %s%s", jm.Status, jm.Progress.String(), endl) - } else if jm.ProgressMessage != "" { //deprecated - fmt.Fprintf(out, "%s %s%s", jm.Status, jm.ProgressMessage, endl) - } else if jm.Stream != "" { - fmt.Fprintf(out, "%s%s", jm.Stream, endl) - } else { - fmt.Fprintf(out, "%s%s\n", jm.Status, endl) - } - return nil -} - -// DisplayJSONMessagesStream displays a json message stream from `in` to `out`, `isTerminal` -// describes if `out` is a terminal. If this is the case, it will print `\n` at the end of -// each line and move the cursor while displaying. 
-func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, isTerminal bool, auxCallback func(*json.RawMessage)) error { - var ( - dec = json.NewDecoder(in) - ids = make(map[string]int) - ) - - var termInfo termInfo - - if isTerminal { - term := os.Getenv("TERM") - if term == "" { - term = "vt102" - } - - var err error - if termInfo, err = gotty.OpenTermInfo(term); err != nil { - termInfo = &noTermInfo{} - } - } - - for { - diff := 0 - var jm JSONMessage - if err := dec.Decode(&jm); err != nil { - if err == io.EOF { - break - } - return err - } - - if jm.Aux != nil { - if auxCallback != nil { - auxCallback(jm.Aux) - } - continue - } - - if jm.Progress != nil { - jm.Progress.terminalFd = terminalFd - } - if jm.ID != "" && (jm.Progress != nil || jm.ProgressMessage != "") { - line, ok := ids[jm.ID] - if !ok { - // NOTE: This approach of using len(id) to - // figure out the number of lines of history - // only works as long as we clear the history - // when we output something that's not - // accounted for in the map, such as a line - // with no ID. - line = len(ids) - ids[jm.ID] = line - if termInfo != nil { - fmt.Fprintf(out, "\n") - } - } - diff = len(ids) - line - if termInfo != nil { - cursorUp(out, termInfo, diff) - } - } else { - // When outputting something that isn't progress - // output, clear the history of previous lines. We - // don't want progress entries from some previous - // operation to be updated (for example, pull -a - // with multiple tags). - ids = make(map[string]int) - } - err := jm.Display(out, termInfo) - if jm.ID != "" && termInfo != nil { - cursorDown(out, termInfo, diff) - } - if err != nil { - return err - } - } - return nil -} - -type stream interface { - io.Writer - FD() uintptr - IsTerminal() bool -} - -// DisplayJSONMessagesToStream prints json messages to the output stream -func DisplayJSONMessagesToStream(in io.Reader, stream stream, auxCallback func(*json.RawMessage)) error { - return DisplayJSONMessagesStream(in, stream, stream.FD(), stream.IsTerminal(), auxCallback) -} diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel.go deleted file mode 100644 index 7738fc741..000000000 --- a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel.go +++ /dev/null @@ -1,74 +0,0 @@ -// +build !windows - -// Package kernel provides helper function to get, parse and compare kernel -// versions for different platforms. -package kernel - -import ( - "errors" - "fmt" -) - -// VersionInfo holds information about the kernel. -type VersionInfo struct { - Kernel int // Version of the kernel (e.g. 4.1.2-generic -> 4) - Major int // Major part of the kernel version (e.g. 4.1.2-generic -> 1) - Minor int // Minor part of the kernel version (e.g. 4.1.2-generic -> 2) - Flavor string // Flavor of the kernel version (e.g. 4.1.2-generic -> generic) -} - -func (k *VersionInfo) String() string { - return fmt.Sprintf("%d.%d.%d%s", k.Kernel, k.Major, k.Minor, k.Flavor) -} - -// CompareKernelVersion compares two kernel.VersionInfo structs. -// Returns -1 if a < b, 0 if a == b, 1 it a > b -func CompareKernelVersion(a, b VersionInfo) int { - if a.Kernel < b.Kernel { - return -1 - } else if a.Kernel > b.Kernel { - return 1 - } - - if a.Major < b.Major { - return -1 - } else if a.Major > b.Major { - return 1 - } - - if a.Minor < b.Minor { - return -1 - } else if a.Minor > b.Minor { - return 1 - } - - return 0 -} - -// ParseRelease parses a string and creates a VersionInfo based on it. 
-func ParseRelease(release string) (*VersionInfo, error) { - var ( - kernel, major, minor, parsed int - flavor, partial string - ) - - // Ignore error from Sscanf to allow an empty flavor. Instead, just - // make sure we got all the version numbers. - parsed, _ = fmt.Sscanf(release, "%d.%d%s", &kernel, &major, &partial) - if parsed < 2 { - return nil, errors.New("Can't parse kernel version " + release) - } - - // sometimes we have 3.12.25-gentoo, but sometimes we just have 3.12-1-amd64 - parsed, _ = fmt.Sscanf(partial, ".%d%s", &minor, &flavor) - if parsed < 1 { - flavor = partial - } - - return &VersionInfo{ - Kernel: kernel, - Major: major, - Minor: minor, - Flavor: flavor, - }, nil -} diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_darwin.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_darwin.go deleted file mode 100644 index 71f205b28..000000000 --- a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_darwin.go +++ /dev/null @@ -1,56 +0,0 @@ -// +build darwin - -// Package kernel provides helper function to get, parse and compare kernel -// versions for different platforms. -package kernel - -import ( - "fmt" - "os/exec" - "strings" - - "github.com/mattn/go-shellwords" -) - -// GetKernelVersion gets the current kernel version. -func GetKernelVersion() (*VersionInfo, error) { - release, err := getRelease() - if err != nil { - return nil, err - } - - return ParseRelease(release) -} - -// getRelease uses `system_profiler SPSoftwareDataType` to get OSX kernel version -func getRelease() (string, error) { - cmd := exec.Command("system_profiler", "SPSoftwareDataType") - osName, err := cmd.Output() - if err != nil { - return "", err - } - - var release string - data := strings.Split(string(osName), "\n") - for _, line := range data { - if strings.Contains(line, "Kernel Version") { - // It has the format like ' Kernel Version: Darwin 14.5.0' - content := strings.SplitN(line, ":", 2) - if len(content) != 2 { - return "", fmt.Errorf("Kernel Version is invalid") - } - - prettyNames, err := shellwords.Parse(content[1]) - if err != nil { - return "", fmt.Errorf("Kernel Version is invalid: %s", err.Error()) - } - - if len(prettyNames) != 2 { - return "", fmt.Errorf("Kernel Version needs to be 'Darwin x.x.x' ") - } - release = prettyNames[1] - } - } - - return release, nil -} diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_unix.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_unix.go deleted file mode 100644 index bd137dfb6..000000000 --- a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_unix.go +++ /dev/null @@ -1,45 +0,0 @@ -// +build linux freebsd solaris openbsd - -// Package kernel provides helper function to get, parse and compare kernel -// versions for different platforms. -package kernel - -import ( - "bytes" - - "github.com/Sirupsen/logrus" -) - -// GetKernelVersion gets the current kernel version. -func GetKernelVersion() (*VersionInfo, error) { - uts, err := uname() - if err != nil { - return nil, err - } - - release := make([]byte, len(uts.Release)) - - i := 0 - for _, c := range uts.Release { - release[i] = byte(c) - i++ - } - - // Remove the \x00 from the release for Atoi to parse correctly - release = release[:bytes.IndexByte(release, 0)] - - return ParseRelease(string(release)) -} - -// CheckKernelVersion checks if current kernel is newer than (or equal to) -// the given version. 
-func CheckKernelVersion(k, major, minor int) bool { - if v, err := GetKernelVersion(); err != nil { - logrus.Warnf("error getting kernel version: %s", err) - } else { - if CompareKernelVersion(*v, VersionInfo{Kernel: k, Major: major, Minor: minor}) < 0 { - return false - } - } - return true -} diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go deleted file mode 100644 index 80fab8ff6..000000000 --- a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go +++ /dev/null @@ -1,69 +0,0 @@ -// +build windows - -package kernel - -import ( - "fmt" - "syscall" - "unsafe" -) - -// VersionInfo holds information about the kernel. -type VersionInfo struct { - kvi string // Version of the kernel (e.g. 6.1.7601.17592 -> 6) - major int // Major part of the kernel version (e.g. 6.1.7601.17592 -> 1) - minor int // Minor part of the kernel version (e.g. 6.1.7601.17592 -> 7601) - build int // Build number of the kernel version (e.g. 6.1.7601.17592 -> 17592) -} - -func (k *VersionInfo) String() string { - return fmt.Sprintf("%d.%d %d (%s)", k.major, k.minor, k.build, k.kvi) -} - -// GetKernelVersion gets the current kernel version. -func GetKernelVersion() (*VersionInfo, error) { - - var ( - h syscall.Handle - dwVersion uint32 - err error - ) - - KVI := &VersionInfo{"Unknown", 0, 0, 0} - - if err = syscall.RegOpenKeyEx(syscall.HKEY_LOCAL_MACHINE, - syscall.StringToUTF16Ptr(`SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\`), - 0, - syscall.KEY_READ, - &h); err != nil { - return KVI, err - } - defer syscall.RegCloseKey(h) - - var buf [1 << 10]uint16 - var typ uint32 - n := uint32(len(buf) * 2) // api expects array of bytes, not uint16 - - if err = syscall.RegQueryValueEx(h, - syscall.StringToUTF16Ptr("BuildLabEx"), - nil, - &typ, - (*byte)(unsafe.Pointer(&buf[0])), - &n); err != nil { - return KVI, err - } - - KVI.kvi = syscall.UTF16ToString(buf[:]) - - // Important - docker.exe MUST be manifested for this API to return - // the correct information. - if dwVersion, err = syscall.GetVersion(); err != nil { - return KVI, err - } - - KVI.major = int(dwVersion & 0xFF) - KVI.minor = int((dwVersion & 0XFF00) >> 8) - KVI.build = int((dwVersion & 0xFFFF0000) >> 16) - - return KVI, nil -} diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_linux.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_linux.go deleted file mode 100644 index bb9b32641..000000000 --- a/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_linux.go +++ /dev/null @@ -1,19 +0,0 @@ -package kernel - -import ( - "syscall" -) - -// Utsname represents the system name structure. -// It is passthrough for syscall.Utsname in order to make it portable with -// other platforms where it is not available. 
-type Utsname syscall.Utsname - -func uname() (*syscall.Utsname, error) { - uts := &syscall.Utsname{} - - if err := syscall.Uname(uts); err != nil { - return nil, err - } - return uts, nil -} diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_solaris.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_solaris.go deleted file mode 100644 index 49370bd3d..000000000 --- a/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_solaris.go +++ /dev/null @@ -1,14 +0,0 @@ -package kernel - -import ( - "golang.org/x/sys/unix" -) - -func uname() (*unix.Utsname, error) { - uts := &unix.Utsname{} - - if err := unix.Uname(uts); err != nil { - return nil, err - } - return uts, nil -} diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_unsupported.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_unsupported.go deleted file mode 100644 index 1da3f239f..000000000 --- a/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_unsupported.go +++ /dev/null @@ -1,18 +0,0 @@ -// +build !linux,!solaris - -package kernel - -import ( - "errors" -) - -// Utsname represents the system name structure. -// It is defined here to make it portable as it is available on linux but not -// on windows. -type Utsname struct { - Release [65]byte -} - -func uname() (*Utsname, error) { - return nil, errors.New("Kernel version detection is available only on linux") -} diff --git a/vendor/github.com/docker/docker/pkg/random/random.go b/vendor/github.com/docker/docker/pkg/random/random.go deleted file mode 100644 index 70de4d130..000000000 --- a/vendor/github.com/docker/docker/pkg/random/random.go +++ /dev/null @@ -1,71 +0,0 @@ -package random - -import ( - cryptorand "crypto/rand" - "io" - "math" - "math/big" - "math/rand" - "sync" - "time" -) - -// Rand is a global *rand.Rand instance, which initialized with NewSource() source. -var Rand = rand.New(NewSource()) - -// Reader is a global, shared instance of a pseudorandom bytes generator. -// It doesn't consume entropy. -var Reader io.Reader = &reader{rnd: Rand} - -// copypaste from standard math/rand -type lockedSource struct { - lk sync.Mutex - src rand.Source -} - -func (r *lockedSource) Int63() (n int64) { - r.lk.Lock() - n = r.src.Int63() - r.lk.Unlock() - return -} - -func (r *lockedSource) Seed(seed int64) { - r.lk.Lock() - r.src.Seed(seed) - r.lk.Unlock() -} - -// NewSource returns math/rand.Source safe for concurrent use and initialized -// with current unix-nano timestamp -func NewSource() rand.Source { - var seed int64 - if cryptoseed, err := cryptorand.Int(cryptorand.Reader, big.NewInt(math.MaxInt64)); err != nil { - // This should not happen, but worst-case fallback to time-based seed. 
- seed = time.Now().UnixNano() - } else { - seed = cryptoseed.Int64() - } - return &lockedSource{ - src: rand.NewSource(seed), - } -} - -type reader struct { - rnd *rand.Rand -} - -func (r *reader) Read(b []byte) (int, error) { - i := 0 - for { - val := r.rnd.Int63() - for val > 0 { - b[i] = byte(val) - i++ - if i == len(b) { - return i, nil - } - val >>= 8 - } - } -} diff --git a/vendor/github.com/docker/docker/pkg/stringid/README.md b/vendor/github.com/docker/docker/pkg/stringid/README.md deleted file mode 100644 index 37a5098fd..000000000 --- a/vendor/github.com/docker/docker/pkg/stringid/README.md +++ /dev/null @@ -1 +0,0 @@ -This package provides helper functions for dealing with string identifiers diff --git a/vendor/github.com/docker/docker/pkg/stringid/stringid.go b/vendor/github.com/docker/docker/pkg/stringid/stringid.go deleted file mode 100644 index 82f85596c..000000000 --- a/vendor/github.com/docker/docker/pkg/stringid/stringid.go +++ /dev/null @@ -1,81 +0,0 @@ -// Package stringid provides helper functions for dealing with string identifiers -package stringid - -import ( - "crypto/rand" - "encoding/hex" - "fmt" - "io" - "regexp" - "strconv" - "strings" - - "github.com/docker/docker/pkg/random" -) - -const shortLen = 12 - -var ( - validShortID = regexp.MustCompile("^[a-f0-9]{12}$") - validHex = regexp.MustCompile(`^[a-f0-9]{64}$`) -) - -// IsShortID determines if an arbitrary string *looks like* a short ID. -func IsShortID(id string) bool { - return validShortID.MatchString(id) -} - -// TruncateID returns a shorthand version of a string identifier for convenience. -// A collision with other shorthands is very unlikely, but possible. -// In case of a collision a lookup with TruncIndex.Get() will fail, and the caller -// will need to use a longer prefix, or the full-length Id. -func TruncateID(id string) string { - if i := strings.IndexRune(id, ':'); i >= 0 { - id = id[i+1:] - } - if len(id) > shortLen { - id = id[:shortLen] - } - return id -} - -func generateID(crypto bool) string { - b := make([]byte, 32) - r := random.Reader - if crypto { - r = rand.Reader - } - for { - if _, err := io.ReadFull(r, b); err != nil { - panic(err) // This shouldn't happen - } - id := hex.EncodeToString(b) - // if we try to parse the truncated for as an int and we don't have - // an error then the value is all numeric and causes issues when - // used as a hostname. ref #3869 - if _, err := strconv.ParseInt(TruncateID(id), 10, 64); err == nil { - continue - } - return id - } -} - -// GenerateRandomID returns a unique id. -func GenerateRandomID() string { - return generateID(true) -} - -// GenerateNonCryptoID generates unique id without using cryptographically -// secure sources of random. -// It helps you to save entropy. -func GenerateNonCryptoID() string { - return generateID(false) -} - -// ValidateID checks whether an ID string is a valid image ID. -func ValidateID(id string) error { - if ok := validHex.MatchString(id); !ok { - return fmt.Errorf("image ID %q is invalid", id) - } - return nil -} diff --git a/vendor/github.com/docker/docker/pkg/tarsum/builder_context.go b/vendor/github.com/docker/docker/pkg/tarsum/builder_context.go deleted file mode 100644 index b42983e98..000000000 --- a/vendor/github.com/docker/docker/pkg/tarsum/builder_context.go +++ /dev/null @@ -1,21 +0,0 @@ -package tarsum - -// BuilderContext is an interface extending TarSum by adding the Remove method. 
-// In general there was concern about adding this method to TarSum itself -// so instead it is being added just to "BuilderContext" which will then -// only be used during the .dockerignore file processing -// - see builder/evaluator.go -type BuilderContext interface { - TarSum - Remove(string) -} - -func (bc *tarSum) Remove(filename string) { - for i, fis := range bc.sums { - if fis.Name() == filename { - bc.sums = append(bc.sums[:i], bc.sums[i+1:]...) - // Note, we don't just return because there could be - // more than one with this name - } - } -} diff --git a/vendor/github.com/docker/docker/pkg/tarsum/fileinfosums.go b/vendor/github.com/docker/docker/pkg/tarsum/fileinfosums.go deleted file mode 100644 index 5abf5e7ba..000000000 --- a/vendor/github.com/docker/docker/pkg/tarsum/fileinfosums.go +++ /dev/null @@ -1,126 +0,0 @@ -package tarsum - -import "sort" - -// FileInfoSumInterface provides an interface for accessing file checksum -// information within a tar file. This info is accessed through interface -// so the actual name and sum cannot be melded with. -type FileInfoSumInterface interface { - // File name - Name() string - // Checksum of this particular file and its headers - Sum() string - // Position of file in the tar - Pos() int64 -} - -type fileInfoSum struct { - name string - sum string - pos int64 -} - -func (fis fileInfoSum) Name() string { - return fis.name -} -func (fis fileInfoSum) Sum() string { - return fis.sum -} -func (fis fileInfoSum) Pos() int64 { - return fis.pos -} - -// FileInfoSums provides a list of FileInfoSumInterfaces. -type FileInfoSums []FileInfoSumInterface - -// GetFile returns the first FileInfoSumInterface with a matching name. -func (fis FileInfoSums) GetFile(name string) FileInfoSumInterface { - for i := range fis { - if fis[i].Name() == name { - return fis[i] - } - } - return nil -} - -// GetAllFile returns a FileInfoSums with all matching names. -func (fis FileInfoSums) GetAllFile(name string) FileInfoSums { - f := FileInfoSums{} - for i := range fis { - if fis[i].Name() == name { - f = append(f, fis[i]) - } - } - return f -} - -// GetDuplicatePaths returns a FileInfoSums with all duplicated paths. -func (fis FileInfoSums) GetDuplicatePaths() (dups FileInfoSums) { - seen := make(map[string]int, len(fis)) // allocate earl. no need to grow this map. - for i := range fis { - f := fis[i] - if _, ok := seen[f.Name()]; ok { - dups = append(dups, f) - } else { - seen[f.Name()] = 0 - } - } - return dups -} - -// Len returns the size of the FileInfoSums. -func (fis FileInfoSums) Len() int { return len(fis) } - -// Swap swaps two FileInfoSum values if a FileInfoSums list. -func (fis FileInfoSums) Swap(i, j int) { fis[i], fis[j] = fis[j], fis[i] } - -// SortByPos sorts FileInfoSums content by position. -func (fis FileInfoSums) SortByPos() { - sort.Sort(byPos{fis}) -} - -// SortByNames sorts FileInfoSums content by name. -func (fis FileInfoSums) SortByNames() { - sort.Sort(byName{fis}) -} - -// SortBySums sorts FileInfoSums content by sums. -func (fis FileInfoSums) SortBySums() { - dups := fis.GetDuplicatePaths() - if len(dups) > 0 { - sort.Sort(bySum{fis, dups}) - } else { - sort.Sort(bySum{fis, nil}) - } -} - -// byName is a sort.Sort helper for sorting by file names. 
-// If names are the same, order them by their appearance in the tar archive -type byName struct{ FileInfoSums } - -func (bn byName) Less(i, j int) bool { - if bn.FileInfoSums[i].Name() == bn.FileInfoSums[j].Name() { - return bn.FileInfoSums[i].Pos() < bn.FileInfoSums[j].Pos() - } - return bn.FileInfoSums[i].Name() < bn.FileInfoSums[j].Name() -} - -// bySum is a sort.Sort helper for sorting by the sums of all the fileinfos in the tar archive -type bySum struct { - FileInfoSums - dups FileInfoSums -} - -func (bs bySum) Less(i, j int) bool { - if bs.dups != nil && bs.FileInfoSums[i].Name() == bs.FileInfoSums[j].Name() { - return bs.FileInfoSums[i].Pos() < bs.FileInfoSums[j].Pos() - } - return bs.FileInfoSums[i].Sum() < bs.FileInfoSums[j].Sum() -} - -// byPos is a sort.Sort helper for sorting by the sums of all the fileinfos by their original order -type byPos struct{ FileInfoSums } - -func (bp byPos) Less(i, j int) bool { - return bp.FileInfoSums[i].Pos() < bp.FileInfoSums[j].Pos() -} diff --git a/vendor/github.com/docker/docker/pkg/tarsum/tarsum.go b/vendor/github.com/docker/docker/pkg/tarsum/tarsum.go deleted file mode 100644 index 154788db8..000000000 --- a/vendor/github.com/docker/docker/pkg/tarsum/tarsum.go +++ /dev/null @@ -1,295 +0,0 @@ -// Package tarsum provides algorithms to perform checksum calculation on -// filesystem layers. -// -// The transportation of filesystems, regarding Docker, is done with tar(1) -// archives. There are a variety of tar serialization formats [2], and a key -// concern here is ensuring a repeatable checksum given a set of inputs from a -// generic tar archive. Types of transportation include distribution to and from a -// registry endpoint, saving and loading through commands or Docker daemon APIs, -// transferring the build context from client to Docker daemon, and committing the -// filesystem of a container to become an image. -// -// As tar archives are used for transit, but not preserved in many situations, the -// focus of the algorithm is to ensure the integrity of the preserved filesystem, -// while maintaining a deterministic accountability. This includes neither -// constraining the ordering or manipulation of the files during the creation or -// unpacking of the archive, nor include additional metadata state about the file -// system attributes. -package tarsum - -import ( - "archive/tar" - "bytes" - "compress/gzip" - "crypto" - "crypto/sha256" - "encoding/hex" - "errors" - "fmt" - "hash" - "io" - "path" - "strings" -) - -const ( - buf8K = 8 * 1024 - buf16K = 16 * 1024 - buf32K = 32 * 1024 -) - -// NewTarSum creates a new interface for calculating a fixed time checksum of a -// tar archive. -// -// This is used for calculating checksums of layers of an image, in some cases -// including the byte payload of the image's json metadata as well, and for -// calculating the checksums for buildcache. -func NewTarSum(r io.Reader, dc bool, v Version) (TarSum, error) { - return NewTarSumHash(r, dc, v, DefaultTHash) -} - -// NewTarSumHash creates a new TarSum, providing a THash to use rather than -// the DefaultTHash. -func NewTarSumHash(r io.Reader, dc bool, v Version, tHash THash) (TarSum, error) { - headerSelector, err := getTarHeaderSelector(v) - if err != nil { - return nil, err - } - ts := &tarSum{Reader: r, DisableCompression: dc, tarSumVersion: v, headerSelector: headerSelector, tHash: tHash} - err = ts.initTarSum() - return ts, err -} - -// NewTarSumForLabel creates a new TarSum using the provided TarSum version+hash label. 
-func NewTarSumForLabel(r io.Reader, disableCompression bool, label string) (TarSum, error) { - parts := strings.SplitN(label, "+", 2) - if len(parts) != 2 { - return nil, errors.New("tarsum label string should be of the form: {tarsum_version}+{hash_name}") - } - - versionName, hashName := parts[0], parts[1] - - version, ok := tarSumVersionsByName[versionName] - if !ok { - return nil, fmt.Errorf("unknown TarSum version name: %q", versionName) - } - - hashConfig, ok := standardHashConfigs[hashName] - if !ok { - return nil, fmt.Errorf("unknown TarSum hash name: %q", hashName) - } - - tHash := NewTHash(hashConfig.name, hashConfig.hash.New) - - return NewTarSumHash(r, disableCompression, version, tHash) -} - -// TarSum is the generic interface for calculating fixed time -// checksums of a tar archive. -type TarSum interface { - io.Reader - GetSums() FileInfoSums - Sum([]byte) string - Version() Version - Hash() THash -} - -// tarSum struct is the structure for a Version0 checksum calculation. -type tarSum struct { - io.Reader - tarR *tar.Reader - tarW *tar.Writer - writer writeCloseFlusher - bufTar *bytes.Buffer - bufWriter *bytes.Buffer - bufData []byte - h hash.Hash - tHash THash - sums FileInfoSums - fileCounter int64 - currentFile string - finished bool - first bool - DisableCompression bool // false by default. When false, the output gzip compressed. - tarSumVersion Version // this field is not exported so it can not be mutated during use - headerSelector tarHeaderSelector // handles selecting and ordering headers for files in the archive -} - -func (ts tarSum) Hash() THash { - return ts.tHash -} - -func (ts tarSum) Version() Version { - return ts.tarSumVersion -} - -// THash provides a hash.Hash type generator and its name. -type THash interface { - Hash() hash.Hash - Name() string -} - -// NewTHash is a convenience method for creating a THash. -func NewTHash(name string, h func() hash.Hash) THash { - return simpleTHash{n: name, h: h} -} - -type tHashConfig struct { - name string - hash crypto.Hash -} - -var ( - // NOTE: DO NOT include MD5 or SHA1, which are considered insecure. - standardHashConfigs = map[string]tHashConfig{ - "sha256": {name: "sha256", hash: crypto.SHA256}, - "sha512": {name: "sha512", hash: crypto.SHA512}, - } -) - -// DefaultTHash is default TarSum hashing algorithm - "sha256". 
-var DefaultTHash = NewTHash("sha256", sha256.New) - -type simpleTHash struct { - n string - h func() hash.Hash -} - -func (sth simpleTHash) Name() string { return sth.n } -func (sth simpleTHash) Hash() hash.Hash { return sth.h() } - -func (ts *tarSum) encodeHeader(h *tar.Header) error { - for _, elem := range ts.headerSelector.selectHeaders(h) { - if _, err := ts.h.Write([]byte(elem[0] + elem[1])); err != nil { - return err - } - } - return nil -} - -func (ts *tarSum) initTarSum() error { - ts.bufTar = bytes.NewBuffer([]byte{}) - ts.bufWriter = bytes.NewBuffer([]byte{}) - ts.tarR = tar.NewReader(ts.Reader) - ts.tarW = tar.NewWriter(ts.bufTar) - if !ts.DisableCompression { - ts.writer = gzip.NewWriter(ts.bufWriter) - } else { - ts.writer = &nopCloseFlusher{Writer: ts.bufWriter} - } - if ts.tHash == nil { - ts.tHash = DefaultTHash - } - ts.h = ts.tHash.Hash() - ts.h.Reset() - ts.first = true - ts.sums = FileInfoSums{} - return nil -} - -func (ts *tarSum) Read(buf []byte) (int, error) { - if ts.finished { - return ts.bufWriter.Read(buf) - } - if len(ts.bufData) < len(buf) { - switch { - case len(buf) <= buf8K: - ts.bufData = make([]byte, buf8K) - case len(buf) <= buf16K: - ts.bufData = make([]byte, buf16K) - case len(buf) <= buf32K: - ts.bufData = make([]byte, buf32K) - default: - ts.bufData = make([]byte, len(buf)) - } - } - buf2 := ts.bufData[:len(buf)] - - n, err := ts.tarR.Read(buf2) - if err != nil { - if err == io.EOF { - if _, err := ts.h.Write(buf2[:n]); err != nil { - return 0, err - } - if !ts.first { - ts.sums = append(ts.sums, fileInfoSum{name: ts.currentFile, sum: hex.EncodeToString(ts.h.Sum(nil)), pos: ts.fileCounter}) - ts.fileCounter++ - ts.h.Reset() - } else { - ts.first = false - } - - currentHeader, err := ts.tarR.Next() - if err != nil { - if err == io.EOF { - if err := ts.tarW.Close(); err != nil { - return 0, err - } - if _, err := io.Copy(ts.writer, ts.bufTar); err != nil { - return 0, err - } - if err := ts.writer.Close(); err != nil { - return 0, err - } - ts.finished = true - return n, nil - } - return n, err - } - ts.currentFile = path.Clean(currentHeader.Name) - if err := ts.encodeHeader(currentHeader); err != nil { - return 0, err - } - if err := ts.tarW.WriteHeader(currentHeader); err != nil { - return 0, err - } - if _, err := ts.tarW.Write(buf2[:n]); err != nil { - return 0, err - } - ts.tarW.Flush() - if _, err := io.Copy(ts.writer, ts.bufTar); err != nil { - return 0, err - } - ts.writer.Flush() - - return ts.bufWriter.Read(buf) - } - return n, err - } - - // Filling the hash buffer - if _, err = ts.h.Write(buf2[:n]); err != nil { - return 0, err - } - - // Filling the tar writer - if _, err = ts.tarW.Write(buf2[:n]); err != nil { - return 0, err - } - ts.tarW.Flush() - - // Filling the output writer - if _, err = io.Copy(ts.writer, ts.bufTar); err != nil { - return 0, err - } - ts.writer.Flush() - - return ts.bufWriter.Read(buf) -} - -func (ts *tarSum) Sum(extra []byte) string { - ts.sums.SortBySums() - h := ts.tHash.Hash() - if extra != nil { - h.Write(extra) - } - for _, fis := range ts.sums { - h.Write([]byte(fis.Sum())) - } - checksum := ts.Version().String() + "+" + ts.tHash.Name() + ":" + hex.EncodeToString(h.Sum(nil)) - return checksum -} - -func (ts *tarSum) GetSums() FileInfoSums { - return ts.sums -} diff --git a/vendor/github.com/docker/docker/pkg/tarsum/versioning.go b/vendor/github.com/docker/docker/pkg/tarsum/versioning.go deleted file mode 100644 index a62cc3ebc..000000000 --- a/vendor/github.com/docker/docker/pkg/tarsum/versioning.go +++ 
/dev/null @@ -1,158 +0,0 @@ -package tarsum - -import ( - "archive/tar" - "errors" - "io" - "sort" - "strconv" - "strings" -) - -// Version is used for versioning of the TarSum algorithm -// based on the prefix of the hash used -// i.e. "tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b" -type Version int - -// Prefix of "tarsum" -const ( - Version0 Version = iota - Version1 - // VersionDev this constant will be either the latest or an unsettled next-version of the TarSum calculation - VersionDev -) - -// WriteV1Header writes a tar header to a writer in V1 tarsum format. -func WriteV1Header(h *tar.Header, w io.Writer) { - for _, elem := range v1TarHeaderSelect(h) { - w.Write([]byte(elem[0] + elem[1])) - } -} - -// VersionLabelForChecksum returns the label for the given tarsum -// checksum, i.e., everything before the first `+` character in -// the string or an empty string if no label separator is found. -func VersionLabelForChecksum(checksum string) string { - // Checksums are in the form: {versionLabel}+{hashID}:{hex} - sepIndex := strings.Index(checksum, "+") - if sepIndex < 0 { - return "" - } - return checksum[:sepIndex] -} - -// GetVersions gets a list of all known tarsum versions. -func GetVersions() []Version { - v := []Version{} - for k := range tarSumVersions { - v = append(v, k) - } - return v -} - -var ( - tarSumVersions = map[Version]string{ - Version0: "tarsum", - Version1: "tarsum.v1", - VersionDev: "tarsum.dev", - } - tarSumVersionsByName = map[string]Version{ - "tarsum": Version0, - "tarsum.v1": Version1, - "tarsum.dev": VersionDev, - } -) - -func (tsv Version) String() string { - return tarSumVersions[tsv] -} - -// GetVersionFromTarsum returns the Version from the provided string. -func GetVersionFromTarsum(tarsum string) (Version, error) { - tsv := tarsum - if strings.Contains(tarsum, "+") { - tsv = strings.SplitN(tarsum, "+", 2)[0] - } - for v, s := range tarSumVersions { - if s == tsv { - return v, nil - } - } - return -1, ErrNotVersion -} - -// Errors that may be returned by functions in this package -var ( - ErrNotVersion = errors.New("string does not include a TarSum Version") - ErrVersionNotImplemented = errors.New("TarSum Version is not yet implemented") -) - -// tarHeaderSelector is the interface which different versions -// of tarsum should use for selecting and ordering tar headers -// for each item in the archive. -type tarHeaderSelector interface { - selectHeaders(h *tar.Header) (orderedHeaders [][2]string) -} - -type tarHeaderSelectFunc func(h *tar.Header) (orderedHeaders [][2]string) - -func (f tarHeaderSelectFunc) selectHeaders(h *tar.Header) (orderedHeaders [][2]string) { - return f(h) -} - -func v0TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) { - return [][2]string{ - {"name", h.Name}, - {"mode", strconv.FormatInt(h.Mode, 10)}, - {"uid", strconv.Itoa(h.Uid)}, - {"gid", strconv.Itoa(h.Gid)}, - {"size", strconv.FormatInt(h.Size, 10)}, - {"mtime", strconv.FormatInt(h.ModTime.UTC().Unix(), 10)}, - {"typeflag", string([]byte{h.Typeflag})}, - {"linkname", h.Linkname}, - {"uname", h.Uname}, - {"gname", h.Gname}, - {"devmajor", strconv.FormatInt(h.Devmajor, 10)}, - {"devminor", strconv.FormatInt(h.Devminor, 10)}, - } -} - -func v1TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) { - // Get extended attributes. 
- xAttrKeys := make([]string, len(h.Xattrs)) - for k := range h.Xattrs { - xAttrKeys = append(xAttrKeys, k) - } - sort.Strings(xAttrKeys) - - // Make the slice with enough capacity to hold the 11 basic headers - // we want from the v0 selector plus however many xattrs we have. - orderedHeaders = make([][2]string, 0, 11+len(xAttrKeys)) - - // Copy all headers from v0 excluding the 'mtime' header (the 5th element). - v0headers := v0TarHeaderSelect(h) - orderedHeaders = append(orderedHeaders, v0headers[0:5]...) - orderedHeaders = append(orderedHeaders, v0headers[6:]...) - - // Finally, append the sorted xattrs. - for _, k := range xAttrKeys { - orderedHeaders = append(orderedHeaders, [2]string{k, h.Xattrs[k]}) - } - - return -} - -var registeredHeaderSelectors = map[Version]tarHeaderSelectFunc{ - Version0: v0TarHeaderSelect, - Version1: v1TarHeaderSelect, - VersionDev: v1TarHeaderSelect, -} - -func getTarHeaderSelector(v Version) (tarHeaderSelector, error) { - headerSelector, ok := registeredHeaderSelectors[v] - if !ok { - return nil, ErrVersionNotImplemented - } - - return headerSelector, nil -} diff --git a/vendor/github.com/docker/docker/pkg/tarsum/writercloser.go b/vendor/github.com/docker/docker/pkg/tarsum/writercloser.go deleted file mode 100644 index 9727ecde3..000000000 --- a/vendor/github.com/docker/docker/pkg/tarsum/writercloser.go +++ /dev/null @@ -1,22 +0,0 @@ -package tarsum - -import ( - "io" -) - -type writeCloseFlusher interface { - io.WriteCloser - Flush() error -} - -type nopCloseFlusher struct { - io.Writer -} - -func (n *nopCloseFlusher) Close() error { - return nil -} - -func (n *nopCloseFlusher) Flush() error { - return nil -} diff --git a/vendor/github.com/docker/docker/pkg/term/ascii.go b/vendor/github.com/docker/docker/pkg/term/ascii.go deleted file mode 100644 index f5262bccf..000000000 --- a/vendor/github.com/docker/docker/pkg/term/ascii.go +++ /dev/null @@ -1,66 +0,0 @@ -package term - -import ( - "fmt" - "strings" -) - -// ASCII list the possible supported ASCII key sequence -var ASCII = []string{ - "ctrl-@", - "ctrl-a", - "ctrl-b", - "ctrl-c", - "ctrl-d", - "ctrl-e", - "ctrl-f", - "ctrl-g", - "ctrl-h", - "ctrl-i", - "ctrl-j", - "ctrl-k", - "ctrl-l", - "ctrl-m", - "ctrl-n", - "ctrl-o", - "ctrl-p", - "ctrl-q", - "ctrl-r", - "ctrl-s", - "ctrl-t", - "ctrl-u", - "ctrl-v", - "ctrl-w", - "ctrl-x", - "ctrl-y", - "ctrl-z", - "ctrl-[", - "ctrl-\\", - "ctrl-]", - "ctrl-^", - "ctrl-_", -} - -// ToBytes converts a string representing a suite of key-sequence to the corresponding ASCII code. -func ToBytes(keys string) ([]byte, error) { - codes := []byte{} -next: - for _, key := range strings.Split(keys, ",") { - if len(key) != 1 { - for code, ctrl := range ASCII { - if ctrl == key { - codes = append(codes, byte(code)) - continue next - } - } - if key == "DEL" { - codes = append(codes, 127) - } else { - return nil, fmt.Errorf("Unknown character: '%s'", key) - } - } else { - codes = append(codes, byte(key[0])) - } - } - return codes, nil -} diff --git a/vendor/github.com/docker/docker/pkg/term/tc_linux_cgo.go b/vendor/github.com/docker/docker/pkg/term/tc_linux_cgo.go deleted file mode 100644 index 59dac5ba8..000000000 --- a/vendor/github.com/docker/docker/pkg/term/tc_linux_cgo.go +++ /dev/null @@ -1,50 +0,0 @@ -// +build linux,cgo - -package term - -import ( - "syscall" - "unsafe" -) - -// #include -import "C" - -// Termios is the Unix API for terminal I/O. 
-// It is passthrough for syscall.Termios in order to make it portable with -// other platforms where it is not available or handled differently. -type Termios syscall.Termios - -// MakeRaw put the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. -func MakeRaw(fd uintptr) (*State, error) { - var oldState State - if err := tcget(fd, &oldState.termios); err != 0 { - return nil, err - } - - newState := oldState.termios - - C.cfmakeraw((*C.struct_termios)(unsafe.Pointer(&newState))) - if err := tcset(fd, &newState); err != 0 { - return nil, err - } - return &oldState, nil -} - -func tcget(fd uintptr, p *Termios) syscall.Errno { - ret, err := C.tcgetattr(C.int(fd), (*C.struct_termios)(unsafe.Pointer(p))) - if ret != 0 { - return err.(syscall.Errno) - } - return 0 -} - -func tcset(fd uintptr, p *Termios) syscall.Errno { - ret, err := C.tcsetattr(C.int(fd), C.TCSANOW, (*C.struct_termios)(unsafe.Pointer(p))) - if ret != 0 { - return err.(syscall.Errno) - } - return 0 -} diff --git a/vendor/github.com/docker/docker/pkg/term/tc_other.go b/vendor/github.com/docker/docker/pkg/term/tc_other.go deleted file mode 100644 index 750d7c3f6..000000000 --- a/vendor/github.com/docker/docker/pkg/term/tc_other.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build !windows -// +build !linux !cgo -// +build !solaris !cgo - -package term - -import ( - "syscall" - "unsafe" -) - -func tcget(fd uintptr, p *Termios) syscall.Errno { - _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(p))) - return err -} - -func tcset(fd uintptr, p *Termios) syscall.Errno { - _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(p))) - return err -} diff --git a/vendor/github.com/docker/docker/pkg/term/tc_solaris_cgo.go b/vendor/github.com/docker/docker/pkg/term/tc_solaris_cgo.go deleted file mode 100644 index c9139d0ca..000000000 --- a/vendor/github.com/docker/docker/pkg/term/tc_solaris_cgo.go +++ /dev/null @@ -1,63 +0,0 @@ -// +build solaris,cgo - -package term - -import ( - "syscall" - "unsafe" -) - -// #include -import "C" - -// Termios is the Unix API for terminal I/O. -// It is passthrough for syscall.Termios in order to make it portable with -// other platforms where it is not available or handled differently. -type Termios syscall.Termios - -// MakeRaw put the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. -func MakeRaw(fd uintptr) (*State, error) { - var oldState State - if err := tcget(fd, &oldState.termios); err != 0 { - return nil, err - } - - newState := oldState.termios - - newState.Iflag &^= (syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON | syscall.IXANY) - newState.Oflag &^= syscall.OPOST - newState.Lflag &^= (syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN) - newState.Cflag &^= (syscall.CSIZE | syscall.PARENB) - newState.Cflag |= syscall.CS8 - - /* - VMIN is the minimum number of characters that needs to be read in non-canonical mode for it to be returned - Since VMIN is overloaded with another element in canonical mode when we switch modes it defaults to 4. It - needs to be explicitly set to 1. 
- */ - newState.Cc[C.VMIN] = 1 - newState.Cc[C.VTIME] = 0 - - if err := tcset(fd, &newState); err != 0 { - return nil, err - } - return &oldState, nil -} - -func tcget(fd uintptr, p *Termios) syscall.Errno { - ret, err := C.tcgetattr(C.int(fd), (*C.struct_termios)(unsafe.Pointer(p))) - if ret != 0 { - return err.(syscall.Errno) - } - return 0 -} - -func tcset(fd uintptr, p *Termios) syscall.Errno { - ret, err := C.tcsetattr(C.int(fd), C.TCSANOW, (*C.struct_termios)(unsafe.Pointer(p))) - if ret != 0 { - return err.(syscall.Errno) - } - return 0 -} diff --git a/vendor/github.com/docker/docker/pkg/term/term.go b/vendor/github.com/docker/docker/pkg/term/term.go deleted file mode 100644 index 816f8d752..000000000 --- a/vendor/github.com/docker/docker/pkg/term/term.go +++ /dev/null @@ -1,123 +0,0 @@ -// +build !windows - -// Package term provides structures and helper functions to work with -// terminal (state, sizes). -package term - -import ( - "errors" - "fmt" - "io" - "os" - "os/signal" - "syscall" -) - -var ( - // ErrInvalidState is returned if the state of the terminal is invalid. - ErrInvalidState = errors.New("Invalid terminal state") -) - -// State represents the state of the terminal. -type State struct { - termios Termios -} - -// Winsize represents the size of the terminal window. -type Winsize struct { - Height uint16 - Width uint16 - x uint16 - y uint16 -} - -// StdStreams returns the standard streams (stdin, stdout, stderr). -func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { - return os.Stdin, os.Stdout, os.Stderr -} - -// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal. -func GetFdInfo(in interface{}) (uintptr, bool) { - var inFd uintptr - var isTerminalIn bool - if file, ok := in.(*os.File); ok { - inFd = file.Fd() - isTerminalIn = IsTerminal(inFd) - } - return inFd, isTerminalIn -} - -// IsTerminal returns true if the given file descriptor is a terminal. -func IsTerminal(fd uintptr) bool { - var termios Termios - return tcget(fd, &termios) == 0 -} - -// RestoreTerminal restores the terminal connected to the given file descriptor -// to a previous state. -func RestoreTerminal(fd uintptr, state *State) error { - if state == nil { - return ErrInvalidState - } - if err := tcset(fd, &state.termios); err != 0 { - return err - } - return nil -} - -// SaveState saves the state of the terminal connected to the given file descriptor. -func SaveState(fd uintptr) (*State, error) { - var oldState State - if err := tcget(fd, &oldState.termios); err != 0 { - return nil, err - } - - return &oldState, nil -} - -// DisableEcho applies the specified state to the terminal connected to the file -// descriptor, with echo disabled. -func DisableEcho(fd uintptr, state *State) error { - newState := state.termios - newState.Lflag &^= syscall.ECHO - - if err := tcset(fd, &newState); err != 0 { - return err - } - handleInterrupt(fd, state) - return nil -} - -// SetRawTerminal puts the terminal connected to the given file descriptor into -// raw mode and returns the previous state. On UNIX, this puts both the input -// and output into raw mode. On Windows, it only puts the input into raw mode. -func SetRawTerminal(fd uintptr) (*State, error) { - oldState, err := MakeRaw(fd) - if err != nil { - return nil, err - } - handleInterrupt(fd, oldState) - return oldState, err -} - -// SetRawTerminalOutput puts the output of terminal connected to the given file -// descriptor into raw mode. 
On UNIX, this does nothing and returns nil for the -// state. On Windows, it disables LF -> CRLF translation. -func SetRawTerminalOutput(fd uintptr) (*State, error) { - return nil, nil -} - -func handleInterrupt(fd uintptr, state *State) { - sigchan := make(chan os.Signal, 1) - signal.Notify(sigchan, os.Interrupt) - go func() { - for range sigchan { - // quit cleanly and the new terminal item is on a new line - fmt.Println() - signal.Stop(sigchan) - close(sigchan) - RestoreTerminal(fd, state) - os.Exit(1) - } - }() -} diff --git a/vendor/github.com/docker/docker/pkg/term/term_solaris.go b/vendor/github.com/docker/docker/pkg/term/term_solaris.go deleted file mode 100644 index 112debbec..000000000 --- a/vendor/github.com/docker/docker/pkg/term/term_solaris.go +++ /dev/null @@ -1,41 +0,0 @@ -// +build solaris - -package term - -import ( - "syscall" - "unsafe" -) - -/* -#include -#include -#include - -// Small wrapper to get rid of variadic args of ioctl() -int my_ioctl(int fd, int cmd, struct winsize *ws) { - return ioctl(fd, cmd, ws); -} -*/ -import "C" - -// GetWinsize returns the window size based on the specified file descriptor. -func GetWinsize(fd uintptr) (*Winsize, error) { - ws := &Winsize{} - ret, err := C.my_ioctl(C.int(fd), C.int(syscall.TIOCGWINSZ), (*C.struct_winsize)(unsafe.Pointer(ws))) - // Skip retval = 0 - if ret == 0 { - return ws, nil - } - return ws, err -} - -// SetWinsize tries to set the specified window size for the specified file descriptor. -func SetWinsize(fd uintptr, ws *Winsize) error { - ret, err := C.my_ioctl(C.int(fd), C.int(syscall.TIOCSWINSZ), (*C.struct_winsize)(unsafe.Pointer(ws))) - // Skip retval = 0 - if ret == 0 { - return nil - } - return err -} diff --git a/vendor/github.com/docker/docker/pkg/term/term_unix.go b/vendor/github.com/docker/docker/pkg/term/term_unix.go deleted file mode 100644 index ddf87a0e5..000000000 --- a/vendor/github.com/docker/docker/pkg/term/term_unix.go +++ /dev/null @@ -1,29 +0,0 @@ -// +build !solaris,!windows - -package term - -import ( - "syscall" - "unsafe" -) - -// GetWinsize returns the window size based on the specified file descriptor. -func GetWinsize(fd uintptr) (*Winsize, error) { - ws := &Winsize{} - _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(ws))) - // Skipp errno = 0 - if err == 0 { - return ws, nil - } - return ws, err -} - -// SetWinsize tries to set the specified window size for the specified file descriptor. -func SetWinsize(fd uintptr, ws *Winsize) error { - _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(syscall.TIOCSWINSZ), uintptr(unsafe.Pointer(ws))) - // Skipp errno = 0 - if err == 0 { - return nil - } - return err -} diff --git a/vendor/github.com/docker/docker/pkg/term/term_windows.go b/vendor/github.com/docker/docker/pkg/term/term_windows.go deleted file mode 100644 index 62e9a9e69..000000000 --- a/vendor/github.com/docker/docker/pkg/term/term_windows.go +++ /dev/null @@ -1,233 +0,0 @@ -// +build windows - -package term - -import ( - "io" - "os" - "os/signal" - "syscall" - - "github.com/Azure/go-ansiterm/winterm" - "github.com/docker/docker/pkg/term/windows" -) - -// State holds the console mode for the terminal. -type State struct { - mode uint32 -} - -// Winsize is used for window size. 
-type Winsize struct { - Height uint16 - Width uint16 -} - -const ( - // https://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx - enableVirtualTerminalInput = 0x0200 - enableVirtualTerminalProcessing = 0x0004 - disableNewlineAutoReturn = 0x0008 -) - -// vtInputSupported is true if enableVirtualTerminalInput is supported by the console -var vtInputSupported bool - -// StdStreams returns the standard streams (stdin, stdout, stderr). -func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { - // Turn on VT handling on all std handles, if possible. This might - // fail, in which case we will fall back to terminal emulation. - var emulateStdin, emulateStdout, emulateStderr bool - fd := os.Stdin.Fd() - if mode, err := winterm.GetConsoleMode(fd); err == nil { - // Validate that enableVirtualTerminalInput is supported, but do not set it. - if err = winterm.SetConsoleMode(fd, mode|enableVirtualTerminalInput); err != nil { - emulateStdin = true - } else { - vtInputSupported = true - } - // Unconditionally set the console mode back even on failure because SetConsoleMode - // remembers invalid bits on input handles. - winterm.SetConsoleMode(fd, mode) - } - - fd = os.Stdout.Fd() - if mode, err := winterm.GetConsoleMode(fd); err == nil { - // Validate disableNewlineAutoReturn is supported, but do not set it. - if err = winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing|disableNewlineAutoReturn); err != nil { - emulateStdout = true - } else { - winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing) - } - } - - fd = os.Stderr.Fd() - if mode, err := winterm.GetConsoleMode(fd); err == nil { - // Validate disableNewlineAutoReturn is supported, but do not set it. - if err = winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing|disableNewlineAutoReturn); err != nil { - emulateStderr = true - } else { - winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing) - } - } - - if os.Getenv("ConEmuANSI") == "ON" || os.Getenv("ConsoleZVersion") != "" { - // The ConEmu and ConsoleZ terminals emulate ANSI on output streams well. - emulateStdin = true - emulateStdout = false - emulateStderr = false - } - - if emulateStdin { - stdIn = windows.NewAnsiReader(syscall.STD_INPUT_HANDLE) - } else { - stdIn = os.Stdin - } - - if emulateStdout { - stdOut = windows.NewAnsiWriter(syscall.STD_OUTPUT_HANDLE) - } else { - stdOut = os.Stdout - } - - if emulateStderr { - stdErr = windows.NewAnsiWriter(syscall.STD_ERROR_HANDLE) - } else { - stdErr = os.Stderr - } - - return -} - -// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal. -func GetFdInfo(in interface{}) (uintptr, bool) { - return windows.GetHandleInfo(in) -} - -// GetWinsize returns the window size based on the specified file descriptor. -func GetWinsize(fd uintptr) (*Winsize, error) { - info, err := winterm.GetConsoleScreenBufferInfo(fd) - if err != nil { - return nil, err - } - - winsize := &Winsize{ - Width: uint16(info.Window.Right - info.Window.Left + 1), - Height: uint16(info.Window.Bottom - info.Window.Top + 1), - } - - return winsize, nil -} - -// IsTerminal returns true if the given file descriptor is a terminal. -func IsTerminal(fd uintptr) bool { - return windows.IsConsole(fd) -} - -// RestoreTerminal restores the terminal connected to the given file descriptor -// to a previous state. 
-func RestoreTerminal(fd uintptr, state *State) error { - return winterm.SetConsoleMode(fd, state.mode) -} - -// SaveState saves the state of the terminal connected to the given file descriptor. -func SaveState(fd uintptr) (*State, error) { - mode, e := winterm.GetConsoleMode(fd) - if e != nil { - return nil, e - } - - return &State{mode: mode}, nil -} - -// DisableEcho disables echo for the terminal connected to the given file descriptor. -// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx -func DisableEcho(fd uintptr, state *State) error { - mode := state.mode - mode &^= winterm.ENABLE_ECHO_INPUT - mode |= winterm.ENABLE_PROCESSED_INPUT | winterm.ENABLE_LINE_INPUT - err := winterm.SetConsoleMode(fd, mode) - if err != nil { - return err - } - - // Register an interrupt handler to catch and restore prior state - restoreAtInterrupt(fd, state) - return nil -} - -// SetRawTerminal puts the terminal connected to the given file descriptor into -// raw mode and returns the previous state. On UNIX, this puts both the input -// and output into raw mode. On Windows, it only puts the input into raw mode. -func SetRawTerminal(fd uintptr) (*State, error) { - state, err := MakeRaw(fd) - if err != nil { - return nil, err - } - - // Register an interrupt handler to catch and restore prior state - restoreAtInterrupt(fd, state) - return state, err -} - -// SetRawTerminalOutput puts the output of terminal connected to the given file -// descriptor into raw mode. On UNIX, this does nothing and returns nil for the -// state. On Windows, it disables LF -> CRLF translation. -func SetRawTerminalOutput(fd uintptr) (*State, error) { - state, err := SaveState(fd) - if err != nil { - return nil, err - } - - // Ignore failures, since disableNewlineAutoReturn might not be supported on this - // version of Windows. - winterm.SetConsoleMode(fd, state.mode|disableNewlineAutoReturn) - return state, err -} - -// MakeRaw puts the terminal (Windows Console) connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be restored. 
-func MakeRaw(fd uintptr) (*State, error) { - state, err := SaveState(fd) - if err != nil { - return nil, err - } - - mode := state.mode - - // See - // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx - // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx - - // Disable these modes - mode &^= winterm.ENABLE_ECHO_INPUT - mode &^= winterm.ENABLE_LINE_INPUT - mode &^= winterm.ENABLE_MOUSE_INPUT - mode &^= winterm.ENABLE_WINDOW_INPUT - mode &^= winterm.ENABLE_PROCESSED_INPUT - - // Enable these modes - mode |= winterm.ENABLE_EXTENDED_FLAGS - mode |= winterm.ENABLE_INSERT_MODE - mode |= winterm.ENABLE_QUICK_EDIT_MODE - if vtInputSupported { - mode |= enableVirtualTerminalInput - } - - err = winterm.SetConsoleMode(fd, mode) - if err != nil { - return nil, err - } - return state, nil -} - -func restoreAtInterrupt(fd uintptr, state *State) { - sigchan := make(chan os.Signal, 1) - signal.Notify(sigchan, os.Interrupt) - - go func() { - _ = <-sigchan - RestoreTerminal(fd, state) - os.Exit(0) - }() -} diff --git a/vendor/github.com/docker/docker/pkg/term/termios_darwin.go b/vendor/github.com/docker/docker/pkg/term/termios_darwin.go deleted file mode 100644 index 480db900a..000000000 --- a/vendor/github.com/docker/docker/pkg/term/termios_darwin.go +++ /dev/null @@ -1,69 +0,0 @@ -package term - -import ( - "syscall" - "unsafe" -) - -const ( - getTermios = syscall.TIOCGETA - setTermios = syscall.TIOCSETA -) - -// Termios magic numbers, passthrough to the ones defined in syscall. -const ( - IGNBRK = syscall.IGNBRK - PARMRK = syscall.PARMRK - INLCR = syscall.INLCR - IGNCR = syscall.IGNCR - ECHONL = syscall.ECHONL - CSIZE = syscall.CSIZE - ICRNL = syscall.ICRNL - ISTRIP = syscall.ISTRIP - PARENB = syscall.PARENB - ECHO = syscall.ECHO - ICANON = syscall.ICANON - ISIG = syscall.ISIG - IXON = syscall.IXON - BRKINT = syscall.BRKINT - INPCK = syscall.INPCK - OPOST = syscall.OPOST - CS8 = syscall.CS8 - IEXTEN = syscall.IEXTEN -) - -// Termios is the Unix API for terminal I/O. -type Termios struct { - Iflag uint64 - Oflag uint64 - Cflag uint64 - Lflag uint64 - Cc [20]byte - Ispeed uint64 - Ospeed uint64 -} - -// MakeRaw put the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. 
-func MakeRaw(fd uintptr) (*State, error) { - var oldState State - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { - return nil, err - } - - newState := oldState.termios - newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON) - newState.Oflag &^= OPOST - newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN) - newState.Cflag &^= (CSIZE | PARENB) - newState.Cflag |= CS8 - newState.Cc[syscall.VMIN] = 1 - newState.Cc[syscall.VTIME] = 0 - - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 { - return nil, err - } - - return &oldState, nil -} diff --git a/vendor/github.com/docker/docker/pkg/term/termios_freebsd.go b/vendor/github.com/docker/docker/pkg/term/termios_freebsd.go deleted file mode 100644 index ed843ad69..000000000 --- a/vendor/github.com/docker/docker/pkg/term/termios_freebsd.go +++ /dev/null @@ -1,69 +0,0 @@ -package term - -import ( - "syscall" - "unsafe" -) - -const ( - getTermios = syscall.TIOCGETA - setTermios = syscall.TIOCSETA -) - -// Termios magic numbers, passthrough to the ones defined in syscall. -const ( - IGNBRK = syscall.IGNBRK - PARMRK = syscall.PARMRK - INLCR = syscall.INLCR - IGNCR = syscall.IGNCR - ECHONL = syscall.ECHONL - CSIZE = syscall.CSIZE - ICRNL = syscall.ICRNL - ISTRIP = syscall.ISTRIP - PARENB = syscall.PARENB - ECHO = syscall.ECHO - ICANON = syscall.ICANON - ISIG = syscall.ISIG - IXON = syscall.IXON - BRKINT = syscall.BRKINT - INPCK = syscall.INPCK - OPOST = syscall.OPOST - CS8 = syscall.CS8 - IEXTEN = syscall.IEXTEN -) - -// Termios is the Unix API for terminal I/O. -type Termios struct { - Iflag uint32 - Oflag uint32 - Cflag uint32 - Lflag uint32 - Cc [20]byte - Ispeed uint32 - Ospeed uint32 -} - -// MakeRaw put the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. -func MakeRaw(fd uintptr) (*State, error) { - var oldState State - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { - return nil, err - } - - newState := oldState.termios - newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON) - newState.Oflag &^= OPOST - newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN) - newState.Cflag &^= (CSIZE | PARENB) - newState.Cflag |= CS8 - newState.Cc[syscall.VMIN] = 1 - newState.Cc[syscall.VTIME] = 0 - - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 { - return nil, err - } - - return &oldState, nil -} diff --git a/vendor/github.com/docker/docker/pkg/term/termios_linux.go b/vendor/github.com/docker/docker/pkg/term/termios_linux.go deleted file mode 100644 index 22921b6ae..000000000 --- a/vendor/github.com/docker/docker/pkg/term/termios_linux.go +++ /dev/null @@ -1,47 +0,0 @@ -// +build !cgo - -package term - -import ( - "syscall" - "unsafe" -) - -const ( - getTermios = syscall.TCGETS - setTermios = syscall.TCSETS -) - -// Termios is the Unix API for terminal I/O. -type Termios struct { - Iflag uint32 - Oflag uint32 - Cflag uint32 - Lflag uint32 - Cc [20]byte - Ispeed uint32 - Ospeed uint32 -} - -// MakeRaw put the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. 
-func MakeRaw(fd uintptr) (*State, error) { - var oldState State - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, getTermios, uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { - return nil, err - } - - newState := oldState.termios - - newState.Iflag &^= (syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON) - newState.Oflag &^= syscall.OPOST - newState.Lflag &^= (syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN) - newState.Cflag &^= (syscall.CSIZE | syscall.PARENB) - newState.Cflag |= syscall.CS8 - - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(&newState))); err != 0 { - return nil, err - } - return &oldState, nil -} diff --git a/vendor/github.com/docker/docker/pkg/term/termios_openbsd.go b/vendor/github.com/docker/docker/pkg/term/termios_openbsd.go deleted file mode 100644 index ed843ad69..000000000 --- a/vendor/github.com/docker/docker/pkg/term/termios_openbsd.go +++ /dev/null @@ -1,69 +0,0 @@ -package term - -import ( - "syscall" - "unsafe" -) - -const ( - getTermios = syscall.TIOCGETA - setTermios = syscall.TIOCSETA -) - -// Termios magic numbers, passthrough to the ones defined in syscall. -const ( - IGNBRK = syscall.IGNBRK - PARMRK = syscall.PARMRK - INLCR = syscall.INLCR - IGNCR = syscall.IGNCR - ECHONL = syscall.ECHONL - CSIZE = syscall.CSIZE - ICRNL = syscall.ICRNL - ISTRIP = syscall.ISTRIP - PARENB = syscall.PARENB - ECHO = syscall.ECHO - ICANON = syscall.ICANON - ISIG = syscall.ISIG - IXON = syscall.IXON - BRKINT = syscall.BRKINT - INPCK = syscall.INPCK - OPOST = syscall.OPOST - CS8 = syscall.CS8 - IEXTEN = syscall.IEXTEN -) - -// Termios is the Unix API for terminal I/O. -type Termios struct { - Iflag uint32 - Oflag uint32 - Cflag uint32 - Lflag uint32 - Cc [20]byte - Ispeed uint32 - Ospeed uint32 -} - -// MakeRaw put the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. -func MakeRaw(fd uintptr) (*State, error) { - var oldState State - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { - return nil, err - } - - newState := oldState.termios - newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON) - newState.Oflag &^= OPOST - newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN) - newState.Cflag &^= (CSIZE | PARENB) - newState.Cflag |= CS8 - newState.Cc[syscall.VMIN] = 1 - newState.Cc[syscall.VTIME] = 0 - - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 { - return nil, err - } - - return &oldState, nil -} diff --git a/vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go b/vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go deleted file mode 100644 index cb0b88356..000000000 --- a/vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go +++ /dev/null @@ -1,263 +0,0 @@ -// +build windows - -package windows - -import ( - "bytes" - "errors" - "fmt" - "io" - "os" - "strings" - "unsafe" - - ansiterm "github.com/Azure/go-ansiterm" - "github.com/Azure/go-ansiterm/winterm" -) - -const ( - escapeSequence = ansiterm.KEY_ESC_CSI -) - -// ansiReader wraps a standard input file (e.g., os.Stdin) providing ANSI sequence translation. 
-type ansiReader struct { - file *os.File - fd uintptr - buffer []byte - cbBuffer int - command []byte -} - -// NewAnsiReader returns an io.ReadCloser that provides VT100 terminal emulation on top of a -// Windows console input handle. -func NewAnsiReader(nFile int) io.ReadCloser { - initLogger() - file, fd := winterm.GetStdFile(nFile) - return &ansiReader{ - file: file, - fd: fd, - command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH), - buffer: make([]byte, 0), - } -} - -// Close closes the wrapped file. -func (ar *ansiReader) Close() (err error) { - return ar.file.Close() -} - -// Fd returns the file descriptor of the wrapped file. -func (ar *ansiReader) Fd() uintptr { - return ar.fd -} - -// Read reads up to len(p) bytes of translated input events into p. -func (ar *ansiReader) Read(p []byte) (int, error) { - if len(p) == 0 { - return 0, nil - } - - // Previously read bytes exist, read as much as we can and return - if len(ar.buffer) > 0 { - logger.Debugf("Reading previously cached bytes") - - originalLength := len(ar.buffer) - copiedLength := copy(p, ar.buffer) - - if copiedLength == originalLength { - ar.buffer = make([]byte, 0, len(p)) - } else { - ar.buffer = ar.buffer[copiedLength:] - } - - logger.Debugf("Read from cache p[%d]: % x", copiedLength, p) - return copiedLength, nil - } - - // Read and translate key events - events, err := readInputEvents(ar.fd, len(p)) - if err != nil { - return 0, err - } else if len(events) == 0 { - logger.Debug("No input events detected") - return 0, nil - } - - keyBytes := translateKeyEvents(events, []byte(escapeSequence)) - - // Save excess bytes and right-size keyBytes - if len(keyBytes) > len(p) { - logger.Debugf("Received %d keyBytes, only room for %d bytes", len(keyBytes), len(p)) - ar.buffer = keyBytes[len(p):] - keyBytes = keyBytes[:len(p)] - } else if len(keyBytes) == 0 { - logger.Debug("No key bytes returned from the translator") - return 0, nil - } - - copiedLength := copy(p, keyBytes) - if copiedLength != len(keyBytes) { - return 0, errors.New("unexpected copy length encountered") - } - - logger.Debugf("Read p[%d]: % x", copiedLength, p) - logger.Debugf("Read keyBytes[%d]: % x", copiedLength, keyBytes) - return copiedLength, nil -} - -// readInputEvents polls until at least one event is available. -func readInputEvents(fd uintptr, maxBytes int) ([]winterm.INPUT_RECORD, error) { - // Determine the maximum number of records to retrieve - // -- Cast around the type system to obtain the size of a single INPUT_RECORD. - // unsafe.Sizeof requires an expression vs. a type-reference; the casting - // tricks the type system into believing it has such an expression. 
- recordSize := int(unsafe.Sizeof(*((*winterm.INPUT_RECORD)(unsafe.Pointer(&maxBytes))))) - countRecords := maxBytes / recordSize - if countRecords > ansiterm.MAX_INPUT_EVENTS { - countRecords = ansiterm.MAX_INPUT_EVENTS - } else if countRecords == 0 { - countRecords = 1 - } - logger.Debugf("[windows] readInputEvents: Reading %v records (buffer size %v, record size %v)", countRecords, maxBytes, recordSize) - - // Wait for and read input events - events := make([]winterm.INPUT_RECORD, countRecords) - nEvents := uint32(0) - eventsExist, err := winterm.WaitForSingleObject(fd, winterm.WAIT_INFINITE) - if err != nil { - return nil, err - } - - if eventsExist { - err = winterm.ReadConsoleInput(fd, events, &nEvents) - if err != nil { - return nil, err - } - } - - // Return a slice restricted to the number of returned records - logger.Debugf("[windows] readInputEvents: Read %v events", nEvents) - return events[:nEvents], nil -} - -// KeyEvent Translation Helpers - -var arrowKeyMapPrefix = map[uint16]string{ - winterm.VK_UP: "%s%sA", - winterm.VK_DOWN: "%s%sB", - winterm.VK_RIGHT: "%s%sC", - winterm.VK_LEFT: "%s%sD", -} - -var keyMapPrefix = map[uint16]string{ - winterm.VK_UP: "\x1B[%sA", - winterm.VK_DOWN: "\x1B[%sB", - winterm.VK_RIGHT: "\x1B[%sC", - winterm.VK_LEFT: "\x1B[%sD", - winterm.VK_HOME: "\x1B[1%s~", // showkey shows ^[[1 - winterm.VK_END: "\x1B[4%s~", // showkey shows ^[[4 - winterm.VK_INSERT: "\x1B[2%s~", - winterm.VK_DELETE: "\x1B[3%s~", - winterm.VK_PRIOR: "\x1B[5%s~", - winterm.VK_NEXT: "\x1B[6%s~", - winterm.VK_F1: "", - winterm.VK_F2: "", - winterm.VK_F3: "\x1B[13%s~", - winterm.VK_F4: "\x1B[14%s~", - winterm.VK_F5: "\x1B[15%s~", - winterm.VK_F6: "\x1B[17%s~", - winterm.VK_F7: "\x1B[18%s~", - winterm.VK_F8: "\x1B[19%s~", - winterm.VK_F9: "\x1B[20%s~", - winterm.VK_F10: "\x1B[21%s~", - winterm.VK_F11: "\x1B[23%s~", - winterm.VK_F12: "\x1B[24%s~", -} - -// translateKeyEvents converts the input events into the appropriate ANSI string. -func translateKeyEvents(events []winterm.INPUT_RECORD, escapeSequence []byte) []byte { - var buffer bytes.Buffer - for _, event := range events { - if event.EventType == winterm.KEY_EVENT && event.KeyEvent.KeyDown != 0 { - buffer.WriteString(keyToString(&event.KeyEvent, escapeSequence)) - } - } - - return buffer.Bytes() -} - -// keyToString maps the given input event record to the corresponding string. -func keyToString(keyEvent *winterm.KEY_EVENT_RECORD, escapeSequence []byte) string { - if keyEvent.UnicodeChar == 0 { - return formatVirtualKey(keyEvent.VirtualKeyCode, keyEvent.ControlKeyState, escapeSequence) - } - - _, alt, control := getControlKeys(keyEvent.ControlKeyState) - if control { - // TODO(azlinux): Implement following control sequences - // -D Signals the end of input from the keyboard; also exits current shell. - // -H Deletes the first character to the left of the cursor. Also called the ERASE key. - // -Q Restarts printing after it has been stopped with -s. - // -S Suspends printing on the screen (does not stop the program). - // -U Deletes all characters on the current line. Also called the KILL key. - // -E Quits current command and creates a core - - } - - // +Key generates ESC N Key - if !control && alt { - return ansiterm.KEY_ESC_N + strings.ToLower(string(keyEvent.UnicodeChar)) - } - - return string(keyEvent.UnicodeChar) -} - -// formatVirtualKey converts a virtual key (e.g., up arrow) into the appropriate ANSI string. 
-func formatVirtualKey(key uint16, controlState uint32, escapeSequence []byte) string { - shift, alt, control := getControlKeys(controlState) - modifier := getControlKeysModifier(shift, alt, control) - - if format, ok := arrowKeyMapPrefix[key]; ok { - return fmt.Sprintf(format, escapeSequence, modifier) - } - - if format, ok := keyMapPrefix[key]; ok { - return fmt.Sprintf(format, modifier) - } - - return "" -} - -// getControlKeys extracts the shift, alt, and ctrl key states. -func getControlKeys(controlState uint32) (shift, alt, control bool) { - shift = 0 != (controlState & winterm.SHIFT_PRESSED) - alt = 0 != (controlState & (winterm.LEFT_ALT_PRESSED | winterm.RIGHT_ALT_PRESSED)) - control = 0 != (controlState & (winterm.LEFT_CTRL_PRESSED | winterm.RIGHT_CTRL_PRESSED)) - return shift, alt, control -} - -// getControlKeysModifier returns the ANSI modifier for the given combination of control keys. -func getControlKeysModifier(shift, alt, control bool) string { - if shift && alt && control { - return ansiterm.KEY_CONTROL_PARAM_8 - } - if alt && control { - return ansiterm.KEY_CONTROL_PARAM_7 - } - if shift && control { - return ansiterm.KEY_CONTROL_PARAM_6 - } - if control { - return ansiterm.KEY_CONTROL_PARAM_5 - } - if shift && alt { - return ansiterm.KEY_CONTROL_PARAM_4 - } - if alt { - return ansiterm.KEY_CONTROL_PARAM_3 - } - if shift { - return ansiterm.KEY_CONTROL_PARAM_2 - } - return "" -} diff --git a/vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go b/vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go deleted file mode 100644 index a3ce5697d..000000000 --- a/vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go +++ /dev/null @@ -1,64 +0,0 @@ -// +build windows - -package windows - -import ( - "io" - "os" - - ansiterm "github.com/Azure/go-ansiterm" - "github.com/Azure/go-ansiterm/winterm" -) - -// ansiWriter wraps a standard output file (e.g., os.Stdout) providing ANSI sequence translation. -type ansiWriter struct { - file *os.File - fd uintptr - infoReset *winterm.CONSOLE_SCREEN_BUFFER_INFO - command []byte - escapeSequence []byte - inAnsiSequence bool - parser *ansiterm.AnsiParser -} - -// NewAnsiWriter returns an io.Writer that provides VT100 terminal emulation on top of a -// Windows console output handle. -func NewAnsiWriter(nFile int) io.Writer { - initLogger() - file, fd := winterm.GetStdFile(nFile) - info, err := winterm.GetConsoleScreenBufferInfo(fd) - if err != nil { - return nil - } - - parser := ansiterm.CreateParser("Ground", winterm.CreateWinEventHandler(fd, file)) - logger.Infof("newAnsiWriter: parser %p", parser) - - aw := &ansiWriter{ - file: file, - fd: fd, - infoReset: info, - command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH), - escapeSequence: []byte(ansiterm.KEY_ESC_CSI), - parser: parser, - } - - logger.Infof("newAnsiWriter: aw.parser %p", aw.parser) - logger.Infof("newAnsiWriter: %v", aw) - return aw -} - -func (aw *ansiWriter) Fd() uintptr { - return aw.fd -} - -// Write writes len(p) bytes from p to the underlying data stream. 
-func (aw *ansiWriter) Write(p []byte) (total int, err error) { - if len(p) == 0 { - return 0, nil - } - - logger.Infof("Write: % x", p) - logger.Infof("Write: %s", string(p)) - return aw.parser.Parse(p) -} diff --git a/vendor/github.com/docker/docker/pkg/term/windows/console.go b/vendor/github.com/docker/docker/pkg/term/windows/console.go deleted file mode 100644 index ca5c3b2e5..000000000 --- a/vendor/github.com/docker/docker/pkg/term/windows/console.go +++ /dev/null @@ -1,35 +0,0 @@ -// +build windows - -package windows - -import ( - "os" - - "github.com/Azure/go-ansiterm/winterm" -) - -// GetHandleInfo returns file descriptor and bool indicating whether the file is a console. -func GetHandleInfo(in interface{}) (uintptr, bool) { - switch t := in.(type) { - case *ansiReader: - return t.Fd(), true - case *ansiWriter: - return t.Fd(), true - } - - var inFd uintptr - var isTerminal bool - - if file, ok := in.(*os.File); ok { - inFd = file.Fd() - isTerminal = IsConsole(inFd) - } - return inFd, isTerminal -} - -// IsConsole returns true if the given file descriptor is a Windows Console. -// The code assumes that GetConsoleMode will return an error for file descriptors that are not a console. -func IsConsole(fd uintptr) bool { - _, e := winterm.GetConsoleMode(fd) - return e == nil -} diff --git a/vendor/github.com/docker/docker/pkg/term/windows/windows.go b/vendor/github.com/docker/docker/pkg/term/windows/windows.go deleted file mode 100644 index ce4cb5990..000000000 --- a/vendor/github.com/docker/docker/pkg/term/windows/windows.go +++ /dev/null @@ -1,33 +0,0 @@ -// These files implement ANSI-aware input and output streams for use by the Docker Windows client. -// When asked for the set of standard streams (e.g., stdin, stdout, stderr), the code will create -// and return pseudo-streams that convert ANSI sequences to / from Windows Console API calls. - -package windows - -import ( - "io/ioutil" - "os" - "sync" - - ansiterm "github.com/Azure/go-ansiterm" - "github.com/Sirupsen/logrus" -) - -var logger *logrus.Logger -var initOnce sync.Once - -func initLogger() { - initOnce.Do(func() { - logFile := ioutil.Discard - - if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" { - logFile, _ = os.Create("ansiReaderWriter.log") - } - - logger = &logrus.Logger{ - Out: logFile, - Formatter: new(logrus.TextFormatter), - Level: logrus.DebugLevel, - } - }) -} diff --git a/vendor/github.com/docker/docker/pkg/useragent/README.md b/vendor/github.com/docker/docker/pkg/useragent/README.md deleted file mode 100644 index d9cb367d1..000000000 --- a/vendor/github.com/docker/docker/pkg/useragent/README.md +++ /dev/null @@ -1 +0,0 @@ -This package provides helper functions to pack version information into a single User-Agent header. diff --git a/vendor/github.com/docker/docker/pkg/useragent/useragent.go b/vendor/github.com/docker/docker/pkg/useragent/useragent.go deleted file mode 100644 index 1137db51b..000000000 --- a/vendor/github.com/docker/docker/pkg/useragent/useragent.go +++ /dev/null @@ -1,55 +0,0 @@ -// Package useragent provides helper functions to pack -// version information into a single User-Agent header. -package useragent - -import ( - "strings" -) - -// VersionInfo is used to model UserAgent versions. 
-type VersionInfo struct { - Name string - Version string -} - -func (vi *VersionInfo) isValid() bool { - const stopChars = " \t\r\n/" - name := vi.Name - vers := vi.Version - if len(name) == 0 || strings.ContainsAny(name, stopChars) { - return false - } - if len(vers) == 0 || strings.ContainsAny(vers, stopChars) { - return false - } - return true -} - -// AppendVersions converts versions to a string and appends the string to the string base. -// -// Each VersionInfo will be converted to a string in the format of -// "product/version", where the "product" is get from the name field, while -// version is get from the version field. Several pieces of version information -// will be concatenated and separated by space. -// -// Example: -// AppendVersions("base", VersionInfo{"foo", "1.0"}, VersionInfo{"bar", "2.0"}) -// results in "base foo/1.0 bar/2.0". -func AppendVersions(base string, versions ...VersionInfo) string { - if len(versions) == 0 { - return base - } - - verstrs := make([]string, 0, 1+len(versions)) - if len(base) > 0 { - verstrs = append(verstrs, base) - } - - for _, v := range versions { - if !v.isValid() { - continue - } - verstrs = append(verstrs, v.Name+"/"+v.Version) - } - return strings.Join(verstrs, " ") -} diff --git a/vendor/github.com/docker/docker/registry/auth.go b/vendor/github.com/docker/docker/registry/auth.go deleted file mode 100644 index 8cadd51ba..000000000 --- a/vendor/github.com/docker/docker/registry/auth.go +++ /dev/null @@ -1,303 +0,0 @@ -package registry - -import ( - "fmt" - "io/ioutil" - "net/http" - "net/url" - "strings" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/registry/client/auth" - "github.com/docker/distribution/registry/client/auth/challenge" - "github.com/docker/distribution/registry/client/transport" - "github.com/docker/docker/api/types" - registrytypes "github.com/docker/docker/api/types/registry" -) - -const ( - // AuthClientID is used the ClientID used for the token server - AuthClientID = "docker" -) - -// loginV1 tries to register/login to the v1 registry server. -func loginV1(authConfig *types.AuthConfig, apiEndpoint APIEndpoint, userAgent string) (string, string, error) { - registryEndpoint, err := apiEndpoint.ToV1Endpoint(userAgent, nil) - if err != nil { - return "", "", err - } - - serverAddress := registryEndpoint.String() - - logrus.Debugf("attempting v1 login to registry endpoint %s", serverAddress) - - if serverAddress == "" { - return "", "", fmt.Errorf("Server Error: Server Address not set.") - } - - loginAgainstOfficialIndex := serverAddress == IndexServer - - req, err := http.NewRequest("GET", serverAddress+"users/", nil) - if err != nil { - return "", "", err - } - req.SetBasicAuth(authConfig.Username, authConfig.Password) - resp, err := registryEndpoint.client.Do(req) - if err != nil { - // fallback when request could not be completed - return "", "", fallbackError{ - err: err, - } - } - defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return "", "", err - } - if resp.StatusCode == http.StatusOK { - return "Login Succeeded", "", nil - } else if resp.StatusCode == http.StatusUnauthorized { - if loginAgainstOfficialIndex { - return "", "", fmt.Errorf("Wrong login/password, please try again. Haven't got a Docker ID? 
Create one at https://hub.docker.com") - } - return "", "", fmt.Errorf("Wrong login/password, please try again") - } else if resp.StatusCode == http.StatusForbidden { - if loginAgainstOfficialIndex { - return "", "", fmt.Errorf("Login: Account is not active. Please check your e-mail for a confirmation link.") - } - // *TODO: Use registry configuration to determine what this says, if anything? - return "", "", fmt.Errorf("Login: Account is not active. Please see the documentation of the registry %s for instructions how to activate it.", serverAddress) - } else if resp.StatusCode == http.StatusInternalServerError { // Issue #14326 - logrus.Errorf("%s returned status code %d. Response Body :\n%s", req.URL.String(), resp.StatusCode, body) - return "", "", fmt.Errorf("Internal Server Error") - } - return "", "", fmt.Errorf("Login: %s (Code: %d; Headers: %s)", body, - resp.StatusCode, resp.Header) -} - -type loginCredentialStore struct { - authConfig *types.AuthConfig -} - -func (lcs loginCredentialStore) Basic(*url.URL) (string, string) { - return lcs.authConfig.Username, lcs.authConfig.Password -} - -func (lcs loginCredentialStore) RefreshToken(*url.URL, string) string { - return lcs.authConfig.IdentityToken -} - -func (lcs loginCredentialStore) SetRefreshToken(u *url.URL, service, token string) { - lcs.authConfig.IdentityToken = token -} - -type staticCredentialStore struct { - auth *types.AuthConfig -} - -// NewStaticCredentialStore returns a credential store -// which always returns the same credential values. -func NewStaticCredentialStore(auth *types.AuthConfig) auth.CredentialStore { - return staticCredentialStore{ - auth: auth, - } -} - -func (scs staticCredentialStore) Basic(*url.URL) (string, string) { - if scs.auth == nil { - return "", "" - } - return scs.auth.Username, scs.auth.Password -} - -func (scs staticCredentialStore) RefreshToken(*url.URL, string) string { - if scs.auth == nil { - return "" - } - return scs.auth.IdentityToken -} - -func (scs staticCredentialStore) SetRefreshToken(*url.URL, string, string) { -} - -type fallbackError struct { - err error -} - -func (err fallbackError) Error() string { - return err.err.Error() -} - -// loginV2 tries to login to the v2 registry server. The given registry -// endpoint will be pinged to get authorization challenges. These challenges -// will be used to authenticate against the registry to validate credentials. -func loginV2(authConfig *types.AuthConfig, endpoint APIEndpoint, userAgent string) (string, string, error) { - logrus.Debugf("attempting v2 login to registry endpoint %s", strings.TrimRight(endpoint.URL.String(), "/")+"/v2/") - - modifiers := DockerHeaders(userAgent, nil) - authTransport := transport.NewTransport(NewTransport(endpoint.TLSConfig), modifiers...) 
- - credentialAuthConfig := *authConfig - creds := loginCredentialStore{ - authConfig: &credentialAuthConfig, - } - - loginClient, foundV2, err := v2AuthHTTPClient(endpoint.URL, authTransport, modifiers, creds, nil) - if err != nil { - return "", "", err - } - - endpointStr := strings.TrimRight(endpoint.URL.String(), "/") + "/v2/" - req, err := http.NewRequest("GET", endpointStr, nil) - if err != nil { - if !foundV2 { - err = fallbackError{err: err} - } - return "", "", err - } - - resp, err := loginClient.Do(req) - if err != nil { - if !foundV2 { - err = fallbackError{err: err} - } - return "", "", err - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - // TODO(dmcgowan): Attempt to further interpret result, status code and error code string - err := fmt.Errorf("login attempt to %s failed with status: %d %s", endpointStr, resp.StatusCode, http.StatusText(resp.StatusCode)) - if !foundV2 { - err = fallbackError{err: err} - } - return "", "", err - } - - return "Login Succeeded", credentialAuthConfig.IdentityToken, nil - -} - -func v2AuthHTTPClient(endpoint *url.URL, authTransport http.RoundTripper, modifiers []transport.RequestModifier, creds auth.CredentialStore, scopes []auth.Scope) (*http.Client, bool, error) { - challengeManager, foundV2, err := PingV2Registry(endpoint, authTransport) - if err != nil { - if !foundV2 { - err = fallbackError{err: err} - } - return nil, foundV2, err - } - - tokenHandlerOptions := auth.TokenHandlerOptions{ - Transport: authTransport, - Credentials: creds, - OfflineAccess: true, - ClientID: AuthClientID, - Scopes: scopes, - } - tokenHandler := auth.NewTokenHandlerWithOptions(tokenHandlerOptions) - basicHandler := auth.NewBasicHandler(creds) - modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler)) - tr := transport.NewTransport(authTransport, modifiers...) - - return &http.Client{ - Transport: tr, - Timeout: 15 * time.Second, - }, foundV2, nil - -} - -// ConvertToHostname converts a registry url which has http|https prepended -// to just an hostname. -func ConvertToHostname(url string) string { - stripped := url - if strings.HasPrefix(url, "http://") { - stripped = strings.TrimPrefix(url, "http://") - } else if strings.HasPrefix(url, "https://") { - stripped = strings.TrimPrefix(url, "https://") - } - - nameParts := strings.SplitN(stripped, "/", 2) - - return nameParts[0] -} - -// ResolveAuthConfig matches an auth configuration to a server address or a URL -func ResolveAuthConfig(authConfigs map[string]types.AuthConfig, index *registrytypes.IndexInfo) types.AuthConfig { - configKey := GetAuthConfigKey(index) - // First try the happy case - if c, found := authConfigs[configKey]; found || index.Official { - return c - } - - // Maybe they have a legacy config file, we will iterate the keys converting - // them to the new format and testing - for registry, ac := range authConfigs { - if configKey == ConvertToHostname(registry) { - return ac - } - } - - // When all else fails, return an empty auth config - return types.AuthConfig{} -} - -// PingResponseError is used when the response from a ping -// was received but invalid. -type PingResponseError struct { - Err error -} - -func (err PingResponseError) Error() string { - return err.Err.Error() -} - -// PingV2Registry attempts to ping a v2 registry and on success return a -// challenge manager for the supported authentication types and -// whether v2 was confirmed by the response. 
If a response is received but -// cannot be interpreted a PingResponseError will be returned. -func PingV2Registry(endpoint *url.URL, transport http.RoundTripper) (challenge.Manager, bool, error) { - var ( - foundV2 = false - v2Version = auth.APIVersion{ - Type: "registry", - Version: "2.0", - } - ) - - pingClient := &http.Client{ - Transport: transport, - Timeout: 15 * time.Second, - } - endpointStr := strings.TrimRight(endpoint.String(), "/") + "/v2/" - req, err := http.NewRequest("GET", endpointStr, nil) - if err != nil { - return nil, false, err - } - resp, err := pingClient.Do(req) - if err != nil { - return nil, false, err - } - defer resp.Body.Close() - - versions := auth.APIVersions(resp, DefaultRegistryVersionHeader) - for _, pingVersion := range versions { - if pingVersion == v2Version { - // The version header indicates we're definitely - // talking to a v2 registry. So don't allow future - // fallbacks to the v1 protocol. - - foundV2 = true - break - } - } - - challengeManager := challenge.NewSimpleManager() - if err := challengeManager.AddResponse(resp); err != nil { - return nil, foundV2, PingResponseError{ - Err: err, - } - } - - return challengeManager, foundV2, nil -} diff --git a/vendor/github.com/docker/docker/registry/config.go b/vendor/github.com/docker/docker/registry/config.go deleted file mode 100644 index 7b6703f57..000000000 --- a/vendor/github.com/docker/docker/registry/config.go +++ /dev/null @@ -1,389 +0,0 @@ -package registry - -import ( - "fmt" - "net" - "net/url" - "regexp" - "strconv" - "strings" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/reference" - registrytypes "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/opts" - "github.com/pkg/errors" - "github.com/spf13/pflag" -) - -// ServiceOptions holds command line options. -type ServiceOptions struct { - Mirrors []string `json:"registry-mirrors,omitempty"` - InsecureRegistries []string `json:"insecure-registries,omitempty"` - - // V2Only controls access to legacy registries. If it is set to true via the - // command line flag the daemon will not attempt to contact v1 legacy registries - V2Only bool `json:"disable-legacy-registry,omitempty"` -} - -// serviceConfig holds daemon configuration for the registry service. 
-type serviceConfig struct { - registrytypes.ServiceConfig - V2Only bool -} - -var ( - // DefaultNamespace is the default namespace - DefaultNamespace = "docker.io" - // DefaultRegistryVersionHeader is the name of the default HTTP header - // that carries Registry version info - DefaultRegistryVersionHeader = "Docker-Distribution-Api-Version" - - // IndexHostname is the index hostname - IndexHostname = "index.docker.io" - // IndexServer is used for user auth and image search - IndexServer = "https://" + IndexHostname + "/v1/" - // IndexName is the name of the index - IndexName = "docker.io" - - // NotaryServer is the endpoint serving the Notary trust server - NotaryServer = "https://notary.docker.io" - - // DefaultV2Registry is the URI of the default v2 registry - DefaultV2Registry = &url.URL{ - Scheme: "https", - Host: "registry-1.docker.io", - } -) - -var ( - // ErrInvalidRepositoryName is an error returned if the repository name did - // not have the correct form - ErrInvalidRepositoryName = errors.New("Invalid repository name (ex: \"registry.domain.tld/myrepos\")") - - emptyServiceConfig = newServiceConfig(ServiceOptions{}) -) - -var ( - validHostPortRegex = regexp.MustCompile(`^` + reference.DomainRegexp.String() + `$`) -) - -// for mocking in unit tests -var lookupIP = net.LookupIP - -// InstallCliFlags adds command-line options to the top-level flag parser for -// the current process. -func (options *ServiceOptions) InstallCliFlags(flags *pflag.FlagSet) { - mirrors := opts.NewNamedListOptsRef("registry-mirrors", &options.Mirrors, ValidateMirror) - insecureRegistries := opts.NewNamedListOptsRef("insecure-registries", &options.InsecureRegistries, ValidateIndexName) - - flags.Var(mirrors, "registry-mirror", "Preferred Docker registry mirror") - flags.Var(insecureRegistries, "insecure-registry", "Enable insecure registry communication") - - options.installCliPlatformFlags(flags) -} - -// newServiceConfig returns a new instance of ServiceConfig -func newServiceConfig(options ServiceOptions) *serviceConfig { - config := &serviceConfig{ - ServiceConfig: registrytypes.ServiceConfig{ - InsecureRegistryCIDRs: make([]*registrytypes.NetIPNet, 0), - IndexConfigs: make(map[string]*registrytypes.IndexInfo, 0), - // Hack: Bypass setting the mirrors to IndexConfigs since they are going away - // and Mirrors are only for the official registry anyways. - }, - V2Only: options.V2Only, - } - - config.LoadMirrors(options.Mirrors) - config.LoadInsecureRegistries(options.InsecureRegistries) - - return config -} - -// LoadMirrors loads mirrors to config, after removing duplicates. -// Returns an error if mirrors contains an invalid mirror. -func (config *serviceConfig) LoadMirrors(mirrors []string) error { - mMap := map[string]struct{}{} - unique := []string{} - - for _, mirror := range mirrors { - m, err := ValidateMirror(mirror) - if err != nil { - return err - } - if _, exist := mMap[m]; !exist { - mMap[m] = struct{}{} - unique = append(unique, m) - } - } - - config.Mirrors = unique - - // Configure public registry since mirrors may have changed. 
- config.IndexConfigs[IndexName] = ®istrytypes.IndexInfo{ - Name: IndexName, - Mirrors: config.Mirrors, - Secure: true, - Official: true, - } - - return nil -} - -// LoadInsecureRegistries loads insecure registries to config -func (config *serviceConfig) LoadInsecureRegistries(registries []string) error { - // Localhost is by default considered as an insecure registry - // This is a stop-gap for people who are running a private registry on localhost (especially on Boot2docker). - // - // TODO: should we deprecate this once it is easier for people to set up a TLS registry or change - // daemon flags on boot2docker? - registries = append(registries, "127.0.0.0/8") - - // Store original InsecureRegistryCIDRs and IndexConfigs - // Clean InsecureRegistryCIDRs and IndexConfigs in config, as passed registries has all insecure registry info. - originalCIDRs := config.ServiceConfig.InsecureRegistryCIDRs - originalIndexInfos := config.ServiceConfig.IndexConfigs - - config.ServiceConfig.InsecureRegistryCIDRs = make([]*registrytypes.NetIPNet, 0) - config.ServiceConfig.IndexConfigs = make(map[string]*registrytypes.IndexInfo, 0) - -skip: - for _, r := range registries { - // validate insecure registry - if _, err := ValidateIndexName(r); err != nil { - // before returning err, roll back to original data - config.ServiceConfig.InsecureRegistryCIDRs = originalCIDRs - config.ServiceConfig.IndexConfigs = originalIndexInfos - return err - } - if strings.HasPrefix(strings.ToLower(r), "http://") { - logrus.Warnf("insecure registry %s should not contain 'http://' and 'http://' has been removed from the insecure registry config", r) - r = r[7:] - } else if strings.HasPrefix(strings.ToLower(r), "https://") { - logrus.Warnf("insecure registry %s should not contain 'https://' and 'https://' has been removed from the insecure registry config", r) - r = r[8:] - } else if validateNoScheme(r) != nil { - // Insecure registry should not contain '://' - // before returning err, roll back to original data - config.ServiceConfig.InsecureRegistryCIDRs = originalCIDRs - config.ServiceConfig.IndexConfigs = originalIndexInfos - return fmt.Errorf("insecure registry %s should not contain '://'", r) - } - // Check if CIDR was passed to --insecure-registry - _, ipnet, err := net.ParseCIDR(r) - if err == nil { - // Valid CIDR. If ipnet is already in config.InsecureRegistryCIDRs, skip. - data := (*registrytypes.NetIPNet)(ipnet) - for _, value := range config.InsecureRegistryCIDRs { - if value.IP.String() == data.IP.String() && value.Mask.String() == data.Mask.String() { - continue skip - } - } - // ipnet is not found, add it in config.InsecureRegistryCIDRs - config.InsecureRegistryCIDRs = append(config.InsecureRegistryCIDRs, data) - - } else { - if err := validateHostPort(r); err != nil { - config.ServiceConfig.InsecureRegistryCIDRs = originalCIDRs - config.ServiceConfig.IndexConfigs = originalIndexInfos - return fmt.Errorf("insecure registry %s is not valid: %v", r, err) - - } - // Assume `host:port` if not CIDR. - config.IndexConfigs[r] = ®istrytypes.IndexInfo{ - Name: r, - Mirrors: make([]string, 0), - Secure: false, - Official: false, - } - } - } - - // Configure public registry. - config.IndexConfigs[IndexName] = ®istrytypes.IndexInfo{ - Name: IndexName, - Mirrors: config.Mirrors, - Secure: true, - Official: true, - } - - return nil -} - -// isSecureIndex returns false if the provided indexName is part of the list of insecure registries -// Insecure registries accept HTTP and/or accept HTTPS with certificates from unknown CAs. 
-// -// The list of insecure registries can contain an element with CIDR notation to specify a whole subnet. -// If the subnet contains one of the IPs of the registry specified by indexName, the latter is considered -// insecure. -// -// indexName should be a URL.Host (`host:port` or `host`) where the `host` part can be either a domain name -// or an IP address. If it is a domain name, then it will be resolved in order to check if the IP is contained -// in a subnet. If the resolving is not successful, isSecureIndex will only try to match hostname to any element -// of insecureRegistries. -func isSecureIndex(config *serviceConfig, indexName string) bool { - // Check for configured index, first. This is needed in case isSecureIndex - // is called from anything besides newIndexInfo, in order to honor per-index configurations. - if index, ok := config.IndexConfigs[indexName]; ok { - return index.Secure - } - - host, _, err := net.SplitHostPort(indexName) - if err != nil { - // assume indexName is of the form `host` without the port and go on. - host = indexName - } - - addrs, err := lookupIP(host) - if err != nil { - ip := net.ParseIP(host) - if ip != nil { - addrs = []net.IP{ip} - } - - // if ip == nil, then `host` is neither an IP nor it could be looked up, - // either because the index is unreachable, or because the index is behind an HTTP proxy. - // So, len(addrs) == 0 and we're not aborting. - } - - // Try CIDR notation only if addrs has any elements, i.e. if `host`'s IP could be determined. - for _, addr := range addrs { - for _, ipnet := range config.InsecureRegistryCIDRs { - // check if the addr falls in the subnet - if (*net.IPNet)(ipnet).Contains(addr) { - return false - } - } - } - - return true -} - -// ValidateMirror validates an HTTP(S) registry mirror -func ValidateMirror(val string) (string, error) { - uri, err := url.Parse(val) - if err != nil { - return "", fmt.Errorf("invalid mirror: %q is not a valid URI", val) - } - if uri.Scheme != "http" && uri.Scheme != "https" { - return "", fmt.Errorf("invalid mirror: unsupported scheme %q in %q", uri.Scheme, uri) - } - if (uri.Path != "" && uri.Path != "/") || uri.RawQuery != "" || uri.Fragment != "" { - return "", fmt.Errorf("invalid mirror: path, query, or fragment at end of the URI %q", uri) - } - if uri.User != nil { - // strip password from output - uri.User = url.UserPassword(uri.User.Username(), "xxxxx") - return "", fmt.Errorf("invalid mirror: username/password not allowed in URI %q", uri) - } - return strings.TrimSuffix(val, "/") + "/", nil -} - -// ValidateIndexName validates an index name. -func ValidateIndexName(val string) (string, error) { - // TODO: upstream this to check to reference package - if val == "index.docker.io" { - val = "docker.io" - } - if strings.HasPrefix(val, "-") || strings.HasSuffix(val, "-") { - return "", fmt.Errorf("Invalid index name (%s). Cannot begin or end with a hyphen.", val) - } - return val, nil -} - -func validateNoScheme(reposName string) error { - if strings.Contains(reposName, "://") { - // It cannot contain a scheme! 
- return ErrInvalidRepositoryName - } - return nil -} - -func validateHostPort(s string) error { - // Split host and port, and in case s can not be splitted, assume host only - host, port, err := net.SplitHostPort(s) - if err != nil { - host = s - port = "" - } - // If match against the `host:port` pattern fails, - // it might be `IPv6:port`, which will be captured by net.ParseIP(host) - if !validHostPortRegex.MatchString(s) && net.ParseIP(host) == nil { - return fmt.Errorf("invalid host %q", host) - } - if port != "" { - v, err := strconv.Atoi(port) - if err != nil { - return err - } - if v < 0 || v > 65535 { - return fmt.Errorf("invalid port %q", port) - } - } - return nil -} - -// newIndexInfo returns IndexInfo configuration from indexName -func newIndexInfo(config *serviceConfig, indexName string) (*registrytypes.IndexInfo, error) { - var err error - indexName, err = ValidateIndexName(indexName) - if err != nil { - return nil, err - } - - // Return any configured index info, first. - if index, ok := config.IndexConfigs[indexName]; ok { - return index, nil - } - - // Construct a non-configured index info. - index := ®istrytypes.IndexInfo{ - Name: indexName, - Mirrors: make([]string, 0), - Official: false, - } - index.Secure = isSecureIndex(config, indexName) - return index, nil -} - -// GetAuthConfigKey special-cases using the full index address of the official -// index as the AuthConfig key, and uses the (host)name[:port] for private indexes. -func GetAuthConfigKey(index *registrytypes.IndexInfo) string { - if index.Official { - return IndexServer - } - return index.Name -} - -// newRepositoryInfo validates and breaks down a repository name into a RepositoryInfo -func newRepositoryInfo(config *serviceConfig, name reference.Named) (*RepositoryInfo, error) { - index, err := newIndexInfo(config, reference.Domain(name)) - if err != nil { - return nil, err - } - official := !strings.ContainsRune(reference.FamiliarName(name), '/') - - return &RepositoryInfo{ - Name: reference.TrimNamed(name), - Index: index, - Official: official, - }, nil -} - -// ParseRepositoryInfo performs the breakdown of a repository name into a RepositoryInfo, but -// lacks registry configuration. -func ParseRepositoryInfo(reposName reference.Named) (*RepositoryInfo, error) { - return newRepositoryInfo(emptyServiceConfig, reposName) -} - -// ParseSearchIndexInfo will use repository name to get back an indexInfo. -func ParseSearchIndexInfo(reposName string) (*registrytypes.IndexInfo, error) { - indexName, _ := splitReposSearchTerm(reposName) - - indexInfo, err := newIndexInfo(emptyServiceConfig, indexName) - if err != nil { - return nil, err - } - return indexInfo, nil -} diff --git a/vendor/github.com/docker/docker/registry/config_unix.go b/vendor/github.com/docker/docker/registry/config_unix.go deleted file mode 100644 index d692e8ef5..000000000 --- a/vendor/github.com/docker/docker/registry/config_unix.go +++ /dev/null @@ -1,25 +0,0 @@ -// +build !windows - -package registry - -import ( - "github.com/spf13/pflag" -) - -var ( - // CertsDir is the directory where certificates are stored - CertsDir = "/etc/docker/certs.d" -) - -// cleanPath is used to ensure that a directory name is valid on the target -// platform. It will be passed in something *similar* to a URL such as -// https:/index.docker.io/v1. 
Not all platforms support directory names -// which contain those characters (such as : on Windows) -func cleanPath(s string) string { - return s -} - -// installCliPlatformFlags handles any platform specific flags for the service. -func (options *ServiceOptions) installCliPlatformFlags(flags *pflag.FlagSet) { - flags.BoolVar(&options.V2Only, "disable-legacy-registry", false, "Disable contacting legacy registries") -} diff --git a/vendor/github.com/docker/docker/registry/config_windows.go b/vendor/github.com/docker/docker/registry/config_windows.go deleted file mode 100644 index d1b313dc1..000000000 --- a/vendor/github.com/docker/docker/registry/config_windows.go +++ /dev/null @@ -1,25 +0,0 @@ -package registry - -import ( - "os" - "path/filepath" - "strings" - - "github.com/spf13/pflag" -) - -// CertsDir is the directory where certificates are stored -var CertsDir = os.Getenv("programdata") + `\docker\certs.d` - -// cleanPath is used to ensure that a directory name is valid on the target -// platform. It will be passed in something *similar* to a URL such as -// https:\index.docker.io\v1. Not all platforms support directory names -// which contain those characters (such as : on Windows) -func cleanPath(s string) string { - return filepath.FromSlash(strings.Replace(s, ":", "", -1)) -} - -// installCliPlatformFlags handles any platform specific flags for the service. -func (options *ServiceOptions) installCliPlatformFlags(flags *pflag.FlagSet) { - // No Windows specific flags. -} diff --git a/vendor/github.com/docker/docker/registry/endpoint_v1.go b/vendor/github.com/docker/docker/registry/endpoint_v1.go deleted file mode 100644 index 6bcf8c935..000000000 --- a/vendor/github.com/docker/docker/registry/endpoint_v1.go +++ /dev/null @@ -1,198 +0,0 @@ -package registry - -import ( - "crypto/tls" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "net/url" - "strings" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/registry/client/transport" - registrytypes "github.com/docker/docker/api/types/registry" -) - -// V1Endpoint stores basic information about a V1 registry endpoint. -type V1Endpoint struct { - client *http.Client - URL *url.URL - IsSecure bool -} - -// NewV1Endpoint parses the given address to return a registry endpoint. -func NewV1Endpoint(index *registrytypes.IndexInfo, userAgent string, metaHeaders http.Header) (*V1Endpoint, error) { - tlsConfig, err := newTLSConfig(index.Name, index.Secure) - if err != nil { - return nil, err - } - - endpoint, err := newV1EndpointFromStr(GetAuthConfigKey(index), tlsConfig, userAgent, metaHeaders) - if err != nil { - return nil, err - } - - if err := validateEndpoint(endpoint); err != nil { - return nil, err - } - - return endpoint, nil -} - -func validateEndpoint(endpoint *V1Endpoint) error { - logrus.Debugf("pinging registry endpoint %s", endpoint) - - // Try HTTPS ping to registry - endpoint.URL.Scheme = "https" - if _, err := endpoint.Ping(); err != nil { - if endpoint.IsSecure { - // If registry is secure and HTTPS failed, show user the error and tell them about `--insecure-registry` - // in case that's what they need. DO NOT accept unknown CA certificates, and DO NOT fallback to HTTP. - return fmt.Errorf("invalid registry endpoint %s: %v. If this private registry supports only HTTP or HTTPS with an unknown CA certificate, please add `--insecure-registry %s` to the daemon's arguments. 
In the case of HTTPS, if you have access to the registry's CA certificate, no need for the flag; simply place the CA certificate at /etc/docker/certs.d/%s/ca.crt", endpoint, err, endpoint.URL.Host, endpoint.URL.Host) - } - - // If registry is insecure and HTTPS failed, fallback to HTTP. - logrus.Debugf("Error from registry %q marked as insecure: %v. Insecurely falling back to HTTP", endpoint, err) - endpoint.URL.Scheme = "http" - - var err2 error - if _, err2 = endpoint.Ping(); err2 == nil { - return nil - } - - return fmt.Errorf("invalid registry endpoint %q. HTTPS attempt: %v. HTTP attempt: %v", endpoint, err, err2) - } - - return nil -} - -func newV1Endpoint(address url.URL, tlsConfig *tls.Config, userAgent string, metaHeaders http.Header) (*V1Endpoint, error) { - endpoint := &V1Endpoint{ - IsSecure: (tlsConfig == nil || !tlsConfig.InsecureSkipVerify), - URL: new(url.URL), - } - - *endpoint.URL = address - - // TODO(tiborvass): make sure a ConnectTimeout transport is used - tr := NewTransport(tlsConfig) - endpoint.client = HTTPClient(transport.NewTransport(tr, DockerHeaders(userAgent, metaHeaders)...)) - return endpoint, nil -} - -// trimV1Address trims the version off the address and returns the -// trimmed address or an error if there is a non-V1 version. -func trimV1Address(address string) (string, error) { - var ( - chunks []string - apiVersionStr string - ) - - if strings.HasSuffix(address, "/") { - address = address[:len(address)-1] - } - - chunks = strings.Split(address, "/") - apiVersionStr = chunks[len(chunks)-1] - if apiVersionStr == "v1" { - return strings.Join(chunks[:len(chunks)-1], "/"), nil - } - - for k, v := range apiVersions { - if k != APIVersion1 && apiVersionStr == v { - return "", fmt.Errorf("unsupported V1 version path %s", apiVersionStr) - } - } - - return address, nil -} - -func newV1EndpointFromStr(address string, tlsConfig *tls.Config, userAgent string, metaHeaders http.Header) (*V1Endpoint, error) { - if !strings.HasPrefix(address, "http://") && !strings.HasPrefix(address, "https://") { - address = "https://" + address - } - - address, err := trimV1Address(address) - if err != nil { - return nil, err - } - - uri, err := url.Parse(address) - if err != nil { - return nil, err - } - - endpoint, err := newV1Endpoint(*uri, tlsConfig, userAgent, metaHeaders) - if err != nil { - return nil, err - } - - return endpoint, nil -} - -// Get the formatted URL for the root of this registry Endpoint -func (e *V1Endpoint) String() string { - return e.URL.String() + "/v1/" -} - -// Path returns a formatted string for the URL -// of this endpoint with the given path appended. -func (e *V1Endpoint) Path(path string) string { - return e.URL.String() + "/v1/" + path -} - -// Ping returns a PingResult which indicates whether the registry is standalone or not. 
-func (e *V1Endpoint) Ping() (PingResult, error) { - logrus.Debugf("attempting v1 ping for registry endpoint %s", e) - - if e.String() == IndexServer { - // Skip the check, we know this one is valid - // (and we never want to fallback to http in case of error) - return PingResult{Standalone: false}, nil - } - - req, err := http.NewRequest("GET", e.Path("_ping"), nil) - if err != nil { - return PingResult{Standalone: false}, err - } - - resp, err := e.client.Do(req) - if err != nil { - return PingResult{Standalone: false}, err - } - - defer resp.Body.Close() - - jsonString, err := ioutil.ReadAll(resp.Body) - if err != nil { - return PingResult{Standalone: false}, fmt.Errorf("error while reading the http response: %s", err) - } - - // If the header is absent, we assume true for compatibility with earlier - // versions of the registry. default to true - info := PingResult{ - Standalone: true, - } - if err := json.Unmarshal(jsonString, &info); err != nil { - logrus.Debugf("Error unmarshalling the _ping PingResult: %s", err) - // don't stop here. Just assume sane defaults - } - if hdr := resp.Header.Get("X-Docker-Registry-Version"); hdr != "" { - logrus.Debugf("Registry version header: '%s'", hdr) - info.Version = hdr - } - logrus.Debugf("PingResult.Version: %q", info.Version) - - standalone := resp.Header.Get("X-Docker-Registry-Standalone") - logrus.Debugf("Registry standalone header: '%s'", standalone) - // Accepted values are "true" (case-insensitive) and "1". - if strings.EqualFold(standalone, "true") || standalone == "1" { - info.Standalone = true - } else if len(standalone) > 0 { - // there is a header set, and it is not "true" or "1", so assume fails - info.Standalone = false - } - logrus.Debugf("PingResult.Standalone: %t", info.Standalone) - return info, nil -} diff --git a/vendor/github.com/docker/docker/registry/registry.go b/vendor/github.com/docker/docker/registry/registry.go deleted file mode 100644 index 17fa97ce3..000000000 --- a/vendor/github.com/docker/docker/registry/registry.go +++ /dev/null @@ -1,191 +0,0 @@ -// Package registry contains client primitives to interact with a remote Docker registry. -package registry - -import ( - "crypto/tls" - "errors" - "fmt" - "io/ioutil" - "net" - "net/http" - "os" - "path/filepath" - "strings" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/registry/client/transport" - "github.com/docker/go-connections/sockets" - "github.com/docker/go-connections/tlsconfig" -) - -var ( - // ErrAlreadyExists is an error returned if an image being pushed - // already exists on the remote side - ErrAlreadyExists = errors.New("Image already exists") -) - -func newTLSConfig(hostname string, isSecure bool) (*tls.Config, error) { - // PreferredServerCipherSuites should have no effect - tlsConfig := tlsconfig.ServerDefault() - - tlsConfig.InsecureSkipVerify = !isSecure - - if isSecure && CertsDir != "" { - hostDir := filepath.Join(CertsDir, cleanPath(hostname)) - logrus.Debugf("hostDir: %s", hostDir) - if err := ReadCertsDirectory(tlsConfig, hostDir); err != nil { - return nil, err - } - } - - return tlsConfig, nil -} - -func hasFile(files []os.FileInfo, name string) bool { - for _, f := range files { - if f.Name() == name { - return true - } - } - return false -} - -// ReadCertsDirectory reads the directory for TLS certificates -// including roots and certificate pairs and updates the -// provided TLS configuration. 
-func ReadCertsDirectory(tlsConfig *tls.Config, directory string) error { - fs, err := ioutil.ReadDir(directory) - if err != nil && !os.IsNotExist(err) { - return err - } - - for _, f := range fs { - if strings.HasSuffix(f.Name(), ".crt") { - if tlsConfig.RootCAs == nil { - systemPool, err := tlsconfig.SystemCertPool() - if err != nil { - return fmt.Errorf("unable to get system cert pool: %v", err) - } - tlsConfig.RootCAs = systemPool - } - logrus.Debugf("crt: %s", filepath.Join(directory, f.Name())) - data, err := ioutil.ReadFile(filepath.Join(directory, f.Name())) - if err != nil { - return err - } - tlsConfig.RootCAs.AppendCertsFromPEM(data) - } - if strings.HasSuffix(f.Name(), ".cert") { - certName := f.Name() - keyName := certName[:len(certName)-5] + ".key" - logrus.Debugf("cert: %s", filepath.Join(directory, f.Name())) - if !hasFile(fs, keyName) { - return fmt.Errorf("Missing key %s for client certificate %s. Note that CA certificates should use the extension .crt.", keyName, certName) - } - cert, err := tls.LoadX509KeyPair(filepath.Join(directory, certName), filepath.Join(directory, keyName)) - if err != nil { - return err - } - tlsConfig.Certificates = append(tlsConfig.Certificates, cert) - } - if strings.HasSuffix(f.Name(), ".key") { - keyName := f.Name() - certName := keyName[:len(keyName)-4] + ".cert" - logrus.Debugf("key: %s", filepath.Join(directory, f.Name())) - if !hasFile(fs, certName) { - return fmt.Errorf("Missing client certificate %s for key %s", certName, keyName) - } - } - } - - return nil -} - -// DockerHeaders returns request modifiers with a User-Agent and metaHeaders -func DockerHeaders(userAgent string, metaHeaders http.Header) []transport.RequestModifier { - modifiers := []transport.RequestModifier{} - if userAgent != "" { - modifiers = append(modifiers, transport.NewHeaderRequestModifier(http.Header{ - "User-Agent": []string{userAgent}, - })) - } - if metaHeaders != nil { - modifiers = append(modifiers, transport.NewHeaderRequestModifier(metaHeaders)) - } - return modifiers -} - -// HTTPClient returns an HTTP client structure which uses the given transport -// and contains the necessary headers for redirected requests -func HTTPClient(transport http.RoundTripper) *http.Client { - return &http.Client{ - Transport: transport, - CheckRedirect: addRequiredHeadersToRedirectedRequests, - } -} - -func trustedLocation(req *http.Request) bool { - var ( - trusteds = []string{"docker.com", "docker.io"} - hostname = strings.SplitN(req.Host, ":", 2)[0] - ) - if req.URL.Scheme != "https" { - return false - } - - for _, trusted := range trusteds { - if hostname == trusted || strings.HasSuffix(hostname, "."+trusted) { - return true - } - } - return false -} - -// addRequiredHeadersToRedirectedRequests adds the necessary redirection headers -// for redirected requests -func addRequiredHeadersToRedirectedRequests(req *http.Request, via []*http.Request) error { - if via != nil && via[0] != nil { - if trustedLocation(req) && trustedLocation(via[0]) { - req.Header = via[0].Header - return nil - } - for k, v := range via[0].Header { - if k != "Authorization" { - for _, vv := range v { - req.Header.Add(k, vv) - } - } - } - } - return nil -} - -// NewTransport returns a new HTTP transport. If tlsConfig is nil, it uses the -// default TLS configuration. 
-func NewTransport(tlsConfig *tls.Config) *http.Transport { - if tlsConfig == nil { - tlsConfig = tlsconfig.ServerDefault() - } - - direct := &net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - DualStack: true, - } - - base := &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: direct.Dial, - TLSHandshakeTimeout: 10 * time.Second, - TLSClientConfig: tlsConfig, - // TODO(dmcgowan): Call close idle connections when complete and use keep alive - DisableKeepAlives: true, - } - - proxyDialer, err := sockets.DialerFromEnvironment(direct) - if err == nil { - base.Dial = proxyDialer.Dial - } - return base -} diff --git a/vendor/github.com/docker/docker/registry/service.go b/vendor/github.com/docker/docker/registry/service.go deleted file mode 100644 index 56dabab75..000000000 --- a/vendor/github.com/docker/docker/registry/service.go +++ /dev/null @@ -1,313 +0,0 @@ -package registry - -import ( - "crypto/tls" - "fmt" - "net/http" - "net/url" - "strings" - "sync" - - "golang.org/x/net/context" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/client/auth" - "github.com/docker/docker/api/types" - registrytypes "github.com/docker/docker/api/types/registry" -) - -const ( - // DefaultSearchLimit is the default value for maximum number of returned search results. - DefaultSearchLimit = 25 -) - -// Service is the interface defining what a registry service should implement. -type Service interface { - Auth(ctx context.Context, authConfig *types.AuthConfig, userAgent string) (status, token string, err error) - LookupPullEndpoints(hostname string) (endpoints []APIEndpoint, err error) - LookupPushEndpoints(hostname string) (endpoints []APIEndpoint, err error) - ResolveRepository(name reference.Named) (*RepositoryInfo, error) - Search(ctx context.Context, term string, limit int, authConfig *types.AuthConfig, userAgent string, headers map[string][]string) (*registrytypes.SearchResults, error) - ServiceConfig() *registrytypes.ServiceConfig - TLSConfig(hostname string) (*tls.Config, error) - LoadMirrors([]string) error - LoadInsecureRegistries([]string) error -} - -// DefaultService is a registry service. It tracks configuration data such as a list -// of mirrors. -type DefaultService struct { - config *serviceConfig - mu sync.Mutex -} - -// NewService returns a new instance of DefaultService ready to be -// installed into an engine. -func NewService(options ServiceOptions) *DefaultService { - return &DefaultService{ - config: newServiceConfig(options), - } -} - -// ServiceConfig returns the public registry service configuration. -func (s *DefaultService) ServiceConfig() *registrytypes.ServiceConfig { - s.mu.Lock() - defer s.mu.Unlock() - - servConfig := registrytypes.ServiceConfig{ - InsecureRegistryCIDRs: make([]*(registrytypes.NetIPNet), 0), - IndexConfigs: make(map[string]*(registrytypes.IndexInfo)), - Mirrors: make([]string, 0), - } - - // construct a new ServiceConfig which will not retrieve s.Config directly, - // and look up items in s.config with mu locked - servConfig.InsecureRegistryCIDRs = append(servConfig.InsecureRegistryCIDRs, s.config.ServiceConfig.InsecureRegistryCIDRs...) - - for key, value := range s.config.ServiceConfig.IndexConfigs { - servConfig.IndexConfigs[key] = value - } - - servConfig.Mirrors = append(servConfig.Mirrors, s.config.ServiceConfig.Mirrors...) 
- - return &servConfig -} - -// LoadMirrors loads registry mirrors for Service -func (s *DefaultService) LoadMirrors(mirrors []string) error { - s.mu.Lock() - defer s.mu.Unlock() - - return s.config.LoadMirrors(mirrors) -} - -// LoadInsecureRegistries loads insecure registries for Service -func (s *DefaultService) LoadInsecureRegistries(registries []string) error { - s.mu.Lock() - defer s.mu.Unlock() - - return s.config.LoadInsecureRegistries(registries) -} - -// Auth contacts the public registry with the provided credentials, -// and returns OK if authentication was successful. -// It can be used to verify the validity of a client's credentials. -func (s *DefaultService) Auth(ctx context.Context, authConfig *types.AuthConfig, userAgent string) (status, token string, err error) { - // TODO Use ctx when searching for repositories - serverAddress := authConfig.ServerAddress - if serverAddress == "" { - serverAddress = IndexServer - } - if !strings.HasPrefix(serverAddress, "https://") && !strings.HasPrefix(serverAddress, "http://") { - serverAddress = "https://" + serverAddress - } - u, err := url.Parse(serverAddress) - if err != nil { - return "", "", fmt.Errorf("unable to parse server address: %v", err) - } - - endpoints, err := s.LookupPushEndpoints(u.Host) - if err != nil { - return "", "", err - } - - for _, endpoint := range endpoints { - login := loginV2 - if endpoint.Version == APIVersion1 { - login = loginV1 - } - - status, token, err = login(authConfig, endpoint, userAgent) - if err == nil { - return - } - if fErr, ok := err.(fallbackError); ok { - err = fErr.err - logrus.Infof("Error logging in to %s endpoint, trying next endpoint: %v", endpoint.Version, err) - continue - } - return "", "", err - } - - return "", "", err -} - -// splitReposSearchTerm breaks a search term into an index name and remote name -func splitReposSearchTerm(reposName string) (string, string) { - nameParts := strings.SplitN(reposName, "/", 2) - var indexName, remoteName string - if len(nameParts) == 1 || (!strings.Contains(nameParts[0], ".") && - !strings.Contains(nameParts[0], ":") && nameParts[0] != "localhost") { - // This is a Docker Index repos (ex: samalba/hipache or ubuntu) - // 'docker.io' - indexName = IndexName - remoteName = reposName - } else { - indexName = nameParts[0] - remoteName = nameParts[1] - } - return indexName, remoteName -} - -// Search queries the public registry for images matching the specified -// search terms, and returns the results. -func (s *DefaultService) Search(ctx context.Context, term string, limit int, authConfig *types.AuthConfig, userAgent string, headers map[string][]string) (*registrytypes.SearchResults, error) { - // TODO Use ctx when searching for repositories - if err := validateNoScheme(term); err != nil { - return nil, err - } - - indexName, remoteName := splitReposSearchTerm(term) - - // Search is a long-running operation, just lock s.config to avoid block others. - s.mu.Lock() - index, err := newIndexInfo(s.config, indexName) - s.mu.Unlock() - - if err != nil { - return nil, err - } - - // *TODO: Search multiple indexes. 
- endpoint, err := NewV1Endpoint(index, userAgent, http.Header(headers)) - if err != nil { - return nil, err - } - - var client *http.Client - if authConfig != nil && authConfig.IdentityToken != "" && authConfig.Username != "" { - creds := NewStaticCredentialStore(authConfig) - scopes := []auth.Scope{ - auth.RegistryScope{ - Name: "catalog", - Actions: []string{"search"}, - }, - } - - modifiers := DockerHeaders(userAgent, nil) - v2Client, foundV2, err := v2AuthHTTPClient(endpoint.URL, endpoint.client.Transport, modifiers, creds, scopes) - if err != nil { - if fErr, ok := err.(fallbackError); ok { - logrus.Errorf("Cannot use identity token for search, v2 auth not supported: %v", fErr.err) - } else { - return nil, err - } - } else if foundV2 { - // Copy non transport http client features - v2Client.Timeout = endpoint.client.Timeout - v2Client.CheckRedirect = endpoint.client.CheckRedirect - v2Client.Jar = endpoint.client.Jar - - logrus.Debugf("using v2 client for search to %s", endpoint.URL) - client = v2Client - } - } - - if client == nil { - client = endpoint.client - if err := authorizeClient(client, authConfig, endpoint); err != nil { - return nil, err - } - } - - r := newSession(client, authConfig, endpoint) - - if index.Official { - localName := remoteName - if strings.HasPrefix(localName, "library/") { - // If pull "library/foo", it's stored locally under "foo" - localName = strings.SplitN(localName, "/", 2)[1] - } - - return r.SearchRepositories(localName, limit) - } - return r.SearchRepositories(remoteName, limit) -} - -// ResolveRepository splits a repository name into its components -// and configuration of the associated registry. -func (s *DefaultService) ResolveRepository(name reference.Named) (*RepositoryInfo, error) { - s.mu.Lock() - defer s.mu.Unlock() - return newRepositoryInfo(s.config, name) -} - -// APIEndpoint represents a remote API endpoint -type APIEndpoint struct { - Mirror bool - URL *url.URL - Version APIVersion - Official bool - TrimHostname bool - TLSConfig *tls.Config -} - -// ToV1Endpoint returns a V1 API endpoint based on the APIEndpoint -func (e APIEndpoint) ToV1Endpoint(userAgent string, metaHeaders http.Header) (*V1Endpoint, error) { - return newV1Endpoint(*e.URL, e.TLSConfig, userAgent, metaHeaders) -} - -// TLSConfig constructs a client TLS configuration based on server defaults -func (s *DefaultService) TLSConfig(hostname string) (*tls.Config, error) { - s.mu.Lock() - defer s.mu.Unlock() - - return newTLSConfig(hostname, isSecureIndex(s.config, hostname)) -} - -// tlsConfig constructs a client TLS configuration based on server defaults -func (s *DefaultService) tlsConfig(hostname string) (*tls.Config, error) { - return newTLSConfig(hostname, isSecureIndex(s.config, hostname)) -} - -func (s *DefaultService) tlsConfigForMirror(mirrorURL *url.URL) (*tls.Config, error) { - return s.tlsConfig(mirrorURL.Host) -} - -// LookupPullEndpoints creates a list of endpoints to try to pull from, in order of preference. -// It gives preference to v2 endpoints over v1, mirrors over the actual -// registry, and HTTPS over plain HTTP. -func (s *DefaultService) LookupPullEndpoints(hostname string) (endpoints []APIEndpoint, err error) { - s.mu.Lock() - defer s.mu.Unlock() - - return s.lookupEndpoints(hostname) -} - -// LookupPushEndpoints creates a list of endpoints to try to push to, in order of preference. -// It gives preference to v2 endpoints over v1, and HTTPS over plain HTTP. -// Mirrors are not included. 
-func (s *DefaultService) LookupPushEndpoints(hostname string) (endpoints []APIEndpoint, err error) { - s.mu.Lock() - defer s.mu.Unlock() - - allEndpoints, err := s.lookupEndpoints(hostname) - if err == nil { - for _, endpoint := range allEndpoints { - if !endpoint.Mirror { - endpoints = append(endpoints, endpoint) - } - } - } - return endpoints, err -} - -func (s *DefaultService) lookupEndpoints(hostname string) (endpoints []APIEndpoint, err error) { - endpoints, err = s.lookupV2Endpoints(hostname) - if err != nil { - return nil, err - } - - if s.config.V2Only { - return endpoints, nil - } - - legacyEndpoints, err := s.lookupV1Endpoints(hostname) - if err != nil { - return nil, err - } - endpoints = append(endpoints, legacyEndpoints...) - - return endpoints, nil -} diff --git a/vendor/github.com/docker/docker/registry/service_v1.go b/vendor/github.com/docker/docker/registry/service_v1.go deleted file mode 100644 index 1d251aec6..000000000 --- a/vendor/github.com/docker/docker/registry/service_v1.go +++ /dev/null @@ -1,40 +0,0 @@ -package registry - -import "net/url" - -func (s *DefaultService) lookupV1Endpoints(hostname string) (endpoints []APIEndpoint, err error) { - if hostname == DefaultNamespace || hostname == DefaultV2Registry.Host || hostname == IndexHostname { - return []APIEndpoint{}, nil - } - - tlsConfig, err := s.tlsConfig(hostname) - if err != nil { - return nil, err - } - - endpoints = []APIEndpoint{ - { - URL: &url.URL{ - Scheme: "https", - Host: hostname, - }, - Version: APIVersion1, - TrimHostname: true, - TLSConfig: tlsConfig, - }, - } - - if tlsConfig.InsecureSkipVerify { - endpoints = append(endpoints, APIEndpoint{ // or this - URL: &url.URL{ - Scheme: "http", - Host: hostname, - }, - Version: APIVersion1, - TrimHostname: true, - // used to check if supposed to be secure via InsecureSkipVerify - TLSConfig: tlsConfig, - }) - } - return endpoints, nil -} diff --git a/vendor/github.com/docker/docker/registry/service_v2.go b/vendor/github.com/docker/docker/registry/service_v2.go deleted file mode 100644 index 228d745f8..000000000 --- a/vendor/github.com/docker/docker/registry/service_v2.go +++ /dev/null @@ -1,78 +0,0 @@ -package registry - -import ( - "net/url" - "strings" - - "github.com/docker/go-connections/tlsconfig" -) - -func (s *DefaultService) lookupV2Endpoints(hostname string) (endpoints []APIEndpoint, err error) { - tlsConfig := tlsconfig.ServerDefault() - if hostname == DefaultNamespace || hostname == IndexHostname { - // v2 mirrors - for _, mirror := range s.config.Mirrors { - if !strings.HasPrefix(mirror, "http://") && !strings.HasPrefix(mirror, "https://") { - mirror = "https://" + mirror - } - mirrorURL, err := url.Parse(mirror) - if err != nil { - return nil, err - } - mirrorTLSConfig, err := s.tlsConfigForMirror(mirrorURL) - if err != nil { - return nil, err - } - endpoints = append(endpoints, APIEndpoint{ - URL: mirrorURL, - // guess mirrors are v2 - Version: APIVersion2, - Mirror: true, - TrimHostname: true, - TLSConfig: mirrorTLSConfig, - }) - } - // v2 registry - endpoints = append(endpoints, APIEndpoint{ - URL: DefaultV2Registry, - Version: APIVersion2, - Official: true, - TrimHostname: true, - TLSConfig: tlsConfig, - }) - - return endpoints, nil - } - - tlsConfig, err = s.tlsConfig(hostname) - if err != nil { - return nil, err - } - - endpoints = []APIEndpoint{ - { - URL: &url.URL{ - Scheme: "https", - Host: hostname, - }, - Version: APIVersion2, - TrimHostname: true, - TLSConfig: tlsConfig, - }, - } - - if tlsConfig.InsecureSkipVerify { - endpoints = 
append(endpoints, APIEndpoint{ - URL: &url.URL{ - Scheme: "http", - Host: hostname, - }, - Version: APIVersion2, - TrimHostname: true, - // used to check if supposed to be secure via InsecureSkipVerify - TLSConfig: tlsConfig, - }) - } - - return endpoints, nil -} diff --git a/vendor/github.com/docker/docker/registry/session.go b/vendor/github.com/docker/docker/registry/session.go deleted file mode 100644 index c71e77803..000000000 --- a/vendor/github.com/docker/docker/registry/session.go +++ /dev/null @@ -1,783 +0,0 @@ -package registry - -import ( - "bytes" - "crypto/sha256" - "errors" - "sync" - // this is required for some certificates - _ "crypto/sha512" - "encoding/hex" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/http/cookiejar" - "net/url" - "strconv" - "strings" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/api/errcode" - "github.com/docker/docker/api/types" - registrytypes "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/pkg/httputils" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/pkg/tarsum" -) - -var ( - // ErrRepoNotFound is returned if the repository didn't exist on the - // remote side - ErrRepoNotFound = errors.New("Repository not found") -) - -// A Session is used to communicate with a V1 registry -type Session struct { - indexEndpoint *V1Endpoint - client *http.Client - // TODO(tiborvass): remove authConfig - authConfig *types.AuthConfig - id string -} - -type authTransport struct { - http.RoundTripper - *types.AuthConfig - - alwaysSetBasicAuth bool - token []string - - mu sync.Mutex // guards modReq - modReq map[*http.Request]*http.Request // original -> modified -} - -// AuthTransport handles the auth layer when communicating with a v1 registry (private or official) -// -// For private v1 registries, set alwaysSetBasicAuth to true. -// -// For the official v1 registry, if there isn't already an Authorization header in the request, -// but there is an X-Docker-Token header set to true, then Basic Auth will be used to set the Authorization header. -// After sending the request with the provided base http.RoundTripper, if an X-Docker-Token header, representing -// a token, is present in the response, then it gets cached and sent in the Authorization header of all subsequent -// requests. -// -// If the server sends a token without the client having requested it, it is ignored. -// -// This RoundTripper also has a CancelRequest method important for correct timeout handling. -func AuthTransport(base http.RoundTripper, authConfig *types.AuthConfig, alwaysSetBasicAuth bool) http.RoundTripper { - if base == nil { - base = http.DefaultTransport - } - return &authTransport{ - RoundTripper: base, - AuthConfig: authConfig, - alwaysSetBasicAuth: alwaysSetBasicAuth, - modReq: make(map[*http.Request]*http.Request), - } -} - -// cloneRequest returns a clone of the provided *http.Request. -// The clone is a shallow copy of the struct and its Header map. -func cloneRequest(r *http.Request) *http.Request { - // shallow copy of the struct - r2 := new(http.Request) - *r2 = *r - // deep copy of the Header - r2.Header = make(http.Header, len(r.Header)) - for k, s := range r.Header { - r2.Header[k] = append([]string(nil), s...) 
- } - - return r2 -} - -// RoundTrip changes an HTTP request's headers to add the necessary -// authentication-related headers -func (tr *authTransport) RoundTrip(orig *http.Request) (*http.Response, error) { - // Authorization should not be set on 302 redirect for untrusted locations. - // This logic mirrors the behavior in addRequiredHeadersToRedirectedRequests. - // As the authorization logic is currently implemented in RoundTrip, - // a 302 redirect is detected by looking at the Referrer header as go http package adds said header. - // This is safe as Docker doesn't set Referrer in other scenarios. - if orig.Header.Get("Referer") != "" && !trustedLocation(orig) { - return tr.RoundTripper.RoundTrip(orig) - } - - req := cloneRequest(orig) - tr.mu.Lock() - tr.modReq[orig] = req - tr.mu.Unlock() - - if tr.alwaysSetBasicAuth { - if tr.AuthConfig == nil { - return nil, errors.New("unexpected error: empty auth config") - } - req.SetBasicAuth(tr.Username, tr.Password) - return tr.RoundTripper.RoundTrip(req) - } - - // Don't override - if req.Header.Get("Authorization") == "" { - if req.Header.Get("X-Docker-Token") == "true" && tr.AuthConfig != nil && len(tr.Username) > 0 { - req.SetBasicAuth(tr.Username, tr.Password) - } else if len(tr.token) > 0 { - req.Header.Set("Authorization", "Token "+strings.Join(tr.token, ",")) - } - } - resp, err := tr.RoundTripper.RoundTrip(req) - if err != nil { - delete(tr.modReq, orig) - return nil, err - } - if len(resp.Header["X-Docker-Token"]) > 0 { - tr.token = resp.Header["X-Docker-Token"] - } - resp.Body = &ioutils.OnEOFReader{ - Rc: resp.Body, - Fn: func() { - tr.mu.Lock() - delete(tr.modReq, orig) - tr.mu.Unlock() - }, - } - return resp, nil -} - -// CancelRequest cancels an in-flight request by closing its connection. -func (tr *authTransport) CancelRequest(req *http.Request) { - type canceler interface { - CancelRequest(*http.Request) - } - if cr, ok := tr.RoundTripper.(canceler); ok { - tr.mu.Lock() - modReq := tr.modReq[req] - delete(tr.modReq, req) - tr.mu.Unlock() - cr.CancelRequest(modReq) - } -} - -func authorizeClient(client *http.Client, authConfig *types.AuthConfig, endpoint *V1Endpoint) error { - var alwaysSetBasicAuth bool - - // If we're working with a standalone private registry over HTTPS, send Basic Auth headers - // alongside all our requests. - if endpoint.String() != IndexServer && endpoint.URL.Scheme == "https" { - info, err := endpoint.Ping() - if err != nil { - return err - } - if info.Standalone && authConfig != nil { - logrus.Debugf("Endpoint %s is eligible for private registry. Enabling decorator.", endpoint.String()) - alwaysSetBasicAuth = true - } - } - - // Annotate the transport unconditionally so that v2 can - // properly fallback on v1 when an image is not found. 
- client.Transport = AuthTransport(client.Transport, authConfig, alwaysSetBasicAuth) - - jar, err := cookiejar.New(nil) - if err != nil { - return errors.New("cookiejar.New is not supposed to return an error") - } - client.Jar = jar - - return nil -} - -func newSession(client *http.Client, authConfig *types.AuthConfig, endpoint *V1Endpoint) *Session { - return &Session{ - authConfig: authConfig, - client: client, - indexEndpoint: endpoint, - id: stringid.GenerateRandomID(), - } -} - -// NewSession creates a new session -// TODO(tiborvass): remove authConfig param once registry client v2 is vendored -func NewSession(client *http.Client, authConfig *types.AuthConfig, endpoint *V1Endpoint) (*Session, error) { - if err := authorizeClient(client, authConfig, endpoint); err != nil { - return nil, err - } - - return newSession(client, authConfig, endpoint), nil -} - -// ID returns this registry session's ID. -func (r *Session) ID() string { - return r.id -} - -// GetRemoteHistory retrieves the history of a given image from the registry. -// It returns a list of the parent's JSON files (including the requested image). -func (r *Session) GetRemoteHistory(imgID, registry string) ([]string, error) { - res, err := r.client.Get(registry + "images/" + imgID + "/ancestry") - if err != nil { - return nil, err - } - defer res.Body.Close() - if res.StatusCode != 200 { - if res.StatusCode == 401 { - return nil, errcode.ErrorCodeUnauthorized.WithArgs() - } - return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to fetch remote history for %s", res.StatusCode, imgID), res) - } - - var history []string - if err := json.NewDecoder(res.Body).Decode(&history); err != nil { - return nil, fmt.Errorf("Error while reading the http response: %v", err) - } - - logrus.Debugf("Ancestry: %v", history) - return history, nil -} - -// LookupRemoteImage checks if an image exists in the registry -func (r *Session) LookupRemoteImage(imgID, registry string) error { - res, err := r.client.Get(registry + "images/" + imgID + "/json") - if err != nil { - return err - } - res.Body.Close() - if res.StatusCode != 200 { - return httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d", res.StatusCode), res) - } - return nil -} - -// GetRemoteImageJSON retrieves an image's JSON metadata from the registry. 
-func (r *Session) GetRemoteImageJSON(imgID, registry string) ([]byte, int64, error) { - res, err := r.client.Get(registry + "images/" + imgID + "/json") - if err != nil { - return nil, -1, fmt.Errorf("Failed to download json: %s", err) - } - defer res.Body.Close() - if res.StatusCode != 200 { - return nil, -1, httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d", res.StatusCode), res) - } - // if the size header is not present, then set it to '-1' - imageSize := int64(-1) - if hdr := res.Header.Get("X-Docker-Size"); hdr != "" { - imageSize, err = strconv.ParseInt(hdr, 10, 64) - if err != nil { - return nil, -1, err - } - } - - jsonString, err := ioutil.ReadAll(res.Body) - if err != nil { - return nil, -1, fmt.Errorf("Failed to parse downloaded json: %v (%s)", err, jsonString) - } - return jsonString, imageSize, nil -} - -// GetRemoteImageLayer retrieves an image layer from the registry -func (r *Session) GetRemoteImageLayer(imgID, registry string, imgSize int64) (io.ReadCloser, error) { - var ( - statusCode = 0 - res *http.Response - err error - imageURL = fmt.Sprintf("%simages/%s/layer", registry, imgID) - ) - - req, err := http.NewRequest("GET", imageURL, nil) - if err != nil { - return nil, fmt.Errorf("Error while getting from the server: %v", err) - } - - res, err = r.client.Do(req) - if err != nil { - logrus.Debugf("Error contacting registry %s: %v", registry, err) - // the only case err != nil && res != nil is https://golang.org/src/net/http/client.go#L515 - if res != nil { - if res.Body != nil { - res.Body.Close() - } - statusCode = res.StatusCode - } - return nil, fmt.Errorf("Server error: Status %d while fetching image layer (%s)", - statusCode, imgID) - } - - if res.StatusCode != 200 { - res.Body.Close() - return nil, fmt.Errorf("Server error: Status %d while fetching image layer (%s)", - res.StatusCode, imgID) - } - - if res.Header.Get("Accept-Ranges") == "bytes" && imgSize > 0 { - logrus.Debug("server supports resume") - return httputils.ResumableRequestReaderWithInitialResponse(r.client, req, 5, imgSize, res), nil - } - logrus.Debug("server doesn't support resume") - return res.Body, nil -} - -// GetRemoteTag retrieves the tag named in the askedTag argument from the given -// repository. It queries each of the registries supplied in the registries -// argument, and returns data from the first one that answers the query -// successfully. -func (r *Session) GetRemoteTag(registries []string, repositoryRef reference.Named, askedTag string) (string, error) { - repository := reference.Path(repositoryRef) - - if strings.Count(repository, "/") == 0 { - // This will be removed once the registry supports auto-resolution on - // the "library" namespace - repository = "library/" + repository - } - for _, host := range registries { - endpoint := fmt.Sprintf("%srepositories/%s/tags/%s", host, repository, askedTag) - res, err := r.client.Get(endpoint) - if err != nil { - return "", err - } - - logrus.Debugf("Got status code %d from %s", res.StatusCode, endpoint) - defer res.Body.Close() - - if res.StatusCode == 404 { - return "", ErrRepoNotFound - } - if res.StatusCode != 200 { - continue - } - - var tagID string - if err := json.NewDecoder(res.Body).Decode(&tagID); err != nil { - return "", err - } - return tagID, nil - } - return "", fmt.Errorf("Could not reach any registry endpoint") -} - -// GetRemoteTags retrieves all tags from the given repository. 
It queries each -// of the registries supplied in the registries argument, and returns data from -// the first one that answers the query successfully. It returns a map with -// tag names as the keys and image IDs as the values. -func (r *Session) GetRemoteTags(registries []string, repositoryRef reference.Named) (map[string]string, error) { - repository := reference.Path(repositoryRef) - - if strings.Count(repository, "/") == 0 { - // This will be removed once the registry supports auto-resolution on - // the "library" namespace - repository = "library/" + repository - } - for _, host := range registries { - endpoint := fmt.Sprintf("%srepositories/%s/tags", host, repository) - res, err := r.client.Get(endpoint) - if err != nil { - return nil, err - } - - logrus.Debugf("Got status code %d from %s", res.StatusCode, endpoint) - defer res.Body.Close() - - if res.StatusCode == 404 { - return nil, ErrRepoNotFound - } - if res.StatusCode != 200 { - continue - } - - result := make(map[string]string) - if err := json.NewDecoder(res.Body).Decode(&result); err != nil { - return nil, err - } - return result, nil - } - return nil, fmt.Errorf("Could not reach any registry endpoint") -} - -func buildEndpointsList(headers []string, indexEp string) ([]string, error) { - var endpoints []string - parsedURL, err := url.Parse(indexEp) - if err != nil { - return nil, err - } - var urlScheme = parsedURL.Scheme - // The registry's URL scheme has to match the Index' - for _, ep := range headers { - epList := strings.Split(ep, ",") - for _, epListElement := range epList { - endpoints = append( - endpoints, - fmt.Sprintf("%s://%s/v1/", urlScheme, strings.TrimSpace(epListElement))) - } - } - return endpoints, nil -} - -// GetRepositoryData returns lists of images and endpoints for the repository -func (r *Session) GetRepositoryData(name reference.Named) (*RepositoryData, error) { - repositoryTarget := fmt.Sprintf("%srepositories/%s/images", r.indexEndpoint.String(), reference.Path(name)) - - logrus.Debugf("[registry] Calling GET %s", repositoryTarget) - - req, err := http.NewRequest("GET", repositoryTarget, nil) - if err != nil { - return nil, err - } - // this will set basic auth in r.client.Transport and send cached X-Docker-Token headers for all subsequent requests - req.Header.Set("X-Docker-Token", "true") - res, err := r.client.Do(req) - if err != nil { - // check if the error is because of i/o timeout - // and return a non-obtuse error message for users - // "Get https://index.docker.io/v1/repositories/library/busybox/images: i/o timeout" - // was a top search on the docker user forum - if isTimeout(err) { - return nil, fmt.Errorf("Network timed out while trying to connect to %s. You may want to check your internet connection or if you are behind a proxy.", repositoryTarget) - } - return nil, fmt.Errorf("Error while pulling image: %v", err) - } - defer res.Body.Close() - if res.StatusCode == 401 { - return nil, errcode.ErrorCodeUnauthorized.WithArgs() - } - // TODO: Right now we're ignoring checksums in the response body. - // In the future, we need to use them to check image validity. 
- if res.StatusCode == 404 { - return nil, httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code: %d", res.StatusCode), res) - } else if res.StatusCode != 200 { - errBody, err := ioutil.ReadAll(res.Body) - if err != nil { - logrus.Debugf("Error reading response body: %s", err) - } - return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to pull repository %s: %q", res.StatusCode, reference.Path(name), errBody), res) - } - - var endpoints []string - if res.Header.Get("X-Docker-Endpoints") != "" { - endpoints, err = buildEndpointsList(res.Header["X-Docker-Endpoints"], r.indexEndpoint.String()) - if err != nil { - return nil, err - } - } else { - // Assume the endpoint is on the same host - endpoints = append(endpoints, fmt.Sprintf("%s://%s/v1/", r.indexEndpoint.URL.Scheme, req.URL.Host)) - } - - remoteChecksums := []*ImgData{} - if err := json.NewDecoder(res.Body).Decode(&remoteChecksums); err != nil { - return nil, err - } - - // Forge a better object from the retrieved data - imgsData := make(map[string]*ImgData, len(remoteChecksums)) - for _, elem := range remoteChecksums { - imgsData[elem.ID] = elem - } - - return &RepositoryData{ - ImgList: imgsData, - Endpoints: endpoints, - }, nil -} - -// PushImageChecksumRegistry uploads checksums for an image -func (r *Session) PushImageChecksumRegistry(imgData *ImgData, registry string) error { - u := registry + "images/" + imgData.ID + "/checksum" - - logrus.Debugf("[registry] Calling PUT %s", u) - - req, err := http.NewRequest("PUT", u, nil) - if err != nil { - return err - } - req.Header.Set("X-Docker-Checksum", imgData.Checksum) - req.Header.Set("X-Docker-Checksum-Payload", imgData.ChecksumPayload) - - res, err := r.client.Do(req) - if err != nil { - return fmt.Errorf("Failed to upload metadata: %v", err) - } - defer res.Body.Close() - if len(res.Cookies()) > 0 { - r.client.Jar.SetCookies(req.URL, res.Cookies()) - } - if res.StatusCode != 200 { - errBody, err := ioutil.ReadAll(res.Body) - if err != nil { - return fmt.Errorf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err) - } - var jsonBody map[string]string - if err := json.Unmarshal(errBody, &jsonBody); err != nil { - errBody = []byte(err.Error()) - } else if jsonBody["error"] == "Image already exists" { - return ErrAlreadyExists - } - return fmt.Errorf("HTTP code %d while uploading metadata: %q", res.StatusCode, errBody) - } - return nil -} - -// PushImageJSONRegistry pushes JSON metadata for a local image to the registry -func (r *Session) PushImageJSONRegistry(imgData *ImgData, jsonRaw []byte, registry string) error { - - u := registry + "images/" + imgData.ID + "/json" - - logrus.Debugf("[registry] Calling PUT %s", u) - - req, err := http.NewRequest("PUT", u, bytes.NewReader(jsonRaw)) - if err != nil { - return err - } - req.Header.Add("Content-type", "application/json") - - res, err := r.client.Do(req) - if err != nil { - return fmt.Errorf("Failed to upload metadata: %s", err) - } - defer res.Body.Close() - if res.StatusCode == 401 && strings.HasPrefix(registry, "http://") { - return httputils.NewHTTPRequestError("HTTP code 401, Docker will not send auth headers over HTTP.", res) - } - if res.StatusCode != 200 { - errBody, err := ioutil.ReadAll(res.Body) - if err != nil { - return httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err), res) - } - var jsonBody map[string]string - if err := 
json.Unmarshal(errBody, &jsonBody); err != nil { - errBody = []byte(err.Error()) - } else if jsonBody["error"] == "Image already exists" { - return ErrAlreadyExists - } - return httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata: %q", res.StatusCode, errBody), res) - } - return nil -} - -// PushImageLayerRegistry sends the checksum of an image layer to the registry -func (r *Session) PushImageLayerRegistry(imgID string, layer io.Reader, registry string, jsonRaw []byte) (checksum string, checksumPayload string, err error) { - u := registry + "images/" + imgID + "/layer" - - logrus.Debugf("[registry] Calling PUT %s", u) - - tarsumLayer, err := tarsum.NewTarSum(layer, false, tarsum.Version0) - if err != nil { - return "", "", err - } - h := sha256.New() - h.Write(jsonRaw) - h.Write([]byte{'\n'}) - checksumLayer := io.TeeReader(tarsumLayer, h) - - req, err := http.NewRequest("PUT", u, checksumLayer) - if err != nil { - return "", "", err - } - req.Header.Add("Content-Type", "application/octet-stream") - req.ContentLength = -1 - req.TransferEncoding = []string{"chunked"} - res, err := r.client.Do(req) - if err != nil { - return "", "", fmt.Errorf("Failed to upload layer: %v", err) - } - if rc, ok := layer.(io.Closer); ok { - if err := rc.Close(); err != nil { - return "", "", err - } - } - defer res.Body.Close() - - if res.StatusCode != 200 { - errBody, err := ioutil.ReadAll(res.Body) - if err != nil { - return "", "", httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err), res) - } - return "", "", httputils.NewHTTPRequestError(fmt.Sprintf("Received HTTP code %d while uploading layer: %q", res.StatusCode, errBody), res) - } - - checksumPayload = "sha256:" + hex.EncodeToString(h.Sum(nil)) - return tarsumLayer.Sum(jsonRaw), checksumPayload, nil -} - -// PushRegistryTag pushes a tag on the registry. 
-// Remote has the format '<user>/<repo>
-func (r *Session) PushRegistryTag(remote reference.Named, revision, tag, registry string) error {
-	// "jsonify" the string
-	revision = "\"" + revision + "\""
-	path := fmt.Sprintf("repositories/%s/tags/%s", reference.Path(remote), tag)
-
-	req, err := http.NewRequest("PUT", registry+path, strings.NewReader(revision))
-	if err != nil {
-		return err
-	}
-	req.Header.Add("Content-type", "application/json")
-	req.ContentLength = int64(len(revision))
-	res, err := r.client.Do(req)
-	if err != nil {
-		return err
-	}
-	res.Body.Close()
-	if res.StatusCode != 200 && res.StatusCode != 201 {
-		return httputils.NewHTTPRequestError(fmt.Sprintf("Internal server error: %d trying to push tag %s on %s", res.StatusCode, tag, reference.Path(remote)), res)
-	}
-	return nil
-}
-
-// PushImageJSONIndex uploads an image list to the repository
-func (r *Session) PushImageJSONIndex(remote reference.Named, imgList []*ImgData, validate bool, regs []string) (*RepositoryData, error) {
-	cleanImgList := []*ImgData{}
-	if validate {
-		for _, elem := range imgList {
-			if elem.Checksum != "" {
-				cleanImgList = append(cleanImgList, elem)
-			}
-		}
-	} else {
-		cleanImgList = imgList
-	}
-
-	imgListJSON, err := json.Marshal(cleanImgList)
-	if err != nil {
-		return nil, err
-	}
-	var suffix string
-	if validate {
-		suffix = "images"
-	}
-	u := fmt.Sprintf("%srepositories/%s/%s", r.indexEndpoint.String(), reference.Path(remote), suffix)
-	logrus.Debugf("[registry] PUT %s", u)
-	logrus.Debugf("Image list pushed to index:\n%s", imgListJSON)
-	headers := map[string][]string{
-		"Content-type": {"application/json"},
-		// this will set basic auth in r.client.Transport and send cached X-Docker-Token headers for all subsequent requests
-		"X-Docker-Token": {"true"},
-	}
-	if validate {
-		headers["X-Docker-Endpoints"] = regs
-	}
-
-	// Redirect if necessary
-	var res *http.Response
-	for {
-		if res, err = r.putImageRequest(u, headers, imgListJSON); err != nil {
-			return nil, err
-		}
-		if !shouldRedirect(res) {
-			break
-		}
-		res.Body.Close()
-		u = res.Header.Get("Location")
-		logrus.Debugf("Redirected to %s", u)
-	}
-	defer res.Body.Close()
-
-	if res.StatusCode == 401 {
-		return nil, errcode.ErrorCodeUnauthorized.WithArgs()
-	}
-
-	var tokens, endpoints []string
-	if !validate {
-		if res.StatusCode != 200 && res.StatusCode != 201 {
-			errBody, err := ioutil.ReadAll(res.Body)
-			if err != nil {
-				logrus.Debugf("Error reading response body: %s", err)
-			}
-			return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to push repository %s: %q", res.StatusCode, reference.Path(remote), errBody), res)
-		}
-		tokens = res.Header["X-Docker-Token"]
-		logrus.Debugf("Auth token: %v", tokens)
-
-		if res.Header.Get("X-Docker-Endpoints") == "" {
-			return nil, fmt.Errorf("Index response didn't contain any endpoints")
-		}
-		endpoints, err = buildEndpointsList(res.Header["X-Docker-Endpoints"], r.indexEndpoint.String())
-		if err != nil {
-			return nil, err
-		}
-	} else {
-		if res.StatusCode != 204 {
-			errBody, err := ioutil.ReadAll(res.Body)
-			if err != nil {
-				logrus.Debugf("Error reading response body: %s", err)
-			}
-			return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to push checksums %s: %q", res.StatusCode, reference.Path(remote), errBody), res)
-		}
-	}
-
-	return &RepositoryData{
-		Endpoints: endpoints,
-	}, nil
-}
-
-func (r *Session) putImageRequest(u string, headers map[string][]string, body []byte) (*http.Response, error) {
-	req, err := http.NewRequest("PUT", u, 
bytes.NewReader(body)) - if err != nil { - return nil, err - } - req.ContentLength = int64(len(body)) - for k, v := range headers { - req.Header[k] = v - } - response, err := r.client.Do(req) - if err != nil { - return nil, err - } - return response, nil -} - -func shouldRedirect(response *http.Response) bool { - return response.StatusCode >= 300 && response.StatusCode < 400 -} - -// SearchRepositories performs a search against the remote repository -func (r *Session) SearchRepositories(term string, limit int) (*registrytypes.SearchResults, error) { - if limit < 1 || limit > 100 { - return nil, fmt.Errorf("Limit %d is outside the range of [1, 100]", limit) - } - logrus.Debugf("Index server: %s", r.indexEndpoint) - u := r.indexEndpoint.String() + "search?q=" + url.QueryEscape(term) + "&n=" + url.QueryEscape(fmt.Sprintf("%d", limit)) - - req, err := http.NewRequest("GET", u, nil) - if err != nil { - return nil, fmt.Errorf("Error while getting from the server: %v", err) - } - // Have the AuthTransport send authentication, when logged in. - req.Header.Set("X-Docker-Token", "true") - res, err := r.client.Do(req) - if err != nil { - return nil, err - } - defer res.Body.Close() - if res.StatusCode != 200 { - return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Unexpected status code %d", res.StatusCode), res) - } - result := new(registrytypes.SearchResults) - return result, json.NewDecoder(res.Body).Decode(result) -} - -// GetAuthConfig returns the authentication settings for a session -// TODO(tiborvass): remove this once registry client v2 is vendored -func (r *Session) GetAuthConfig(withPasswd bool) *types.AuthConfig { - password := "" - if withPasswd { - password = r.authConfig.Password - } - return &types.AuthConfig{ - Username: r.authConfig.Username, - Password: password, - } -} - -func isTimeout(err error) bool { - type timeout interface { - Timeout() bool - } - e := err - switch urlErr := err.(type) { - case *url.Error: - e = urlErr.Err - } - t, ok := e.(timeout) - return ok && t.Timeout() -} diff --git a/vendor/github.com/docker/docker/registry/types.go b/vendor/github.com/docker/docker/registry/types.go deleted file mode 100644 index 0c3cbd691..000000000 --- a/vendor/github.com/docker/docker/registry/types.go +++ /dev/null @@ -1,70 +0,0 @@ -package registry - -import ( - "github.com/docker/distribution/reference" - registrytypes "github.com/docker/docker/api/types/registry" -) - -// RepositoryData tracks the image list, list of endpoints for a repository -type RepositoryData struct { - // ImgList is a list of images in the repository - ImgList map[string]*ImgData - // Endpoints is a list of endpoints returned in X-Docker-Endpoints - Endpoints []string -} - -// ImgData is used to transfer image checksums to and from the registry -type ImgData struct { - // ID is an opaque string that identifies the image - ID string `json:"id"` - Checksum string `json:"checksum,omitempty"` - ChecksumPayload string `json:"-"` - Tag string `json:",omitempty"` -} - -// PingResult contains the information returned when pinging a registry. It -// indicates the registry's version and whether the registry claims to be a -// standalone registry. 
-type PingResult struct { - // Version is the registry version supplied by the registry in an HTTP - // header - Version string `json:"version"` - // Standalone is set to true if the registry indicates it is a - // standalone registry in the X-Docker-Registry-Standalone - // header - Standalone bool `json:"standalone"` -} - -// APIVersion is an integral representation of an API version (presently -// either 1 or 2) -type APIVersion int - -func (av APIVersion) String() string { - return apiVersions[av] -} - -// API Version identifiers. -const ( - _ = iota - APIVersion1 APIVersion = iota - APIVersion2 -) - -var apiVersions = map[APIVersion]string{ - APIVersion1: "v1", - APIVersion2: "v2", -} - -// RepositoryInfo describes a repository -type RepositoryInfo struct { - Name reference.Named - // Index points to registry information - Index *registrytypes.IndexInfo - // Official indicates whether the repository is considered official. - // If the registry is official, and the normalized name does not - // contain a '/' (e.g. "foo"), then it is considered an official repo. - Official bool - // Class represents the class of the repository, such as "plugin" - // or "image". - Class string -} diff --git a/vendor/github.com/docker/notary/passphrase/passphrase.go b/vendor/github.com/docker/notary/passphrase/passphrase.go deleted file mode 100644 index 23c7e6a0b..000000000 --- a/vendor/github.com/docker/notary/passphrase/passphrase.go +++ /dev/null @@ -1,205 +0,0 @@ -// Package passphrase is a utility function for managing passphrase -// for TUF and Notary keys. -package passphrase - -import ( - "bufio" - "errors" - "fmt" - "io" - "os" - "path/filepath" - "strings" - - "github.com/docker/docker/pkg/term" - "github.com/docker/notary" -) - -const ( - idBytesToDisplay = 7 - tufRootAlias = "root" - tufTargetsAlias = "targets" - tufSnapshotAlias = "snapshot" - tufRootKeyGenerationWarning = `You are about to create a new root signing key passphrase. This passphrase -will be used to protect the most sensitive key in your signing system. Please -choose a long, complex passphrase and be careful to keep the password and the -key file itself secure and backed up. It is highly recommended that you use a -password manager to generate the passphrase and keep it safe. There will be no -way to recover this key. You can find the key in your config directory.` -) - -var ( - // ErrTooShort is returned if the passphrase entered for a new key is - // below the minimum length - ErrTooShort = errors.New("Passphrase too short") - - // ErrDontMatch is returned if the two entered passphrases don't match. - // new key is below the minimum length - ErrDontMatch = errors.New("The entered passphrases do not match") - - // ErrTooManyAttempts is returned if the maximum number of passphrase - // entry attempts is reached. - ErrTooManyAttempts = errors.New("Too many attempts") - - // ErrNoInput is returned if we do not have a valid input method for passphrases - ErrNoInput = errors.New("Please either use environment variables or STDIN with a terminal to provide key passphrases") -) - -// PromptRetriever returns a new Retriever which will provide a prompt on stdin -// and stdout to retrieve a passphrase. stdin will be checked if it is a terminal, -// else the PromptRetriever will error when attempting to retrieve a passphrase. -// Upon successful passphrase retrievals, the passphrase will be cached such that -// subsequent prompts will produce the same passphrase. 
-func PromptRetriever() notary.PassRetriever { - if !term.IsTerminal(os.Stdin.Fd()) { - return func(string, string, bool, int) (string, bool, error) { - return "", false, ErrNoInput - } - } - return PromptRetrieverWithInOut(os.Stdin, os.Stdout, nil) -} - -type boundRetriever struct { - in io.Reader - out io.Writer - aliasMap map[string]string - passphraseCache map[string]string -} - -func (br *boundRetriever) getPassphrase(keyName, alias string, createNew bool, numAttempts int) (string, bool, error) { - if numAttempts == 0 { - if alias == tufRootAlias && createNew { - fmt.Fprintln(br.out, tufRootKeyGenerationWarning) - } - - if pass, ok := br.passphraseCache[alias]; ok { - return pass, false, nil - } - } else if !createNew { // per `if`, numAttempts > 0 if we're at this `else` - if numAttempts > 3 { - return "", true, ErrTooManyAttempts - } - fmt.Fprintln(br.out, "Passphrase incorrect. Please retry.") - } - - // passphrase not cached and we're not aborting, get passphrase from user! - return br.requestPassphrase(keyName, alias, createNew, numAttempts) -} - -func (br *boundRetriever) requestPassphrase(keyName, alias string, createNew bool, numAttempts int) (string, bool, error) { - // Figure out if we should display a different string for this alias - displayAlias := alias - if val, ok := br.aliasMap[alias]; ok { - displayAlias = val - } - - // If typing on the terminal, we do not want the terminal to echo the - // password that is typed (so it doesn't display) - if term.IsTerminal(os.Stdin.Fd()) { - state, err := term.SaveState(os.Stdin.Fd()) - if err != nil { - return "", false, err - } - term.DisableEcho(os.Stdin.Fd(), state) - defer term.RestoreTerminal(os.Stdin.Fd(), state) - } - - indexOfLastSeparator := strings.LastIndex(keyName, string(filepath.Separator)) - if indexOfLastSeparator == -1 { - indexOfLastSeparator = 0 - } - - var shortName string - if len(keyName) > indexOfLastSeparator+idBytesToDisplay { - if indexOfLastSeparator > 0 { - keyNamePrefix := keyName[:indexOfLastSeparator] - keyNameID := keyName[indexOfLastSeparator+1 : indexOfLastSeparator+idBytesToDisplay+1] - shortName = keyNameID + " (" + keyNamePrefix + ")" - } else { - shortName = keyName[indexOfLastSeparator : indexOfLastSeparator+idBytesToDisplay] - } - } - - withID := fmt.Sprintf(" with ID %s", shortName) - if shortName == "" { - withID = "" - } - - switch { - case createNew: - fmt.Fprintf(br.out, "Enter passphrase for new %s key%s: ", displayAlias, withID) - case displayAlias == "yubikey": - fmt.Fprintf(br.out, "Enter the %s for the attached Yubikey: ", keyName) - default: - fmt.Fprintf(br.out, "Enter passphrase for %s key%s: ", displayAlias, withID) - } - - stdin := bufio.NewReader(br.in) - passphrase, err := stdin.ReadBytes('\n') - fmt.Fprintln(br.out) - if err != nil { - return "", false, err - } - - retPass := strings.TrimSpace(string(passphrase)) - - if createNew { - err = br.verifyAndConfirmPassword(stdin, retPass, displayAlias, withID) - if err != nil { - return "", false, err - } - } - - br.cachePassword(alias, retPass) - - return retPass, false, nil -} - -func (br *boundRetriever) verifyAndConfirmPassword(stdin *bufio.Reader, retPass, displayAlias, withID string) error { - if len(retPass) < 8 { - fmt.Fprintln(br.out, "Passphrase is too short. 
Please use a password manager to generate and store a good random passphrase.") - return ErrTooShort - } - - fmt.Fprintf(br.out, "Repeat passphrase for new %s key%s: ", displayAlias, withID) - confirmation, err := stdin.ReadBytes('\n') - fmt.Fprintln(br.out) - if err != nil { - return err - } - confirmationStr := strings.TrimSpace(string(confirmation)) - - if retPass != confirmationStr { - fmt.Fprintln(br.out, "Passphrases do not match. Please retry.") - return ErrDontMatch - } - return nil -} - -func (br *boundRetriever) cachePassword(alias, retPass string) { - br.passphraseCache[alias] = retPass -} - -// PromptRetrieverWithInOut returns a new Retriever which will provide a -// prompt using the given in and out readers. The passphrase will be cached -// such that subsequent prompts will produce the same passphrase. -// aliasMap can be used to specify display names for TUF key aliases. If aliasMap -// is nil, a sensible default will be used. -func PromptRetrieverWithInOut(in io.Reader, out io.Writer, aliasMap map[string]string) notary.PassRetriever { - bound := &boundRetriever{ - in: in, - out: out, - aliasMap: aliasMap, - passphraseCache: make(map[string]string), - } - - return bound.getPassphrase -} - -// ConstantRetriever returns a new Retriever which will return a constant string -// as a passphrase. -func ConstantRetriever(constantPassphrase string) notary.PassRetriever { - return func(k, a string, c bool, n int) (string, bool, error) { - return constantPassphrase, false, nil - } -} diff --git a/vendor/github.com/inconshreveable/mousetrap/LICENSE b/vendor/github.com/inconshreveable/mousetrap/LICENSE deleted file mode 100644 index 5f0d1fb6a..000000000 --- a/vendor/github.com/inconshreveable/mousetrap/LICENSE +++ /dev/null @@ -1,13 +0,0 @@ -Copyright 2014 Alan Shreve - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/vendor/github.com/inconshreveable/mousetrap/README.md b/vendor/github.com/inconshreveable/mousetrap/README.md deleted file mode 100644 index 7a950d177..000000000 --- a/vendor/github.com/inconshreveable/mousetrap/README.md +++ /dev/null @@ -1,23 +0,0 @@ -# mousetrap - -mousetrap is a tiny library that answers a single question. - -On a Windows machine, was the process invoked by someone double clicking on -the executable file while browsing in explorer? - -### Motivation - -Windows developers unfamiliar with command line tools will often "double-click" -the executable for a tool. Because most CLI tools print the help and then exit -when invoked without arguments, this is often very frustrating for those users. - -mousetrap provides a way to detect these invocations so that you can provide -more helpful behavior and instructions on how to run the CLI tool. 
To see what -this looks like, both from an organizational and a technical perspective, see -https://inconshreveable.com/09-09-2014/sweat-the-small-stuff/ - -### The interface - -The library exposes a single interface: - - func StartedByExplorer() (bool) diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_others.go b/vendor/github.com/inconshreveable/mousetrap/trap_others.go deleted file mode 100644 index 9d2d8a4ba..000000000 --- a/vendor/github.com/inconshreveable/mousetrap/trap_others.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build !windows - -package mousetrap - -// StartedByExplorer returns true if the program was invoked by the user -// double-clicking on the executable from explorer.exe -// -// It is conservative and returns false if any of the internal calls fail. -// It does not guarantee that the program was run from a terminal. It only can tell you -// whether it was launched from explorer.exe -// -// On non-Windows platforms, it always returns false. -func StartedByExplorer() bool { - return false -} diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_windows.go b/vendor/github.com/inconshreveable/mousetrap/trap_windows.go deleted file mode 100644 index 336142a5e..000000000 --- a/vendor/github.com/inconshreveable/mousetrap/trap_windows.go +++ /dev/null @@ -1,98 +0,0 @@ -// +build windows -// +build !go1.4 - -package mousetrap - -import ( - "fmt" - "os" - "syscall" - "unsafe" -) - -const ( - // defined by the Win32 API - th32cs_snapprocess uintptr = 0x2 -) - -var ( - kernel = syscall.MustLoadDLL("kernel32.dll") - CreateToolhelp32Snapshot = kernel.MustFindProc("CreateToolhelp32Snapshot") - Process32First = kernel.MustFindProc("Process32FirstW") - Process32Next = kernel.MustFindProc("Process32NextW") -) - -// ProcessEntry32 structure defined by the Win32 API -type processEntry32 struct { - dwSize uint32 - cntUsage uint32 - th32ProcessID uint32 - th32DefaultHeapID int - th32ModuleID uint32 - cntThreads uint32 - th32ParentProcessID uint32 - pcPriClassBase int32 - dwFlags uint32 - szExeFile [syscall.MAX_PATH]uint16 -} - -func getProcessEntry(pid int) (pe *processEntry32, err error) { - snapshot, _, e1 := CreateToolhelp32Snapshot.Call(th32cs_snapprocess, uintptr(0)) - if snapshot == uintptr(syscall.InvalidHandle) { - err = fmt.Errorf("CreateToolhelp32Snapshot: %v", e1) - return - } - defer syscall.CloseHandle(syscall.Handle(snapshot)) - - var processEntry processEntry32 - processEntry.dwSize = uint32(unsafe.Sizeof(processEntry)) - ok, _, e1 := Process32First.Call(snapshot, uintptr(unsafe.Pointer(&processEntry))) - if ok == 0 { - err = fmt.Errorf("Process32First: %v", e1) - return - } - - for { - if processEntry.th32ProcessID == uint32(pid) { - pe = &processEntry - return - } - - ok, _, e1 = Process32Next.Call(snapshot, uintptr(unsafe.Pointer(&processEntry))) - if ok == 0 { - err = fmt.Errorf("Process32Next: %v", e1) - return - } - } -} - -func getppid() (pid int, err error) { - pe, err := getProcessEntry(os.Getpid()) - if err != nil { - return - } - - pid = int(pe.th32ParentProcessID) - return -} - -// StartedByExplorer returns true if the program was invoked by the user double-clicking -// on the executable from explorer.exe -// -// It is conservative and returns false if any of the internal calls fail. -// It does not guarantee that the program was run from a terminal. 
It only can tell you -// whether it was launched from explorer.exe -func StartedByExplorer() bool { - ppid, err := getppid() - if err != nil { - return false - } - - pe, err := getProcessEntry(ppid) - if err != nil { - return false - } - - name := syscall.UTF16ToString(pe.szExeFile[:]) - return name == "explorer.exe" -} diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go b/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go deleted file mode 100644 index 9a28e57c3..000000000 --- a/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go +++ /dev/null @@ -1,46 +0,0 @@ -// +build windows -// +build go1.4 - -package mousetrap - -import ( - "os" - "syscall" - "unsafe" -) - -func getProcessEntry(pid int) (*syscall.ProcessEntry32, error) { - snapshot, err := syscall.CreateToolhelp32Snapshot(syscall.TH32CS_SNAPPROCESS, 0) - if err != nil { - return nil, err - } - defer syscall.CloseHandle(snapshot) - var procEntry syscall.ProcessEntry32 - procEntry.Size = uint32(unsafe.Sizeof(procEntry)) - if err = syscall.Process32First(snapshot, &procEntry); err != nil { - return nil, err - } - for { - if procEntry.ProcessID == uint32(pid) { - return &procEntry, nil - } - err = syscall.Process32Next(snapshot, &procEntry) - if err != nil { - return nil, err - } - } -} - -// StartedByExplorer returns true if the program was invoked by the user double-clicking -// on the executable from explorer.exe -// -// It is conservative and returns false if any of the internal calls fail. -// It does not guarantee that the program was run from a terminal. It only can tell you -// whether it was launched from explorer.exe -func StartedByExplorer() bool { - pe, err := getProcessEntry(os.Getppid()) - if err != nil { - return false - } - return "explorer.exe" == syscall.UTF16ToString(pe.ExeFile[:]) -} diff --git a/vendor/github.com/mattn/go-shellwords/LICENSE b/vendor/github.com/mattn/go-shellwords/LICENSE deleted file mode 100644 index 740fa9313..000000000 --- a/vendor/github.com/mattn/go-shellwords/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2017 Yasuhiro Matsumoto - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
diff --git a/vendor/github.com/mattn/go-shellwords/README.md b/vendor/github.com/mattn/go-shellwords/README.md deleted file mode 100644 index b1d235c78..000000000 --- a/vendor/github.com/mattn/go-shellwords/README.md +++ /dev/null @@ -1,47 +0,0 @@ -# go-shellwords - -[![Coverage Status](https://coveralls.io/repos/mattn/go-shellwords/badge.png?branch=master)](https://coveralls.io/r/mattn/go-shellwords?branch=master) -[![Build Status](https://travis-ci.org/mattn/go-shellwords.svg?branch=master)](https://travis-ci.org/mattn/go-shellwords) - -Parse line as shell words. - -## Usage - -```go -args, err := shellwords.Parse("./foo --bar=baz") -// args should be ["./foo", "--bar=baz"] -``` - -```go -os.Setenv("FOO", "bar") -p := shellwords.NewParser() -p.ParseEnv = true -args, err := p.Parse("./foo $FOO") -// args should be ["./foo", "bar"] -``` - -```go -p := shellwords.NewParser() -p.ParseBacktick = true -args, err := p.Parse("./foo `echo $SHELL`") -// args should be ["./foo", "/bin/bash"] -``` - -```go -shellwords.ParseBacktick = true -p := shellwords.NewParser() -args, err := p.Parse("./foo `echo $SHELL`") -// args should be ["./foo", "/bin/bash"] -``` - -# Thanks - -This is based on cpan module [Parse::CommandLine](https://metacpan.org/pod/Parse::CommandLine). - -# License - -under the MIT License: http://mattn.mit-license.org/2017 - -# Author - -Yasuhiro Matsumoto (a.k.a mattn) diff --git a/vendor/github.com/mattn/go-shellwords/shellwords.go b/vendor/github.com/mattn/go-shellwords/shellwords.go deleted file mode 100644 index 107803927..000000000 --- a/vendor/github.com/mattn/go-shellwords/shellwords.go +++ /dev/null @@ -1,145 +0,0 @@ -package shellwords - -import ( - "errors" - "os" - "regexp" -) - -var ( - ParseEnv bool = false - ParseBacktick bool = false -) - -var envRe = regexp.MustCompile(`\$({[a-zA-Z0-9_]+}|[a-zA-Z0-9_]+)`) - -func isSpace(r rune) bool { - switch r { - case ' ', '\t', '\r', '\n': - return true - } - return false -} - -func replaceEnv(s string) string { - return envRe.ReplaceAllStringFunc(s, func(s string) string { - s = s[1:] - if s[0] == '{' { - s = s[1 : len(s)-1] - } - return os.Getenv(s) - }) -} - -type Parser struct { - ParseEnv bool - ParseBacktick bool - Position int -} - -func NewParser() *Parser { - return &Parser{ParseEnv, ParseBacktick, 0} -} - -func (p *Parser) Parse(line string) ([]string, error) { - args := []string{} - buf := "" - var escaped, doubleQuoted, singleQuoted, backQuote bool - backtick := "" - - pos := -1 - got := false - -loop: - for i, r := range line { - if escaped { - buf += string(r) - escaped = false - continue - } - - if r == '\\' { - if singleQuoted { - buf += string(r) - } else { - escaped = true - } - continue - } - - if isSpace(r) { - if singleQuoted || doubleQuoted || backQuote { - buf += string(r) - backtick += string(r) - } else if got { - if p.ParseEnv { - buf = replaceEnv(buf) - } - args = append(args, buf) - buf = "" - got = false - } - continue - } - - switch r { - case '`': - if !singleQuoted && !doubleQuoted { - if p.ParseBacktick { - if backQuote { - out, err := shellRun(backtick) - if err != nil { - return nil, err - } - buf = out - } - backtick = "" - backQuote = !backQuote - continue - } - backtick = "" - backQuote = !backQuote - } - case '"': - if !singleQuoted { - doubleQuoted = !doubleQuoted - continue - } - case '\'': - if !doubleQuoted { - singleQuoted = !singleQuoted - continue - } - case ';', '&', '|', '<', '>': - if !(escaped || singleQuoted || doubleQuoted || backQuote) { - pos = i - break loop - } - } - - got = 
true - buf += string(r) - if backQuote { - backtick += string(r) - } - } - - if got { - if p.ParseEnv { - buf = replaceEnv(buf) - } - args = append(args, buf) - } - - if escaped || singleQuoted || doubleQuoted || backQuote { - return nil, errors.New("invalid command line string") - } - - p.Position = pos - - return args, nil -} - -func Parse(line string) ([]string, error) { - return NewParser().Parse(line) -} diff --git a/vendor/github.com/mattn/go-shellwords/util_posix.go b/vendor/github.com/mattn/go-shellwords/util_posix.go deleted file mode 100644 index 4f8ac55e4..000000000 --- a/vendor/github.com/mattn/go-shellwords/util_posix.go +++ /dev/null @@ -1,19 +0,0 @@ -// +build !windows - -package shellwords - -import ( - "errors" - "os" - "os/exec" - "strings" -) - -func shellRun(line string) (string, error) { - shell := os.Getenv("SHELL") - b, err := exec.Command(shell, "-c", line).Output() - if err != nil { - return "", errors.New(err.Error() + ":" + string(b)) - } - return strings.TrimSpace(string(b)), nil -} diff --git a/vendor/github.com/mattn/go-shellwords/util_windows.go b/vendor/github.com/mattn/go-shellwords/util_windows.go deleted file mode 100644 index 7cad4cf06..000000000 --- a/vendor/github.com/mattn/go-shellwords/util_windows.go +++ /dev/null @@ -1,17 +0,0 @@ -package shellwords - -import ( - "errors" - "os" - "os/exec" - "strings" -) - -func shellRun(line string) (string, error) { - shell := os.Getenv("COMSPEC") - b, err := exec.Command(shell, "/c", line).Output() - if err != nil { - return "", errors.New(err.Error() + ":" + string(b)) - } - return strings.TrimSpace(string(b)), nil -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/lookup.go b/vendor/github.com/opencontainers/runc/libcontainer/user/lookup.go deleted file mode 100644 index bf491c89c..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/user/lookup.go +++ /dev/null @@ -1,111 +0,0 @@ -package user - -import ( - "errors" - - "golang.org/x/sys/unix" -) - -var ( - // The current operating system does not provide the required data for user lookups. - ErrUnsupported = errors.New("user lookup: operating system does not provide passwd-formatted data") - // No matching entries found in file. - ErrNoPasswdEntries = errors.New("no matching entries in passwd file") - ErrNoGroupEntries = errors.New("no matching entries in group file") -) - -func lookupUser(filter func(u User) bool) (User, error) { - // Get operating system-specific passwd reader-closer. - passwd, err := GetPasswd() - if err != nil { - return User{}, err - } - defer passwd.Close() - - // Get the users. - users, err := ParsePasswdFilter(passwd, filter) - if err != nil { - return User{}, err - } - - // No user entries found. - if len(users) == 0 { - return User{}, ErrNoPasswdEntries - } - - // Assume the first entry is the "correct" one. - return users[0], nil -} - -// CurrentUser looks up the current user by their user id in /etc/passwd. If the -// user cannot be found (or there is no /etc/passwd file on the filesystem), -// then CurrentUser returns an error. -func CurrentUser() (User, error) { - return LookupUid(unix.Getuid()) -} - -// LookupUser looks up a user by their username in /etc/passwd. If the user -// cannot be found (or there is no /etc/passwd file on the filesystem), then -// LookupUser returns an error. -func LookupUser(username string) (User, error) { - return lookupUser(func(u User) bool { - return u.Name == username - }) -} - -// LookupUid looks up a user by their user id in /etc/passwd. 
If the user cannot -// be found (or there is no /etc/passwd file on the filesystem), then LookupId -// returns an error. -func LookupUid(uid int) (User, error) { - return lookupUser(func(u User) bool { - return u.Uid == uid - }) -} - -func lookupGroup(filter func(g Group) bool) (Group, error) { - // Get operating system-specific group reader-closer. - group, err := GetGroup() - if err != nil { - return Group{}, err - } - defer group.Close() - - // Get the users. - groups, err := ParseGroupFilter(group, filter) - if err != nil { - return Group{}, err - } - - // No user entries found. - if len(groups) == 0 { - return Group{}, ErrNoGroupEntries - } - - // Assume the first entry is the "correct" one. - return groups[0], nil -} - -// CurrentGroup looks up the current user's group by their primary group id's -// entry in /etc/passwd. If the group cannot be found (or there is no -// /etc/group file on the filesystem), then CurrentGroup returns an error. -func CurrentGroup() (Group, error) { - return LookupGid(unix.Getgid()) -} - -// LookupGroup looks up a group by its name in /etc/group. If the group cannot -// be found (or there is no /etc/group file on the filesystem), then LookupGroup -// returns an error. -func LookupGroup(groupname string) (Group, error) { - return lookupGroup(func(g Group) bool { - return g.Name == groupname - }) -} - -// LookupGid looks up a group by its group id in /etc/group. If the group cannot -// be found (or there is no /etc/group file on the filesystem), then LookupGid -// returns an error. -func LookupGid(gid int) (Group, error) { - return lookupGroup(func(g Group) bool { - return g.Gid == gid - }) -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go b/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go deleted file mode 100644 index 758b734c2..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go +++ /dev/null @@ -1,30 +0,0 @@ -// +build darwin dragonfly freebsd linux netbsd openbsd solaris - -package user - -import ( - "io" - "os" -) - -// Unix-specific path to the passwd and group formatted files. 
-const ( - unixPasswdPath = "/etc/passwd" - unixGroupPath = "/etc/group" -) - -func GetPasswdPath() (string, error) { - return unixPasswdPath, nil -} - -func GetPasswd() (io.ReadCloser, error) { - return os.Open(unixPasswdPath) -} - -func GetGroupPath() (string, error) { - return unixGroupPath, nil -} - -func GetGroup() (io.ReadCloser, error) { - return os.Open(unixGroupPath) -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unsupported.go deleted file mode 100644 index 721794887..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unsupported.go +++ /dev/null @@ -1,21 +0,0 @@ -// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris - -package user - -import "io" - -func GetPasswdPath() (string, error) { - return "", ErrUnsupported -} - -func GetPasswd() (io.ReadCloser, error) { - return nil, ErrUnsupported -} - -func GetGroupPath() (string, error) { - return "", ErrUnsupported -} - -func GetGroup() (io.ReadCloser, error) { - return nil, ErrUnsupported -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/user.go b/vendor/github.com/opencontainers/runc/libcontainer/user/user.go deleted file mode 100644 index 8962cab33..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/user/user.go +++ /dev/null @@ -1,441 +0,0 @@ -package user - -import ( - "bufio" - "fmt" - "io" - "os" - "strconv" - "strings" -) - -const ( - minId = 0 - maxId = 1<<31 - 1 //for 32-bit systems compatibility -) - -var ( - ErrRange = fmt.Errorf("uids and gids must be in range %d-%d", minId, maxId) -) - -type User struct { - Name string - Pass string - Uid int - Gid int - Gecos string - Home string - Shell string -} - -type Group struct { - Name string - Pass string - Gid int - List []string -} - -func parseLine(line string, v ...interface{}) { - if line == "" { - return - } - - parts := strings.Split(line, ":") - for i, p := range parts { - // Ignore cases where we don't have enough fields to populate the arguments. - // Some configuration files like to misbehave. - if len(v) <= i { - break - } - - // Use the type of the argument to figure out how to parse it, scanf() style. - // This is legit. - switch e := v[i].(type) { - case *string: - *e = p - case *int: - // "numbers", with conversion errors ignored because of some misbehaving configuration files. - *e, _ = strconv.Atoi(p) - case *[]string: - // Comma-separated lists. - if p != "" { - *e = strings.Split(p, ",") - } else { - *e = []string{} - } - default: - // Someone goof'd when writing code using this function. Scream so they can hear us. - panic(fmt.Sprintf("parseLine only accepts {*string, *int, *[]string} as arguments! 
%#v is not a pointer!", e)) - } - } -} - -func ParsePasswdFile(path string) ([]User, error) { - passwd, err := os.Open(path) - if err != nil { - return nil, err - } - defer passwd.Close() - return ParsePasswd(passwd) -} - -func ParsePasswd(passwd io.Reader) ([]User, error) { - return ParsePasswdFilter(passwd, nil) -} - -func ParsePasswdFileFilter(path string, filter func(User) bool) ([]User, error) { - passwd, err := os.Open(path) - if err != nil { - return nil, err - } - defer passwd.Close() - return ParsePasswdFilter(passwd, filter) -} - -func ParsePasswdFilter(r io.Reader, filter func(User) bool) ([]User, error) { - if r == nil { - return nil, fmt.Errorf("nil source for passwd-formatted data") - } - - var ( - s = bufio.NewScanner(r) - out = []User{} - ) - - for s.Scan() { - if err := s.Err(); err != nil { - return nil, err - } - - line := strings.TrimSpace(s.Text()) - if line == "" { - continue - } - - // see: man 5 passwd - // name:password:UID:GID:GECOS:directory:shell - // Name:Pass:Uid:Gid:Gecos:Home:Shell - // root:x:0:0:root:/root:/bin/bash - // adm:x:3:4:adm:/var/adm:/bin/false - p := User{} - parseLine(line, &p.Name, &p.Pass, &p.Uid, &p.Gid, &p.Gecos, &p.Home, &p.Shell) - - if filter == nil || filter(p) { - out = append(out, p) - } - } - - return out, nil -} - -func ParseGroupFile(path string) ([]Group, error) { - group, err := os.Open(path) - if err != nil { - return nil, err - } - - defer group.Close() - return ParseGroup(group) -} - -func ParseGroup(group io.Reader) ([]Group, error) { - return ParseGroupFilter(group, nil) -} - -func ParseGroupFileFilter(path string, filter func(Group) bool) ([]Group, error) { - group, err := os.Open(path) - if err != nil { - return nil, err - } - defer group.Close() - return ParseGroupFilter(group, filter) -} - -func ParseGroupFilter(r io.Reader, filter func(Group) bool) ([]Group, error) { - if r == nil { - return nil, fmt.Errorf("nil source for group-formatted data") - } - - var ( - s = bufio.NewScanner(r) - out = []Group{} - ) - - for s.Scan() { - if err := s.Err(); err != nil { - return nil, err - } - - text := s.Text() - if text == "" { - continue - } - - // see: man 5 group - // group_name:password:GID:user_list - // Name:Pass:Gid:List - // root:x:0:root - // adm:x:4:root,adm,daemon - p := Group{} - parseLine(text, &p.Name, &p.Pass, &p.Gid, &p.List) - - if filter == nil || filter(p) { - out = append(out, p) - } - } - - return out, nil -} - -type ExecUser struct { - Uid int - Gid int - Sgids []int - Home string -} - -// GetExecUserPath is a wrapper for GetExecUser. It reads data from each of the -// given file paths and uses that data as the arguments to GetExecUser. If the -// files cannot be opened for any reason, the error is ignored and a nil -// io.Reader is passed instead. -func GetExecUserPath(userSpec string, defaults *ExecUser, passwdPath, groupPath string) (*ExecUser, error) { - var passwd, group io.Reader - - if passwdFile, err := os.Open(passwdPath); err == nil { - passwd = passwdFile - defer passwdFile.Close() - } - - if groupFile, err := os.Open(groupPath); err == nil { - group = groupFile - defer groupFile.Close() - } - - return GetExecUser(userSpec, defaults, passwd, group) -} - -// GetExecUser parses a user specification string (using the passwd and group -// readers as sources for /etc/passwd and /etc/group data, respectively). In -// the case of blank fields or missing data from the sources, the values in -// defaults is used. 
-// -// GetExecUser will return an error if a user or group literal could not be -// found in any entry in passwd and group respectively. -// -// Examples of valid user specifications are: -// * "" -// * "user" -// * "uid" -// * "user:group" -// * "uid:gid -// * "user:gid" -// * "uid:group" -// -// It should be noted that if you specify a numeric user or group id, they will -// not be evaluated as usernames (only the metadata will be filled). So attempting -// to parse a user with user.Name = "1337" will produce the user with a UID of -// 1337. -func GetExecUser(userSpec string, defaults *ExecUser, passwd, group io.Reader) (*ExecUser, error) { - if defaults == nil { - defaults = new(ExecUser) - } - - // Copy over defaults. - user := &ExecUser{ - Uid: defaults.Uid, - Gid: defaults.Gid, - Sgids: defaults.Sgids, - Home: defaults.Home, - } - - // Sgids slice *cannot* be nil. - if user.Sgids == nil { - user.Sgids = []int{} - } - - // Allow for userArg to have either "user" syntax, or optionally "user:group" syntax - var userArg, groupArg string - parseLine(userSpec, &userArg, &groupArg) - - // Convert userArg and groupArg to be numeric, so we don't have to execute - // Atoi *twice* for each iteration over lines. - uidArg, uidErr := strconv.Atoi(userArg) - gidArg, gidErr := strconv.Atoi(groupArg) - - // Find the matching user. - users, err := ParsePasswdFilter(passwd, func(u User) bool { - if userArg == "" { - // Default to current state of the user. - return u.Uid == user.Uid - } - - if uidErr == nil { - // If the userArg is numeric, always treat it as a UID. - return uidArg == u.Uid - } - - return u.Name == userArg - }) - - // If we can't find the user, we have to bail. - if err != nil && passwd != nil { - if userArg == "" { - userArg = strconv.Itoa(user.Uid) - } - return nil, fmt.Errorf("unable to find user %s: %v", userArg, err) - } - - var matchedUserName string - if len(users) > 0 { - // First match wins, even if there's more than one matching entry. - matchedUserName = users[0].Name - user.Uid = users[0].Uid - user.Gid = users[0].Gid - user.Home = users[0].Home - } else if userArg != "" { - // If we can't find a user with the given username, the only other valid - // option is if it's a numeric username with no associated entry in passwd. - - if uidErr != nil { - // Not numeric. - return nil, fmt.Errorf("unable to find user %s: %v", userArg, ErrNoPasswdEntries) - } - user.Uid = uidArg - - // Must be inside valid uid range. - if user.Uid < minId || user.Uid > maxId { - return nil, ErrRange - } - - // Okay, so it's numeric. We can just roll with this. - } - - // On to the groups. If we matched a username, we need to do this because of - // the supplementary group IDs. - if groupArg != "" || matchedUserName != "" { - groups, err := ParseGroupFilter(group, func(g Group) bool { - // If the group argument isn't explicit, we'll just search for it. - if groupArg == "" { - // Check if user is a member of this group. - for _, u := range g.List { - if u == matchedUserName { - return true - } - } - return false - } - - if gidErr == nil { - // If the groupArg is numeric, always treat it as a GID. - return gidArg == g.Gid - } - - return g.Name == groupArg - }) - if err != nil && group != nil { - return nil, fmt.Errorf("unable to find groups for spec %v: %v", matchedUserName, err) - } - - // Only start modifying user.Gid if it is in explicit form. - if groupArg != "" { - if len(groups) > 0 { - // First match wins, even if there's more than one matching entry. 
- user.Gid = groups[0].Gid - } else { - // If we can't find a group with the given name, the only other valid - // option is if it's a numeric group name with no associated entry in group. - - if gidErr != nil { - // Not numeric. - return nil, fmt.Errorf("unable to find group %s: %v", groupArg, ErrNoGroupEntries) - } - user.Gid = gidArg - - // Must be inside valid gid range. - if user.Gid < minId || user.Gid > maxId { - return nil, ErrRange - } - - // Okay, so it's numeric. We can just roll with this. - } - } else if len(groups) > 0 { - // Supplementary group ids only make sense if in the implicit form. - user.Sgids = make([]int, len(groups)) - for i, group := range groups { - user.Sgids[i] = group.Gid - } - } - } - - return user, nil -} - -// GetAdditionalGroups looks up a list of groups by name or group id -// against the given /etc/group formatted data. If a group name cannot -// be found, an error will be returned. If a group id cannot be found, -// or the given group data is nil, the id will be returned as-is -// provided it is in the legal range. -func GetAdditionalGroups(additionalGroups []string, group io.Reader) ([]int, error) { - var groups = []Group{} - if group != nil { - var err error - groups, err = ParseGroupFilter(group, func(g Group) bool { - for _, ag := range additionalGroups { - if g.Name == ag || strconv.Itoa(g.Gid) == ag { - return true - } - } - return false - }) - if err != nil { - return nil, fmt.Errorf("Unable to find additional groups %v: %v", additionalGroups, err) - } - } - - gidMap := make(map[int]struct{}) - for _, ag := range additionalGroups { - var found bool - for _, g := range groups { - // if we found a matched group either by name or gid, take the - // first matched as correct - if g.Name == ag || strconv.Itoa(g.Gid) == ag { - if _, ok := gidMap[g.Gid]; !ok { - gidMap[g.Gid] = struct{}{} - found = true - break - } - } - } - // we asked for a group but didn't find it. let's check to see - // if we wanted a numeric group - if !found { - gid, err := strconv.Atoi(ag) - if err != nil { - return nil, fmt.Errorf("Unable to find group %s", ag) - } - // Ensure gid is inside gid range. - if gid < minId || gid > maxId { - return nil, ErrRange - } - gidMap[gid] = struct{}{} - } - } - gids := []int{} - for gid := range gidMap { - gids = append(gids, gid) - } - return gids, nil -} - -// GetAdditionalGroupsPath is a wrapper around GetAdditionalGroups -// that opens the groupPath given and gives it as an argument to -// GetAdditionalGroups. -func GetAdditionalGroupsPath(additionalGroups []string, groupPath string) ([]int, error) { - var group io.Reader - - if groupFile, err := os.Open(groupPath); err == nil { - group = groupFile - defer groupFile.Close() - } - return GetAdditionalGroups(additionalGroups, group) -} diff --git a/vendor/github.com/spf13/cobra/LICENSE.txt b/vendor/github.com/spf13/cobra/LICENSE.txt deleted file mode 100644 index 298f0e266..000000000 --- a/vendor/github.com/spf13/cobra/LICENSE.txt +++ /dev/null @@ -1,174 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. diff --git a/vendor/github.com/spf13/cobra/README.md b/vendor/github.com/spf13/cobra/README.md deleted file mode 100644 index 6814a6d48..000000000 --- a/vendor/github.com/spf13/cobra/README.md +++ /dev/null @@ -1,928 +0,0 @@ -![cobra logo](https://cloud.githubusercontent.com/assets/173412/10886352/ad566232-814f-11e5-9cd0-aa101788c117.png) - -Cobra is both a library for creating powerful modern CLI applications as well as a program to generate applications and command files. 
- -Many of the most widely used Go projects are built using Cobra including: - -* [Kubernetes](http://kubernetes.io/) -* [Hugo](http://gohugo.io) -* [rkt](https://github.com/coreos/rkt) -* [etcd](https://github.com/coreos/etcd) -* [Docker (distribution)](https://github.com/docker/distribution) -* [OpenShift](https://www.openshift.com/) -* [Delve](https://github.com/derekparker/delve) -* [GopherJS](http://www.gopherjs.org/) -* [CockroachDB](http://www.cockroachlabs.com/) -* [Bleve](http://www.blevesearch.com/) -* [ProjectAtomic (enterprise)](http://www.projectatomic.io/) -* [Parse (CLI)](https://parse.com/) -* [GiantSwarm's swarm](https://github.com/giantswarm/cli) -* [Nanobox](https://github.com/nanobox-io/nanobox)/[Nanopack](https://github.com/nanopack) - - -[![Build Status](https://travis-ci.org/spf13/cobra.svg "Travis CI status")](https://travis-ci.org/spf13/cobra) -[![CircleCI status](https://circleci.com/gh/spf13/cobra.png?circle-token=:circle-token "CircleCI status")](https://circleci.com/gh/spf13/cobra) -[![GoDoc](https://godoc.org/github.com/spf13/cobra?status.svg)](https://godoc.org/github.com/spf13/cobra) - -![cobra](https://cloud.githubusercontent.com/assets/173412/10911369/84832a8e-8212-11e5-9f82-cc96660a4794.gif) - -# Overview - -Cobra is a library providing a simple interface to create powerful modern CLI -interfaces similar to git & go tools. - -Cobra is also an application that will generate your application scaffolding to rapidly -develop a Cobra-based application. - -Cobra provides: -* Easy subcommand-based CLIs: `app server`, `app fetch`, etc. -* Fully POSIX-compliant flags (including short & long versions) -* Nested subcommands -* Global, local and cascading flags -* Easy generation of applications & commands with `cobra create appname` & `cobra add cmdname` -* Intelligent suggestions (`app srver`... did you mean `app server`?) -* Automatic help generation for commands and flags -* Automatic detailed help for `app help [command]` -* Automatic help flag recognition of `-h`, `--help`, etc. -* Automatically generated bash autocomplete for your application -* Automatically generated man pages for your application -* Command aliases so you can change things without breaking them -* The flexibilty to define your own help, usage, etc. -* Optional tight integration with [viper](http://github.com/spf13/viper) for 12-factor apps - -Cobra has an exceptionally clean interface and simple design without needless -constructors or initialization methods. - -Applications built with Cobra commands are designed to be as user-friendly as -possible. Flags can be placed before or after the command (as long as a -confusing space isn’t provided). Both short and long flags can be used. A -command need not even be fully typed. Help is automatically generated and -available for the application or for a specific command using either the help -command or the `--help` flag. - -# Concepts - -Cobra is built on a structure of commands, arguments & flags. - -**Commands** represent actions, **Args** are things and **Flags** are modifiers for those actions. - -The best applications will read like sentences when used. Users will know how -to use the application because they will natively understand how to use it. - -The pattern to follow is -`APPNAME VERB NOUN --ADJECTIVE.` - or -`APPNAME COMMAND ARG --FLAG` - -A few good real world examples may better illustrate this point. 
- -In the following example, 'server' is a command, and 'port' is a flag: - - > hugo server --port=1313 - -In this command we are telling Git to clone the url bare. - - > git clone URL --bare - -## Commands - -Command is the central point of the application. Each interaction that -the application supports will be contained in a Command. A command can -have children commands and optionally run an action. - -In the example above, 'server' is the command. - -A Command has the following structure: - -```go -type Command struct { - Use string // The one-line usage message. - Short string // The short description shown in the 'help' output. - Long string // The long message shown in the 'help ' output. - Run func(cmd *Command, args []string) // Run runs the command. -} -``` - -## Flags - -A Flag is a way to modify the behavior of a command. Cobra supports -fully POSIX-compliant flags as well as the Go [flag package](https://golang.org/pkg/flag/). -A Cobra command can define flags that persist through to children commands -and flags that are only available to that command. - -In the example above, 'port' is the flag. - -Flag functionality is provided by the [pflag -library](https://github.com/ogier/pflag), a fork of the flag standard library -which maintains the same interface while adding POSIX compliance. - -## Usage - -Cobra works by creating a set of commands and then organizing them into a tree. -The tree defines the structure of the application. - -Once each command is defined with its corresponding flags, then the -tree is assigned to the commander which is finally executed. - -# Installing -Using Cobra is easy. First, use `go get` to install the latest version -of the library. This command will install the `cobra` generator executible -along with the library: - - > go get -v github.com/spf13/cobra/cobra - -Next, include Cobra in your application: - -```go -import "github.com/spf13/cobra" -``` - -# Getting Started - -While you are welcome to provide your own organization, typically a Cobra based -application will follow the following organizational structure. - -``` - â–¾ appName/ - â–¾ cmd/ - add.go - your.go - commands.go - here.go - main.go -``` - -In a Cobra app, typically the main.go file is very bare. It serves, one purpose, to initialize Cobra. - -```go -package main - -import "{pathToYourApp}/cmd" - -func main() { - if err := cmd.RootCmd.Execute(); err != nil { - fmt.Println(err) - os.Exit(-1) - } -} -``` - -## Using the Cobra Generator - -Cobra provides its own program that will create your application and add any -commands you want. It's the easiest way to incorporate Cobra into your application. - -### cobra init - -The `cobra init [yourApp]` command will create your initial application code -for you. It is a very powerful application that will populate your program with -the right structure so you can immediately enjoy all the benefits of Cobra. It -will also automatically apply the license you specify to your application. - -Cobra init is pretty smart. You can provide it a full path, or simply a path -similar to what is expected in the import. - -``` -cobra init github.com/spf13/newAppName -``` - -### cobra add - -Once an application is initialized Cobra can create additional commands for you. 
-Let's say you created an app and you wanted the following commands for it: - -* app serve -* app config -* app config create - -In your project directory (where your main.go file is) you would run the following: - -``` -cobra add serve -cobra add config -cobra add create -p 'configCmd' -``` - -Once you have run these three commands you would have an app structure that would look like: - -``` - â–¾ app/ - â–¾ cmd/ - serve.go - config.go - create.go - main.go -``` - -at this point you can run `go run main.go` and it would run your app. `go run -main.go serve`, `go run main.go config`, `go run main.go config create` along -with `go run main.go help serve`, etc would all work. - -Obviously you haven't added your own code to these yet, the commands are ready -for you to give them their tasks. Have fun. - -### Configuring the cobra generator - -The cobra generator will be easier to use if you provide a simple configuration -file which will help you eliminate providing a bunch of repeated information in -flags over and over. - -An example ~/.cobra.yaml file: - -```yaml -author: Steve Francia -license: MIT -``` - -You can specify no license by setting `license` to `none` or you can specify -a custom license: - -```yaml -license: - header: This file is part of {{ .appName }}. - text: | - {{ .copyright }} - - This is my license. There are many like it, but this one is mine. - My license is my best friend. It is my life. I must master it as I must - master my life. -``` - -## Manually implementing Cobra - -To manually implement cobra you need to create a bare main.go file and a RootCmd file. -You will optionally provide additional commands as you see fit. - -### Create the root command - -The root command represents your binary itself. - - -#### Manually create rootCmd - -Cobra doesn't require any special constructors. Simply create your commands. - -Ideally you place this in app/cmd/root.go: - -```go -var RootCmd = &cobra.Command{ - Use: "hugo", - Short: "Hugo is a very fast static site generator", - Long: `A Fast and Flexible Static Site Generator built with - love by spf13 and friends in Go. - Complete documentation is available at http://hugo.spf13.com`, - Run: func(cmd *cobra.Command, args []string) { - // Do Stuff Here - }, -} -``` - -You will additionally define flags and handle configuration in your init() function. - -for example cmd/root.go: - -```go -func init() { - cobra.OnInitialize(initConfig) - RootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.cobra.yaml)") - RootCmd.PersistentFlags().StringVarP(&projectBase, "projectbase", "b", "", "base project directory eg. github.com/spf13/") - RootCmd.PersistentFlags().StringP("author", "a", "YOUR NAME", "Author name for copyright attribution") - RootCmd.PersistentFlags().StringVarP(&userLicense, "license", "l", "", "Name of license for the project (can provide `licensetext` in config)") - RootCmd.PersistentFlags().Bool("viper", true, "Use Viper for configuration") - viper.BindPFlag("author", RootCmd.PersistentFlags().Lookup("author")) - viper.BindPFlag("projectbase", RootCmd.PersistentFlags().Lookup("projectbase")) - viper.BindPFlag("useViper", RootCmd.PersistentFlags().Lookup("viper")) - viper.SetDefault("author", "NAME HERE ") - viper.SetDefault("license", "apache") -} -``` - -### Create your main.go - -With the root command you need to have your main function execute it. -Execute should be run on the root for clarity, though it can be called on any command. 
- -In a Cobra app, typically the main.go file is very bare. It serves, one purpose, to initialize Cobra. - -```go -package main - -import "{pathToYourApp}/cmd" - -func main() { - if err := cmd.RootCmd.Execute(); err != nil { - fmt.Println(err) - os.Exit(-1) - } -} -``` - - -### Create additional commands - -Additional commands can be defined and typically are each given their own file -inside of the cmd/ directory. - -If you wanted to create a version command you would create cmd/version.go and -populate it with the following: - -```go -package cmd - -import ( - "github.com/spf13/cobra" -) - -func init() { - RootCmd.AddCommand(versionCmd) -} - -var versionCmd = &cobra.Command{ - Use: "version", - Short: "Print the version number of Hugo", - Long: `All software has versions. This is Hugo's`, - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Hugo Static Site Generator v0.9 -- HEAD") - }, -} -``` - -### Attach command to its parent - - -If you notice in the above example we attach the command to its parent. In -this case the parent is the rootCmd. In this example we are attaching it to the -root, but commands can be attached at any level. - -```go -RootCmd.AddCommand(versionCmd) -``` - -### Remove a command from its parent - -Removing a command is not a common action in simple programs, but it allows 3rd -parties to customize an existing command tree. - -In this example, we remove the existing `VersionCmd` command of an existing -root command, and we replace it with our own version: - -```go -mainlib.RootCmd.RemoveCommand(mainlib.VersionCmd) -mainlib.RootCmd.AddCommand(versionCmd) -``` - -## Working with Flags - -Flags provide modifiers to control how the action command operates. - -### Assign flags to a command - -Since the flags are defined and used in different locations, we need to -define a variable outside with the correct scope to assign the flag to -work with. - -```go -var Verbose bool -var Source string -``` - -There are two different approaches to assign a flag. - -### Persistent Flags - -A flag can be 'persistent' meaning that this flag will be available to the -command it's assigned to as well as every command under that command. For -global flags, assign a flag as a persistent flag on the root. - -```go -RootCmd.PersistentFlags().BoolVarP(&Verbose, "verbose", "v", false, "verbose output") -``` - -### Local Flags - -A flag can also be assigned locally which will only apply to that specific command. - -```go -RootCmd.Flags().StringVarP(&Source, "source", "s", "", "Source directory to read from") -``` - -### Positional Arguments - -Validation of positional arguments can be specified using the `Args` field, which accepts -one of the following values: - -- `NoArgs` - the command will report an error if there are any positional args. -- `ArbitraryArgs` - the command will accept any args. -- `OnlyValidArgs` - the command will report an error if there are any positiona - args that are not in the `ValidArgs` list. -- `MinimumNArgs(int)` - the command will report an error if there are not at - least N positional args. -- `MaximumNArgs(int)` - the command will report an error if there are more than - N positional args. -- `ExactArgs(int)` - the command will report an error if there are not - exactly N positional args. -- `RangeArgs(min, max)` - the command will report an error if the number of args - is not between the minimum and maximum number of expected args. 
-
-By default, `Args` uses the following legacy behaviour:
-- root commands with no subcommands can take arbitrary arguments
-- root commands with subcommands will do subcommand validity checking
-- subcommands will always accept arbitrary arguments and do no subsubcommand validity checking
-
-
-```go
-var HugoCmd = &cobra.Command{
-	Use:       "hugo",
-	Short:     "Hugo is a very fast static site generator",
-	ValidArgs: []string{"one", "two"},
-	Args:      cobra.OnlyValidArgs,
-	Run: func(cmd *cobra.Command, args []string) {
-		// args will only contain the values "one" and/or "two";
-		// anything else will cause cmd.Execute() to fail.
-	},
-}
-```
-
-## Example
-
-In the example below, we have defined three commands. Two are at the top level
-and one (cmdTimes) is a child of one of the top commands. In this case the root
-is not executable, meaning that a subcommand is required. This is accomplished
-by not providing a 'Run' for the 'rootCmd'.
-
-We have only defined one flag for a single command.
-
-More documentation about flags is available at https://github.com/spf13/pflag
-
-```go
-package main
-
-import (
-	"fmt"
-	"strings"
-
-	"github.com/spf13/cobra"
-)
-
-func main() {
-
-	var echoTimes int
-
-	var cmdPrint = &cobra.Command{
-		Use:   "print [string to print]",
-		Short: "Print anything to the screen",
-		Long: `print is for printing anything back to the screen.
-            For many years people have printed back to the screen.
-            `,
-		Run: func(cmd *cobra.Command, args []string) {
-			fmt.Println("Print: " + strings.Join(args, " "))
-		},
-	}
-
-	var cmdEcho = &cobra.Command{
-		Use:   "echo [string to echo]",
-		Short: "Echo anything to the screen",
-		Long: `echo is for echoing anything back.
-            Echo works a lot like print, except it has a child command.
-            `,
-		Run: func(cmd *cobra.Command, args []string) {
-			fmt.Println("Print: " + strings.Join(args, " "))
-		},
-	}
-
-	var cmdTimes = &cobra.Command{
-		Use:   "times [# times] [string to echo]",
-		Short: "Echo anything to the screen more times",
-		Long: `echo things multiple times back to the user by providing
-            a count and a string.`,
-		Run: func(cmd *cobra.Command, args []string) {
-			for i := 0; i < echoTimes; i++ {
-				fmt.Println("Echo: " + strings.Join(args, " "))
-			}
-		},
-	}
-
-	cmdTimes.Flags().IntVarP(&echoTimes, "times", "t", 1, "times to echo the input")
-
-	var rootCmd = &cobra.Command{Use: "app"}
-	rootCmd.AddCommand(cmdPrint, cmdEcho)
-	cmdEcho.AddCommand(cmdTimes)
-	rootCmd.Execute()
-}
-```
-
-For a more complete example of a larger application, please check out [Hugo](http://gohugo.io/).
-
-## The Help Command
-
-Cobra automatically adds a help command to your application when you have subcommands.
-This will be called when a user runs 'app help'. Additionally, help will
-support all other commands as input. Say, for instance, you have a command called
-'create' without any additional configuration; Cobra will work when 'app help
-create' is called. Every command will automatically have the '--help' flag added.
-
-### Example
-
-The following output is automatically generated by Cobra. Nothing beyond the
-command and flag definitions is needed.
-
-    > hugo help
-
-    hugo is the main command, used to build your Hugo site.
-
-    Hugo is a Fast and Flexible Static Site Generator
-    built with love by spf13 and friends in Go.
-
-    Complete documentation is available at http://gohugo.io/.
- - Usage: - hugo [flags] - hugo [command] - - Available Commands: - server Hugo runs its own webserver to render the files - version Print the version number of Hugo - config Print the site configuration - check Check content in the source directory - benchmark Benchmark hugo by building a site a number of times. - convert Convert your content to different formats - new Create new content for your site - list Listing out various types of content - undraft Undraft changes the content's draft status from 'True' to 'False' - genautocomplete Generate shell autocompletion script for Hugo - gendoc Generate Markdown documentation for the Hugo CLI. - genman Generate man page for Hugo - import Import your site from others. - - Flags: - -b, --baseURL="": hostname (and path) to the root, e.g. http://spf13.com/ - -D, --buildDrafts[=false]: include content marked as draft - -F, --buildFuture[=false]: include content with publishdate in the future - --cacheDir="": filesystem path to cache directory. Defaults: $TMPDIR/hugo_cache/ - --canonifyURLs[=false]: if true, all relative URLs will be canonicalized using baseURL - --config="": config file (default is path/config.yaml|json|toml) - -d, --destination="": filesystem path to write files to - --disableRSS[=false]: Do not build RSS files - --disableSitemap[=false]: Do not build Sitemap file - --editor="": edit new content with this editor, if provided - --ignoreCache[=false]: Ignores the cache directory for reading but still writes to it - --log[=false]: Enable Logging - --logFile="": Log File path (if set, logging enabled automatically) - --noTimes[=false]: Don't sync modification time of files - --pluralizeListTitles[=true]: Pluralize titles in lists using inflect - --preserveTaxonomyNames[=false]: Preserve taxonomy names as written ("Gérard Depardieu" vs "gerard-depardieu") - -s, --source="": filesystem path to read files relative from - --stepAnalysis[=false]: display memory and timing of different steps of the program - -t, --theme="": theme to use (located in /themes/THEMENAME/) - --uglyURLs[=false]: if true, use /filename.html instead of /filename/ - -v, --verbose[=false]: verbose output - --verboseLog[=false]: verbose logging - -w, --watch[=false]: watch filesystem for changes and recreate as needed - - Use "hugo [command] --help" for more information about a command. - - -Help is just a command like any other. There is no special logic or behavior -around it. In fact, you can provide your own if you want. - -### Defining your own help - -You can provide your own Help command or your own template for the default command to use. - -The default help command is - -```go -func (c *Command) initHelp() { - if c.helpCommand == nil { - c.helpCommand = &Command{ - Use: "help [command]", - Short: "Help about any command", - Long: `Help provides help for any command in the application. - Simply type ` + c.Name() + ` help [path to command] for full details.`, - Run: c.HelpFunc(), - } - } - c.AddCommand(c.helpCommand) -} -``` - -You can provide your own command, function or template through the following methods: - -```go -command.SetHelpCommand(cmd *Command) - -command.SetHelpFunc(f func(*Command, []string)) - -command.SetHelpTemplate(s string) -``` - -The latter two will also apply to any children commands. - -## Usage - -When the user provides an invalid flag or invalid command, Cobra responds by -showing the user the 'usage'. - -### Example -You may recognize this from the help above. That's because the default help -embeds the usage as part of its output. 
-
-    Usage:
-      hugo [flags]
-      hugo [command]
-
-    Available Commands:
-      server          Hugo runs its own webserver to render the files
-      version         Print the version number of Hugo
-      config          Print the site configuration
-      check           Check content in the source directory
-      benchmark       Benchmark hugo by building a site a number of times.
-      convert         Convert your content to different formats
-      new             Create new content for your site
-      list            Listing out various types of content
-      undraft         Undraft changes the content's draft status from 'True' to 'False'
-      genautocomplete Generate shell autocompletion script for Hugo
-      gendoc          Generate Markdown documentation for the Hugo CLI.
-      genman          Generate man page for Hugo
-      import          Import your site from others.
-
-    Flags:
-      -b, --baseURL="": hostname (and path) to the root, e.g. http://spf13.com/
-      -D, --buildDrafts[=false]: include content marked as draft
-      -F, --buildFuture[=false]: include content with publishdate in the future
-          --cacheDir="": filesystem path to cache directory. Defaults: $TMPDIR/hugo_cache/
-          --canonifyURLs[=false]: if true, all relative URLs will be canonicalized using baseURL
-          --config="": config file (default is path/config.yaml|json|toml)
-      -d, --destination="": filesystem path to write files to
-          --disableRSS[=false]: Do not build RSS files
-          --disableSitemap[=false]: Do not build Sitemap file
-          --editor="": edit new content with this editor, if provided
-          --ignoreCache[=false]: Ignores the cache directory for reading but still writes to it
-          --log[=false]: Enable Logging
-          --logFile="": Log File path (if set, logging enabled automatically)
-          --noTimes[=false]: Don't sync modification time of files
-          --pluralizeListTitles[=true]: Pluralize titles in lists using inflect
-          --preserveTaxonomyNames[=false]: Preserve taxonomy names as written ("Gérard Depardieu" vs "gerard-depardieu")
-      -s, --source="": filesystem path to read files relative from
-          --stepAnalysis[=false]: display memory and timing of different steps of the program
-      -t, --theme="": theme to use (located in /themes/THEMENAME/)
-          --uglyURLs[=false]: if true, use /filename.html instead of /filename/
-      -v, --verbose[=false]: verbose output
-          --verboseLog[=false]: verbose logging
-      -w, --watch[=false]: watch filesystem for changes and recreate as needed
-
-### Defining your own usage
-You can provide your own usage function or template for Cobra to use.
-
-The default usage function is:
-
-```go
-return func(c *Command) error {
-	err := tmpl(c.Out(), c.UsageTemplate(), c)
-	return err
-}
-```
-
-Like help, the function and template are overridable through public methods:
-
-```go
-command.SetUsageFunc(f func(*Command) error)
-
-command.SetUsageTemplate(s string)
-```
-
-## PreRun or PostRun Hooks
-
-It is possible to run functions before or after the main `Run` function of your command. The `PersistentPreRun` and `PreRun` functions will be executed before `Run`. `PersistentPostRun` and `PostRun` will be executed after `Run`. The `Persistent*Run` functions will be inherited by children if they do not declare their own. These functions are run in the following order:
-
-- `PersistentPreRun`
-- `PreRun`
-- `Run`
-- `PostRun`
-- `PersistentPostRun`
-
-An example of two commands which use all of these features is below. When the subcommand is executed, it will run the root command's `PersistentPreRun` but not the root command's `PersistentPostRun`:
-
-```go
-package main
-
-import (
-	"fmt"
-
-	"github.com/spf13/cobra"
-)
-
-func main() {
-
-	var rootCmd = &cobra.Command{
-		Use:   "root [sub]",
-		Short: "My root command",
-		PersistentPreRun: func(cmd *cobra.Command, args []string) {
-			fmt.Printf("Inside rootCmd PersistentPreRun with args: %v\n", args)
-		},
-		PreRun: func(cmd *cobra.Command, args []string) {
-			fmt.Printf("Inside rootCmd PreRun with args: %v\n", args)
-		},
-		Run: func(cmd *cobra.Command, args []string) {
-			fmt.Printf("Inside rootCmd Run with args: %v\n", args)
-		},
-		PostRun: func(cmd *cobra.Command, args []string) {
-			fmt.Printf("Inside rootCmd PostRun with args: %v\n", args)
-		},
-		PersistentPostRun: func(cmd *cobra.Command, args []string) {
-			fmt.Printf("Inside rootCmd PersistentPostRun with args: %v\n", args)
-		},
-	}
-
-	var subCmd = &cobra.Command{
-		Use:   "sub [no options!]",
-		Short: "My subcommand",
-		PreRun: func(cmd *cobra.Command, args []string) {
-			fmt.Printf("Inside subCmd PreRun with args: %v\n", args)
-		},
-		Run: func(cmd *cobra.Command, args []string) {
-			fmt.Printf("Inside subCmd Run with args: %v\n", args)
-		},
-		PostRun: func(cmd *cobra.Command, args []string) {
-			fmt.Printf("Inside subCmd PostRun with args: %v\n", args)
-		},
-		PersistentPostRun: func(cmd *cobra.Command, args []string) {
-			fmt.Printf("Inside subCmd PersistentPostRun with args: %v\n", args)
-		},
-	}
-
-	rootCmd.AddCommand(subCmd)
-
-	rootCmd.SetArgs([]string{""})
-	_ = rootCmd.Execute()
-	fmt.Print("\n")
-	rootCmd.SetArgs([]string{"sub", "arg1", "arg2"})
-	_ = rootCmd.Execute()
-}
-```
-
-
-## Alternative Error Handling
-
-Cobra also has functions where the return signature is an error. This allows for errors to bubble up to the top,
-providing a way to handle them in one location. The current list of functions that return an error is:
-
-* PersistentPreRunE
-* PreRunE
-* RunE
-* PostRunE
-* PersistentPostRunE
-
-If you would like to silence the default `error` and `usage` output in favor of your own, you can set `SilenceUsage`
-and `SilenceErrors` to `true` on the command. A child command respects these flags if they are set on the parent
-command.
-
-**Example Usage using RunE:**
-
-```go
-package main
-
-import (
-	"errors"
-	"log"
-
-	"github.com/spf13/cobra"
-)
-
-func main() {
-	var rootCmd = &cobra.Command{
-		Use:   "hugo",
-		Short: "Hugo is a very fast static site generator",
-		Long: `A Fast and Flexible Static Site Generator built with
-                love by spf13 and friends in Go.
-                Complete documentation is available at http://hugo.spf13.com`,
-		RunE: func(cmd *cobra.Command, args []string) error {
-			// Do Stuff Here
-			return errors.New("some random error")
-		},
-	}
-
-	if err := rootCmd.Execute(); err != nil {
-		log.Fatal(err)
-	}
-}
-```
-
-## Suggestions when "unknown command" happens
-
-Cobra will print automatic suggestions when "unknown command" errors happen. This allows Cobra to behave similarly to the `git` command when a typo happens. For example:
-
-```
-$ hugo srever
-Error: unknown command "srever" for "hugo"
-
-Did you mean this?
-        server
-
-Run 'hugo --help' for usage.
-```
-
-Suggestions are generated automatically based on every registered subcommand, using an implementation of [Levenshtein distance](http://en.wikipedia.org/wiki/Levenshtein_distance). Every registered command within a distance of 2 (ignoring case) will be displayed as a suggestion.
- -If you need to disable suggestions or tweak the string distance in your command, use: - -```go -command.DisableSuggestions = true -``` - -or - -```go -command.SuggestionsMinimumDistance = 1 -``` - -You can also explicitly set names for which a given command will be suggested using the `SuggestFor` attribute. This allows suggestions for strings that are not close in terms of string distance, but makes sense in your set of commands and for some which you don't want aliases. Example: - -``` -$ kubectl remove -Error: unknown command "remove" for "kubectl" - -Did you mean this? - delete - -Run 'kubectl help' for usage. -``` - -## Generating Markdown-formatted documentation for your command - -Cobra can generate a Markdown-formatted document based on the subcommands, flags, etc. A simple example of how to do this for your command can be found in [Markdown Docs](doc/md_docs.md). - -## Generating man pages for your command - -Cobra can generate a man page based on the subcommands, flags, etc. A simple example of how to do this for your command can be found in [Man Docs](doc/man_docs.md). - -## Generating bash completions for your command - -Cobra can generate a bash-completion file. If you add more information to your command, these completions can be amazingly powerful and flexible. Read more about it in [Bash Completions](bash_completions.md). - -## Debugging - -Cobra provides a ‘DebugFlags’ method on a command which, when called, will print -out everything Cobra knows about the flags for each command. - -### Example - -```go -command.DebugFlags() -``` - -## Release Notes -* **0.9.0** June 17, 2014 - * flags can appears anywhere in the args (provided they are unambiguous) - * --help prints usage screen for app or command - * Prefix matching for commands - * Cleaner looking help and usage output - * Extensive test suite -* **0.8.0** Nov 5, 2013 - * Reworked interface to remove commander completely - * Command now primary structure - * No initialization needed - * Usage & Help templates & functions definable at any level - * Updated Readme -* **0.7.0** Sept 24, 2013 - * Needs more eyes - * Test suite - * Support for automatic error messages - * Support for help command - * Support for printing to any io.Writer instead of os.Stderr - * Support for persistent flags which cascade down tree - * Ready for integration into Hugo -* **0.1.0** Sept 3, 2013 - * Implement first draft - -## Extensions - -Libraries for extending Cobra: - -* [cmdns](https://github.com/gosuri/cmdns): Enables name spacing a command's immediate children. It provides an alternative way to structure subcommands, similar to `heroku apps:create` and `ovrclk clusters:launch`. - -## ToDo -* Launch proper documentation site - -## Contributing - -1. Fork it -2. Create your feature branch (`git checkout -b my-new-feature`) -3. Commit your changes (`git commit -am 'Add some feature'`) -4. Push to the branch (`git push origin my-new-feature`) -5. Create new Pull Request - -## Contributors - -Names in no particular order: - -* [spf13](https://github.com/spf13), -[eparis](https://github.com/eparis), -[bep](https://github.com/bep), and many more! - -## License - -Cobra is released under the Apache 2.0 license. 
See [LICENSE.txt](https://github.com/spf13/cobra/blob/master/LICENSE.txt) - - -[![Bitdeli Badge](https://d2weczhvl823v0.cloudfront.net/spf13/cobra/trend.png)](https://bitdeli.com/free "Bitdeli Badge") diff --git a/vendor/github.com/spf13/cobra/args.go b/vendor/github.com/spf13/cobra/args.go deleted file mode 100644 index 639fae7cd..000000000 --- a/vendor/github.com/spf13/cobra/args.go +++ /dev/null @@ -1,98 +0,0 @@ -package cobra - -import ( - "fmt" -) - -type PositionalArgs func(cmd *Command, args []string) error - -// Legacy arg validation has the following behaviour: -// - root commands with no subcommands can take arbitrary arguments -// - root commands with subcommands will do subcommand validity checking -// - subcommands will always accept arbitrary arguments -func legacyArgs(cmd *Command, args []string) error { - // no subcommand, always take args - if !cmd.HasSubCommands() { - return nil - } - - // root command with subcommands, do subcommand checking - if !cmd.HasParent() && len(args) > 0 { - return fmt.Errorf("unknown command %q for %q%s", args[0], cmd.CommandPath(), cmd.findSuggestions(args[0])) - } - return nil -} - -// NoArgs returns an error if any args are included -func NoArgs(cmd *Command, args []string) error { - if len(args) > 0 { - return fmt.Errorf("unknown command %q for %q", args[0], cmd.CommandPath()) - } - return nil -} - -// OnlyValidArgs returns an error if any args are not in the list of ValidArgs -func OnlyValidArgs(cmd *Command, args []string) error { - if len(cmd.ValidArgs) > 0 { - for _, v := range args { - if !stringInSlice(v, cmd.ValidArgs) { - return fmt.Errorf("invalid argument %q for %q%s", v, cmd.CommandPath(), cmd.findSuggestions(args[0])) - } - } - } - return nil -} - -func stringInSlice(a string, list []string) bool { - for _, b := range list { - if b == a { - return true - } - } - return false -} - -// ArbitraryArgs never returns an error -func ArbitraryArgs(cmd *Command, args []string) error { - return nil -} - -// MinimumNArgs returns an error if there is not at least N args -func MinimumNArgs(n int) PositionalArgs { - return func(cmd *Command, args []string) error { - if len(args) < n { - return fmt.Errorf("requires at least %d arg(s), only received %d", n, len(args)) - } - return nil - } -} - -// MaximumNArgs returns an error if there are more than N args -func MaximumNArgs(n int) PositionalArgs { - return func(cmd *Command, args []string) error { - if len(args) > n { - return fmt.Errorf("accepts at most %d arg(s), received %d", n, len(args)) - } - return nil - } -} - -// ExactArgs returns an error if there are not exactly n args -func ExactArgs(n int) PositionalArgs { - return func(cmd *Command, args []string) error { - if len(args) != n { - return fmt.Errorf("accepts %d arg(s), received %d", n, len(args)) - } - return nil - } -} - -// RangeArgs returns an error if the number of args is not within the expected range -func RangeArgs(min int, max int) PositionalArgs { - return func(cmd *Command, args []string) error { - if len(args) < min || len(args) > max { - return fmt.Errorf("accepts between %d and %d arg(s), received %d", min, max, len(args)) - } - return nil - } -} diff --git a/vendor/github.com/spf13/cobra/bash_completions.go b/vendor/github.com/spf13/cobra/bash_completions.go deleted file mode 100644 index 236dee67f..000000000 --- a/vendor/github.com/spf13/cobra/bash_completions.go +++ /dev/null @@ -1,630 +0,0 @@ -package cobra - -import ( - "fmt" - "io" - "os" - "sort" - "strings" - - "github.com/spf13/pflag" -) - -const ( - 
BashCompFilenameExt = "cobra_annotation_bash_completion_filename_extentions" - BashCompCustom = "cobra_annotation_bash_completion_custom" - BashCompOneRequiredFlag = "cobra_annotation_bash_completion_one_required_flag" - BashCompSubdirsInDir = "cobra_annotation_bash_completion_subdirs_in_dir" -) - -func preamble(out io.Writer, name string) error { - _, err := fmt.Fprintf(out, "# bash completion for %-36s -*- shell-script -*-\n", name) - if err != nil { - return err - } - _, err = fmt.Fprint(out, ` -__debug() -{ - if [[ -n ${BASH_COMP_DEBUG_FILE} ]]; then - echo "$*" >> "${BASH_COMP_DEBUG_FILE}" - fi -} - -# Homebrew on Macs have version 1.3 of bash-completion which doesn't include -# _init_completion. This is a very minimal version of that function. -__my_init_completion() -{ - COMPREPLY=() - _get_comp_words_by_ref "$@" cur prev words cword -} - -__index_of_word() -{ - local w word=$1 - shift - index=0 - for w in "$@"; do - [[ $w = "$word" ]] && return - index=$((index+1)) - done - index=-1 -} - -__contains_word() -{ - local w word=$1; shift - for w in "$@"; do - [[ $w = "$word" ]] && return - done - return 1 -} - -__handle_reply() -{ - __debug "${FUNCNAME[0]}" - case $cur in - -*) - if [[ $(type -t compopt) = "builtin" ]]; then - compopt -o nospace - fi - local allflags - if [ ${#must_have_one_flag[@]} -ne 0 ]; then - allflags=("${must_have_one_flag[@]}") - else - allflags=("${flags[*]} ${two_word_flags[*]}") - fi - COMPREPLY=( $(compgen -W "${allflags[*]}" -- "$cur") ) - if [[ $(type -t compopt) = "builtin" ]]; then - [[ "${COMPREPLY[0]}" == *= ]] || compopt +o nospace - fi - - # complete after --flag=abc - if [[ $cur == *=* ]]; then - if [[ $(type -t compopt) = "builtin" ]]; then - compopt +o nospace - fi - - local index flag - flag="${cur%%=*}" - __index_of_word "${flag}" "${flags_with_completion[@]}" - if [[ ${index} -ge 0 ]]; then - COMPREPLY=() - PREFIX="" - cur="${cur#*=}" - ${flags_completion[${index}]} - if [ -n "${ZSH_VERSION}" ]; then - # zfs completion needs --flag= prefix - eval "COMPREPLY=( \"\${COMPREPLY[@]/#/${flag}=}\" )" - fi - fi - fi - return 0; - ;; - esac - - # check if we are handling a flag with special work handling - local index - __index_of_word "${prev}" "${flags_with_completion[@]}" - if [[ ${index} -ge 0 ]]; then - ${flags_completion[${index}]} - return - fi - - # we are parsing a flag and don't have a special handler, no completion - if [[ ${cur} != "${words[cword]}" ]]; then - return - fi - - local completions - completions=("${commands[@]}") - if [[ ${#must_have_one_noun[@]} -ne 0 ]]; then - completions=("${must_have_one_noun[@]}") - fi - if [[ ${#must_have_one_flag[@]} -ne 0 ]]; then - completions+=("${must_have_one_flag[@]}") - fi - COMPREPLY=( $(compgen -W "${completions[*]}" -- "$cur") ) - - if [[ ${#COMPREPLY[@]} -eq 0 && ${#noun_aliases[@]} -gt 0 && ${#must_have_one_noun[@]} -ne 0 ]]; then - COMPREPLY=( $(compgen -W "${noun_aliases[*]}" -- "$cur") ) - fi - - if [[ ${#COMPREPLY[@]} -eq 0 ]]; then - declare -F __custom_func >/dev/null && __custom_func - fi - - __ltrim_colon_completions "$cur" -} - -# The arguments should be in the form "ext1|ext2|extn" -__handle_filename_extension_flag() -{ - local ext="$1" - _filedir "@(${ext})" -} - -__handle_subdirs_in_dir_flag() -{ - local dir="$1" - pushd "${dir}" >/dev/null 2>&1 && _filedir -d && popd >/dev/null 2>&1 -} - -__handle_flag() -{ - __debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" - - # if a command required a flag, and we found it, unset must_have_one_flag() - local flagname=${words[c]} - 
local flagvalue - # if the word contained an = - if [[ ${words[c]} == *"="* ]]; then - flagvalue=${flagname#*=} # take in as flagvalue after the = - flagname=${flagname%%=*} # strip everything after the = - flagname="${flagname}=" # but put the = back - fi - __debug "${FUNCNAME[0]}: looking for ${flagname}" - if __contains_word "${flagname}" "${must_have_one_flag[@]}"; then - must_have_one_flag=() - fi - - # if you set a flag which only applies to this command, don't show subcommands - if __contains_word "${flagname}" "${local_nonpersistent_flags[@]}"; then - commands=() - fi - - # keep flag value with flagname as flaghash - if [ -n "${flagvalue}" ] ; then - flaghash[${flagname}]=${flagvalue} - elif [ -n "${words[ $((c+1)) ]}" ] ; then - flaghash[${flagname}]=${words[ $((c+1)) ]} - else - flaghash[${flagname}]="true" # pad "true" for bool flag - fi - - # skip the argument to a two word flag - if __contains_word "${words[c]}" "${two_word_flags[@]}"; then - c=$((c+1)) - # if we are looking for a flags value, don't show commands - if [[ $c -eq $cword ]]; then - commands=() - fi - fi - - c=$((c+1)) - -} - -__handle_noun() -{ - __debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" - - if __contains_word "${words[c]}" "${must_have_one_noun[@]}"; then - must_have_one_noun=() - elif __contains_word "${words[c]}" "${noun_aliases[@]}"; then - must_have_one_noun=() - fi - - nouns+=("${words[c]}") - c=$((c+1)) -} - -__handle_command() -{ - __debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" - - local next_command - if [[ -n ${last_command} ]]; then - next_command="_${last_command}_${words[c]//:/__}" - else - if [[ $c -eq 0 ]]; then - next_command="_$(basename "${words[c]//:/__}")" - else - next_command="_${words[c]//:/__}" - fi - fi - c=$((c+1)) - __debug "${FUNCNAME[0]}: looking for ${next_command}" - declare -F $next_command >/dev/null && $next_command -} - -__handle_word() -{ - if [[ $c -ge $cword ]]; then - __handle_reply - return - fi - __debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" - if [[ "${words[c]}" == -* ]]; then - __handle_flag - elif __contains_word "${words[c]}" "${commands[@]}"; then - __handle_command - elif [[ $c -eq 0 ]] && __contains_word "$(basename "${words[c]}")" "${commands[@]}"; then - __handle_command - else - __handle_noun - fi - __handle_word -} - -`) - return err -} - -func postscript(w io.Writer, name string) error { - name = strings.Replace(name, ":", "__", -1) - _, err := fmt.Fprintf(w, "__start_%s()\n", name) - if err != nil { - return err - } - _, err = fmt.Fprintf(w, `{ - local cur prev words cword - declare -A flaghash 2>/dev/null || : - if declare -F _init_completion >/dev/null 2>&1; then - _init_completion -s || return - else - __my_init_completion -n "=" || return - fi - - local c=0 - local flags=() - local two_word_flags=() - local local_nonpersistent_flags=() - local flags_with_completion=() - local flags_completion=() - local commands=("%s") - local must_have_one_flag=() - local must_have_one_noun=() - local last_command - local nouns=() - - __handle_word -} - -`, name) - if err != nil { - return err - } - _, err = fmt.Fprintf(w, `if [[ $(type -t compopt) = "builtin" ]]; then - complete -o default -F __start_%s %s -else - complete -o default -o nospace -F __start_%s %s -fi - -`, name, name, name, name) - if err != nil { - return err - } - _, err = fmt.Fprintf(w, "# ex: ts=4 sw=4 et filetype=sh\n") - return err -} - -func writeCommands(cmd *Command, w io.Writer) error { - if _, err := fmt.Fprintf(w, " commands=()\n"); err != nil { - return 
err - } - for _, c := range cmd.Commands() { - if !c.IsAvailableCommand() || c == cmd.helpCommand { - continue - } - if _, err := fmt.Fprintf(w, " commands+=(%q)\n", c.Name()); err != nil { - return err - } - } - _, err := fmt.Fprintf(w, "\n") - return err -} - -func writeFlagHandler(name string, annotations map[string][]string, w io.Writer) error { - for key, value := range annotations { - switch key { - case BashCompFilenameExt: - _, err := fmt.Fprintf(w, " flags_with_completion+=(%q)\n", name) - if err != nil { - return err - } - - if len(value) > 0 { - ext := "__handle_filename_extension_flag " + strings.Join(value, "|") - _, err = fmt.Fprintf(w, " flags_completion+=(%q)\n", ext) - } else { - ext := "_filedir" - _, err = fmt.Fprintf(w, " flags_completion+=(%q)\n", ext) - } - if err != nil { - return err - } - case BashCompCustom: - _, err := fmt.Fprintf(w, " flags_with_completion+=(%q)\n", name) - if err != nil { - return err - } - if len(value) > 0 { - handlers := strings.Join(value, "; ") - _, err = fmt.Fprintf(w, " flags_completion+=(%q)\n", handlers) - } else { - _, err = fmt.Fprintf(w, " flags_completion+=(:)\n") - } - if err != nil { - return err - } - case BashCompSubdirsInDir: - _, err := fmt.Fprintf(w, " flags_with_completion+=(%q)\n", name) - - if len(value) == 1 { - ext := "__handle_subdirs_in_dir_flag " + value[0] - _, err = fmt.Fprintf(w, " flags_completion+=(%q)\n", ext) - } else { - ext := "_filedir -d" - _, err = fmt.Fprintf(w, " flags_completion+=(%q)\n", ext) - } - if err != nil { - return err - } - } - } - return nil -} - -func writeShortFlag(flag *pflag.Flag, w io.Writer) error { - b := (len(flag.NoOptDefVal) > 0) - name := flag.Shorthand - format := " " - if !b { - format += "two_word_" - } - format += "flags+=(\"-%s\")\n" - if _, err := fmt.Fprintf(w, format, name); err != nil { - return err - } - return writeFlagHandler("-"+name, flag.Annotations, w) -} - -func writeFlag(flag *pflag.Flag, w io.Writer) error { - b := (len(flag.NoOptDefVal) > 0) - name := flag.Name - format := " flags+=(\"--%s" - if !b { - format += "=" - } - format += "\")\n" - if _, err := fmt.Fprintf(w, format, name); err != nil { - return err - } - return writeFlagHandler("--"+name, flag.Annotations, w) -} - -func writeLocalNonPersistentFlag(flag *pflag.Flag, w io.Writer) error { - b := (len(flag.NoOptDefVal) > 0) - name := flag.Name - format := " local_nonpersistent_flags+=(\"--%s" - if !b { - format += "=" - } - format += "\")\n" - if _, err := fmt.Fprintf(w, format, name); err != nil { - return err - } - return nil -} - -func writeFlags(cmd *Command, w io.Writer) error { - _, err := fmt.Fprintf(w, ` flags=() - two_word_flags=() - local_nonpersistent_flags=() - flags_with_completion=() - flags_completion=() - -`) - if err != nil { - return err - } - localNonPersistentFlags := cmd.LocalNonPersistentFlags() - var visitErr error - cmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) { - if err := writeFlag(flag, w); err != nil { - visitErr = err - return - } - if len(flag.Shorthand) > 0 { - if err := writeShortFlag(flag, w); err != nil { - visitErr = err - return - } - } - if localNonPersistentFlags.Lookup(flag.Name) != nil { - if err := writeLocalNonPersistentFlag(flag, w); err != nil { - visitErr = err - return - } - } - }) - if visitErr != nil { - return visitErr - } - cmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) { - if err := writeFlag(flag, w); err != nil { - visitErr = err - return - } - if len(flag.Shorthand) > 0 { - if err := writeShortFlag(flag, w); err != nil { - visitErr = 
err - return - } - } - }) - if visitErr != nil { - return visitErr - } - - _, err = fmt.Fprintf(w, "\n") - return err -} - -func writeRequiredFlag(cmd *Command, w io.Writer) error { - if _, err := fmt.Fprintf(w, " must_have_one_flag=()\n"); err != nil { - return err - } - flags := cmd.NonInheritedFlags() - var visitErr error - flags.VisitAll(func(flag *pflag.Flag) { - for key := range flag.Annotations { - switch key { - case BashCompOneRequiredFlag: - format := " must_have_one_flag+=(\"--%s" - b := (flag.Value.Type() == "bool") - if !b { - format += "=" - } - format += "\")\n" - if _, err := fmt.Fprintf(w, format, flag.Name); err != nil { - visitErr = err - return - } - - if len(flag.Shorthand) > 0 { - if _, err := fmt.Fprintf(w, " must_have_one_flag+=(\"-%s\")\n", flag.Shorthand); err != nil { - visitErr = err - return - } - } - } - } - }) - return visitErr -} - -func writeRequiredNouns(cmd *Command, w io.Writer) error { - if _, err := fmt.Fprintf(w, " must_have_one_noun=()\n"); err != nil { - return err - } - sort.Sort(sort.StringSlice(cmd.ValidArgs)) - for _, value := range cmd.ValidArgs { - if _, err := fmt.Fprintf(w, " must_have_one_noun+=(%q)\n", value); err != nil { - return err - } - } - return nil -} - -func writeArgAliases(cmd *Command, w io.Writer) error { - if _, err := fmt.Fprintf(w, " noun_aliases=()\n"); err != nil { - return err - } - sort.Sort(sort.StringSlice(cmd.ArgAliases)) - for _, value := range cmd.ArgAliases { - if _, err := fmt.Fprintf(w, " noun_aliases+=(%q)\n", value); err != nil { - return err - } - } - return nil -} - -func gen(cmd *Command, w io.Writer) error { - for _, c := range cmd.Commands() { - if !c.IsAvailableCommand() || c == cmd.helpCommand { - continue - } - if err := gen(c, w); err != nil { - return err - } - } - commandName := cmd.CommandPath() - commandName = strings.Replace(commandName, " ", "_", -1) - commandName = strings.Replace(commandName, ":", "__", -1) - if _, err := fmt.Fprintf(w, "_%s()\n{\n", commandName); err != nil { - return err - } - if _, err := fmt.Fprintf(w, " last_command=%q\n", commandName); err != nil { - return err - } - if err := writeCommands(cmd, w); err != nil { - return err - } - if err := writeFlags(cmd, w); err != nil { - return err - } - if err := writeRequiredFlag(cmd, w); err != nil { - return err - } - if err := writeRequiredNouns(cmd, w); err != nil { - return err - } - if err := writeArgAliases(cmd, w); err != nil { - return err - } - if _, err := fmt.Fprintf(w, "}\n\n"); err != nil { - return err - } - return nil -} - -func (cmd *Command) GenBashCompletion(w io.Writer) error { - if err := preamble(w, cmd.Name()); err != nil { - return err - } - if len(cmd.BashCompletionFunction) > 0 { - if _, err := fmt.Fprintf(w, "%s\n", cmd.BashCompletionFunction); err != nil { - return err - } - } - if err := gen(cmd, w); err != nil { - return err - } - return postscript(w, cmd.Name()) -} - -func (cmd *Command) GenBashCompletionFile(filename string) error { - outFile, err := os.Create(filename) - if err != nil { - return err - } - defer outFile.Close() - - return cmd.GenBashCompletion(outFile) -} - -// MarkFlagRequired adds the BashCompOneRequiredFlag annotation to the named flag, if it exists. -func (cmd *Command) MarkFlagRequired(name string) error { - return MarkFlagRequired(cmd.Flags(), name) -} - -// MarkPersistentFlagRequired adds the BashCompOneRequiredFlag annotation to the named persistent flag, if it exists. 
-func (cmd *Command) MarkPersistentFlagRequired(name string) error { - return MarkFlagRequired(cmd.PersistentFlags(), name) -} - -// MarkFlagRequired adds the BashCompOneRequiredFlag annotation to the named flag in the flag set, if it exists. -func MarkFlagRequired(flags *pflag.FlagSet, name string) error { - return flags.SetAnnotation(name, BashCompOneRequiredFlag, []string{"true"}) -} - -// MarkFlagFilename adds the BashCompFilenameExt annotation to the named flag, if it exists. -// Generated bash autocompletion will select filenames for the flag, limiting to named extensions if provided. -func (cmd *Command) MarkFlagFilename(name string, extensions ...string) error { - return MarkFlagFilename(cmd.Flags(), name, extensions...) -} - -// MarkFlagCustom adds the BashCompCustom annotation to the named flag, if it exists. -// Generated bash autocompletion will call the bash function f for the flag. -func (cmd *Command) MarkFlagCustom(name string, f string) error { - return MarkFlagCustom(cmd.Flags(), name, f) -} - -// MarkPersistentFlagFilename adds the BashCompFilenameExt annotation to the named persistent flag, if it exists. -// Generated bash autocompletion will select filenames for the flag, limiting to named extensions if provided. -func (cmd *Command) MarkPersistentFlagFilename(name string, extensions ...string) error { - return MarkFlagFilename(cmd.PersistentFlags(), name, extensions...) -} - -// MarkFlagFilename adds the BashCompFilenameExt annotation to the named flag in the flag set, if it exists. -// Generated bash autocompletion will select filenames for the flag, limiting to named extensions if provided. -func MarkFlagFilename(flags *pflag.FlagSet, name string, extensions ...string) error { - return flags.SetAnnotation(name, BashCompFilenameExt, extensions) -} - -// MarkFlagCustom adds the BashCompCustom annotation to the named flag in the flag set, if it exists. -// Generated bash autocompletion will call the bash function f for the flag. -func MarkFlagCustom(flags *pflag.FlagSet, name string, f string) error { - return flags.SetAnnotation(name, BashCompCustom, []string{f}) -} diff --git a/vendor/github.com/spf13/cobra/cobra.go b/vendor/github.com/spf13/cobra/cobra.go deleted file mode 100644 index 93a2c0f3a..000000000 --- a/vendor/github.com/spf13/cobra/cobra.go +++ /dev/null @@ -1,175 +0,0 @@ -// Copyright © 2013 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Commands similar to git, go tools and other modern CLI tools -// inspired by go, go-Commander, gh and subcommand - -package cobra - -import ( - "fmt" - "io" - "reflect" - "strconv" - "strings" - "text/template" - "unicode" -) - -var templateFuncs = template.FuncMap{ - "trim": strings.TrimSpace, - "trimRightSpace": trimRightSpace, - "appendIfNotPresent": appendIfNotPresent, - "rpad": rpad, - "gt": Gt, - "eq": Eq, -} - -var initializers []func() - -// automatic prefix matching can be a dangerous thing to automatically enable in CLI tools. 
-// Set this to true to enable it -var EnablePrefixMatching = false - -//EnableCommandSorting controls sorting of the slice of commands, which is turned on by default. -//To disable sorting, set it to false. -var EnableCommandSorting = true - -//AddTemplateFunc adds a template function that's available to Usage and Help -//template generation. -func AddTemplateFunc(name string, tmplFunc interface{}) { - templateFuncs[name] = tmplFunc -} - -//AddTemplateFuncs adds multiple template functions availalble to Usage and -//Help template generation. -func AddTemplateFuncs(tmplFuncs template.FuncMap) { - for k, v := range tmplFuncs { - templateFuncs[k] = v - } -} - -//OnInitialize takes a series of func() arguments and appends them to a slice of func(). -func OnInitialize(y ...func()) { - for _, x := range y { - initializers = append(initializers, x) - } -} - -//Gt takes two types and checks whether the first type is greater than the second. In case of types Arrays, Chans, -//Maps and Slices, Gt will compare their lengths. Ints are compared directly while strings are first parsed as -//ints and then compared. -func Gt(a interface{}, b interface{}) bool { - var left, right int64 - av := reflect.ValueOf(a) - - switch av.Kind() { - case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: - left = int64(av.Len()) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - left = av.Int() - case reflect.String: - left, _ = strconv.ParseInt(av.String(), 10, 64) - } - - bv := reflect.ValueOf(b) - - switch bv.Kind() { - case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: - right = int64(bv.Len()) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - right = bv.Int() - case reflect.String: - right, _ = strconv.ParseInt(bv.String(), 10, 64) - } - - return left > right -} - -//Eq takes two types and checks whether they are equal. Supported types are int and string. Unsupported types will panic. -func Eq(a interface{}, b interface{}) bool { - av := reflect.ValueOf(a) - bv := reflect.ValueOf(b) - - switch av.Kind() { - case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: - panic("Eq called on unsupported type") - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return av.Int() == bv.Int() - case reflect.String: - return av.String() == bv.String() - } - return false -} - -func trimRightSpace(s string) string { - return strings.TrimRightFunc(s, unicode.IsSpace) -} - -// appendIfNotPresent will append stringToAppend to the end of s, but only if it's not yet present in s -func appendIfNotPresent(s, stringToAppend string) string { - if strings.Contains(s, stringToAppend) { - return s - } - return s + " " + stringToAppend -} - -//rpad adds padding to the right of a string -func rpad(s string, padding int) string { - template := fmt.Sprintf("%%-%ds", padding) - return fmt.Sprintf(template, s) -} - -// tmpl executes the given template text on data, writing the result to w. 
-func tmpl(w io.Writer, text string, data interface{}) error { - t := template.New("top") - t.Funcs(templateFuncs) - template.Must(t.Parse(text)) - return t.Execute(w, data) -} - -// ld compares two strings and returns the levenshtein distance between them -func ld(s, t string, ignoreCase bool) int { - if ignoreCase { - s = strings.ToLower(s) - t = strings.ToLower(t) - } - d := make([][]int, len(s)+1) - for i := range d { - d[i] = make([]int, len(t)+1) - } - for i := range d { - d[i][0] = i - } - for j := range d[0] { - d[0][j] = j - } - for j := 1; j <= len(t); j++ { - for i := 1; i <= len(s); i++ { - if s[i-1] == t[j-1] { - d[i][j] = d[i-1][j-1] - } else { - min := d[i-1][j] - if d[i][j-1] < min { - min = d[i][j-1] - } - if d[i-1][j-1] < min { - min = d[i-1][j-1] - } - d[i][j] = min + 1 - } - } - - } - return d[len(s)][len(t)] -} diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go deleted file mode 100644 index 288720862..000000000 --- a/vendor/github.com/spf13/cobra/command.go +++ /dev/null @@ -1,1359 +0,0 @@ -// Copyright © 2013 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//Package cobra is a commander providing a simple interface to create powerful modern CLI interfaces. -//In addition to providing an interface, Cobra simultaneously provides a controller to organize your application code. -package cobra - -import ( - "bytes" - "fmt" - "io" - "os" - "path/filepath" - "sort" - "strings" - - flag "github.com/spf13/pflag" -) - -// Command is just that, a command for your application. -// eg. 'go run' ... 'run' is the command. Cobra requires -// you to define the usage and description as part of your command -// definition to ensure usability. -type Command struct { - // Name is the command name, usually the executable's name. - name string - // The one-line usage message. - Use string - // An array of aliases that can be used instead of the first word in Use. - Aliases []string - // An array of command names for which this command will be suggested - similar to aliases but only suggests. - SuggestFor []string - // The short description shown in the 'help' output. - Short string - // The long message shown in the 'help ' output. - Long string - // Examples of how to use the command - Example string - // List of all valid non-flag arguments that are accepted in bash completions - ValidArgs []string - // List of aliases for ValidArgs. These are not suggested to the user in the bash - // completion, but accepted if entered manually. - ArgAliases []string - // Expected arguments - Args PositionalArgs - // Custom functions used by the bash autocompletion generator - BashCompletionFunction string - // Is this command deprecated and should print this string when used? - Deprecated string - // Is this command hidden and should NOT show up in the list of available commands? 
- Hidden bool - // Tags are key/value pairs that can be used by applications to identify or - // group commands - Tags map[string]string - // Full set of flags - flags *flag.FlagSet - // Set of flags childrens of this command will inherit - pflags *flag.FlagSet - // Flags that are declared specifically by this command (not inherited). - lflags *flag.FlagSet - // SilenceErrors is an option to quiet errors down stream - SilenceErrors bool - // Silence Usage is an option to silence usage when an error occurs. - SilenceUsage bool - // The *Run functions are executed in the following order: - // * PersistentPreRun() - // * PreRun() - // * Run() - // * PostRun() - // * PersistentPostRun() - // All functions get the same args, the arguments after the command name - // PersistentPreRun: children of this command will inherit and execute - PersistentPreRun func(cmd *Command, args []string) - // PersistentPreRunE: PersistentPreRun but returns an error - PersistentPreRunE func(cmd *Command, args []string) error - // PreRun: children of this command will not inherit. - PreRun func(cmd *Command, args []string) - // PreRunE: PreRun but returns an error - PreRunE func(cmd *Command, args []string) error - // Run: Typically the actual work function. Most commands will only implement this - Run func(cmd *Command, args []string) - // RunE: Run but returns an error - RunE func(cmd *Command, args []string) error - // PostRun: run after the Run command. - PostRun func(cmd *Command, args []string) - // PostRunE: PostRun but returns an error - PostRunE func(cmd *Command, args []string) error - // PersistentPostRun: children of this command will inherit and execute after PostRun - PersistentPostRun func(cmd *Command, args []string) - // PersistentPostRunE: PersistentPostRun but returns an error - PersistentPostRunE func(cmd *Command, args []string) error - // DisableAutoGenTag remove - DisableAutoGenTag bool - // Commands is the list of commands supported by this program. - commands []*Command - // Parent Command for this command - parent *Command - // max lengths of commands' string lengths for use in padding - commandsMaxUseLen int - commandsMaxCommandPathLen int - commandsMaxNameLen int - // is commands slice are sorted or not - commandsAreSorted bool - - flagErrorBuf *bytes.Buffer - - args []string // actual args parsed from flags - output *io.Writer // nil means stderr; use Out() method instead - usageFunc func(*Command) error // Usage can be defined by application - usageTemplate string // Can be defined by Application - flagErrorFunc func(*Command, error) error - helpTemplate string // Can be defined by Application - helpFunc func(*Command, []string) // Help can be defined by application - helpCommand *Command // The help command - // The global normalization function that we can use on every pFlag set and children commands - globNormFunc func(f *flag.FlagSet, name string) flag.NormalizedName - - // Disable the suggestions based on Levenshtein distance that go along with 'unknown command' messages - DisableSuggestions bool - // If displaying suggestions, allows to set the minimum levenshtein distance to display, must be > 0 - SuggestionsMinimumDistance int - - // Disable the flag parsing. If this is true all flags will be passed to the command as arguments. - DisableFlagParsing bool - - // TraverseChildren parses flags on all parents before executing child command - TraverseChildren bool -} - -// os.Args[1:] by default, if desired, can be overridden -// particularly useful when testing. 
-func (c *Command) SetArgs(a []string) { - c.args = a -} - -func (c *Command) getOut(def io.Writer) io.Writer { - if c.output != nil { - return *c.output - } - - if c.HasParent() { - return c.parent.Out() - } - return def -} - -func (c *Command) Out() io.Writer { - return c.getOut(os.Stderr) -} - -func (c *Command) getOutOrStdout() io.Writer { - return c.getOut(os.Stdout) -} - -// SetOutput sets the destination for usage and error messages. -// If output is nil, os.Stderr is used. -func (c *Command) SetOutput(output io.Writer) { - c.output = &output -} - -// Usage can be defined by application -func (c *Command) SetUsageFunc(f func(*Command) error) { - c.usageFunc = f -} - -// Can be defined by Application -func (c *Command) SetUsageTemplate(s string) { - c.usageTemplate = s -} - -// SetFlagErrorFunc sets a function to generate an error when flag parsing -// fails -func (c *Command) SetFlagErrorFunc(f func(*Command, error) error) { - c.flagErrorFunc = f -} - -// Can be defined by Application -func (c *Command) SetHelpFunc(f func(*Command, []string)) { - c.helpFunc = f -} - -func (c *Command) SetHelpCommand(cmd *Command) { - c.helpCommand = cmd -} - -// Can be defined by Application -func (c *Command) SetHelpTemplate(s string) { - c.helpTemplate = s -} - -// SetGlobalNormalizationFunc sets a normalization function to all flag sets and also to child commands. -// The user should not have a cyclic dependency on commands. -func (c *Command) SetGlobalNormalizationFunc(n func(f *flag.FlagSet, name string) flag.NormalizedName) { - c.Flags().SetNormalizeFunc(n) - c.PersistentFlags().SetNormalizeFunc(n) - c.globNormFunc = n - - for _, command := range c.commands { - command.SetGlobalNormalizationFunc(n) - } -} - -func (c *Command) UsageFunc() (f func(*Command) error) { - if c.usageFunc != nil { - return c.usageFunc - } - - if c.HasParent() { - return c.parent.UsageFunc() - } - return func(c *Command) error { - err := tmpl(c.Out(), c.UsageTemplate(), c) - if err != nil { - fmt.Print(err) - } - return err - } -} - -// HelpFunc returns either the function set by SetHelpFunc for this command -// or a parent, or it returns a function which calls c.Help() -func (c *Command) HelpFunc() func(*Command, []string) { - cmd := c - for cmd != nil { - if cmd.helpFunc != nil { - return cmd.helpFunc - } - cmd = cmd.parent - } - return func(*Command, []string) { - err := c.Help() - if err != nil { - c.Println(err) - } - } -} - -// FlagErrorFunc returns either the function set by SetFlagErrorFunc for this -// command or a parent, or it returns a function which returns the original -// error. 
-func (c *Command) FlagErrorFunc() (f func(*Command, error) error) { - if c.flagErrorFunc != nil { - return c.flagErrorFunc - } - - if c.HasParent() { - return c.parent.FlagErrorFunc() - } - return func(c *Command, err error) error { - return err - } -} - -var minUsagePadding = 25 - -func (c *Command) UsagePadding() int { - if c.parent == nil || minUsagePadding > c.parent.commandsMaxUseLen { - return minUsagePadding - } - return c.parent.commandsMaxUseLen -} - -var minCommandPathPadding = 11 - -// -func (c *Command) CommandPathPadding() int { - if c.parent == nil || minCommandPathPadding > c.parent.commandsMaxCommandPathLen { - return minCommandPathPadding - } - return c.parent.commandsMaxCommandPathLen -} - -var minNamePadding = 11 - -func (c *Command) NamePadding() int { - if c.parent == nil || minNamePadding > c.parent.commandsMaxNameLen { - return minNamePadding - } - return c.parent.commandsMaxNameLen -} - -func (c *Command) UsageTemplate() string { - if c.usageTemplate != "" { - return c.usageTemplate - } - - if c.HasParent() { - return c.parent.UsageTemplate() - } - return `Usage:{{if .Runnable}} - {{if .HasAvailableFlags}}{{appendIfNotPresent .UseLine "[flags]"}}{{else}}{{.UseLine}}{{end}}{{end}}{{if .HasAvailableSubCommands}} - {{ .CommandPath}} [command]{{end}}{{if gt .Aliases 0}} - -Aliases: - {{.NameAndAliases}} -{{end}}{{if .HasExample}} - -Examples: -{{ .Example }}{{end}}{{ if .HasAvailableSubCommands}} - -Available Commands:{{range .Commands}}{{if .IsAvailableCommand}} - {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{ if .HasAvailableLocalFlags}} - -Flags: -{{.LocalFlags.FlagUsages | trimRightSpace}}{{end}}{{ if .HasAvailableInheritedFlags}} - -Global Flags: -{{.InheritedFlags.FlagUsages | trimRightSpace}}{{end}}{{if .HasHelpSubCommands}} - -Additional help topics:{{range .Commands}}{{if .IsHelpCommand}} - {{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}}{{ if .HasAvailableSubCommands }} - -Use "{{.CommandPath}} [command] --help" for more information about a command.{{end}} -` -} - -func (c *Command) HelpTemplate() string { - if c.helpTemplate != "" { - return c.helpTemplate - } - - if c.HasParent() { - return c.parent.HelpTemplate() - } - return `{{with or .Long .Short }}{{. | trim}} - -{{end}}{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}` -} - -// Really only used when casting a command to a commander -func (c *Command) resetChildrensParents() { - for _, x := range c.commands { - x.parent = c - } -} - -// Test if the named flag is a boolean flag. -func isBooleanFlag(name string, f *flag.FlagSet) bool { - flag := f.Lookup(name) - if flag == nil { - return false - } - return flag.Value.Type() == "bool" -} - -// Test if the named flag is a boolean flag. 
-func isBooleanShortFlag(name string, f *flag.FlagSet) bool { - result := false - f.VisitAll(func(f *flag.Flag) { - if f.Shorthand == name && f.Value.Type() == "bool" { - result = true - } - }) - return result -} - -func stripFlags(args []string, c *Command) []string { - if len(args) < 1 { - return args - } - c.mergePersistentFlags() - - commands := []string{} - - inQuote := false - inFlag := false - for _, y := range args { - if !inQuote { - switch { - case strings.HasPrefix(y, "\""): - inQuote = true - case strings.Contains(y, "=\""): - inQuote = true - case strings.HasPrefix(y, "--") && !strings.Contains(y, "="): - // TODO: this isn't quite right, we should really check ahead for 'true' or 'false' - inFlag = !isBooleanFlag(y[2:], c.Flags()) - case strings.HasPrefix(y, "-") && !strings.Contains(y, "=") && len(y) == 2 && !isBooleanShortFlag(y[1:], c.Flags()): - inFlag = true - case inFlag: - inFlag = false - case y == "": - // strip empty commands, as the go tests expect this to be ok.... - case !strings.HasPrefix(y, "-"): - commands = append(commands, y) - inFlag = false - } - } - - if strings.HasSuffix(y, "\"") && !strings.HasSuffix(y, "\\\"") { - inQuote = false - } - } - - return commands -} - -// argsMinusFirstX removes only the first x from args. Otherwise, commands that look like -// openshift admin policy add-role-to-user admin my-user, lose the admin argument (arg[4]). -func argsMinusFirstX(args []string, x string) []string { - for i, y := range args { - if x == y { - ret := []string{} - ret = append(ret, args[:i]...) - ret = append(ret, args[i+1:]...) - return ret - } - } - return args -} - -func isFlagArg(arg string) bool { - return ((len(arg) >= 3 && arg[1] == '-') || - (len(arg) >= 2 && arg[0] == '-' && arg[1] != '-')) -} - -// Find the target command given the args and command tree -// Meant to be run on the highest node. Only searches down. -func (c *Command) Find(args []string) (*Command, []string, error) { - var innerfind func(*Command, []string) (*Command, []string) - - innerfind = func(c *Command, innerArgs []string) (*Command, []string) { - argsWOflags := stripFlags(innerArgs, c) - if len(argsWOflags) == 0 { - return c, innerArgs - } - nextSubCmd := argsWOflags[0] - - cmd := c.findNext(nextSubCmd) - if cmd != nil { - return innerfind(cmd, argsMinusFirstX(innerArgs, nextSubCmd)) - } - return c, innerArgs - } - - commandFound, a := innerfind(c, args) - if commandFound.Args == nil { - return commandFound, a, legacyArgs(commandFound, stripFlags(a, commandFound)) - } - return commandFound, a, nil -} - -func (c *Command) findNext(next string) *Command { - matches := make([]*Command, 0) - for _, cmd := range c.commands { - if cmd.Name() == next || cmd.HasAlias(next) { - return cmd - } - if EnablePrefixMatching && cmd.HasNameOrAliasPrefix(next) { - matches = append(matches, cmd) - } - } - - if len(matches) == 1 { - return matches[0] - } - return nil -} - -// Traverse the command tree to find the command, and parse args for -// each parent. 
-func (c *Command) Traverse(args []string) (*Command, []string, error) { - flags := []string{} - inFlag := false - - for i, arg := range args { - switch { - // A long flag with a space separated value - case strings.HasPrefix(arg, "--") && !strings.Contains(arg, "="): - // TODO: this isn't quite right, we should really check ahead for 'true' or 'false' - inFlag = !isBooleanFlag(arg[2:], c.Flags()) - flags = append(flags, arg) - continue - // A short flag with a space separated value - case strings.HasPrefix(arg, "-") && !strings.Contains(arg, "=") && len(arg) == 2 && !isBooleanShortFlag(arg[1:], c.Flags()): - inFlag = true - flags = append(flags, arg) - continue - // The value for a flag - case inFlag: - inFlag = false - flags = append(flags, arg) - continue - // A flag without a value, or with an `=` separated value - case isFlagArg(arg): - flags = append(flags, arg) - continue - } - - cmd := c.findNext(arg) - if cmd == nil { - return c, args, nil - } - - if err := c.ParseFlags(flags); err != nil { - return nil, args, err - } - return cmd.Traverse(args[i+1:]) - } - return c, args, nil -} - -func (c *Command) findSuggestions(arg string) string { - if c.DisableSuggestions { - return "" - } - if c.SuggestionsMinimumDistance <= 0 { - c.SuggestionsMinimumDistance = 2 - } - suggestionsString := "" - if suggestions := c.SuggestionsFor(arg); len(suggestions) > 0 { - suggestionsString += "\n\nDid you mean this?\n" - for _, s := range suggestions { - suggestionsString += fmt.Sprintf("\t%v\n", s) - } - } - return suggestionsString -} - -func (c *Command) SuggestionsFor(typedName string) []string { - suggestions := []string{} - for _, cmd := range c.commands { - if cmd.IsAvailableCommand() { - levenshteinDistance := ld(typedName, cmd.Name(), true) - suggestByLevenshtein := levenshteinDistance <= c.SuggestionsMinimumDistance - suggestByPrefix := strings.HasPrefix(strings.ToLower(cmd.Name()), strings.ToLower(typedName)) - if suggestByLevenshtein || suggestByPrefix { - suggestions = append(suggestions, cmd.Name()) - } - for _, explicitSuggestion := range cmd.SuggestFor { - if strings.EqualFold(typedName, explicitSuggestion) { - suggestions = append(suggestions, cmd.Name()) - } - } - } - } - return suggestions -} - -func (c *Command) VisitParents(fn func(*Command)) { - var traverse func(*Command) *Command - - traverse = func(x *Command) *Command { - if x != c { - fn(x) - } - if x.HasParent() { - return traverse(x.parent) - } - return x - } - traverse(c) -} - -func (c *Command) Root() *Command { - var findRoot func(*Command) *Command - - findRoot = func(x *Command) *Command { - if x.HasParent() { - return findRoot(x.parent) - } - return x - } - - return findRoot(c) -} - -// ArgsLenAtDash will return the length of f.Args at the moment when a -- was -// found during arg parsing. This allows your program to know which args were -// before the -- and which came after. (Description from -// https://godoc.org/github.com/spf13/pflag#FlagSet.ArgsLenAtDash). 
-func (c *Command) ArgsLenAtDash() int { - return c.Flags().ArgsLenAtDash() -} - -func (c *Command) execute(a []string) (err error) { - if c == nil { - return fmt.Errorf("Called Execute() on a nil Command") - } - - if len(c.Deprecated) > 0 { - c.Printf("Command %q is deprecated, %s\n", c.Name(), c.Deprecated) - } - - // initialize help flag as the last point possible to allow for user - // overriding - c.initHelpFlag() - - err = c.ParseFlags(a) - if err != nil { - return c.FlagErrorFunc()(c, err) - } - // If help is called, regardless of other flags, return we want help - // Also say we need help if the command isn't runnable. - helpVal, err := c.Flags().GetBool("help") - if err != nil { - // should be impossible to get here as we always declare a help - // flag in initHelpFlag() - c.Println("\"help\" flag declared as non-bool. Please correct your code") - return err - } - if helpVal || !c.Runnable() { - return flag.ErrHelp - } - - c.preRun() - - argWoFlags := c.Flags().Args() - if c.DisableFlagParsing { - argWoFlags = a - } - - if err := c.ValidateArgs(argWoFlags); err != nil { - return err - } - - for p := c; p != nil; p = p.Parent() { - if p.PersistentPreRunE != nil { - if err := p.PersistentPreRunE(c, argWoFlags); err != nil { - return err - } - break - } else if p.PersistentPreRun != nil { - p.PersistentPreRun(c, argWoFlags) - break - } - } - if c.PreRunE != nil { - if err := c.PreRunE(c, argWoFlags); err != nil { - return err - } - } else if c.PreRun != nil { - c.PreRun(c, argWoFlags) - } - - if c.RunE != nil { - if err := c.RunE(c, argWoFlags); err != nil { - return err - } - } else { - c.Run(c, argWoFlags) - } - if c.PostRunE != nil { - if err := c.PostRunE(c, argWoFlags); err != nil { - return err - } - } else if c.PostRun != nil { - c.PostRun(c, argWoFlags) - } - for p := c; p != nil; p = p.Parent() { - if p.PersistentPostRunE != nil { - if err := p.PersistentPostRunE(c, argWoFlags); err != nil { - return err - } - break - } else if p.PersistentPostRun != nil { - p.PersistentPostRun(c, argWoFlags) - break - } - } - - return nil -} - -func (c *Command) preRun() { - for _, x := range initializers { - x() - } -} - -func (c *Command) errorMsgFromParse() string { - s := c.flagErrorBuf.String() - - x := strings.Split(s, "\n") - - if len(x) > 0 { - return x[0] - } - return "" -} - -// Call execute to use the args (os.Args[1:] by default) -// and run through the command tree finding appropriate matches -// for commands and then corresponding flags. 
-func (c *Command) Execute() error { - _, err := c.ExecuteC() - return err -} - -func (c *Command) ExecuteC() (cmd *Command, err error) { - - // Regardless of what command execute is called on, run on Root only - if c.HasParent() { - return c.Root().ExecuteC() - } - - // windows hook - if preExecHookFn != nil { - preExecHookFn(c) - } - - // initialize help as the last point possible to allow for user - // overriding - c.initHelpCmd() - - var args []string - - // Workaround FAIL with "go test -v" or "cobra.test -test.v", see #155 - if c.args == nil && filepath.Base(os.Args[0]) != "cobra.test" { - args = os.Args[1:] - } else { - args = c.args - } - - var flags []string - if c.TraverseChildren { - cmd, flags, err = c.Traverse(args) - } else { - cmd, flags, err = c.Find(args) - } - if err != nil { - // If found parse to a subcommand and then failed, talk about the subcommand - if cmd != nil { - c = cmd - } - if !c.SilenceErrors { - c.Println("Error:", err.Error()) - c.Printf("Run '%v --help' for usage.\n", c.CommandPath()) - } - return c, err - } - - err = cmd.execute(flags) - if err != nil { - // Always show help if requested, even if SilenceErrors is in - // effect - if err == flag.ErrHelp { - cmd.HelpFunc()(cmd, args) - return cmd, nil - } - - // If root command has SilentErrors flagged, - // all subcommands should respect it - if !cmd.SilenceErrors && !c.SilenceErrors { - c.Println("Error:", err.Error()) - } - - // If root command has SilentUsage flagged, - // all subcommands should respect it - if !cmd.SilenceUsage && !c.SilenceUsage { - c.Println(cmd.UsageString()) - } - return cmd, err - } - return cmd, nil -} - -func (c *Command) ValidateArgs(args []string) error { - if c.Args == nil { - return nil - } - return c.Args(c, args) -} - -func (c *Command) initHelpFlag() { - c.mergePersistentFlags() - if c.Flags().Lookup("help") == nil { - c.Flags().BoolP("help", "h", false, "help for "+c.Name()) - } -} - -func (c *Command) initHelpCmd() { - if c.helpCommand == nil { - if !c.HasSubCommands() { - return - } - - c.helpCommand = &Command{ - Use: "help [command]", - Short: "Help about any command", - Long: `Help provides help for any command in the application. - Simply type ` + c.Name() + ` help [path to command] for full details.`, - PersistentPreRun: func(cmd *Command, args []string) {}, - PersistentPostRun: func(cmd *Command, args []string) {}, - - Run: func(c *Command, args []string) { - cmd, _, e := c.Root().Find(args) - if cmd == nil || e != nil { - c.Printf("Unknown help topic %#q.", args) - c.Root().Usage() - } else { - helpFunc := cmd.HelpFunc() - helpFunc(cmd, args) - } - }, - } - } - c.AddCommand(c.helpCommand) -} - -// Used for testing -func (c *Command) ResetCommands() { - c.commands = nil - c.helpCommand = nil -} - -// Sorts commands by their names -type commandSorterByName []*Command - -func (c commandSorterByName) Len() int { return len(c) } -func (c commandSorterByName) Swap(i, j int) { c[i], c[j] = c[j], c[i] } -func (c commandSorterByName) Less(i, j int) bool { return c[i].Name() < c[j].Name() } - -// Commands returns a sorted slice of child commands. -func (c *Command) Commands() []*Command { - // do not sort commands if it already sorted or sorting was disabled - if EnableCommandSorting && !c.commandsAreSorted { - sort.Sort(commandSorterByName(c.commands)) - c.commandsAreSorted = true - } - return c.commands -} - -// AddCommand adds one or more commands to this parent command. 
-func (c *Command) AddCommand(cmds ...*Command) { - for i, x := range cmds { - if cmds[i] == c { - panic("Command can't be a child of itself") - } - cmds[i].parent = c - // update max lengths - usageLen := len(x.Use) - if usageLen > c.commandsMaxUseLen { - c.commandsMaxUseLen = usageLen - } - commandPathLen := len(x.CommandPath()) - if commandPathLen > c.commandsMaxCommandPathLen { - c.commandsMaxCommandPathLen = commandPathLen - } - nameLen := len(x.Name()) - if nameLen > c.commandsMaxNameLen { - c.commandsMaxNameLen = nameLen - } - // If global normalization function exists, update all children - if c.globNormFunc != nil { - x.SetGlobalNormalizationFunc(c.globNormFunc) - } - c.commands = append(c.commands, x) - c.commandsAreSorted = false - } -} - -// RemoveCommand removes one or more commands from a parent command. -func (c *Command) RemoveCommand(cmds ...*Command) { - commands := []*Command{} -main: - for _, command := range c.commands { - for _, cmd := range cmds { - if command == cmd { - command.parent = nil - continue main - } - } - commands = append(commands, command) - } - c.commands = commands - // recompute all lengths - c.commandsMaxUseLen = 0 - c.commandsMaxCommandPathLen = 0 - c.commandsMaxNameLen = 0 - for _, command := range c.commands { - usageLen := len(command.Use) - if usageLen > c.commandsMaxUseLen { - c.commandsMaxUseLen = usageLen - } - commandPathLen := len(command.CommandPath()) - if commandPathLen > c.commandsMaxCommandPathLen { - c.commandsMaxCommandPathLen = commandPathLen - } - nameLen := len(command.Name()) - if nameLen > c.commandsMaxNameLen { - c.commandsMaxNameLen = nameLen - } - } -} - -// Print is a convenience method to Print to the defined output -func (c *Command) Print(i ...interface{}) { - fmt.Fprint(c.Out(), i...) -} - -// Println is a convenience method to Println to the defined output -func (c *Command) Println(i ...interface{}) { - str := fmt.Sprintln(i...) - c.Print(str) -} - -// Printf is a convenience method to Printf to the defined output -func (c *Command) Printf(format string, i ...interface{}) { - str := fmt.Sprintf(format, i...) - c.Print(str) -} - -// Output the usage for the command -// Used when a user provides invalid input -// Can be defined by user by overriding UsageFunc -func (c *Command) Usage() error { - c.mergePersistentFlags() - err := c.UsageFunc()(c) - return err -} - -// Output the help for the command -// Used when a user calls help [command] -// by the default HelpFunc in the commander -func (c *Command) Help() error { - c.mergePersistentFlags() - err := tmpl(c.getOutOrStdout(), c.HelpTemplate(), c) - return err -} - -func (c *Command) UsageString() string { - tmpOutput := c.output - bb := new(bytes.Buffer) - c.SetOutput(bb) - c.Usage() - c.output = tmpOutput - return bb.String() -} - -// CommandPath returns the full path to this command. 
-func (c *Command) CommandPath() string { - str := c.Name() - x := c - for x.HasParent() { - str = x.parent.Name() + " " + str - x = x.parent - } - return str -} - -//The full usage for a given command (including parents) -func (c *Command) UseLine() string { - str := "" - if c.HasParent() { - str = c.parent.CommandPath() + " " - } - return str + c.Use -} - -// For use in determining which flags have been assigned to which commands -// and which persist -func (c *Command) DebugFlags() { - c.Println("DebugFlags called on", c.Name()) - var debugflags func(*Command) - - debugflags = func(x *Command) { - if x.HasFlags() || x.HasPersistentFlags() { - c.Println(x.Name()) - } - if x.HasFlags() { - x.flags.VisitAll(func(f *flag.Flag) { - if x.HasPersistentFlags() { - if x.persistentFlag(f.Name) == nil { - c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [L]") - } else { - c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [LP]") - } - } else { - c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [L]") - } - }) - } - if x.HasPersistentFlags() { - x.pflags.VisitAll(func(f *flag.Flag) { - if x.HasFlags() { - if x.flags.Lookup(f.Name) == nil { - c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [P]") - } - } else { - c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [P]") - } - }) - } - c.Println(x.flagErrorBuf) - if x.HasSubCommands() { - for _, y := range x.commands { - debugflags(y) - } - } - } - - debugflags(c) -} - -// Name returns the command's name: the first word in the use line. -func (c *Command) Name() string { - if c.name != "" { - return c.name - } - name := c.Use - i := strings.Index(name, " ") - if i >= 0 { - name = name[:i] - } - return name -} - -// HasAlias determines if a given string is an alias of the command. 
-func (c *Command) HasAlias(s string) bool { - for _, a := range c.Aliases { - if a == s { - return true - } - } - return false -} - -// HasNameOrAliasPrefix returns true if the Name or any of aliases start -// with prefix -func (c *Command) HasNameOrAliasPrefix(prefix string) bool { - if strings.HasPrefix(c.Name(), prefix) { - return true - } - for _, alias := range c.Aliases { - if strings.HasPrefix(alias, prefix) { - return true - } - } - return false -} - -func (c *Command) NameAndAliases() string { - return strings.Join(append([]string{c.Name()}, c.Aliases...), ", ") -} - -func (c *Command) HasExample() bool { - return len(c.Example) > 0 -} - -// Runnable determines if the command is itself runnable -func (c *Command) Runnable() bool { - return c.Run != nil || c.RunE != nil -} - -// HasSubCommands determines if the command has children commands -func (c *Command) HasSubCommands() bool { - return len(c.commands) > 0 -} - -// IsAvailableCommand determines if a command is available as a non-help command -// (this includes all non deprecated/hidden commands) -func (c *Command) IsAvailableCommand() bool { - if len(c.Deprecated) != 0 || c.Hidden { - return false - } - - if c.HasParent() && c.Parent().helpCommand == c { - return false - } - - if c.Runnable() || c.HasAvailableSubCommands() { - return true - } - - return false -} - -// IsHelpCommand determines if a command is a 'help' command; a help command is -// determined by the fact that it is NOT runnable/hidden/deprecated, and has no -// sub commands that are runnable/hidden/deprecated -func (c *Command) IsHelpCommand() bool { - - // if a command is runnable, deprecated, or hidden it is not a 'help' command - if c.Runnable() || len(c.Deprecated) != 0 || c.Hidden { - return false - } - - // if any non-help sub commands are found, the command is not a 'help' command - for _, sub := range c.commands { - if !sub.IsHelpCommand() { - return false - } - } - - // the command either has no sub commands, or no non-help sub commands - return true -} - -// HasHelpSubCommands determines if a command has any avilable 'help' sub commands -// that need to be shown in the usage/help default template under 'additional help -// topics' -func (c *Command) HasHelpSubCommands() bool { - - // return true on the first found available 'help' sub command - for _, sub := range c.commands { - if sub.IsHelpCommand() { - return true - } - } - - // the command either has no sub commands, or no available 'help' sub commands - return false -} - -// HasAvailableSubCommands determines if a command has available sub commands that -// need to be shown in the usage/help default template under 'available commands' -func (c *Command) HasAvailableSubCommands() bool { - - // return true on the first found available (non deprecated/help/hidden) - // sub command - for _, sub := range c.commands { - if sub.IsAvailableCommand() { - return true - } - } - - // the command either has no sub comamnds, or no available (non deprecated/help/hidden) - // sub commands - return false -} - -// Determine if the command is a child command -func (c *Command) HasParent() bool { - return c.parent != nil -} - -// GlobalNormalizationFunc returns the global normalization function or nil if doesn't exists -func (c *Command) GlobalNormalizationFunc() func(f *flag.FlagSet, name string) flag.NormalizedName { - return c.globNormFunc -} - -// Get the complete FlagSet that applies to this command (local and persistent declared here and by all parents) -func (c *Command) Flags() *flag.FlagSet { - if c.flags 
== nil { - c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) - if c.flagErrorBuf == nil { - c.flagErrorBuf = new(bytes.Buffer) - } - c.flags.SetOutput(c.flagErrorBuf) - } - return c.flags -} - -// LocalNonPersistentFlags are flags specific to this command which will NOT persist to subcommands -func (c *Command) LocalNonPersistentFlags() *flag.FlagSet { - persistentFlags := c.PersistentFlags() - - out := flag.NewFlagSet(c.Name(), flag.ContinueOnError) - c.LocalFlags().VisitAll(func(f *flag.Flag) { - if persistentFlags.Lookup(f.Name) == nil { - out.AddFlag(f) - } - }) - return out -} - -// Get the local FlagSet specifically set in the current command -func (c *Command) LocalFlags() *flag.FlagSet { - c.mergePersistentFlags() - - local := flag.NewFlagSet(c.Name(), flag.ContinueOnError) - c.lflags.VisitAll(func(f *flag.Flag) { - local.AddFlag(f) - }) - if !c.HasParent() { - flag.CommandLine.VisitAll(func(f *flag.Flag) { - if local.Lookup(f.Name) == nil { - local.AddFlag(f) - } - }) - } - return local -} - -// All Flags which were inherited from parents commands -func (c *Command) InheritedFlags() *flag.FlagSet { - c.mergePersistentFlags() - - inherited := flag.NewFlagSet(c.Name(), flag.ContinueOnError) - local := c.LocalFlags() - - var rmerge func(x *Command) - - rmerge = func(x *Command) { - if x.HasPersistentFlags() { - x.PersistentFlags().VisitAll(func(f *flag.Flag) { - if inherited.Lookup(f.Name) == nil && local.Lookup(f.Name) == nil { - inherited.AddFlag(f) - } - }) - } - if x.HasParent() { - rmerge(x.parent) - } - } - - if c.HasParent() { - rmerge(c.parent) - } - - return inherited -} - -// All Flags which were not inherited from parent commands -func (c *Command) NonInheritedFlags() *flag.FlagSet { - return c.LocalFlags() -} - -// Get the Persistent FlagSet specifically set in the current command -func (c *Command) PersistentFlags() *flag.FlagSet { - if c.pflags == nil { - c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) - if c.flagErrorBuf == nil { - c.flagErrorBuf = new(bytes.Buffer) - } - c.pflags.SetOutput(c.flagErrorBuf) - } - return c.pflags -} - -// For use in testing -func (c *Command) ResetFlags() { - c.flagErrorBuf = new(bytes.Buffer) - c.flagErrorBuf.Reset() - c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) - c.flags.SetOutput(c.flagErrorBuf) - c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) - c.pflags.SetOutput(c.flagErrorBuf) -} - -// Does the command contain any flags (local plus persistent from the entire structure) -func (c *Command) HasFlags() bool { - return c.Flags().HasFlags() -} - -// Does the command contain persistent flags -func (c *Command) HasPersistentFlags() bool { - return c.PersistentFlags().HasFlags() -} - -// Does the command has flags specifically declared locally -func (c *Command) HasLocalFlags() bool { - return c.LocalFlags().HasFlags() -} - -// Does the command have flags inherited from its parent command -func (c *Command) HasInheritedFlags() bool { - return c.InheritedFlags().HasFlags() -} - -// Does the command contain any flags (local plus persistent from the entire -// structure) which are not hidden or deprecated -func (c *Command) HasAvailableFlags() bool { - return c.Flags().HasAvailableFlags() -} - -// Does the command contain persistent flags which are not hidden or deprecated -func (c *Command) HasAvailablePersistentFlags() bool { - return c.PersistentFlags().HasAvailableFlags() -} - -// Does the command has flags specifically declared locally which are not hidden -// or deprecated -func (c *Command) 
HasAvailableLocalFlags() bool { - return c.LocalFlags().HasAvailableFlags() -} - -// Does the command have flags inherited from its parent command which are -// not hidden or deprecated -func (c *Command) HasAvailableInheritedFlags() bool { - return c.InheritedFlags().HasAvailableFlags() -} - -// Flag climbs up the command tree looking for matching flag -func (c *Command) Flag(name string) (flag *flag.Flag) { - flag = c.Flags().Lookup(name) - - if flag == nil { - flag = c.persistentFlag(name) - } - - return -} - -// recursively find matching persistent flag -func (c *Command) persistentFlag(name string) (flag *flag.Flag) { - if c.HasPersistentFlags() { - flag = c.PersistentFlags().Lookup(name) - } - - if flag == nil && c.HasParent() { - flag = c.parent.persistentFlag(name) - } - return -} - -// ParseFlags parses persistent flag tree & local flags -func (c *Command) ParseFlags(args []string) (err error) { - if c.DisableFlagParsing { - return nil - } - c.mergePersistentFlags() - err = c.Flags().Parse(args) - return -} - -// Parent returns a commands parent command -func (c *Command) Parent() *Command { - return c.parent -} - -func (c *Command) mergePersistentFlags() { - var rmerge func(x *Command) - - // Save the set of local flags - if c.lflags == nil { - c.lflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) - if c.flagErrorBuf == nil { - c.flagErrorBuf = new(bytes.Buffer) - } - c.lflags.SetOutput(c.flagErrorBuf) - addtolocal := func(f *flag.Flag) { - c.lflags.AddFlag(f) - } - c.Flags().VisitAll(addtolocal) - c.PersistentFlags().VisitAll(addtolocal) - } - rmerge = func(x *Command) { - if !x.HasParent() { - flag.CommandLine.VisitAll(func(f *flag.Flag) { - if x.PersistentFlags().Lookup(f.Name) == nil { - x.PersistentFlags().AddFlag(f) - } - }) - } - if x.HasPersistentFlags() { - x.PersistentFlags().VisitAll(func(f *flag.Flag) { - if c.Flags().Lookup(f.Name) == nil { - c.Flags().AddFlag(f) - } - }) - } - if x.HasParent() { - rmerge(x.parent) - } - } - - rmerge(c) -} diff --git a/vendor/github.com/spf13/cobra/command_notwin.go b/vendor/github.com/spf13/cobra/command_notwin.go deleted file mode 100644 index 6159c1cc1..000000000 --- a/vendor/github.com/spf13/cobra/command_notwin.go +++ /dev/null @@ -1,5 +0,0 @@ -// +build !windows - -package cobra - -var preExecHookFn func(*Command) diff --git a/vendor/github.com/spf13/cobra/command_win.go b/vendor/github.com/spf13/cobra/command_win.go deleted file mode 100644 index 4b0eaa1b6..000000000 --- a/vendor/github.com/spf13/cobra/command_win.go +++ /dev/null @@ -1,26 +0,0 @@ -// +build windows - -package cobra - -import ( - "os" - "time" - - "github.com/inconshreveable/mousetrap" -) - -var preExecHookFn = preExecHook - -// enables an information splash screen on Windows if the CLI is started from explorer.exe. -var MousetrapHelpText string = `This is a command line tool - -You need to open cmd.exe and run it from there. -` - -func preExecHook(c *Command) { - if mousetrap.StartedByExplorer() { - c.Print(MousetrapHelpText) - time.Sleep(5 * time.Second) - os.Exit(1) - } -} diff --git a/vendor/github.com/spf13/pflag/LICENSE b/vendor/github.com/spf13/pflag/LICENSE deleted file mode 100644 index 63ed1cfea..000000000 --- a/vendor/github.com/spf13/pflag/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2012 Alex Ogier. All rights reserved. -Copyright (c) 2012 The Go Authors. All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/spf13/pflag/README.md b/vendor/github.com/spf13/pflag/README.md deleted file mode 100644 index b052414d1..000000000 --- a/vendor/github.com/spf13/pflag/README.md +++ /dev/null @@ -1,296 +0,0 @@ -[![Build Status](https://travis-ci.org/spf13/pflag.svg?branch=master)](https://travis-ci.org/spf13/pflag) -[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/pflag)](https://goreportcard.com/report/github.com/spf13/pflag) -[![GoDoc](https://godoc.org/github.com/spf13/pflag?status.svg)](https://godoc.org/github.com/spf13/pflag) - -## Description - -pflag is a drop-in replacement for Go's flag package, implementing -POSIX/GNU-style --flags. - -pflag is compatible with the [GNU extensions to the POSIX recommendations -for command-line options][1]. For a more precise description, see the -"Command-line flag syntax" section below. - -[1]: http://www.gnu.org/software/libc/manual/html_node/Argument-Syntax.html - -pflag is available under the same style of BSD license as the Go language, -which can be found in the LICENSE file. - -## Installation - -pflag is available using the standard `go get` command. - -Install by running: - - go get github.com/spf13/pflag - -Run tests by running: - - go test github.com/spf13/pflag - -## Usage - -pflag is a drop-in replacement of Go's native flag package. If you import -pflag under the name "flag" then all code should continue to function -with no changes. - -``` go -import flag "github.com/spf13/pflag" -``` - -There is one exception to this: if you directly instantiate the Flag struct -there is one more field "Shorthand" that you will need to set. -Most code never instantiates this struct directly, and instead uses -functions such as String(), BoolVar(), and Var(), and is therefore -unaffected. - -Define flags using flag.String(), Bool(), Int(), etc. - -This declares an integer flag, -flagname, stored in the pointer ip, with type *int. - -``` go -var ip *int = flag.Int("flagname", 1234, "help message for flagname") -``` - -If you like, you can bind the flag to a variable using the Var() functions. 
- -``` go -var flagvar int -func init() { - flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname") -} -``` - -Or you can create custom flags that satisfy the Value interface (with -pointer receivers) and couple them to flag parsing by - -``` go -flag.Var(&flagVal, "name", "help message for flagname") -``` - -For such flags, the default value is just the initial value of the variable. - -After all flags are defined, call - -``` go -flag.Parse() -``` - -to parse the command line into the defined flags. - -Flags may then be used directly. If you're using the flags themselves, -they are all pointers; if you bind to variables, they're values. - -``` go -fmt.Println("ip has value ", *ip) -fmt.Println("flagvar has value ", flagvar) -``` - -There are helpers function to get values later if you have the FlagSet but -it was difficult to keep up with all of the flag pointers in your code. -If you have a pflag.FlagSet with a flag called 'flagname' of type int you -can use GetInt() to get the int value. But notice that 'flagname' must exist -and it must be an int. GetString("flagname") will fail. - -``` go -i, err := flagset.GetInt("flagname") -``` - -After parsing, the arguments after the flag are available as the -slice flag.Args() or individually as flag.Arg(i). -The arguments are indexed from 0 through flag.NArg()-1. - -The pflag package also defines some new functions that are not in flag, -that give one-letter shorthands for flags. You can use these by appending -'P' to the name of any function that defines a flag. - -``` go -var ip = flag.IntP("flagname", "f", 1234, "help message") -var flagvar bool -func init() { - flag.BoolVarP(&flagvar, "boolname", "b", true, "help message") -} -flag.VarP(&flagVal, "varname", "v", "help message") -``` - -Shorthand letters can be used with single dashes on the command line. -Boolean shorthand flags can be combined with other shorthand flags. - -The default set of command-line flags is controlled by -top-level functions. The FlagSet type allows one to define -independent sets of flags, such as to implement subcommands -in a command-line interface. The methods of FlagSet are -analogous to the top-level functions for the command-line -flag set. - -## Setting no option default values for flags - -After you create a flag it is possible to set the pflag.NoOptDefVal for -the given flag. Doing this changes the meaning of the flag slightly. If -a flag has a NoOptDefVal and the flag is set on the command line without -an option the flag will be set to the NoOptDefVal. For example given: - -``` go -var ip = flag.IntP("flagname", "f", 1234, "help message") -flag.Lookup("flagname").NoOptDefVal = "4321" -``` - -Would result in something like - -| Parsed Arguments | Resulting Value | -| ------------- | ------------- | -| --flagname=1357 | ip=1357 | -| --flagname | ip=4321 | -| [nothing] | ip=1234 | - -## Command line flag syntax - -``` ---flag // boolean flags, or flags with no option default values ---flag x // only on flags without a default value ---flag=x -``` - -Unlike the flag package, a single dash before an option means something -different than a double dash. Single dashes signify a series of shorthand -letters for flags. 
All but the last shorthand letter must be boolean flags -or a flag with a default value - -``` -// boolean or flags where the 'no option default value' is set --f --f=true --abc -but --b true is INVALID - -// non-boolean and flags without a 'no option default value' --n 1234 --n=1234 --n1234 - -// mixed --abcs "hello" --absd="hello" --abcs1234 -``` - -Flag parsing stops after the terminator "--". Unlike the flag package, -flags can be interspersed with arguments anywhere on the command line -before this terminator. - -Integer flags accept 1234, 0664, 0x1234 and may be negative. -Boolean flags (in their long form) accept 1, 0, t, f, true, false, -TRUE, FALSE, True, False. -Duration flags accept any input valid for time.ParseDuration. - -## Mutating or "Normalizing" Flag names - -It is possible to set a custom flag name 'normalization function.' It allows flag names to be mutated both when created in the code and when used on the command line to some 'normalized' form. The 'normalized' form is used for comparison. Two examples of using the custom normalization func follow. - -**Example #1**: You want -, _, and . in flags to compare the same. aka --my-flag == --my_flag == --my.flag - -``` go -func wordSepNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName { - from := []string{"-", "_"} - to := "." - for _, sep := range from { - name = strings.Replace(name, sep, to, -1) - } - return pflag.NormalizedName(name) -} - -myFlagSet.SetNormalizeFunc(wordSepNormalizeFunc) -``` - -**Example #2**: You want to alias two flags. aka --old-flag-name == --new-flag-name - -``` go -func aliasNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName { - switch name { - case "old-flag-name": - name = "new-flag-name" - break - } - return pflag.NormalizedName(name) -} - -myFlagSet.SetNormalizeFunc(aliasNormalizeFunc) -``` - -## Deprecating a flag or its shorthand -It is possible to deprecate a flag, or just its shorthand. Deprecating a flag/shorthand hides it from help text and prints a usage message when the deprecated flag/shorthand is used. - -**Example #1**: You want to deprecate a flag named "badflag" as well as inform the users what flag they should use instead. -```go -// deprecate a flag by specifying its name and a usage message -flags.MarkDeprecated("badflag", "please use --good-flag instead") -``` -This hides "badflag" from help text, and prints `Flag --badflag has been deprecated, please use --good-flag instead` when "badflag" is used. - -**Example #2**: You want to keep a flag name "noshorthandflag" but deprecate its shortname "n". -```go -// deprecate a flag shorthand by specifying its flag name and a usage message -flags.MarkShorthandDeprecated("noshorthandflag", "please use --noshorthandflag only") -``` -This hides the shortname "n" from help text, and prints `Flag shorthand -n has been deprecated, please use --noshorthandflag only` when the shorthand "n" is used. - -Note that usage message is essential here, and it should not be empty. - -## Hidden flags -It is possible to mark a flag as hidden, meaning it will still function as normal, however will not show up in usage/help text. - -**Example**: You have a flag named "secretFlag" that you need for internal use only and don't want it showing up in help text, or for its usage text to be available. -```go -// hide a flag by specifying its name -flags.MarkHidden("secretFlag") -``` - -## Disable sorting of flags -`pflag` allows you to disable sorting of flags for help and usage message. 
- -**Example**: -```go -flags.BoolP("verbose", "v", false, "verbose output") -flags.String("coolflag", "yeaah", "it's really cool flag") -flags.Int("usefulflag", 777, "sometimes it's very useful") -flags.SortFlags = false -flags.PrintDefaults() -``` -**Output**: -``` - -v, --verbose verbose output - --coolflag string it's really cool flag (default "yeaah") - --usefulflag int sometimes it's very useful (default 777) -``` - - -## Supporting Go flags when using pflag -In order to support flags defined using Go's `flag` package, they must be added to the `pflag` flagset. This is usually necessary -to support flags defined by third-party dependencies (e.g. `golang/glog`). - -**Example**: You want to add the Go flags to the `CommandLine` flagset -```go -import ( - goflag "flag" - flag "github.com/spf13/pflag" -) - -var ip *int = flag.Int("flagname", 1234, "help message for flagname") - -func main() { - flag.CommandLine.AddGoFlagSet(goflag.CommandLine) - flag.Parse() -} -``` - -## More info - -You can see the full reference documentation of the pflag package -[at godoc.org][3], or through go's standard documentation system by -running `godoc -http=:6060` and browsing to -[http://localhost:6060/pkg/github.com/spf13/pflag][2] after -installation. - -[2]: http://localhost:6060/pkg/github.com/spf13/pflag -[3]: http://godoc.org/github.com/spf13/pflag diff --git a/vendor/github.com/spf13/pflag/bool.go b/vendor/github.com/spf13/pflag/bool.go deleted file mode 100644 index c4c5c0bfd..000000000 --- a/vendor/github.com/spf13/pflag/bool.go +++ /dev/null @@ -1,94 +0,0 @@ -package pflag - -import "strconv" - -// optional interface to indicate boolean flags that can be -// supplied without "=value" text -type boolFlag interface { - Value - IsBoolFlag() bool -} - -// -- bool Value -type boolValue bool - -func newBoolValue(val bool, p *bool) *boolValue { - *p = val - return (*boolValue)(p) -} - -func (b *boolValue) Set(s string) error { - v, err := strconv.ParseBool(s) - *b = boolValue(v) - return err -} - -func (b *boolValue) Type() string { - return "bool" -} - -func (b *boolValue) String() string { return strconv.FormatBool(bool(*b)) } - -func (b *boolValue) IsBoolFlag() bool { return true } - -func boolConv(sval string) (interface{}, error) { - return strconv.ParseBool(sval) -} - -// GetBool return the bool value of a flag with the given name -func (f *FlagSet) GetBool(name string) (bool, error) { - val, err := f.getFlagType(name, "bool", boolConv) - if err != nil { - return false, err - } - return val.(bool), nil -} - -// BoolVar defines a bool flag with specified name, default value, and usage string. -// The argument p points to a bool variable in which to store the value of the flag. -func (f *FlagSet) BoolVar(p *bool, name string, value bool, usage string) { - f.BoolVarP(p, name, "", value, usage) -} - -// BoolVarP is like BoolVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) BoolVarP(p *bool, name, shorthand string, value bool, usage string) { - flag := f.VarPF(newBoolValue(value, p), name, shorthand, usage) - flag.NoOptDefVal = "true" -} - -// BoolVar defines a bool flag with specified name, default value, and usage string. -// The argument p points to a bool variable in which to store the value of the flag. -func BoolVar(p *bool, name string, value bool, usage string) { - BoolVarP(p, name, "", value, usage) -} - -// BoolVarP is like BoolVar, but accepts a shorthand letter that can be used after a single dash. 
-func BoolVarP(p *bool, name, shorthand string, value bool, usage string) { - flag := CommandLine.VarPF(newBoolValue(value, p), name, shorthand, usage) - flag.NoOptDefVal = "true" -} - -// Bool defines a bool flag with specified name, default value, and usage string. -// The return value is the address of a bool variable that stores the value of the flag. -func (f *FlagSet) Bool(name string, value bool, usage string) *bool { - return f.BoolP(name, "", value, usage) -} - -// BoolP is like Bool, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) BoolP(name, shorthand string, value bool, usage string) *bool { - p := new(bool) - f.BoolVarP(p, name, shorthand, value, usage) - return p -} - -// Bool defines a bool flag with specified name, default value, and usage string. -// The return value is the address of a bool variable that stores the value of the flag. -func Bool(name string, value bool, usage string) *bool { - return BoolP(name, "", value, usage) -} - -// BoolP is like Bool, but accepts a shorthand letter that can be used after a single dash. -func BoolP(name, shorthand string, value bool, usage string) *bool { - b := CommandLine.BoolP(name, shorthand, value, usage) - return b -} diff --git a/vendor/github.com/spf13/pflag/bool_slice.go b/vendor/github.com/spf13/pflag/bool_slice.go deleted file mode 100644 index 5af02f1a7..000000000 --- a/vendor/github.com/spf13/pflag/bool_slice.go +++ /dev/null @@ -1,147 +0,0 @@ -package pflag - -import ( - "io" - "strconv" - "strings" -) - -// -- boolSlice Value -type boolSliceValue struct { - value *[]bool - changed bool -} - -func newBoolSliceValue(val []bool, p *[]bool) *boolSliceValue { - bsv := new(boolSliceValue) - bsv.value = p - *bsv.value = val - return bsv -} - -// Set converts, and assigns, the comma-separated boolean argument string representation as the []bool value of this flag. -// If Set is called on a flag that already has a []bool assigned, the newly converted values will be appended. -func (s *boolSliceValue) Set(val string) error { - - // remove all quote characters - rmQuote := strings.NewReplacer(`"`, "", `'`, "", "`", "") - - // read flag arguments with CSV parser - boolStrSlice, err := readAsCSV(rmQuote.Replace(val)) - if err != nil && err != io.EOF { - return err - } - - // parse boolean values into slice - out := make([]bool, 0, len(boolStrSlice)) - for _, boolStr := range boolStrSlice { - b, err := strconv.ParseBool(strings.TrimSpace(boolStr)) - if err != nil { - return err - } - out = append(out, b) - } - - if !s.changed { - *s.value = out - } else { - *s.value = append(*s.value, out...) - } - - s.changed = true - - return nil -} - -// Type returns a string that uniquely represents this flag's type. -func (s *boolSliceValue) Type() string { - return "boolSlice" -} - -// String defines a "native" format for this boolean slice flag value. 
-func (s *boolSliceValue) String() string { - - boolStrSlice := make([]string, len(*s.value)) - for i, b := range *s.value { - boolStrSlice[i] = strconv.FormatBool(b) - } - - out, _ := writeAsCSV(boolStrSlice) - - return "[" + out + "]" -} - -func boolSliceConv(val string) (interface{}, error) { - val = strings.Trim(val, "[]") - // Empty string would cause a slice with one (empty) entry - if len(val) == 0 { - return []bool{}, nil - } - ss := strings.Split(val, ",") - out := make([]bool, len(ss)) - for i, t := range ss { - var err error - out[i], err = strconv.ParseBool(t) - if err != nil { - return nil, err - } - } - return out, nil -} - -// GetBoolSlice returns the []bool value of a flag with the given name. -func (f *FlagSet) GetBoolSlice(name string) ([]bool, error) { - val, err := f.getFlagType(name, "boolSlice", boolSliceConv) - if err != nil { - return []bool{}, err - } - return val.([]bool), nil -} - -// BoolSliceVar defines a boolSlice flag with specified name, default value, and usage string. -// The argument p points to a []bool variable in which to store the value of the flag. -func (f *FlagSet) BoolSliceVar(p *[]bool, name string, value []bool, usage string) { - f.VarP(newBoolSliceValue(value, p), name, "", usage) -} - -// BoolSliceVarP is like BoolSliceVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) BoolSliceVarP(p *[]bool, name, shorthand string, value []bool, usage string) { - f.VarP(newBoolSliceValue(value, p), name, shorthand, usage) -} - -// BoolSliceVar defines a []bool flag with specified name, default value, and usage string. -// The argument p points to a []bool variable in which to store the value of the flag. -func BoolSliceVar(p *[]bool, name string, value []bool, usage string) { - CommandLine.VarP(newBoolSliceValue(value, p), name, "", usage) -} - -// BoolSliceVarP is like BoolSliceVar, but accepts a shorthand letter that can be used after a single dash. -func BoolSliceVarP(p *[]bool, name, shorthand string, value []bool, usage string) { - CommandLine.VarP(newBoolSliceValue(value, p), name, shorthand, usage) -} - -// BoolSlice defines a []bool flag with specified name, default value, and usage string. -// The return value is the address of a []bool variable that stores the value of the flag. -func (f *FlagSet) BoolSlice(name string, value []bool, usage string) *[]bool { - p := []bool{} - f.BoolSliceVarP(&p, name, "", value, usage) - return &p -} - -// BoolSliceP is like BoolSlice, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) BoolSliceP(name, shorthand string, value []bool, usage string) *[]bool { - p := []bool{} - f.BoolSliceVarP(&p, name, shorthand, value, usage) - return &p -} - -// BoolSlice defines a []bool flag with specified name, default value, and usage string. -// The return value is the address of a []bool variable that stores the value of the flag. -func BoolSlice(name string, value []bool, usage string) *[]bool { - return CommandLine.BoolSliceP(name, "", value, usage) -} - -// BoolSliceP is like BoolSlice, but accepts a shorthand letter that can be used after a single dash. 
-func BoolSliceP(name, shorthand string, value []bool, usage string) *[]bool { - return CommandLine.BoolSliceP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/count.go b/vendor/github.com/spf13/pflag/count.go deleted file mode 100644 index 250a43814..000000000 --- a/vendor/github.com/spf13/pflag/count.go +++ /dev/null @@ -1,96 +0,0 @@ -package pflag - -import "strconv" - -// -- count Value -type countValue int - -func newCountValue(val int, p *int) *countValue { - *p = val - return (*countValue)(p) -} - -func (i *countValue) Set(s string) error { - v, err := strconv.ParseInt(s, 0, 64) - // -1 means that no specific value was passed, so increment - if v == -1 { - *i = countValue(*i + 1) - } else { - *i = countValue(v) - } - return err -} - -func (i *countValue) Type() string { - return "count" -} - -func (i *countValue) String() string { return strconv.Itoa(int(*i)) } - -func countConv(sval string) (interface{}, error) { - i, err := strconv.Atoi(sval) - if err != nil { - return nil, err - } - return i, nil -} - -// GetCount return the int value of a flag with the given name -func (f *FlagSet) GetCount(name string) (int, error) { - val, err := f.getFlagType(name, "count", countConv) - if err != nil { - return 0, err - } - return val.(int), nil -} - -// CountVar defines a count flag with specified name, default value, and usage string. -// The argument p points to an int variable in which to store the value of the flag. -// A count flag will add 1 to its value evey time it is found on the command line -func (f *FlagSet) CountVar(p *int, name string, usage string) { - f.CountVarP(p, name, "", usage) -} - -// CountVarP is like CountVar only take a shorthand for the flag name. -func (f *FlagSet) CountVarP(p *int, name, shorthand string, usage string) { - flag := f.VarPF(newCountValue(0, p), name, shorthand, usage) - flag.NoOptDefVal = "-1" -} - -// CountVar like CountVar only the flag is placed on the CommandLine instead of a given flag set -func CountVar(p *int, name string, usage string) { - CommandLine.CountVar(p, name, usage) -} - -// CountVarP is like CountVar only take a shorthand for the flag name. -func CountVarP(p *int, name, shorthand string, usage string) { - CommandLine.CountVarP(p, name, shorthand, usage) -} - -// Count defines a count flag with specified name, default value, and usage string. -// The return value is the address of an int variable that stores the value of the flag. -// A count flag will add 1 to its value evey time it is found on the command line -func (f *FlagSet) Count(name string, usage string) *int { - p := new(int) - f.CountVarP(p, name, "", usage) - return p -} - -// CountP is like Count only takes a shorthand for the flag name. -func (f *FlagSet) CountP(name, shorthand string, usage string) *int { - p := new(int) - f.CountVarP(p, name, shorthand, usage) - return p -} - -// Count defines a count flag with specified name, default value, and usage string. -// The return value is the address of an int variable that stores the value of the flag. -// A count flag will add 1 to its value evey time it is found on the command line -func Count(name string, usage string) *int { - return CommandLine.CountP(name, "", usage) -} - -// CountP is like Count only takes a shorthand for the flag name. 
-func CountP(name, shorthand string, usage string) *int { - return CommandLine.CountP(name, shorthand, usage) -} diff --git a/vendor/github.com/spf13/pflag/duration.go b/vendor/github.com/spf13/pflag/duration.go deleted file mode 100644 index e9debef88..000000000 --- a/vendor/github.com/spf13/pflag/duration.go +++ /dev/null @@ -1,86 +0,0 @@ -package pflag - -import ( - "time" -) - -// -- time.Duration Value -type durationValue time.Duration - -func newDurationValue(val time.Duration, p *time.Duration) *durationValue { - *p = val - return (*durationValue)(p) -} - -func (d *durationValue) Set(s string) error { - v, err := time.ParseDuration(s) - *d = durationValue(v) - return err -} - -func (d *durationValue) Type() string { - return "duration" -} - -func (d *durationValue) String() string { return (*time.Duration)(d).String() } - -func durationConv(sval string) (interface{}, error) { - return time.ParseDuration(sval) -} - -// GetDuration return the duration value of a flag with the given name -func (f *FlagSet) GetDuration(name string) (time.Duration, error) { - val, err := f.getFlagType(name, "duration", durationConv) - if err != nil { - return 0, err - } - return val.(time.Duration), nil -} - -// DurationVar defines a time.Duration flag with specified name, default value, and usage string. -// The argument p points to a time.Duration variable in which to store the value of the flag. -func (f *FlagSet) DurationVar(p *time.Duration, name string, value time.Duration, usage string) { - f.VarP(newDurationValue(value, p), name, "", usage) -} - -// DurationVarP is like DurationVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) DurationVarP(p *time.Duration, name, shorthand string, value time.Duration, usage string) { - f.VarP(newDurationValue(value, p), name, shorthand, usage) -} - -// DurationVar defines a time.Duration flag with specified name, default value, and usage string. -// The argument p points to a time.Duration variable in which to store the value of the flag. -func DurationVar(p *time.Duration, name string, value time.Duration, usage string) { - CommandLine.VarP(newDurationValue(value, p), name, "", usage) -} - -// DurationVarP is like DurationVar, but accepts a shorthand letter that can be used after a single dash. -func DurationVarP(p *time.Duration, name, shorthand string, value time.Duration, usage string) { - CommandLine.VarP(newDurationValue(value, p), name, shorthand, usage) -} - -// Duration defines a time.Duration flag with specified name, default value, and usage string. -// The return value is the address of a time.Duration variable that stores the value of the flag. -func (f *FlagSet) Duration(name string, value time.Duration, usage string) *time.Duration { - p := new(time.Duration) - f.DurationVarP(p, name, "", value, usage) - return p -} - -// DurationP is like Duration, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) DurationP(name, shorthand string, value time.Duration, usage string) *time.Duration { - p := new(time.Duration) - f.DurationVarP(p, name, shorthand, value, usage) - return p -} - -// Duration defines a time.Duration flag with specified name, default value, and usage string. -// The return value is the address of a time.Duration variable that stores the value of the flag. 
-func Duration(name string, value time.Duration, usage string) *time.Duration { - return CommandLine.DurationP(name, "", value, usage) -} - -// DurationP is like Duration, but accepts a shorthand letter that can be used after a single dash. -func DurationP(name, shorthand string, value time.Duration, usage string) *time.Duration { - return CommandLine.DurationP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/flag.go b/vendor/github.com/spf13/pflag/flag.go deleted file mode 100644 index f9984fbb1..000000000 --- a/vendor/github.com/spf13/pflag/flag.go +++ /dev/null @@ -1,1107 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package pflag is a drop-in replacement for Go's flag package, implementing -POSIX/GNU-style --flags. - -pflag is compatible with the GNU extensions to the POSIX recommendations -for command-line options. See -http://www.gnu.org/software/libc/manual/html_node/Argument-Syntax.html - -Usage: - -pflag is a drop-in replacement of Go's native flag package. If you import -pflag under the name "flag" then all code should continue to function -with no changes. - - import flag "github.com/spf13/pflag" - -There is one exception to this: if you directly instantiate the Flag struct -there is one more field "Shorthand" that you will need to set. -Most code never instantiates this struct directly, and instead uses -functions such as String(), BoolVar(), and Var(), and is therefore -unaffected. - -Define flags using flag.String(), Bool(), Int(), etc. - -This declares an integer flag, -flagname, stored in the pointer ip, with type *int. - var ip = flag.Int("flagname", 1234, "help message for flagname") -If you like, you can bind the flag to a variable using the Var() functions. - var flagvar int - func init() { - flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname") - } -Or you can create custom flags that satisfy the Value interface (with -pointer receivers) and couple them to flag parsing by - flag.Var(&flagVal, "name", "help message for flagname") -For such flags, the default value is just the initial value of the variable. - -After all flags are defined, call - flag.Parse() -to parse the command line into the defined flags. - -Flags may then be used directly. If you're using the flags themselves, -they are all pointers; if you bind to variables, they're values. - fmt.Println("ip has value ", *ip) - fmt.Println("flagvar has value ", flagvar) - -After parsing, the arguments after the flag are available as the -slice flag.Args() or individually as flag.Arg(i). -The arguments are indexed from 0 through flag.NArg()-1. - -The pflag package also defines some new functions that are not in flag, -that give one-letter shorthands for flags. You can use these by appending -'P' to the name of any function that defines a flag. - var ip = flag.IntP("flagname", "f", 1234, "help message") - var flagvar bool - func init() { - flag.BoolVarP("boolname", "b", true, "help message") - } - flag.VarP(&flagVar, "varname", "v", 1234, "help message") -Shorthand letters can be used with single dashes on the command line. -Boolean shorthand flags can be combined with other shorthand flags. - -Command line flag syntax: - --flag // boolean flags only - --flag=x - -Unlike the flag package, a single dash before an option means something -different than a double dash. Single dashes signify a series of shorthand -letters for flags. 
All but the last shorthand letter must be boolean flags. - // boolean flags - -f - -abc - // non-boolean flags - -n 1234 - -Ifile - // mixed - -abcs "hello" - -abcn1234 - -Flag parsing stops after the terminator "--". Unlike the flag package, -flags can be interspersed with arguments anywhere on the command line -before this terminator. - -Integer flags accept 1234, 0664, 0x1234 and may be negative. -Boolean flags (in their long form) accept 1, 0, t, f, true, false, -TRUE, FALSE, True, False. -Duration flags accept any input valid for time.ParseDuration. - -The default set of command-line flags is controlled by -top-level functions. The FlagSet type allows one to define -independent sets of flags, such as to implement subcommands -in a command-line interface. The methods of FlagSet are -analogous to the top-level functions for the command-line -flag set. -*/ -package pflag - -import ( - "bytes" - "errors" - "fmt" - "io" - "os" - "sort" - "strings" -) - -// ErrHelp is the error returned if the flag -help is invoked but no such flag is defined. -var ErrHelp = errors.New("pflag: help requested") - -// ErrorHandling defines how to handle flag parsing errors. -type ErrorHandling int - -const ( - // ContinueOnError will return an err from Parse() if an error is found - ContinueOnError ErrorHandling = iota - // ExitOnError will call os.Exit(2) if an error is found when parsing - ExitOnError - // PanicOnError will panic() if an error is found when parsing flags - PanicOnError -) - -// NormalizedName is a flag name that has been normalized according to rules -// for the FlagSet (e.g. making '-' and '_' equivalent). -type NormalizedName string - -// A FlagSet represents a set of defined flags. -type FlagSet struct { - // Usage is the function called when an error occurs while parsing flags. - // The field is a function (not a method) that may be changed to point to - // a custom error handler. - Usage func() - - // SortFlags is used to indicate, if user wants to have sorted flags in - // help/usage messages. - SortFlags bool - - name string - parsed bool - actual map[NormalizedName]*Flag - orderedActual []*Flag - sortedActual []*Flag - formal map[NormalizedName]*Flag - orderedFormal []*Flag - sortedFormal []*Flag - shorthands map[byte]*Flag - args []string // arguments after flags - argsLenAtDash int // len(args) when a '--' was located when parsing, or -1 if no -- - errorHandling ErrorHandling - output io.Writer // nil means stderr; use out() accessor - interspersed bool // allow interspersed option/non-option args - normalizeNameFunc func(f *FlagSet, name string) NormalizedName -} - -// A Flag represents the state of a flag. -type Flag struct { - Name string // name as it appears on command line - Shorthand string // one-letter abbreviated flag - Usage string // help message - Value Value // value as set - DefValue string // default value (as text); for usage message - Changed bool // If the user set the value (or if left to default) - NoOptDefVal string // default value (as text); if the flag is on the command line without any options - Deprecated string // If this flag is deprecated, this string is the new or now thing to use - Hidden bool // used by cobra.Command to allow flags to be hidden from help/usage text - ShorthandDeprecated string // If the shorthand of this flag is deprecated, this string is the new or now thing to use - Annotations map[string][]string // used by cobra.Command bash autocomple code -} - -// Value is the interface to the dynamic value stored in a flag. 
-// (The default value is represented as a string.) -type Value interface { - String() string - Set(string) error - Type() string -} - -// sortFlags returns the flags as a slice in lexicographical sorted order. -func sortFlags(flags map[NormalizedName]*Flag) []*Flag { - list := make(sort.StringSlice, len(flags)) - i := 0 - for k := range flags { - list[i] = string(k) - i++ - } - list.Sort() - result := make([]*Flag, len(list)) - for i, name := range list { - result[i] = flags[NormalizedName(name)] - } - return result -} - -// SetNormalizeFunc allows you to add a function which can translate flag names. -// Flags added to the FlagSet will be translated and then when anything tries to -// look up the flag that will also be translated. So it would be possible to create -// a flag named "getURL" and have it translated to "geturl". A user could then pass -// "--getUrl" which may also be translated to "geturl" and everything will work. -func (f *FlagSet) SetNormalizeFunc(n func(f *FlagSet, name string) NormalizedName) { - f.normalizeNameFunc = n - f.sortedFormal = f.sortedFormal[:0] - for k, v := range f.orderedFormal { - delete(f.formal, NormalizedName(v.Name)) - nname := f.normalizeFlagName(v.Name) - v.Name = string(nname) - f.formal[nname] = v - f.orderedFormal[k] = v - } -} - -// GetNormalizeFunc returns the previously set NormalizeFunc of a function which -// does no translation, if not set previously. -func (f *FlagSet) GetNormalizeFunc() func(f *FlagSet, name string) NormalizedName { - if f.normalizeNameFunc != nil { - return f.normalizeNameFunc - } - return func(f *FlagSet, name string) NormalizedName { return NormalizedName(name) } -} - -func (f *FlagSet) normalizeFlagName(name string) NormalizedName { - n := f.GetNormalizeFunc() - return n(f, name) -} - -func (f *FlagSet) out() io.Writer { - if f.output == nil { - return os.Stderr - } - return f.output -} - -// SetOutput sets the destination for usage and error messages. -// If output is nil, os.Stderr is used. -func (f *FlagSet) SetOutput(output io.Writer) { - f.output = output -} - -// VisitAll visits the flags in lexicographical order or -// in primordial order if f.SortFlags is false, calling fn for each. -// It visits all flags, even those not set. -func (f *FlagSet) VisitAll(fn func(*Flag)) { - if len(f.formal) == 0 { - return - } - - var flags []*Flag - if f.SortFlags { - if len(f.formal) != len(f.sortedFormal) { - f.sortedFormal = sortFlags(f.formal) - } - flags = f.sortedFormal - } else { - flags = f.orderedFormal - } - - for _, flag := range flags { - fn(flag) - } -} - -// HasFlags returns a bool to indicate if the FlagSet has any flags definied. -func (f *FlagSet) HasFlags() bool { - return len(f.formal) > 0 -} - -// HasAvailableFlags returns a bool to indicate if the FlagSet has any flags -// definied that are not hidden or deprecated. -func (f *FlagSet) HasAvailableFlags() bool { - for _, flag := range f.formal { - if !flag.Hidden && len(flag.Deprecated) == 0 { - return true - } - } - return false -} - -// VisitAll visits the command-line flags in lexicographical order or -// in primordial order if f.SortFlags is false, calling fn for each. -// It visits all flags, even those not set. -func VisitAll(fn func(*Flag)) { - CommandLine.VisitAll(fn) -} - -// Visit visits the flags in lexicographical order or -// in primordial order if f.SortFlags is false, calling fn for each. -// It visits only those flags that have been set. 
-func (f *FlagSet) Visit(fn func(*Flag)) { - if len(f.actual) == 0 { - return - } - - var flags []*Flag - if f.SortFlags { - if len(f.actual) != len(f.sortedActual) { - f.sortedActual = sortFlags(f.actual) - } - flags = f.sortedActual - } else { - flags = f.orderedActual - } - - for _, flag := range flags { - fn(flag) - } -} - -// Visit visits the command-line flags in lexicographical order or -// in primordial order if f.SortFlags is false, calling fn for each. -// It visits only those flags that have been set. -func Visit(fn func(*Flag)) { - CommandLine.Visit(fn) -} - -// Lookup returns the Flag structure of the named flag, returning nil if none exists. -func (f *FlagSet) Lookup(name string) *Flag { - return f.lookup(f.normalizeFlagName(name)) -} - -// lookup returns the Flag structure of the named flag, returning nil if none exists. -func (f *FlagSet) lookup(name NormalizedName) *Flag { - return f.formal[name] -} - -// func to return a given type for a given flag name -func (f *FlagSet) getFlagType(name string, ftype string, convFunc func(sval string) (interface{}, error)) (interface{}, error) { - flag := f.Lookup(name) - if flag == nil { - err := fmt.Errorf("flag accessed but not defined: %s", name) - return nil, err - } - - if flag.Value.Type() != ftype { - err := fmt.Errorf("trying to get %s value of flag of type %s", ftype, flag.Value.Type()) - return nil, err - } - - sval := flag.Value.String() - result, err := convFunc(sval) - if err != nil { - return nil, err - } - return result, nil -} - -// ArgsLenAtDash will return the length of f.Args at the moment when a -- was -// found during arg parsing. This allows your program to know which args were -// before the -- and which came after. -func (f *FlagSet) ArgsLenAtDash() int { - return f.argsLenAtDash -} - -// MarkDeprecated indicated that a flag is deprecated in your program. It will -// continue to function but will not show up in help or usage messages. Using -// this flag will also print the given usageMessage. -func (f *FlagSet) MarkDeprecated(name string, usageMessage string) error { - flag := f.Lookup(name) - if flag == nil { - return fmt.Errorf("flag %q does not exist", name) - } - if usageMessage == "" { - return fmt.Errorf("deprecated message for flag %q must be set", name) - } - flag.Deprecated = usageMessage - return nil -} - -// MarkShorthandDeprecated will mark the shorthand of a flag deprecated in your -// program. It will continue to function but will not show up in help or usage -// messages. Using this flag will also print the given usageMessage. -func (f *FlagSet) MarkShorthandDeprecated(name string, usageMessage string) error { - flag := f.Lookup(name) - if flag == nil { - return fmt.Errorf("flag %q does not exist", name) - } - if usageMessage == "" { - return fmt.Errorf("deprecated message for flag %q must be set", name) - } - flag.ShorthandDeprecated = usageMessage - return nil -} - -// MarkHidden sets a flag to 'hidden' in your program. It will continue to -// function but will not show up in help or usage messages. -func (f *FlagSet) MarkHidden(name string) error { - flag := f.Lookup(name) - if flag == nil { - return fmt.Errorf("flag %q does not exist", name) - } - flag.Hidden = true - return nil -} - -// Lookup returns the Flag structure of the named command-line flag, -// returning nil if none exists. -func Lookup(name string) *Flag { - return CommandLine.Lookup(name) -} - -// Set sets the value of the named flag. 
-func (f *FlagSet) Set(name, value string) error {
-	normalName := f.normalizeFlagName(name)
-	flag, ok := f.formal[normalName]
-	if !ok {
-		return fmt.Errorf("no such flag -%v", name)
-	}
-
-	err := flag.Value.Set(value)
-	if err != nil {
-		var flagName string
-		if flag.Shorthand != "" && flag.ShorthandDeprecated == "" {
-			flagName = fmt.Sprintf("-%s, --%s", flag.Shorthand, flag.Name)
-		} else {
-			flagName = fmt.Sprintf("--%s", flag.Name)
-		}
-		return fmt.Errorf("invalid argument %q for %q flag: %v", value, flagName, err)
-	}
-
-	if f.actual == nil {
-		f.actual = make(map[NormalizedName]*Flag)
-	}
-	f.actual[normalName] = flag
-	f.orderedActual = append(f.orderedActual, flag)
-
-	flag.Changed = true
-
-	if flag.Deprecated != "" {
-		fmt.Fprintf(f.out(), "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated)
-	}
-	return nil
-}
-
-// SetAnnotation allows one to set arbitrary annotations on a flag in the FlagSet.
-// This is sometimes used by spf13/cobra programs which want to generate additional
-// bash completion information.
-func (f *FlagSet) SetAnnotation(name, key string, values []string) error {
-	normalName := f.normalizeFlagName(name)
-	flag, ok := f.formal[normalName]
-	if !ok {
-		return fmt.Errorf("no such flag -%v", name)
-	}
-	if flag.Annotations == nil {
-		flag.Annotations = map[string][]string{}
-	}
-	flag.Annotations[key] = values
-	return nil
-}
-
-// Changed returns true if the flag was explicitly set during Parse() and false
-// otherwise
-func (f *FlagSet) Changed(name string) bool {
-	flag := f.Lookup(name)
-	// If a flag doesn't exist, it wasn't changed....
-	if flag == nil {
-		return false
-	}
-	return flag.Changed
-}
-
-// Set sets the value of the named command-line flag.
-func Set(name, value string) error {
-	return CommandLine.Set(name, value)
-}
-
-// PrintDefaults prints, to standard error unless configured
-// otherwise, the default values of all defined flags in the set.
-func (f *FlagSet) PrintDefaults() {
-	usages := f.FlagUsages()
-	fmt.Fprint(f.out(), usages)
-}
-
-// defaultIsZeroValue returns true if the default value for this flag represents
-// a zero value.
-func (f *Flag) defaultIsZeroValue() bool {
-	switch f.Value.(type) {
-	case boolFlag:
-		return f.DefValue == "false"
-	case *durationValue:
-		// Beginning in Go 1.7, duration zero values are "0s"
-		return f.DefValue == "0" || f.DefValue == "0s"
-	case *intValue, *int8Value, *int32Value, *int64Value, *uintValue, *uint8Value, *uint16Value, *uint32Value, *uint64Value, *countValue, *float32Value, *float64Value:
-		return f.DefValue == "0"
-	case *stringValue:
-		return f.DefValue == ""
-	case *ipValue, *ipMaskValue, *ipNetValue:
-		return f.DefValue == ""
-	case *intSliceValue, *stringSliceValue, *stringArrayValue:
-		return f.DefValue == "[]"
-	default:
-		switch f.Value.String() {
-		case "false":
-			return true
-		case "<nil>":
-			return true
-		case "":
-			return true
-		case "0":
-			return true
-		}
-		return false
-	}
-}
-
-// UnquoteUsage extracts a back-quoted name from the usage
-// string for a flag and returns it and the un-quoted usage.
-// Given "a `name` to show" it returns ("name", "a name to show").
-// If there are no back quotes, the name is an educated guess of the
-// type of the flag's value, or the empty string if the flag is boolean.
-func UnquoteUsage(flag *Flag) (name string, usage string) {
-	// Look for a back-quoted name, but avoid the strings package.
- usage = flag.Usage - for i := 0; i < len(usage); i++ { - if usage[i] == '`' { - for j := i + 1; j < len(usage); j++ { - if usage[j] == '`' { - name = usage[i+1 : j] - usage = usage[:i] + name + usage[j+1:] - return name, usage - } - } - break // Only one back quote; use type name. - } - } - - name = flag.Value.Type() - switch name { - case "bool": - name = "" - case "float64": - name = "float" - case "int64": - name = "int" - case "uint64": - name = "uint" - } - - return -} - -// Splits the string `s` on whitespace into an initial substring up to -// `i` runes in length and the remainder. Will go `slop` over `i` if -// that encompasses the entire string (which allows the caller to -// avoid short orphan words on the final line). -func wrapN(i, slop int, s string) (string, string) { - if i+slop > len(s) { - return s, "" - } - - w := strings.LastIndexAny(s[:i], " \t") - if w <= 0 { - return s, "" - } - - return s[:w], s[w+1:] -} - -// Wraps the string `s` to a maximum width `w` with leading indent -// `i`. The first line is not indented (this is assumed to be done by -// caller). Pass `w` == 0 to do no wrapping -func wrap(i, w int, s string) string { - if w == 0 { - return s - } - - // space between indent i and end of line width w into which - // we should wrap the text. - wrap := w - i - - var r, l string - - // Not enough space for sensible wrapping. Wrap as a block on - // the next line instead. - if wrap < 24 { - i = 16 - wrap = w - i - r += "\n" + strings.Repeat(" ", i) - } - // If still not enough space then don't even try to wrap. - if wrap < 24 { - return s - } - - // Try to avoid short orphan words on the final line, by - // allowing wrapN to go a bit over if that would fit in the - // remainder of the line. - slop := 5 - wrap = wrap - slop - - // Handle first line, which is indented by the caller (or the - // special case above) - l, s = wrapN(wrap, slop, s) - r = r + l - - // Now wrap the rest - for s != "" { - var t string - - t, s = wrapN(wrap, slop, s) - r = r + "\n" + strings.Repeat(" ", i) + t - } - - return r - -} - -// FlagUsagesWrapped returns a string containing the usage information -// for all flags in the FlagSet. 
Wrapped to `cols` columns (0 for no -// wrapping) -func (f *FlagSet) FlagUsagesWrapped(cols int) string { - buf := new(bytes.Buffer) - - lines := make([]string, 0, len(f.formal)) - - maxlen := 0 - f.VisitAll(func(flag *Flag) { - if flag.Deprecated != "" || flag.Hidden { - return - } - - line := "" - if flag.Shorthand != "" && flag.ShorthandDeprecated == "" { - line = fmt.Sprintf(" -%s, --%s", flag.Shorthand, flag.Name) - } else { - line = fmt.Sprintf(" --%s", flag.Name) - } - - varname, usage := UnquoteUsage(flag) - if varname != "" { - line += " " + varname - } - if flag.NoOptDefVal != "" { - switch flag.Value.Type() { - case "string": - line += fmt.Sprintf("[=\"%s\"]", flag.NoOptDefVal) - case "bool": - if flag.NoOptDefVal != "true" { - line += fmt.Sprintf("[=%s]", flag.NoOptDefVal) - } - default: - line += fmt.Sprintf("[=%s]", flag.NoOptDefVal) - } - } - - // This special character will be replaced with spacing once the - // correct alignment is calculated - line += "\x00" - if len(line) > maxlen { - maxlen = len(line) - } - - line += usage - if !flag.defaultIsZeroValue() { - if flag.Value.Type() == "string" { - line += fmt.Sprintf(" (default %q)", flag.DefValue) - } else { - line += fmt.Sprintf(" (default %s)", flag.DefValue) - } - } - - lines = append(lines, line) - }) - - for _, line := range lines { - sidx := strings.Index(line, "\x00") - spacing := strings.Repeat(" ", maxlen-sidx) - // maxlen + 2 comes from + 1 for the \x00 and + 1 for the (deliberate) off-by-one in maxlen-sidx - fmt.Fprintln(buf, line[:sidx], spacing, wrap(maxlen+2, cols, line[sidx+1:])) - } - - return buf.String() -} - -// FlagUsages returns a string containing the usage information for all flags in -// the FlagSet -func (f *FlagSet) FlagUsages() string { - return f.FlagUsagesWrapped(0) -} - -// PrintDefaults prints to standard error the default values of all defined command-line flags. -func PrintDefaults() { - CommandLine.PrintDefaults() -} - -// defaultUsage is the default function to print a usage message. -func defaultUsage(f *FlagSet) { - fmt.Fprintf(f.out(), "Usage of %s:\n", f.name) - f.PrintDefaults() -} - -// NOTE: Usage is not just defaultUsage(CommandLine) -// because it serves (via godoc flag Usage) as the example -// for how to write your own usage function. - -// Usage prints to standard error a usage message documenting all defined command-line flags. -// The function is a variable that may be changed to point to a custom function. -// By default it prints a simple header and calls PrintDefaults; for details about the -// format of the output and how to control it, see the documentation for PrintDefaults. -var Usage = func() { - fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0]) - PrintDefaults() -} - -// NFlag returns the number of flags that have been set. -func (f *FlagSet) NFlag() int { return len(f.actual) } - -// NFlag returns the number of command-line flags that have been set. -func NFlag() int { return len(CommandLine.actual) } - -// Arg returns the i'th argument. Arg(0) is the first remaining argument -// after flags have been processed. -func (f *FlagSet) Arg(i int) string { - if i < 0 || i >= len(f.args) { - return "" - } - return f.args[i] -} - -// Arg returns the i'th command-line argument. Arg(0) is the first remaining argument -// after flags have been processed. -func Arg(i int) string { - return CommandLine.Arg(i) -} - -// NArg is the number of arguments remaining after flags have been processed. 
-func (f *FlagSet) NArg() int { return len(f.args) } - -// NArg is the number of arguments remaining after flags have been processed. -func NArg() int { return len(CommandLine.args) } - -// Args returns the non-flag arguments. -func (f *FlagSet) Args() []string { return f.args } - -// Args returns the non-flag command-line arguments. -func Args() []string { return CommandLine.args } - -// Var defines a flag with the specified name and usage string. The type and -// value of the flag are represented by the first argument, of type Value, which -// typically holds a user-defined implementation of Value. For instance, the -// caller could create a flag that turns a comma-separated string into a slice -// of strings by giving the slice the methods of Value; in particular, Set would -// decompose the comma-separated string into the slice. -func (f *FlagSet) Var(value Value, name string, usage string) { - f.VarP(value, name, "", usage) -} - -// VarPF is like VarP, but returns the flag created -func (f *FlagSet) VarPF(value Value, name, shorthand, usage string) *Flag { - // Remember the default value as a string; it won't change. - flag := &Flag{ - Name: name, - Shorthand: shorthand, - Usage: usage, - Value: value, - DefValue: value.String(), - } - f.AddFlag(flag) - return flag -} - -// VarP is like Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) VarP(value Value, name, shorthand, usage string) { - f.VarPF(value, name, shorthand, usage) -} - -// AddFlag will add the flag to the FlagSet -func (f *FlagSet) AddFlag(flag *Flag) { - // Call normalizeFlagName function only once - normalizedFlagName := f.normalizeFlagName(flag.Name) - - _, alreadyThere := f.formal[normalizedFlagName] - if alreadyThere { - msg := fmt.Sprintf("%s flag redefined: %s", f.name, flag.Name) - fmt.Fprintln(f.out(), msg) - panic(msg) // Happens only if flags are declared with identical names - } - if f.formal == nil { - f.formal = make(map[NormalizedName]*Flag) - } - - flag.Name = string(normalizedFlagName) - f.formal[normalizedFlagName] = flag - f.orderedFormal = append(f.orderedFormal, flag) - - if flag.Shorthand == "" { - return - } - if len(flag.Shorthand) > 1 { - msg := fmt.Sprintf("%q shorthand is more than one ASCII character", flag.Shorthand) - fmt.Fprintf(f.out(), msg) - panic(msg) - } - if f.shorthands == nil { - f.shorthands = make(map[byte]*Flag) - } - c := flag.Shorthand[0] - used, alreadyThere := f.shorthands[c] - if alreadyThere { - msg := fmt.Sprintf("unable to redefine %q shorthand in %q flagset: it's already used for %q flag", c, f.name, used.Name) - fmt.Fprintf(f.out(), msg) - panic(msg) - } - f.shorthands[c] = flag -} - -// AddFlagSet adds one FlagSet to another. If a flag is already present in f -// the flag from newSet will be ignored. -func (f *FlagSet) AddFlagSet(newSet *FlagSet) { - if newSet == nil { - return - } - newSet.VisitAll(func(flag *Flag) { - if f.Lookup(flag.Name) == nil { - f.AddFlag(flag) - } - }) -} - -// Var defines a flag with the specified name and usage string. The type and -// value of the flag are represented by the first argument, of type Value, which -// typically holds a user-defined implementation of Value. For instance, the -// caller could create a flag that turns a comma-separated string into a slice -// of strings by giving the slice the methods of Value; in particular, Set would -// decompose the comma-separated string into the slice. 
-func Var(value Value, name string, usage string) { - CommandLine.VarP(value, name, "", usage) -} - -// VarP is like Var, but accepts a shorthand letter that can be used after a single dash. -func VarP(value Value, name, shorthand, usage string) { - CommandLine.VarP(value, name, shorthand, usage) -} - -// failf prints to standard error a formatted error and usage message and -// returns the error. -func (f *FlagSet) failf(format string, a ...interface{}) error { - err := fmt.Errorf(format, a...) - fmt.Fprintln(f.out(), err) - f.usage() - return err -} - -// usage calls the Usage method for the flag set, or the usage function if -// the flag set is CommandLine. -func (f *FlagSet) usage() { - if f == CommandLine { - Usage() - } else if f.Usage == nil { - defaultUsage(f) - } else { - f.Usage() - } -} - -func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []string, err error) { - a = args - name := s[2:] - if len(name) == 0 || name[0] == '-' || name[0] == '=' { - err = f.failf("bad flag syntax: %s", s) - return - } - - split := strings.SplitN(name, "=", 2) - name = split[0] - flag, exists := f.formal[f.normalizeFlagName(name)] - if !exists { - if name == "help" { // special case for nice help message. - f.usage() - return a, ErrHelp - } - err = f.failf("unknown flag: --%s", name) - return - } - - var value string - if len(split) == 2 { - // '--flag=arg' - value = split[1] - } else if flag.NoOptDefVal != "" { - // '--flag' (arg was optional) - value = flag.NoOptDefVal - } else if len(a) > 0 { - // '--flag arg' - value = a[0] - a = a[1:] - } else { - // '--flag' (arg was required) - err = f.failf("flag needs an argument: %s", s) - return - } - - err = fn(flag, value) - return -} - -func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parseFunc) (outShorts string, outArgs []string, err error) { - if strings.HasPrefix(shorthands, "test.") { - return - } - - outArgs = args - outShorts = shorthands[1:] - c := shorthands[0] - - flag, exists := f.shorthands[c] - if !exists { - if c == 'h' { // special case for nice help message. - f.usage() - err = ErrHelp - return - } - err = f.failf("unknown shorthand flag: %q in -%s", c, shorthands) - return - } - - var value string - if len(shorthands) > 2 && shorthands[1] == '=' { - // '-f=arg' - value = shorthands[2:] - outShorts = "" - } else if flag.NoOptDefVal != "" { - // '-f' (arg was optional) - value = flag.NoOptDefVal - } else if len(shorthands) > 1 { - // '-farg' - value = shorthands[1:] - outShorts = "" - } else if len(args) > 0 { - // '-f arg' - value = args[0] - outArgs = args[1:] - } else { - // '-f' (arg was required) - err = f.failf("flag needs an argument: %q in -%s", c, shorthands) - return - } - - if flag.ShorthandDeprecated != "" { - fmt.Fprintf(f.out(), "Flag shorthand -%s has been deprecated, %s\n", flag.Shorthand, flag.ShorthandDeprecated) - } - - err = fn(flag, value) - return -} - -func (f *FlagSet) parseShortArg(s string, args []string, fn parseFunc) (a []string, err error) { - a = args - shorthands := s[1:] - - // "shorthands" can be a series of shorthand letters of flags (e.g. "-vvv"). - for len(shorthands) > 0 { - shorthands, a, err = f.parseSingleShortArg(shorthands, args, fn) - if err != nil { - return - } - } - - return -} - -func (f *FlagSet) parseArgs(args []string, fn parseFunc) (err error) { - for len(args) > 0 { - s := args[0] - args = args[1:] - if len(s) == 0 || s[0] != '-' || len(s) == 1 { - if !f.interspersed { - f.args = append(f.args, s) - f.args = append(f.args, args...) 
- return nil - } - f.args = append(f.args, s) - continue - } - - if s[1] == '-' { - if len(s) == 2 { // "--" terminates the flags - f.argsLenAtDash = len(f.args) - f.args = append(f.args, args...) - break - } - args, err = f.parseLongArg(s, args, fn) - } else { - args, err = f.parseShortArg(s, args, fn) - } - if err != nil { - return - } - } - return -} - -// Parse parses flag definitions from the argument list, which should not -// include the command name. Must be called after all flags in the FlagSet -// are defined and before flags are accessed by the program. -// The return value will be ErrHelp if -help was set but not defined. -func (f *FlagSet) Parse(arguments []string) error { - f.parsed = true - - if len(arguments) < 0 { - return nil - } - - f.args = make([]string, 0, len(arguments)) - - set := func(flag *Flag, value string) error { - return f.Set(flag.Name, value) - } - - err := f.parseArgs(arguments, set) - if err != nil { - switch f.errorHandling { - case ContinueOnError: - return err - case ExitOnError: - os.Exit(2) - case PanicOnError: - panic(err) - } - } - return nil -} - -type parseFunc func(flag *Flag, value string) error - -// ParseAll parses flag definitions from the argument list, which should not -// include the command name. The arguments for fn are flag and value. Must be -// called after all flags in the FlagSet are defined and before flags are -// accessed by the program. The return value will be ErrHelp if -help was set -// but not defined. -func (f *FlagSet) ParseAll(arguments []string, fn func(flag *Flag, value string) error) error { - f.parsed = true - f.args = make([]string, 0, len(arguments)) - - err := f.parseArgs(arguments, fn) - if err != nil { - switch f.errorHandling { - case ContinueOnError: - return err - case ExitOnError: - os.Exit(2) - case PanicOnError: - panic(err) - } - } - return nil -} - -// Parsed reports whether f.Parse has been called. -func (f *FlagSet) Parsed() bool { - return f.parsed -} - -// Parse parses the command-line flags from os.Args[1:]. Must be called -// after all flags are defined and before flags are accessed by the program. -func Parse() { - // Ignore errors; CommandLine is set for ExitOnError. - CommandLine.Parse(os.Args[1:]) -} - -// ParseAll parses the command-line flags from os.Args[1:] and called fn for each. -// The arguments for fn are flag and value. Must be called after all flags are -// defined and before flags are accessed by the program. -func ParseAll(fn func(flag *Flag, value string) error) { - // Ignore errors; CommandLine is set for ExitOnError. - CommandLine.ParseAll(os.Args[1:], fn) -} - -// SetInterspersed sets whether to support interspersed option/non-option arguments. -func SetInterspersed(interspersed bool) { - CommandLine.SetInterspersed(interspersed) -} - -// Parsed returns true if the command-line flags have been parsed. -func Parsed() bool { - return CommandLine.Parsed() -} - -// CommandLine is the default set of command-line flags, parsed from os.Args. -var CommandLine = NewFlagSet(os.Args[0], ExitOnError) - -// NewFlagSet returns a new, empty flag set with the specified name, -// error handling property and SortFlags set to true. -func NewFlagSet(name string, errorHandling ErrorHandling) *FlagSet { - f := &FlagSet{ - name: name, - errorHandling: errorHandling, - argsLenAtDash: -1, - interspersed: true, - SortFlags: true, - } - return f -} - -// SetInterspersed sets whether to support interspersed option/non-option arguments. 
-func (f *FlagSet) SetInterspersed(interspersed bool) { - f.interspersed = interspersed -} - -// Init sets the name and error handling property for a flag set. -// By default, the zero FlagSet uses an empty name and the -// ContinueOnError error handling policy. -func (f *FlagSet) Init(name string, errorHandling ErrorHandling) { - f.name = name - f.errorHandling = errorHandling - f.argsLenAtDash = -1 -} diff --git a/vendor/github.com/spf13/pflag/float32.go b/vendor/github.com/spf13/pflag/float32.go deleted file mode 100644 index a243f81f7..000000000 --- a/vendor/github.com/spf13/pflag/float32.go +++ /dev/null @@ -1,88 +0,0 @@ -package pflag - -import "strconv" - -// -- float32 Value -type float32Value float32 - -func newFloat32Value(val float32, p *float32) *float32Value { - *p = val - return (*float32Value)(p) -} - -func (f *float32Value) Set(s string) error { - v, err := strconv.ParseFloat(s, 32) - *f = float32Value(v) - return err -} - -func (f *float32Value) Type() string { - return "float32" -} - -func (f *float32Value) String() string { return strconv.FormatFloat(float64(*f), 'g', -1, 32) } - -func float32Conv(sval string) (interface{}, error) { - v, err := strconv.ParseFloat(sval, 32) - if err != nil { - return 0, err - } - return float32(v), nil -} - -// GetFloat32 return the float32 value of a flag with the given name -func (f *FlagSet) GetFloat32(name string) (float32, error) { - val, err := f.getFlagType(name, "float32", float32Conv) - if err != nil { - return 0, err - } - return val.(float32), nil -} - -// Float32Var defines a float32 flag with specified name, default value, and usage string. -// The argument p points to a float32 variable in which to store the value of the flag. -func (f *FlagSet) Float32Var(p *float32, name string, value float32, usage string) { - f.VarP(newFloat32Value(value, p), name, "", usage) -} - -// Float32VarP is like Float32Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Float32VarP(p *float32, name, shorthand string, value float32, usage string) { - f.VarP(newFloat32Value(value, p), name, shorthand, usage) -} - -// Float32Var defines a float32 flag with specified name, default value, and usage string. -// The argument p points to a float32 variable in which to store the value of the flag. -func Float32Var(p *float32, name string, value float32, usage string) { - CommandLine.VarP(newFloat32Value(value, p), name, "", usage) -} - -// Float32VarP is like Float32Var, but accepts a shorthand letter that can be used after a single dash. -func Float32VarP(p *float32, name, shorthand string, value float32, usage string) { - CommandLine.VarP(newFloat32Value(value, p), name, shorthand, usage) -} - -// Float32 defines a float32 flag with specified name, default value, and usage string. -// The return value is the address of a float32 variable that stores the value of the flag. -func (f *FlagSet) Float32(name string, value float32, usage string) *float32 { - p := new(float32) - f.Float32VarP(p, name, "", value, usage) - return p -} - -// Float32P is like Float32, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Float32P(name, shorthand string, value float32, usage string) *float32 { - p := new(float32) - f.Float32VarP(p, name, shorthand, value, usage) - return p -} - -// Float32 defines a float32 flag with specified name, default value, and usage string. -// The return value is the address of a float32 variable that stores the value of the flag. 
-func Float32(name string, value float32, usage string) *float32 { - return CommandLine.Float32P(name, "", value, usage) -} - -// Float32P is like Float32, but accepts a shorthand letter that can be used after a single dash. -func Float32P(name, shorthand string, value float32, usage string) *float32 { - return CommandLine.Float32P(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/float64.go b/vendor/github.com/spf13/pflag/float64.go deleted file mode 100644 index 04b5492a7..000000000 --- a/vendor/github.com/spf13/pflag/float64.go +++ /dev/null @@ -1,84 +0,0 @@ -package pflag - -import "strconv" - -// -- float64 Value -type float64Value float64 - -func newFloat64Value(val float64, p *float64) *float64Value { - *p = val - return (*float64Value)(p) -} - -func (f *float64Value) Set(s string) error { - v, err := strconv.ParseFloat(s, 64) - *f = float64Value(v) - return err -} - -func (f *float64Value) Type() string { - return "float64" -} - -func (f *float64Value) String() string { return strconv.FormatFloat(float64(*f), 'g', -1, 64) } - -func float64Conv(sval string) (interface{}, error) { - return strconv.ParseFloat(sval, 64) -} - -// GetFloat64 return the float64 value of a flag with the given name -func (f *FlagSet) GetFloat64(name string) (float64, error) { - val, err := f.getFlagType(name, "float64", float64Conv) - if err != nil { - return 0, err - } - return val.(float64), nil -} - -// Float64Var defines a float64 flag with specified name, default value, and usage string. -// The argument p points to a float64 variable in which to store the value of the flag. -func (f *FlagSet) Float64Var(p *float64, name string, value float64, usage string) { - f.VarP(newFloat64Value(value, p), name, "", usage) -} - -// Float64VarP is like Float64Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Float64VarP(p *float64, name, shorthand string, value float64, usage string) { - f.VarP(newFloat64Value(value, p), name, shorthand, usage) -} - -// Float64Var defines a float64 flag with specified name, default value, and usage string. -// The argument p points to a float64 variable in which to store the value of the flag. -func Float64Var(p *float64, name string, value float64, usage string) { - CommandLine.VarP(newFloat64Value(value, p), name, "", usage) -} - -// Float64VarP is like Float64Var, but accepts a shorthand letter that can be used after a single dash. -func Float64VarP(p *float64, name, shorthand string, value float64, usage string) { - CommandLine.VarP(newFloat64Value(value, p), name, shorthand, usage) -} - -// Float64 defines a float64 flag with specified name, default value, and usage string. -// The return value is the address of a float64 variable that stores the value of the flag. -func (f *FlagSet) Float64(name string, value float64, usage string) *float64 { - p := new(float64) - f.Float64VarP(p, name, "", value, usage) - return p -} - -// Float64P is like Float64, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Float64P(name, shorthand string, value float64, usage string) *float64 { - p := new(float64) - f.Float64VarP(p, name, shorthand, value, usage) - return p -} - -// Float64 defines a float64 flag with specified name, default value, and usage string. -// The return value is the address of a float64 variable that stores the value of the flag. 
-func Float64(name string, value float64, usage string) *float64 { - return CommandLine.Float64P(name, "", value, usage) -} - -// Float64P is like Float64, but accepts a shorthand letter that can be used after a single dash. -func Float64P(name, shorthand string, value float64, usage string) *float64 { - return CommandLine.Float64P(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/golangflag.go b/vendor/github.com/spf13/pflag/golangflag.go deleted file mode 100644 index c4f47ebe5..000000000 --- a/vendor/github.com/spf13/pflag/golangflag.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package pflag - -import ( - goflag "flag" - "reflect" - "strings" -) - -// flagValueWrapper implements pflag.Value around a flag.Value. The main -// difference here is the addition of the Type method that returns a string -// name of the type. As this is generally unknown, we approximate that with -// reflection. -type flagValueWrapper struct { - inner goflag.Value - flagType string -} - -// We are just copying the boolFlag interface out of goflag as that is what -// they use to decide if a flag should get "true" when no arg is given. -type goBoolFlag interface { - goflag.Value - IsBoolFlag() bool -} - -func wrapFlagValue(v goflag.Value) Value { - // If the flag.Value happens to also be a pflag.Value, just use it directly. - if pv, ok := v.(Value); ok { - return pv - } - - pv := &flagValueWrapper{ - inner: v, - } - - t := reflect.TypeOf(v) - if t.Kind() == reflect.Interface || t.Kind() == reflect.Ptr { - t = t.Elem() - } - - pv.flagType = strings.TrimSuffix(t.Name(), "Value") - return pv -} - -func (v *flagValueWrapper) String() string { - return v.inner.String() -} - -func (v *flagValueWrapper) Set(s string) error { - return v.inner.Set(s) -} - -func (v *flagValueWrapper) Type() string { - return v.flagType -} - -// PFlagFromGoFlag will return a *pflag.Flag given a *flag.Flag -// If the *flag.Flag.Name was a single character (ex: `v`) it will be accessiblei -// with both `-v` and `--v` in flags. If the golang flag was more than a single -// character (ex: `verbose`) it will only be accessible via `--verbose` -func PFlagFromGoFlag(goflag *goflag.Flag) *Flag { - // Remember the default value as a string; it won't change. 
- flag := &Flag{ - Name: goflag.Name, - Usage: goflag.Usage, - Value: wrapFlagValue(goflag.Value), - // Looks like golang flags don't set DefValue correctly :-( - //DefValue: goflag.DefValue, - DefValue: goflag.Value.String(), - } - // Ex: if the golang flag was -v, allow both -v and --v to work - if len(flag.Name) == 1 { - flag.Shorthand = flag.Name - } - if fv, ok := goflag.Value.(goBoolFlag); ok && fv.IsBoolFlag() { - flag.NoOptDefVal = "true" - } - return flag -} - -// AddGoFlag will add the given *flag.Flag to the pflag.FlagSet -func (f *FlagSet) AddGoFlag(goflag *goflag.Flag) { - if f.Lookup(goflag.Name) != nil { - return - } - newflag := PFlagFromGoFlag(goflag) - f.AddFlag(newflag) -} - -// AddGoFlagSet will add the given *flag.FlagSet to the pflag.FlagSet -func (f *FlagSet) AddGoFlagSet(newSet *goflag.FlagSet) { - if newSet == nil { - return - } - newSet.VisitAll(func(goflag *goflag.Flag) { - f.AddGoFlag(goflag) - }) -} diff --git a/vendor/github.com/spf13/pflag/int.go b/vendor/github.com/spf13/pflag/int.go deleted file mode 100644 index 1474b89df..000000000 --- a/vendor/github.com/spf13/pflag/int.go +++ /dev/null @@ -1,84 +0,0 @@ -package pflag - -import "strconv" - -// -- int Value -type intValue int - -func newIntValue(val int, p *int) *intValue { - *p = val - return (*intValue)(p) -} - -func (i *intValue) Set(s string) error { - v, err := strconv.ParseInt(s, 0, 64) - *i = intValue(v) - return err -} - -func (i *intValue) Type() string { - return "int" -} - -func (i *intValue) String() string { return strconv.Itoa(int(*i)) } - -func intConv(sval string) (interface{}, error) { - return strconv.Atoi(sval) -} - -// GetInt return the int value of a flag with the given name -func (f *FlagSet) GetInt(name string) (int, error) { - val, err := f.getFlagType(name, "int", intConv) - if err != nil { - return 0, err - } - return val.(int), nil -} - -// IntVar defines an int flag with specified name, default value, and usage string. -// The argument p points to an int variable in which to store the value of the flag. -func (f *FlagSet) IntVar(p *int, name string, value int, usage string) { - f.VarP(newIntValue(value, p), name, "", usage) -} - -// IntVarP is like IntVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) IntVarP(p *int, name, shorthand string, value int, usage string) { - f.VarP(newIntValue(value, p), name, shorthand, usage) -} - -// IntVar defines an int flag with specified name, default value, and usage string. -// The argument p points to an int variable in which to store the value of the flag. -func IntVar(p *int, name string, value int, usage string) { - CommandLine.VarP(newIntValue(value, p), name, "", usage) -} - -// IntVarP is like IntVar, but accepts a shorthand letter that can be used after a single dash. -func IntVarP(p *int, name, shorthand string, value int, usage string) { - CommandLine.VarP(newIntValue(value, p), name, shorthand, usage) -} - -// Int defines an int flag with specified name, default value, and usage string. -// The return value is the address of an int variable that stores the value of the flag. -func (f *FlagSet) Int(name string, value int, usage string) *int { - p := new(int) - f.IntVarP(p, name, "", value, usage) - return p -} - -// IntP is like Int, but accepts a shorthand letter that can be used after a single dash. 
-func (f *FlagSet) IntP(name, shorthand string, value int, usage string) *int { - p := new(int) - f.IntVarP(p, name, shorthand, value, usage) - return p -} - -// Int defines an int flag with specified name, default value, and usage string. -// The return value is the address of an int variable that stores the value of the flag. -func Int(name string, value int, usage string) *int { - return CommandLine.IntP(name, "", value, usage) -} - -// IntP is like Int, but accepts a shorthand letter that can be used after a single dash. -func IntP(name, shorthand string, value int, usage string) *int { - return CommandLine.IntP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/int32.go b/vendor/github.com/spf13/pflag/int32.go deleted file mode 100644 index 9b95944f0..000000000 --- a/vendor/github.com/spf13/pflag/int32.go +++ /dev/null @@ -1,88 +0,0 @@ -package pflag - -import "strconv" - -// -- int32 Value -type int32Value int32 - -func newInt32Value(val int32, p *int32) *int32Value { - *p = val - return (*int32Value)(p) -} - -func (i *int32Value) Set(s string) error { - v, err := strconv.ParseInt(s, 0, 32) - *i = int32Value(v) - return err -} - -func (i *int32Value) Type() string { - return "int32" -} - -func (i *int32Value) String() string { return strconv.FormatInt(int64(*i), 10) } - -func int32Conv(sval string) (interface{}, error) { - v, err := strconv.ParseInt(sval, 0, 32) - if err != nil { - return 0, err - } - return int32(v), nil -} - -// GetInt32 return the int32 value of a flag with the given name -func (f *FlagSet) GetInt32(name string) (int32, error) { - val, err := f.getFlagType(name, "int32", int32Conv) - if err != nil { - return 0, err - } - return val.(int32), nil -} - -// Int32Var defines an int32 flag with specified name, default value, and usage string. -// The argument p points to an int32 variable in which to store the value of the flag. -func (f *FlagSet) Int32Var(p *int32, name string, value int32, usage string) { - f.VarP(newInt32Value(value, p), name, "", usage) -} - -// Int32VarP is like Int32Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Int32VarP(p *int32, name, shorthand string, value int32, usage string) { - f.VarP(newInt32Value(value, p), name, shorthand, usage) -} - -// Int32Var defines an int32 flag with specified name, default value, and usage string. -// The argument p points to an int32 variable in which to store the value of the flag. -func Int32Var(p *int32, name string, value int32, usage string) { - CommandLine.VarP(newInt32Value(value, p), name, "", usage) -} - -// Int32VarP is like Int32Var, but accepts a shorthand letter that can be used after a single dash. -func Int32VarP(p *int32, name, shorthand string, value int32, usage string) { - CommandLine.VarP(newInt32Value(value, p), name, shorthand, usage) -} - -// Int32 defines an int32 flag with specified name, default value, and usage string. -// The return value is the address of an int32 variable that stores the value of the flag. -func (f *FlagSet) Int32(name string, value int32, usage string) *int32 { - p := new(int32) - f.Int32VarP(p, name, "", value, usage) - return p -} - -// Int32P is like Int32, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Int32P(name, shorthand string, value int32, usage string) *int32 { - p := new(int32) - f.Int32VarP(p, name, shorthand, value, usage) - return p -} - -// Int32 defines an int32 flag with specified name, default value, and usage string. 
-// The return value is the address of an int32 variable that stores the value of the flag. -func Int32(name string, value int32, usage string) *int32 { - return CommandLine.Int32P(name, "", value, usage) -} - -// Int32P is like Int32, but accepts a shorthand letter that can be used after a single dash. -func Int32P(name, shorthand string, value int32, usage string) *int32 { - return CommandLine.Int32P(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/int64.go b/vendor/github.com/spf13/pflag/int64.go deleted file mode 100644 index 0026d781d..000000000 --- a/vendor/github.com/spf13/pflag/int64.go +++ /dev/null @@ -1,84 +0,0 @@ -package pflag - -import "strconv" - -// -- int64 Value -type int64Value int64 - -func newInt64Value(val int64, p *int64) *int64Value { - *p = val - return (*int64Value)(p) -} - -func (i *int64Value) Set(s string) error { - v, err := strconv.ParseInt(s, 0, 64) - *i = int64Value(v) - return err -} - -func (i *int64Value) Type() string { - return "int64" -} - -func (i *int64Value) String() string { return strconv.FormatInt(int64(*i), 10) } - -func int64Conv(sval string) (interface{}, error) { - return strconv.ParseInt(sval, 0, 64) -} - -// GetInt64 return the int64 value of a flag with the given name -func (f *FlagSet) GetInt64(name string) (int64, error) { - val, err := f.getFlagType(name, "int64", int64Conv) - if err != nil { - return 0, err - } - return val.(int64), nil -} - -// Int64Var defines an int64 flag with specified name, default value, and usage string. -// The argument p points to an int64 variable in which to store the value of the flag. -func (f *FlagSet) Int64Var(p *int64, name string, value int64, usage string) { - f.VarP(newInt64Value(value, p), name, "", usage) -} - -// Int64VarP is like Int64Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Int64VarP(p *int64, name, shorthand string, value int64, usage string) { - f.VarP(newInt64Value(value, p), name, shorthand, usage) -} - -// Int64Var defines an int64 flag with specified name, default value, and usage string. -// The argument p points to an int64 variable in which to store the value of the flag. -func Int64Var(p *int64, name string, value int64, usage string) { - CommandLine.VarP(newInt64Value(value, p), name, "", usage) -} - -// Int64VarP is like Int64Var, but accepts a shorthand letter that can be used after a single dash. -func Int64VarP(p *int64, name, shorthand string, value int64, usage string) { - CommandLine.VarP(newInt64Value(value, p), name, shorthand, usage) -} - -// Int64 defines an int64 flag with specified name, default value, and usage string. -// The return value is the address of an int64 variable that stores the value of the flag. -func (f *FlagSet) Int64(name string, value int64, usage string) *int64 { - p := new(int64) - f.Int64VarP(p, name, "", value, usage) - return p -} - -// Int64P is like Int64, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Int64P(name, shorthand string, value int64, usage string) *int64 { - p := new(int64) - f.Int64VarP(p, name, shorthand, value, usage) - return p -} - -// Int64 defines an int64 flag with specified name, default value, and usage string. -// The return value is the address of an int64 variable that stores the value of the flag. 
-func Int64(name string, value int64, usage string) *int64 { - return CommandLine.Int64P(name, "", value, usage) -} - -// Int64P is like Int64, but accepts a shorthand letter that can be used after a single dash. -func Int64P(name, shorthand string, value int64, usage string) *int64 { - return CommandLine.Int64P(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/int8.go b/vendor/github.com/spf13/pflag/int8.go deleted file mode 100644 index 4da92228e..000000000 --- a/vendor/github.com/spf13/pflag/int8.go +++ /dev/null @@ -1,88 +0,0 @@ -package pflag - -import "strconv" - -// -- int8 Value -type int8Value int8 - -func newInt8Value(val int8, p *int8) *int8Value { - *p = val - return (*int8Value)(p) -} - -func (i *int8Value) Set(s string) error { - v, err := strconv.ParseInt(s, 0, 8) - *i = int8Value(v) - return err -} - -func (i *int8Value) Type() string { - return "int8" -} - -func (i *int8Value) String() string { return strconv.FormatInt(int64(*i), 10) } - -func int8Conv(sval string) (interface{}, error) { - v, err := strconv.ParseInt(sval, 0, 8) - if err != nil { - return 0, err - } - return int8(v), nil -} - -// GetInt8 return the int8 value of a flag with the given name -func (f *FlagSet) GetInt8(name string) (int8, error) { - val, err := f.getFlagType(name, "int8", int8Conv) - if err != nil { - return 0, err - } - return val.(int8), nil -} - -// Int8Var defines an int8 flag with specified name, default value, and usage string. -// The argument p points to an int8 variable in which to store the value of the flag. -func (f *FlagSet) Int8Var(p *int8, name string, value int8, usage string) { - f.VarP(newInt8Value(value, p), name, "", usage) -} - -// Int8VarP is like Int8Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Int8VarP(p *int8, name, shorthand string, value int8, usage string) { - f.VarP(newInt8Value(value, p), name, shorthand, usage) -} - -// Int8Var defines an int8 flag with specified name, default value, and usage string. -// The argument p points to an int8 variable in which to store the value of the flag. -func Int8Var(p *int8, name string, value int8, usage string) { - CommandLine.VarP(newInt8Value(value, p), name, "", usage) -} - -// Int8VarP is like Int8Var, but accepts a shorthand letter that can be used after a single dash. -func Int8VarP(p *int8, name, shorthand string, value int8, usage string) { - CommandLine.VarP(newInt8Value(value, p), name, shorthand, usage) -} - -// Int8 defines an int8 flag with specified name, default value, and usage string. -// The return value is the address of an int8 variable that stores the value of the flag. -func (f *FlagSet) Int8(name string, value int8, usage string) *int8 { - p := new(int8) - f.Int8VarP(p, name, "", value, usage) - return p -} - -// Int8P is like Int8, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Int8P(name, shorthand string, value int8, usage string) *int8 { - p := new(int8) - f.Int8VarP(p, name, shorthand, value, usage) - return p -} - -// Int8 defines an int8 flag with specified name, default value, and usage string. -// The return value is the address of an int8 variable that stores the value of the flag. -func Int8(name string, value int8, usage string) *int8 { - return CommandLine.Int8P(name, "", value, usage) -} - -// Int8P is like Int8, but accepts a shorthand letter that can be used after a single dash. 
-func Int8P(name, shorthand string, value int8, usage string) *int8 { - return CommandLine.Int8P(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/int_slice.go b/vendor/github.com/spf13/pflag/int_slice.go deleted file mode 100644 index 1e7c9edde..000000000 --- a/vendor/github.com/spf13/pflag/int_slice.go +++ /dev/null @@ -1,128 +0,0 @@ -package pflag - -import ( - "fmt" - "strconv" - "strings" -) - -// -- intSlice Value -type intSliceValue struct { - value *[]int - changed bool -} - -func newIntSliceValue(val []int, p *[]int) *intSliceValue { - isv := new(intSliceValue) - isv.value = p - *isv.value = val - return isv -} - -func (s *intSliceValue) Set(val string) error { - ss := strings.Split(val, ",") - out := make([]int, len(ss)) - for i, d := range ss { - var err error - out[i], err = strconv.Atoi(d) - if err != nil { - return err - } - - } - if !s.changed { - *s.value = out - } else { - *s.value = append(*s.value, out...) - } - s.changed = true - return nil -} - -func (s *intSliceValue) Type() string { - return "intSlice" -} - -func (s *intSliceValue) String() string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = fmt.Sprintf("%d", d) - } - return "[" + strings.Join(out, ",") + "]" -} - -func intSliceConv(val string) (interface{}, error) { - val = strings.Trim(val, "[]") - // Empty string would cause a slice with one (empty) entry - if len(val) == 0 { - return []int{}, nil - } - ss := strings.Split(val, ",") - out := make([]int, len(ss)) - for i, d := range ss { - var err error - out[i], err = strconv.Atoi(d) - if err != nil { - return nil, err - } - - } - return out, nil -} - -// GetIntSlice return the []int value of a flag with the given name -func (f *FlagSet) GetIntSlice(name string) ([]int, error) { - val, err := f.getFlagType(name, "intSlice", intSliceConv) - if err != nil { - return []int{}, err - } - return val.([]int), nil -} - -// IntSliceVar defines a intSlice flag with specified name, default value, and usage string. -// The argument p points to a []int variable in which to store the value of the flag. -func (f *FlagSet) IntSliceVar(p *[]int, name string, value []int, usage string) { - f.VarP(newIntSliceValue(value, p), name, "", usage) -} - -// IntSliceVarP is like IntSliceVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) IntSliceVarP(p *[]int, name, shorthand string, value []int, usage string) { - f.VarP(newIntSliceValue(value, p), name, shorthand, usage) -} - -// IntSliceVar defines a int[] flag with specified name, default value, and usage string. -// The argument p points to a int[] variable in which to store the value of the flag. -func IntSliceVar(p *[]int, name string, value []int, usage string) { - CommandLine.VarP(newIntSliceValue(value, p), name, "", usage) -} - -// IntSliceVarP is like IntSliceVar, but accepts a shorthand letter that can be used after a single dash. -func IntSliceVarP(p *[]int, name, shorthand string, value []int, usage string) { - CommandLine.VarP(newIntSliceValue(value, p), name, shorthand, usage) -} - -// IntSlice defines a []int flag with specified name, default value, and usage string. -// The return value is the address of a []int variable that stores the value of the flag. -func (f *FlagSet) IntSlice(name string, value []int, usage string) *[]int { - p := []int{} - f.IntSliceVarP(&p, name, "", value, usage) - return &p -} - -// IntSliceP is like IntSlice, but accepts a shorthand letter that can be used after a single dash. 
-func (f *FlagSet) IntSliceP(name, shorthand string, value []int, usage string) *[]int { - p := []int{} - f.IntSliceVarP(&p, name, shorthand, value, usage) - return &p -} - -// IntSlice defines a []int flag with specified name, default value, and usage string. -// The return value is the address of a []int variable that stores the value of the flag. -func IntSlice(name string, value []int, usage string) *[]int { - return CommandLine.IntSliceP(name, "", value, usage) -} - -// IntSliceP is like IntSlice, but accepts a shorthand letter that can be used after a single dash. -func IntSliceP(name, shorthand string, value []int, usage string) *[]int { - return CommandLine.IntSliceP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/ip.go b/vendor/github.com/spf13/pflag/ip.go deleted file mode 100644 index 3d414ba69..000000000 --- a/vendor/github.com/spf13/pflag/ip.go +++ /dev/null @@ -1,94 +0,0 @@ -package pflag - -import ( - "fmt" - "net" - "strings" -) - -// -- net.IP value -type ipValue net.IP - -func newIPValue(val net.IP, p *net.IP) *ipValue { - *p = val - return (*ipValue)(p) -} - -func (i *ipValue) String() string { return net.IP(*i).String() } -func (i *ipValue) Set(s string) error { - ip := net.ParseIP(strings.TrimSpace(s)) - if ip == nil { - return fmt.Errorf("failed to parse IP: %q", s) - } - *i = ipValue(ip) - return nil -} - -func (i *ipValue) Type() string { - return "ip" -} - -func ipConv(sval string) (interface{}, error) { - ip := net.ParseIP(sval) - if ip != nil { - return ip, nil - } - return nil, fmt.Errorf("invalid string being converted to IP address: %s", sval) -} - -// GetIP return the net.IP value of a flag with the given name -func (f *FlagSet) GetIP(name string) (net.IP, error) { - val, err := f.getFlagType(name, "ip", ipConv) - if err != nil { - return nil, err - } - return val.(net.IP), nil -} - -// IPVar defines an net.IP flag with specified name, default value, and usage string. -// The argument p points to an net.IP variable in which to store the value of the flag. -func (f *FlagSet) IPVar(p *net.IP, name string, value net.IP, usage string) { - f.VarP(newIPValue(value, p), name, "", usage) -} - -// IPVarP is like IPVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) IPVarP(p *net.IP, name, shorthand string, value net.IP, usage string) { - f.VarP(newIPValue(value, p), name, shorthand, usage) -} - -// IPVar defines an net.IP flag with specified name, default value, and usage string. -// The argument p points to an net.IP variable in which to store the value of the flag. -func IPVar(p *net.IP, name string, value net.IP, usage string) { - CommandLine.VarP(newIPValue(value, p), name, "", usage) -} - -// IPVarP is like IPVar, but accepts a shorthand letter that can be used after a single dash. -func IPVarP(p *net.IP, name, shorthand string, value net.IP, usage string) { - CommandLine.VarP(newIPValue(value, p), name, shorthand, usage) -} - -// IP defines an net.IP flag with specified name, default value, and usage string. -// The return value is the address of an net.IP variable that stores the value of the flag. -func (f *FlagSet) IP(name string, value net.IP, usage string) *net.IP { - p := new(net.IP) - f.IPVarP(p, name, "", value, usage) - return p -} - -// IPP is like IP, but accepts a shorthand letter that can be used after a single dash. 
-func (f *FlagSet) IPP(name, shorthand string, value net.IP, usage string) *net.IP { - p := new(net.IP) - f.IPVarP(p, name, shorthand, value, usage) - return p -} - -// IP defines an net.IP flag with specified name, default value, and usage string. -// The return value is the address of an net.IP variable that stores the value of the flag. -func IP(name string, value net.IP, usage string) *net.IP { - return CommandLine.IPP(name, "", value, usage) -} - -// IPP is like IP, but accepts a shorthand letter that can be used after a single dash. -func IPP(name, shorthand string, value net.IP, usage string) *net.IP { - return CommandLine.IPP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/ip_slice.go b/vendor/github.com/spf13/pflag/ip_slice.go deleted file mode 100644 index 7dd196fe3..000000000 --- a/vendor/github.com/spf13/pflag/ip_slice.go +++ /dev/null @@ -1,148 +0,0 @@ -package pflag - -import ( - "fmt" - "io" - "net" - "strings" -) - -// -- ipSlice Value -type ipSliceValue struct { - value *[]net.IP - changed bool -} - -func newIPSliceValue(val []net.IP, p *[]net.IP) *ipSliceValue { - ipsv := new(ipSliceValue) - ipsv.value = p - *ipsv.value = val - return ipsv -} - -// Set converts, and assigns, the comma-separated IP argument string representation as the []net.IP value of this flag. -// If Set is called on a flag that already has a []net.IP assigned, the newly converted values will be appended. -func (s *ipSliceValue) Set(val string) error { - - // remove all quote characters - rmQuote := strings.NewReplacer(`"`, "", `'`, "", "`", "") - - // read flag arguments with CSV parser - ipStrSlice, err := readAsCSV(rmQuote.Replace(val)) - if err != nil && err != io.EOF { - return err - } - - // parse ip values into slice - out := make([]net.IP, 0, len(ipStrSlice)) - for _, ipStr := range ipStrSlice { - ip := net.ParseIP(strings.TrimSpace(ipStr)) - if ip == nil { - return fmt.Errorf("invalid string being converted to IP address: %s", ipStr) - } - out = append(out, ip) - } - - if !s.changed { - *s.value = out - } else { - *s.value = append(*s.value, out...) - } - - s.changed = true - - return nil -} - -// Type returns a string that uniquely represents this flag's type. -func (s *ipSliceValue) Type() string { - return "ipSlice" -} - -// String defines a "native" format for this net.IP slice flag value. -func (s *ipSliceValue) String() string { - - ipStrSlice := make([]string, len(*s.value)) - for i, ip := range *s.value { - ipStrSlice[i] = ip.String() - } - - out, _ := writeAsCSV(ipStrSlice) - - return "[" + out + "]" -} - -func ipSliceConv(val string) (interface{}, error) { - val = strings.Trim(val, "[]") - // Emtpy string would cause a slice with one (empty) entry - if len(val) == 0 { - return []net.IP{}, nil - } - ss := strings.Split(val, ",") - out := make([]net.IP, len(ss)) - for i, sval := range ss { - ip := net.ParseIP(strings.TrimSpace(sval)) - if ip == nil { - return nil, fmt.Errorf("invalid string being converted to IP address: %s", sval) - } - out[i] = ip - } - return out, nil -} - -// GetIPSlice returns the []net.IP value of a flag with the given name -func (f *FlagSet) GetIPSlice(name string) ([]net.IP, error) { - val, err := f.getFlagType(name, "ipSlice", ipSliceConv) - if err != nil { - return []net.IP{}, err - } - return val.([]net.IP), nil -} - -// IPSliceVar defines a ipSlice flag with specified name, default value, and usage string. -// The argument p points to a []net.IP variable in which to store the value of the flag. 
-func (f *FlagSet) IPSliceVar(p *[]net.IP, name string, value []net.IP, usage string) { - f.VarP(newIPSliceValue(value, p), name, "", usage) -} - -// IPSliceVarP is like IPSliceVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) IPSliceVarP(p *[]net.IP, name, shorthand string, value []net.IP, usage string) { - f.VarP(newIPSliceValue(value, p), name, shorthand, usage) -} - -// IPSliceVar defines a []net.IP flag with specified name, default value, and usage string. -// The argument p points to a []net.IP variable in which to store the value of the flag. -func IPSliceVar(p *[]net.IP, name string, value []net.IP, usage string) { - CommandLine.VarP(newIPSliceValue(value, p), name, "", usage) -} - -// IPSliceVarP is like IPSliceVar, but accepts a shorthand letter that can be used after a single dash. -func IPSliceVarP(p *[]net.IP, name, shorthand string, value []net.IP, usage string) { - CommandLine.VarP(newIPSliceValue(value, p), name, shorthand, usage) -} - -// IPSlice defines a []net.IP flag with specified name, default value, and usage string. -// The return value is the address of a []net.IP variable that stores the value of that flag. -func (f *FlagSet) IPSlice(name string, value []net.IP, usage string) *[]net.IP { - p := []net.IP{} - f.IPSliceVarP(&p, name, "", value, usage) - return &p -} - -// IPSliceP is like IPSlice, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) IPSliceP(name, shorthand string, value []net.IP, usage string) *[]net.IP { - p := []net.IP{} - f.IPSliceVarP(&p, name, shorthand, value, usage) - return &p -} - -// IPSlice defines a []net.IP flag with specified name, default value, and usage string. -// The return value is the address of a []net.IP variable that stores the value of the flag. -func IPSlice(name string, value []net.IP, usage string) *[]net.IP { - return CommandLine.IPSliceP(name, "", value, usage) -} - -// IPSliceP is like IPSlice, but accepts a shorthand letter that can be used after a single dash. -func IPSliceP(name, shorthand string, value []net.IP, usage string) *[]net.IP { - return CommandLine.IPSliceP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/ipmask.go b/vendor/github.com/spf13/pflag/ipmask.go deleted file mode 100644 index 5bd44bd21..000000000 --- a/vendor/github.com/spf13/pflag/ipmask.go +++ /dev/null @@ -1,122 +0,0 @@ -package pflag - -import ( - "fmt" - "net" - "strconv" -) - -// -- net.IPMask value -type ipMaskValue net.IPMask - -func newIPMaskValue(val net.IPMask, p *net.IPMask) *ipMaskValue { - *p = val - return (*ipMaskValue)(p) -} - -func (i *ipMaskValue) String() string { return net.IPMask(*i).String() } -func (i *ipMaskValue) Set(s string) error { - ip := ParseIPv4Mask(s) - if ip == nil { - return fmt.Errorf("failed to parse IP mask: %q", s) - } - *i = ipMaskValue(ip) - return nil -} - -func (i *ipMaskValue) Type() string { - return "ipMask" -} - -// ParseIPv4Mask written in IP form (e.g. 255.255.255.0). -// This function should really belong to the net package. 
-func ParseIPv4Mask(s string) net.IPMask { - mask := net.ParseIP(s) - if mask == nil { - if len(s) != 8 { - return nil - } - // net.IPMask.String() actually outputs things like ffffff00 - // so write a horrible parser for that as well :-( - m := []int{} - for i := 0; i < 4; i++ { - b := "0x" + s[2*i:2*i+2] - d, err := strconv.ParseInt(b, 0, 0) - if err != nil { - return nil - } - m = append(m, int(d)) - } - s := fmt.Sprintf("%d.%d.%d.%d", m[0], m[1], m[2], m[3]) - mask = net.ParseIP(s) - if mask == nil { - return nil - } - } - return net.IPv4Mask(mask[12], mask[13], mask[14], mask[15]) -} - -func parseIPv4Mask(sval string) (interface{}, error) { - mask := ParseIPv4Mask(sval) - if mask == nil { - return nil, fmt.Errorf("unable to parse %s as net.IPMask", sval) - } - return mask, nil -} - -// GetIPv4Mask return the net.IPv4Mask value of a flag with the given name -func (f *FlagSet) GetIPv4Mask(name string) (net.IPMask, error) { - val, err := f.getFlagType(name, "ipMask", parseIPv4Mask) - if err != nil { - return nil, err - } - return val.(net.IPMask), nil -} - -// IPMaskVar defines an net.IPMask flag with specified name, default value, and usage string. -// The argument p points to an net.IPMask variable in which to store the value of the flag. -func (f *FlagSet) IPMaskVar(p *net.IPMask, name string, value net.IPMask, usage string) { - f.VarP(newIPMaskValue(value, p), name, "", usage) -} - -// IPMaskVarP is like IPMaskVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) IPMaskVarP(p *net.IPMask, name, shorthand string, value net.IPMask, usage string) { - f.VarP(newIPMaskValue(value, p), name, shorthand, usage) -} - -// IPMaskVar defines an net.IPMask flag with specified name, default value, and usage string. -// The argument p points to an net.IPMask variable in which to store the value of the flag. -func IPMaskVar(p *net.IPMask, name string, value net.IPMask, usage string) { - CommandLine.VarP(newIPMaskValue(value, p), name, "", usage) -} - -// IPMaskVarP is like IPMaskVar, but accepts a shorthand letter that can be used after a single dash. -func IPMaskVarP(p *net.IPMask, name, shorthand string, value net.IPMask, usage string) { - CommandLine.VarP(newIPMaskValue(value, p), name, shorthand, usage) -} - -// IPMask defines an net.IPMask flag with specified name, default value, and usage string. -// The return value is the address of an net.IPMask variable that stores the value of the flag. -func (f *FlagSet) IPMask(name string, value net.IPMask, usage string) *net.IPMask { - p := new(net.IPMask) - f.IPMaskVarP(p, name, "", value, usage) - return p -} - -// IPMaskP is like IPMask, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) IPMaskP(name, shorthand string, value net.IPMask, usage string) *net.IPMask { - p := new(net.IPMask) - f.IPMaskVarP(p, name, shorthand, value, usage) - return p -} - -// IPMask defines an net.IPMask flag with specified name, default value, and usage string. -// The return value is the address of an net.IPMask variable that stores the value of the flag. -func IPMask(name string, value net.IPMask, usage string) *net.IPMask { - return CommandLine.IPMaskP(name, "", value, usage) -} - -// IPMaskP is like IP, but accepts a shorthand letter that can be used after a single dash. 
-func IPMaskP(name, shorthand string, value net.IPMask, usage string) *net.IPMask { - return CommandLine.IPMaskP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/ipnet.go b/vendor/github.com/spf13/pflag/ipnet.go deleted file mode 100644 index e2c1b8bcd..000000000 --- a/vendor/github.com/spf13/pflag/ipnet.go +++ /dev/null @@ -1,98 +0,0 @@ -package pflag - -import ( - "fmt" - "net" - "strings" -) - -// IPNet adapts net.IPNet for use as a flag. -type ipNetValue net.IPNet - -func (ipnet ipNetValue) String() string { - n := net.IPNet(ipnet) - return n.String() -} - -func (ipnet *ipNetValue) Set(value string) error { - _, n, err := net.ParseCIDR(strings.TrimSpace(value)) - if err != nil { - return err - } - *ipnet = ipNetValue(*n) - return nil -} - -func (*ipNetValue) Type() string { - return "ipNet" -} - -func newIPNetValue(val net.IPNet, p *net.IPNet) *ipNetValue { - *p = val - return (*ipNetValue)(p) -} - -func ipNetConv(sval string) (interface{}, error) { - _, n, err := net.ParseCIDR(strings.TrimSpace(sval)) - if err == nil { - return *n, nil - } - return nil, fmt.Errorf("invalid string being converted to IPNet: %s", sval) -} - -// GetIPNet return the net.IPNet value of a flag with the given name -func (f *FlagSet) GetIPNet(name string) (net.IPNet, error) { - val, err := f.getFlagType(name, "ipNet", ipNetConv) - if err != nil { - return net.IPNet{}, err - } - return val.(net.IPNet), nil -} - -// IPNetVar defines an net.IPNet flag with specified name, default value, and usage string. -// The argument p points to an net.IPNet variable in which to store the value of the flag. -func (f *FlagSet) IPNetVar(p *net.IPNet, name string, value net.IPNet, usage string) { - f.VarP(newIPNetValue(value, p), name, "", usage) -} - -// IPNetVarP is like IPNetVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) IPNetVarP(p *net.IPNet, name, shorthand string, value net.IPNet, usage string) { - f.VarP(newIPNetValue(value, p), name, shorthand, usage) -} - -// IPNetVar defines an net.IPNet flag with specified name, default value, and usage string. -// The argument p points to an net.IPNet variable in which to store the value of the flag. -func IPNetVar(p *net.IPNet, name string, value net.IPNet, usage string) { - CommandLine.VarP(newIPNetValue(value, p), name, "", usage) -} - -// IPNetVarP is like IPNetVar, but accepts a shorthand letter that can be used after a single dash. -func IPNetVarP(p *net.IPNet, name, shorthand string, value net.IPNet, usage string) { - CommandLine.VarP(newIPNetValue(value, p), name, shorthand, usage) -} - -// IPNet defines an net.IPNet flag with specified name, default value, and usage string. -// The return value is the address of an net.IPNet variable that stores the value of the flag. -func (f *FlagSet) IPNet(name string, value net.IPNet, usage string) *net.IPNet { - p := new(net.IPNet) - f.IPNetVarP(p, name, "", value, usage) - return p -} - -// IPNetP is like IPNet, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) IPNetP(name, shorthand string, value net.IPNet, usage string) *net.IPNet { - p := new(net.IPNet) - f.IPNetVarP(p, name, shorthand, value, usage) - return p -} - -// IPNet defines an net.IPNet flag with specified name, default value, and usage string. -// The return value is the address of an net.IPNet variable that stores the value of the flag. 
-func IPNet(name string, value net.IPNet, usage string) *net.IPNet { - return CommandLine.IPNetP(name, "", value, usage) -} - -// IPNetP is like IPNet, but accepts a shorthand letter that can be used after a single dash. -func IPNetP(name, shorthand string, value net.IPNet, usage string) *net.IPNet { - return CommandLine.IPNetP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/string.go b/vendor/github.com/spf13/pflag/string.go deleted file mode 100644 index 04e0a26ff..000000000 --- a/vendor/github.com/spf13/pflag/string.go +++ /dev/null @@ -1,80 +0,0 @@ -package pflag - -// -- string Value -type stringValue string - -func newStringValue(val string, p *string) *stringValue { - *p = val - return (*stringValue)(p) -} - -func (s *stringValue) Set(val string) error { - *s = stringValue(val) - return nil -} -func (s *stringValue) Type() string { - return "string" -} - -func (s *stringValue) String() string { return string(*s) } - -func stringConv(sval string) (interface{}, error) { - return sval, nil -} - -// GetString return the string value of a flag with the given name -func (f *FlagSet) GetString(name string) (string, error) { - val, err := f.getFlagType(name, "string", stringConv) - if err != nil { - return "", err - } - return val.(string), nil -} - -// StringVar defines a string flag with specified name, default value, and usage string. -// The argument p points to a string variable in which to store the value of the flag. -func (f *FlagSet) StringVar(p *string, name string, value string, usage string) { - f.VarP(newStringValue(value, p), name, "", usage) -} - -// StringVarP is like StringVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) StringVarP(p *string, name, shorthand string, value string, usage string) { - f.VarP(newStringValue(value, p), name, shorthand, usage) -} - -// StringVar defines a string flag with specified name, default value, and usage string. -// The argument p points to a string variable in which to store the value of the flag. -func StringVar(p *string, name string, value string, usage string) { - CommandLine.VarP(newStringValue(value, p), name, "", usage) -} - -// StringVarP is like StringVar, but accepts a shorthand letter that can be used after a single dash. -func StringVarP(p *string, name, shorthand string, value string, usage string) { - CommandLine.VarP(newStringValue(value, p), name, shorthand, usage) -} - -// String defines a string flag with specified name, default value, and usage string. -// The return value is the address of a string variable that stores the value of the flag. -func (f *FlagSet) String(name string, value string, usage string) *string { - p := new(string) - f.StringVarP(p, name, "", value, usage) - return p -} - -// StringP is like String, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) StringP(name, shorthand string, value string, usage string) *string { - p := new(string) - f.StringVarP(p, name, shorthand, value, usage) - return p -} - -// String defines a string flag with specified name, default value, and usage string. -// The return value is the address of a string variable that stores the value of the flag. -func String(name string, value string, usage string) *string { - return CommandLine.StringP(name, "", value, usage) -} - -// StringP is like String, but accepts a shorthand letter that can be used after a single dash. 
-func StringP(name, shorthand string, value string, usage string) *string { - return CommandLine.StringP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/string_array.go b/vendor/github.com/spf13/pflag/string_array.go deleted file mode 100644 index 276b7ed49..000000000 --- a/vendor/github.com/spf13/pflag/string_array.go +++ /dev/null @@ -1,103 +0,0 @@ -package pflag - -// -- stringArray Value -type stringArrayValue struct { - value *[]string - changed bool -} - -func newStringArrayValue(val []string, p *[]string) *stringArrayValue { - ssv := new(stringArrayValue) - ssv.value = p - *ssv.value = val - return ssv -} - -func (s *stringArrayValue) Set(val string) error { - if !s.changed { - *s.value = []string{val} - s.changed = true - } else { - *s.value = append(*s.value, val) - } - return nil -} - -func (s *stringArrayValue) Type() string { - return "stringArray" -} - -func (s *stringArrayValue) String() string { - str, _ := writeAsCSV(*s.value) - return "[" + str + "]" -} - -func stringArrayConv(sval string) (interface{}, error) { - sval = sval[1 : len(sval)-1] - // An empty string would cause a array with one (empty) string - if len(sval) == 0 { - return []string{}, nil - } - return readAsCSV(sval) -} - -// GetStringArray return the []string value of a flag with the given name -func (f *FlagSet) GetStringArray(name string) ([]string, error) { - val, err := f.getFlagType(name, "stringArray", stringArrayConv) - if err != nil { - return []string{}, err - } - return val.([]string), nil -} - -// StringArrayVar defines a string flag with specified name, default value, and usage string. -// The argument p points to a []string variable in which to store the values of the multiple flags. -// The value of each argument will not try to be separated by comma -func (f *FlagSet) StringArrayVar(p *[]string, name string, value []string, usage string) { - f.VarP(newStringArrayValue(value, p), name, "", usage) -} - -// StringArrayVarP is like StringArrayVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) StringArrayVarP(p *[]string, name, shorthand string, value []string, usage string) { - f.VarP(newStringArrayValue(value, p), name, shorthand, usage) -} - -// StringArrayVar defines a string flag with specified name, default value, and usage string. -// The argument p points to a []string variable in which to store the value of the flag. -// The value of each argument will not try to be separated by comma -func StringArrayVar(p *[]string, name string, value []string, usage string) { - CommandLine.VarP(newStringArrayValue(value, p), name, "", usage) -} - -// StringArrayVarP is like StringArrayVar, but accepts a shorthand letter that can be used after a single dash. -func StringArrayVarP(p *[]string, name, shorthand string, value []string, usage string) { - CommandLine.VarP(newStringArrayValue(value, p), name, shorthand, usage) -} - -// StringArray defines a string flag with specified name, default value, and usage string. -// The return value is the address of a []string variable that stores the value of the flag. -// The value of each argument will not try to be separated by comma -func (f *FlagSet) StringArray(name string, value []string, usage string) *[]string { - p := []string{} - f.StringArrayVarP(&p, name, "", value, usage) - return &p -} - -// StringArrayP is like StringArray, but accepts a shorthand letter that can be used after a single dash. 
-func (f *FlagSet) StringArrayP(name, shorthand string, value []string, usage string) *[]string { - p := []string{} - f.StringArrayVarP(&p, name, shorthand, value, usage) - return &p -} - -// StringArray defines a string flag with specified name, default value, and usage string. -// The return value is the address of a []string variable that stores the value of the flag. -// The value of each argument will not try to be separated by comma -func StringArray(name string, value []string, usage string) *[]string { - return CommandLine.StringArrayP(name, "", value, usage) -} - -// StringArrayP is like StringArray, but accepts a shorthand letter that can be used after a single dash. -func StringArrayP(name, shorthand string, value []string, usage string) *[]string { - return CommandLine.StringArrayP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/string_slice.go b/vendor/github.com/spf13/pflag/string_slice.go deleted file mode 100644 index 05eee7543..000000000 --- a/vendor/github.com/spf13/pflag/string_slice.go +++ /dev/null @@ -1,129 +0,0 @@ -package pflag - -import ( - "bytes" - "encoding/csv" - "strings" -) - -// -- stringSlice Value -type stringSliceValue struct { - value *[]string - changed bool -} - -func newStringSliceValue(val []string, p *[]string) *stringSliceValue { - ssv := new(stringSliceValue) - ssv.value = p - *ssv.value = val - return ssv -} - -func readAsCSV(val string) ([]string, error) { - if val == "" { - return []string{}, nil - } - stringReader := strings.NewReader(val) - csvReader := csv.NewReader(stringReader) - return csvReader.Read() -} - -func writeAsCSV(vals []string) (string, error) { - b := &bytes.Buffer{} - w := csv.NewWriter(b) - err := w.Write(vals) - if err != nil { - return "", err - } - w.Flush() - return strings.TrimSuffix(b.String(), "\n"), nil -} - -func (s *stringSliceValue) Set(val string) error { - v, err := readAsCSV(val) - if err != nil { - return err - } - if !s.changed { - *s.value = v - } else { - *s.value = append(*s.value, v...) - } - s.changed = true - return nil -} - -func (s *stringSliceValue) Type() string { - return "stringSlice" -} - -func (s *stringSliceValue) String() string { - str, _ := writeAsCSV(*s.value) - return "[" + str + "]" -} - -func stringSliceConv(sval string) (interface{}, error) { - sval = sval[1 : len(sval)-1] - // An empty string would cause a slice with one (empty) string - if len(sval) == 0 { - return []string{}, nil - } - return readAsCSV(sval) -} - -// GetStringSlice return the []string value of a flag with the given name -func (f *FlagSet) GetStringSlice(name string) ([]string, error) { - val, err := f.getFlagType(name, "stringSlice", stringSliceConv) - if err != nil { - return []string{}, err - } - return val.([]string), nil -} - -// StringSliceVar defines a string flag with specified name, default value, and usage string. -// The argument p points to a []string variable in which to store the value of the flag. -func (f *FlagSet) StringSliceVar(p *[]string, name string, value []string, usage string) { - f.VarP(newStringSliceValue(value, p), name, "", usage) -} - -// StringSliceVarP is like StringSliceVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) StringSliceVarP(p *[]string, name, shorthand string, value []string, usage string) { - f.VarP(newStringSliceValue(value, p), name, shorthand, usage) -} - -// StringSliceVar defines a string flag with specified name, default value, and usage string. 
-// The argument p points to a []string variable in which to store the value of the flag. -func StringSliceVar(p *[]string, name string, value []string, usage string) { - CommandLine.VarP(newStringSliceValue(value, p), name, "", usage) -} - -// StringSliceVarP is like StringSliceVar, but accepts a shorthand letter that can be used after a single dash. -func StringSliceVarP(p *[]string, name, shorthand string, value []string, usage string) { - CommandLine.VarP(newStringSliceValue(value, p), name, shorthand, usage) -} - -// StringSlice defines a string flag with specified name, default value, and usage string. -// The return value is the address of a []string variable that stores the value of the flag. -func (f *FlagSet) StringSlice(name string, value []string, usage string) *[]string { - p := []string{} - f.StringSliceVarP(&p, name, "", value, usage) - return &p -} - -// StringSliceP is like StringSlice, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) StringSliceP(name, shorthand string, value []string, usage string) *[]string { - p := []string{} - f.StringSliceVarP(&p, name, shorthand, value, usage) - return &p -} - -// StringSlice defines a string flag with specified name, default value, and usage string. -// The return value is the address of a []string variable that stores the value of the flag. -func StringSlice(name string, value []string, usage string) *[]string { - return CommandLine.StringSliceP(name, "", value, usage) -} - -// StringSliceP is like StringSlice, but accepts a shorthand letter that can be used after a single dash. -func StringSliceP(name, shorthand string, value []string, usage string) *[]string { - return CommandLine.StringSliceP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/uint.go b/vendor/github.com/spf13/pflag/uint.go deleted file mode 100644 index dcbc2b758..000000000 --- a/vendor/github.com/spf13/pflag/uint.go +++ /dev/null @@ -1,88 +0,0 @@ -package pflag - -import "strconv" - -// -- uint Value -type uintValue uint - -func newUintValue(val uint, p *uint) *uintValue { - *p = val - return (*uintValue)(p) -} - -func (i *uintValue) Set(s string) error { - v, err := strconv.ParseUint(s, 0, 64) - *i = uintValue(v) - return err -} - -func (i *uintValue) Type() string { - return "uint" -} - -func (i *uintValue) String() string { return strconv.FormatUint(uint64(*i), 10) } - -func uintConv(sval string) (interface{}, error) { - v, err := strconv.ParseUint(sval, 0, 0) - if err != nil { - return 0, err - } - return uint(v), nil -} - -// GetUint return the uint value of a flag with the given name -func (f *FlagSet) GetUint(name string) (uint, error) { - val, err := f.getFlagType(name, "uint", uintConv) - if err != nil { - return 0, err - } - return val.(uint), nil -} - -// UintVar defines a uint flag with specified name, default value, and usage string. -// The argument p points to a uint variable in which to store the value of the flag. -func (f *FlagSet) UintVar(p *uint, name string, value uint, usage string) { - f.VarP(newUintValue(value, p), name, "", usage) -} - -// UintVarP is like UintVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) UintVarP(p *uint, name, shorthand string, value uint, usage string) { - f.VarP(newUintValue(value, p), name, shorthand, usage) -} - -// UintVar defines a uint flag with specified name, default value, and usage string. -// The argument p points to a uint variable in which to store the value of the flag. 
-func UintVar(p *uint, name string, value uint, usage string) { - CommandLine.VarP(newUintValue(value, p), name, "", usage) -} - -// UintVarP is like UintVar, but accepts a shorthand letter that can be used after a single dash. -func UintVarP(p *uint, name, shorthand string, value uint, usage string) { - CommandLine.VarP(newUintValue(value, p), name, shorthand, usage) -} - -// Uint defines a uint flag with specified name, default value, and usage string. -// The return value is the address of a uint variable that stores the value of the flag. -func (f *FlagSet) Uint(name string, value uint, usage string) *uint { - p := new(uint) - f.UintVarP(p, name, "", value, usage) - return p -} - -// UintP is like Uint, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) UintP(name, shorthand string, value uint, usage string) *uint { - p := new(uint) - f.UintVarP(p, name, shorthand, value, usage) - return p -} - -// Uint defines a uint flag with specified name, default value, and usage string. -// The return value is the address of a uint variable that stores the value of the flag. -func Uint(name string, value uint, usage string) *uint { - return CommandLine.UintP(name, "", value, usage) -} - -// UintP is like Uint, but accepts a shorthand letter that can be used after a single dash. -func UintP(name, shorthand string, value uint, usage string) *uint { - return CommandLine.UintP(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/uint16.go b/vendor/github.com/spf13/pflag/uint16.go deleted file mode 100644 index 7e9914edd..000000000 --- a/vendor/github.com/spf13/pflag/uint16.go +++ /dev/null @@ -1,88 +0,0 @@ -package pflag - -import "strconv" - -// -- uint16 value -type uint16Value uint16 - -func newUint16Value(val uint16, p *uint16) *uint16Value { - *p = val - return (*uint16Value)(p) -} - -func (i *uint16Value) Set(s string) error { - v, err := strconv.ParseUint(s, 0, 16) - *i = uint16Value(v) - return err -} - -func (i *uint16Value) Type() string { - return "uint16" -} - -func (i *uint16Value) String() string { return strconv.FormatUint(uint64(*i), 10) } - -func uint16Conv(sval string) (interface{}, error) { - v, err := strconv.ParseUint(sval, 0, 16) - if err != nil { - return 0, err - } - return uint16(v), nil -} - -// GetUint16 return the uint16 value of a flag with the given name -func (f *FlagSet) GetUint16(name string) (uint16, error) { - val, err := f.getFlagType(name, "uint16", uint16Conv) - if err != nil { - return 0, err - } - return val.(uint16), nil -} - -// Uint16Var defines a uint flag with specified name, default value, and usage string. -// The argument p points to a uint variable in which to store the value of the flag. -func (f *FlagSet) Uint16Var(p *uint16, name string, value uint16, usage string) { - f.VarP(newUint16Value(value, p), name, "", usage) -} - -// Uint16VarP is like Uint16Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Uint16VarP(p *uint16, name, shorthand string, value uint16, usage string) { - f.VarP(newUint16Value(value, p), name, shorthand, usage) -} - -// Uint16Var defines a uint flag with specified name, default value, and usage string. -// The argument p points to a uint variable in which to store the value of the flag. -func Uint16Var(p *uint16, name string, value uint16, usage string) { - CommandLine.VarP(newUint16Value(value, p), name, "", usage) -} - -// Uint16VarP is like Uint16Var, but accepts a shorthand letter that can be used after a single dash. 
-func Uint16VarP(p *uint16, name, shorthand string, value uint16, usage string) { - CommandLine.VarP(newUint16Value(value, p), name, shorthand, usage) -} - -// Uint16 defines a uint flag with specified name, default value, and usage string. -// The return value is the address of a uint variable that stores the value of the flag. -func (f *FlagSet) Uint16(name string, value uint16, usage string) *uint16 { - p := new(uint16) - f.Uint16VarP(p, name, "", value, usage) - return p -} - -// Uint16P is like Uint16, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Uint16P(name, shorthand string, value uint16, usage string) *uint16 { - p := new(uint16) - f.Uint16VarP(p, name, shorthand, value, usage) - return p -} - -// Uint16 defines a uint flag with specified name, default value, and usage string. -// The return value is the address of a uint variable that stores the value of the flag. -func Uint16(name string, value uint16, usage string) *uint16 { - return CommandLine.Uint16P(name, "", value, usage) -} - -// Uint16P is like Uint16, but accepts a shorthand letter that can be used after a single dash. -func Uint16P(name, shorthand string, value uint16, usage string) *uint16 { - return CommandLine.Uint16P(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/uint32.go b/vendor/github.com/spf13/pflag/uint32.go deleted file mode 100644 index d8024539b..000000000 --- a/vendor/github.com/spf13/pflag/uint32.go +++ /dev/null @@ -1,88 +0,0 @@ -package pflag - -import "strconv" - -// -- uint32 value -type uint32Value uint32 - -func newUint32Value(val uint32, p *uint32) *uint32Value { - *p = val - return (*uint32Value)(p) -} - -func (i *uint32Value) Set(s string) error { - v, err := strconv.ParseUint(s, 0, 32) - *i = uint32Value(v) - return err -} - -func (i *uint32Value) Type() string { - return "uint32" -} - -func (i *uint32Value) String() string { return strconv.FormatUint(uint64(*i), 10) } - -func uint32Conv(sval string) (interface{}, error) { - v, err := strconv.ParseUint(sval, 0, 32) - if err != nil { - return 0, err - } - return uint32(v), nil -} - -// GetUint32 return the uint32 value of a flag with the given name -func (f *FlagSet) GetUint32(name string) (uint32, error) { - val, err := f.getFlagType(name, "uint32", uint32Conv) - if err != nil { - return 0, err - } - return val.(uint32), nil -} - -// Uint32Var defines a uint32 flag with specified name, default value, and usage string. -// The argument p points to a uint32 variable in which to store the value of the flag. -func (f *FlagSet) Uint32Var(p *uint32, name string, value uint32, usage string) { - f.VarP(newUint32Value(value, p), name, "", usage) -} - -// Uint32VarP is like Uint32Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Uint32VarP(p *uint32, name, shorthand string, value uint32, usage string) { - f.VarP(newUint32Value(value, p), name, shorthand, usage) -} - -// Uint32Var defines a uint32 flag with specified name, default value, and usage string. -// The argument p points to a uint32 variable in which to store the value of the flag. -func Uint32Var(p *uint32, name string, value uint32, usage string) { - CommandLine.VarP(newUint32Value(value, p), name, "", usage) -} - -// Uint32VarP is like Uint32Var, but accepts a shorthand letter that can be used after a single dash. 
-func Uint32VarP(p *uint32, name, shorthand string, value uint32, usage string) { - CommandLine.VarP(newUint32Value(value, p), name, shorthand, usage) -} - -// Uint32 defines a uint32 flag with specified name, default value, and usage string. -// The return value is the address of a uint32 variable that stores the value of the flag. -func (f *FlagSet) Uint32(name string, value uint32, usage string) *uint32 { - p := new(uint32) - f.Uint32VarP(p, name, "", value, usage) - return p -} - -// Uint32P is like Uint32, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Uint32P(name, shorthand string, value uint32, usage string) *uint32 { - p := new(uint32) - f.Uint32VarP(p, name, shorthand, value, usage) - return p -} - -// Uint32 defines a uint32 flag with specified name, default value, and usage string. -// The return value is the address of a uint32 variable that stores the value of the flag. -func Uint32(name string, value uint32, usage string) *uint32 { - return CommandLine.Uint32P(name, "", value, usage) -} - -// Uint32P is like Uint32, but accepts a shorthand letter that can be used after a single dash. -func Uint32P(name, shorthand string, value uint32, usage string) *uint32 { - return CommandLine.Uint32P(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/uint64.go b/vendor/github.com/spf13/pflag/uint64.go deleted file mode 100644 index f62240f2c..000000000 --- a/vendor/github.com/spf13/pflag/uint64.go +++ /dev/null @@ -1,88 +0,0 @@ -package pflag - -import "strconv" - -// -- uint64 Value -type uint64Value uint64 - -func newUint64Value(val uint64, p *uint64) *uint64Value { - *p = val - return (*uint64Value)(p) -} - -func (i *uint64Value) Set(s string) error { - v, err := strconv.ParseUint(s, 0, 64) - *i = uint64Value(v) - return err -} - -func (i *uint64Value) Type() string { - return "uint64" -} - -func (i *uint64Value) String() string { return strconv.FormatUint(uint64(*i), 10) } - -func uint64Conv(sval string) (interface{}, error) { - v, err := strconv.ParseUint(sval, 0, 64) - if err != nil { - return 0, err - } - return uint64(v), nil -} - -// GetUint64 return the uint64 value of a flag with the given name -func (f *FlagSet) GetUint64(name string) (uint64, error) { - val, err := f.getFlagType(name, "uint64", uint64Conv) - if err != nil { - return 0, err - } - return val.(uint64), nil -} - -// Uint64Var defines a uint64 flag with specified name, default value, and usage string. -// The argument p points to a uint64 variable in which to store the value of the flag. -func (f *FlagSet) Uint64Var(p *uint64, name string, value uint64, usage string) { - f.VarP(newUint64Value(value, p), name, "", usage) -} - -// Uint64VarP is like Uint64Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Uint64VarP(p *uint64, name, shorthand string, value uint64, usage string) { - f.VarP(newUint64Value(value, p), name, shorthand, usage) -} - -// Uint64Var defines a uint64 flag with specified name, default value, and usage string. -// The argument p points to a uint64 variable in which to store the value of the flag. -func Uint64Var(p *uint64, name string, value uint64, usage string) { - CommandLine.VarP(newUint64Value(value, p), name, "", usage) -} - -// Uint64VarP is like Uint64Var, but accepts a shorthand letter that can be used after a single dash. 
-func Uint64VarP(p *uint64, name, shorthand string, value uint64, usage string) { - CommandLine.VarP(newUint64Value(value, p), name, shorthand, usage) -} - -// Uint64 defines a uint64 flag with specified name, default value, and usage string. -// The return value is the address of a uint64 variable that stores the value of the flag. -func (f *FlagSet) Uint64(name string, value uint64, usage string) *uint64 { - p := new(uint64) - f.Uint64VarP(p, name, "", value, usage) - return p -} - -// Uint64P is like Uint64, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Uint64P(name, shorthand string, value uint64, usage string) *uint64 { - p := new(uint64) - f.Uint64VarP(p, name, shorthand, value, usage) - return p -} - -// Uint64 defines a uint64 flag with specified name, default value, and usage string. -// The return value is the address of a uint64 variable that stores the value of the flag. -func Uint64(name string, value uint64, usage string) *uint64 { - return CommandLine.Uint64P(name, "", value, usage) -} - -// Uint64P is like Uint64, but accepts a shorthand letter that can be used after a single dash. -func Uint64P(name, shorthand string, value uint64, usage string) *uint64 { - return CommandLine.Uint64P(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/uint8.go b/vendor/github.com/spf13/pflag/uint8.go deleted file mode 100644 index bb0e83c1f..000000000 --- a/vendor/github.com/spf13/pflag/uint8.go +++ /dev/null @@ -1,88 +0,0 @@ -package pflag - -import "strconv" - -// -- uint8 Value -type uint8Value uint8 - -func newUint8Value(val uint8, p *uint8) *uint8Value { - *p = val - return (*uint8Value)(p) -} - -func (i *uint8Value) Set(s string) error { - v, err := strconv.ParseUint(s, 0, 8) - *i = uint8Value(v) - return err -} - -func (i *uint8Value) Type() string { - return "uint8" -} - -func (i *uint8Value) String() string { return strconv.FormatUint(uint64(*i), 10) } - -func uint8Conv(sval string) (interface{}, error) { - v, err := strconv.ParseUint(sval, 0, 8) - if err != nil { - return 0, err - } - return uint8(v), nil -} - -// GetUint8 return the uint8 value of a flag with the given name -func (f *FlagSet) GetUint8(name string) (uint8, error) { - val, err := f.getFlagType(name, "uint8", uint8Conv) - if err != nil { - return 0, err - } - return val.(uint8), nil -} - -// Uint8Var defines a uint8 flag with specified name, default value, and usage string. -// The argument p points to a uint8 variable in which to store the value of the flag. -func (f *FlagSet) Uint8Var(p *uint8, name string, value uint8, usage string) { - f.VarP(newUint8Value(value, p), name, "", usage) -} - -// Uint8VarP is like Uint8Var, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Uint8VarP(p *uint8, name, shorthand string, value uint8, usage string) { - f.VarP(newUint8Value(value, p), name, shorthand, usage) -} - -// Uint8Var defines a uint8 flag with specified name, default value, and usage string. -// The argument p points to a uint8 variable in which to store the value of the flag. -func Uint8Var(p *uint8, name string, value uint8, usage string) { - CommandLine.VarP(newUint8Value(value, p), name, "", usage) -} - -// Uint8VarP is like Uint8Var, but accepts a shorthand letter that can be used after a single dash. 
-func Uint8VarP(p *uint8, name, shorthand string, value uint8, usage string) { - CommandLine.VarP(newUint8Value(value, p), name, shorthand, usage) -} - -// Uint8 defines a uint8 flag with specified name, default value, and usage string. -// The return value is the address of a uint8 variable that stores the value of the flag. -func (f *FlagSet) Uint8(name string, value uint8, usage string) *uint8 { - p := new(uint8) - f.Uint8VarP(p, name, "", value, usage) - return p -} - -// Uint8P is like Uint8, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) Uint8P(name, shorthand string, value uint8, usage string) *uint8 { - p := new(uint8) - f.Uint8VarP(p, name, shorthand, value, usage) - return p -} - -// Uint8 defines a uint8 flag with specified name, default value, and usage string. -// The return value is the address of a uint8 variable that stores the value of the flag. -func Uint8(name string, value uint8, usage string) *uint8 { - return CommandLine.Uint8P(name, "", value, usage) -} - -// Uint8P is like Uint8, but accepts a shorthand letter that can be used after a single dash. -func Uint8P(name, shorthand string, value uint8, usage string) *uint8 { - return CommandLine.Uint8P(name, shorthand, value, usage) -} diff --git a/vendor/github.com/spf13/pflag/uint_slice.go b/vendor/github.com/spf13/pflag/uint_slice.go deleted file mode 100644 index edd94c600..000000000 --- a/vendor/github.com/spf13/pflag/uint_slice.go +++ /dev/null @@ -1,126 +0,0 @@ -package pflag - -import ( - "fmt" - "strconv" - "strings" -) - -// -- uintSlice Value -type uintSliceValue struct { - value *[]uint - changed bool -} - -func newUintSliceValue(val []uint, p *[]uint) *uintSliceValue { - uisv := new(uintSliceValue) - uisv.value = p - *uisv.value = val - return uisv -} - -func (s *uintSliceValue) Set(val string) error { - ss := strings.Split(val, ",") - out := make([]uint, len(ss)) - for i, d := range ss { - u, err := strconv.ParseUint(d, 10, 0) - if err != nil { - return err - } - out[i] = uint(u) - } - if !s.changed { - *s.value = out - } else { - *s.value = append(*s.value, out...) - } - s.changed = true - return nil -} - -func (s *uintSliceValue) Type() string { - return "uintSlice" -} - -func (s *uintSliceValue) String() string { - out := make([]string, len(*s.value)) - for i, d := range *s.value { - out[i] = fmt.Sprintf("%d", d) - } - return "[" + strings.Join(out, ",") + "]" -} - -func uintSliceConv(val string) (interface{}, error) { - val = strings.Trim(val, "[]") - // Empty string would cause a slice with one (empty) entry - if len(val) == 0 { - return []uint{}, nil - } - ss := strings.Split(val, ",") - out := make([]uint, len(ss)) - for i, d := range ss { - u, err := strconv.ParseUint(d, 10, 0) - if err != nil { - return nil, err - } - out[i] = uint(u) - } - return out, nil -} - -// GetUintSlice returns the []uint value of a flag with the given name. -func (f *FlagSet) GetUintSlice(name string) ([]uint, error) { - val, err := f.getFlagType(name, "uintSlice", uintSliceConv) - if err != nil { - return []uint{}, err - } - return val.([]uint), nil -} - -// UintSliceVar defines a uintSlice flag with specified name, default value, and usage string. -// The argument p points to a []uint variable in which to store the value of the flag. 
-func (f *FlagSet) UintSliceVar(p *[]uint, name string, value []uint, usage string) { - f.VarP(newUintSliceValue(value, p), name, "", usage) -} - -// UintSliceVarP is like UintSliceVar, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) UintSliceVarP(p *[]uint, name, shorthand string, value []uint, usage string) { - f.VarP(newUintSliceValue(value, p), name, shorthand, usage) -} - -// UintSliceVar defines a uint[] flag with specified name, default value, and usage string. -// The argument p points to a uint[] variable in which to store the value of the flag. -func UintSliceVar(p *[]uint, name string, value []uint, usage string) { - CommandLine.VarP(newUintSliceValue(value, p), name, "", usage) -} - -// UintSliceVarP is like the UintSliceVar, but accepts a shorthand letter that can be used after a single dash. -func UintSliceVarP(p *[]uint, name, shorthand string, value []uint, usage string) { - CommandLine.VarP(newUintSliceValue(value, p), name, shorthand, usage) -} - -// UintSlice defines a []uint flag with specified name, default value, and usage string. -// The return value is the address of a []uint variable that stores the value of the flag. -func (f *FlagSet) UintSlice(name string, value []uint, usage string) *[]uint { - p := []uint{} - f.UintSliceVarP(&p, name, "", value, usage) - return &p -} - -// UintSliceP is like UintSlice, but accepts a shorthand letter that can be used after a single dash. -func (f *FlagSet) UintSliceP(name, shorthand string, value []uint, usage string) *[]uint { - p := []uint{} - f.UintSliceVarP(&p, name, shorthand, value, usage) - return &p -} - -// UintSlice defines a []uint flag with specified name, default value, and usage string. -// The return value is the address of a []uint variable that stores the value of the flag. -func UintSlice(name string, value []uint, usage string) *[]uint { - return CommandLine.UintSliceP(name, "", value, usage) -} - -// UintSliceP is like UintSlice, but accepts a shorthand letter that can be used after a single dash. -func UintSliceP(name, shorthand string, value []uint, usage string) *[]uint { - return CommandLine.UintSliceP(name, shorthand, value, usage) -} From 6c6499bdd52e2f05bc65a2f9aefeab5e16ebaa22 Mon Sep 17 00:00:00 2001 From: Ian Campbell Date: Mon, 10 Jul 2017 14:28:36 +0100 Subject: [PATCH 4/4] Update docker/docker vendor to 6978a6e25a2e6063f280ec842bd0f3eae99426e1 This includes https://github.com/moby/moby/pull/34040 which fixes Windows build issues. Note that this pulls in more than 500 (non merge) commits as well as the fix we are interested in. A couple of new deps are pulled in, versions taken from vendor/github.com/docker/docker/vendor.conf. 
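For reference, the new dependencies and the revisions they are pinned at (taken directly from
the vendor.conf hunk below) are:

    github.com/gogo/protobuf v0.4
    github.com/opencontainers/image-spec f03dbe35d449c54915d235f1a3cf8f585a24babe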
Signed-off-by: Ian Campbell --- vendor.conf | 4 +- vendor/github.com/docker/docker/README.md | 2 +- vendor/github.com/docker/docker/api/README.md | 4 +- vendor/github.com/docker/docker/api/common.go | 105 +- .../docker/docker/api/types/client.go | 19 +- .../docker/docker/api/types/configs.go | 1 + .../docker/api/types/container/config.go | 6 + .../api/types/container/waitcondition.go | 22 + .../docker/docker/api/types/events/events.go | 8 + .../docker/api/types/network/network.go | 6 + .../docker/api/types/registry/registry.go | 21 +- .../docker/docker/api/types/swarm/common.go | 13 + .../docker/docker/api/types/swarm/config.go | 31 + .../docker/api/types/swarm/container.go | 1 + .../docker/docker/api/types/swarm/network.go | 23 +- .../docker/docker/api/types/swarm/node.go | 1 + .../docker/api/types/swarm/runtime/gen.go | 3 + .../api/types/swarm/runtime/plugin.pb.go | 712 +++++++++ .../api/types/swarm/runtime/plugin.proto | 18 + .../docker/docker/api/types/swarm/swarm.go | 20 +- .../docker/docker/api/types/swarm/task.go | 20 +- .../docker/docker/api/types/types.go | 54 +- .../docker/docker/api/types/volume.go | 3 + .../docker/docker/client/build_prune.go | 30 + .../github.com/docker/docker/client/client.go | 69 +- .../docker/docker/client/config_create.go | 25 + .../docker/docker/client/config_inspect.go | 37 + .../docker/docker/client/config_list.go | 38 + .../docker/docker/client/config_remove.go | 13 + .../docker/docker/client/config_update.go | 21 + .../docker/docker/client/container_attach.go | 20 + .../docker/docker/client/container_copy.go | 7 +- .../docker/docker/client/container_logs.go | 20 + .../docker/docker/client/container_wait.go | 82 +- .../docker/client/distribution_inspect.go | 35 + .../github.com/docker/docker/client/errors.go | 22 + .../github.com/docker/docker/client/hijack.go | 85 +- .../docker/docker/client/image_build.go | 3 + .../docker/docker/client/interface.go | 29 +- .../docker/docker/client/network_inspect.go | 11 +- .../docker/docker/client/parse_logs.go | 41 + .../github.com/docker/docker/client/ping.go | 18 +- .../docker/docker/client/plugin_upgrade.go | 6 +- .../docker/docker/client/request.go | 77 +- .../docker/docker/client/secret_create.go | 3 + .../docker/docker/client/secret_inspect.go | 3 + .../docker/docker/client/secret_list.go | 3 + .../docker/docker/client/secret_remove.go | 3 + .../docker/docker/client/secret_update.go | 5 +- .../docker/docker/client/service_create.go | 132 +- .../docker/docker/client/service_update.go | 55 +- .../docker/docker/client/session.go | 19 + .../github.com/docker/docker/client/utils.go | 3 +- .../github.com/docker/docker/hack/README.md | 60 + .../hack/integration-cli-on-swarm/README.md | 2 + .../docker/docker/pkg/ioutils/fmt.go | 22 - .../docker/docker/pkg/ioutils/multireader.go | 224 --- .../docker/docker/pkg/mount/flags.go | 149 ++ .../docker/docker/pkg/mount/flags_freebsd.go | 49 + .../docker/docker/pkg/mount/flags_linux.go | 87 ++ .../docker/pkg/mount/flags_unsupported.go | 31 + .../docker/docker/pkg/mount/mount.go | 86 ++ .../docker/pkg/mount/mounter_freebsd.go | 59 + .../docker/docker/pkg/mount/mounter_linux.go | 56 + .../docker/pkg/mount/mounter_solaris.go | 33 + .../docker/pkg/mount/mounter_unsupported.go | 11 + .../docker/docker/pkg/mount/mountinfo.go | 54 + .../docker/pkg/mount/mountinfo_freebsd.go | 41 + .../docker/pkg/mount/mountinfo_linux.go | 95 ++ .../docker/pkg/mount/mountinfo_solaris.go | 37 + .../docker/pkg/mount/mountinfo_unsupported.go | 12 + .../docker/pkg/mount/mountinfo_windows.go | 6 + 
.../docker/pkg/mount/sharedsubtree_linux.go | 69 + .../docker/pkg/mount/sharedsubtree_solaris.go | 58 + .../docker/docker/pkg/system/chtimes.go | 17 - .../docker/docker/pkg/system/filesys.go | 9 +- .../docker/pkg/system/filesys_windows.go | 29 +- .../docker/docker/pkg/system/init.go | 22 + .../docker/docker/pkg/system/init_windows.go | 17 + .../docker/docker/pkg/system/lcow_unix.go | 8 + .../docker/docker/pkg/system/lcow_windows.go | 6 + .../docker/docker/pkg/system/path.go | 21 + .../docker/docker/pkg/system/path_unix.go | 5 - .../docker/docker/pkg/system/path_windows.go | 6 +- .../github.com/docker/docker/pkg/system/rm.go | 80 + .../docker/pkg/system/syscall_windows.go | 21 +- .../pkg/tlsconfig/tlsconfig_clone_go16.go | 31 - vendor/github.com/docker/docker/vendor.conf | 65 +- vendor/github.com/gogo/protobuf/LICENSE | 36 + vendor/github.com/gogo/protobuf/README | 258 ++++ vendor/github.com/gogo/protobuf/Readme.md | 117 ++ .../github.com/gogo/protobuf/proto/clone.go | 234 +++ .../github.com/gogo/protobuf/proto/decode.go | 978 ++++++++++++ .../gogo/protobuf/proto/decode_gogo.go | 172 +++ .../gogo/protobuf/proto/duration.go | 100 ++ .../gogo/protobuf/proto/duration_gogo.go | 203 +++ .../github.com/gogo/protobuf/proto/encode.go | 1362 +++++++++++++++++ .../gogo/protobuf/proto/encode_gogo.go | 350 +++++ .../github.com/gogo/protobuf/proto/equal.go | 300 ++++ .../gogo/protobuf/proto/extensions.go | 693 +++++++++ .../gogo/protobuf/proto/extensions_gogo.go | 294 ++++ vendor/github.com/gogo/protobuf/proto/lib.go | 898 +++++++++++ .../gogo/protobuf/proto/lib_gogo.go | 42 + .../gogo/protobuf/proto/message_set.go | 311 ++++ .../gogo/protobuf/proto/pointer_reflect.go | 484 ++++++ .../protobuf/proto/pointer_reflect_gogo.go | 85 + .../gogo/protobuf/proto/pointer_unsafe.go | 270 ++++ .../protobuf/proto/pointer_unsafe_gogo.go | 128 ++ .../gogo/protobuf/proto/properties.go | 968 ++++++++++++ .../gogo/protobuf/proto/properties_gogo.go | 111 ++ .../gogo/protobuf/proto/skip_gogo.go | 119 ++ vendor/github.com/gogo/protobuf/proto/text.go | 928 +++++++++++ .../gogo/protobuf/proto/text_gogo.go | 57 + .../gogo/protobuf/proto/text_parser.go | 1013 ++++++++++++ .../gogo/protobuf/proto/timestamp.go | 113 ++ .../gogo/protobuf/proto/timestamp_gogo.go | 229 +++ .../protobuf/google/protobuf/any.proto | 140 ++ .../google/protobuf/compiler/plugin.proto | 150 ++ .../protobuf/google/protobuf/descriptor.proto | 813 ++++++++++ .../protobuf/google/protobuf/duration.proto | 98 ++ .../protobuf/google/protobuf/empty.proto | 53 + .../protobuf/google/protobuf/field_mask.proto | 246 +++ .../protobuf/google/protobuf/struct.proto | 96 ++ .../protobuf/google/protobuf/timestamp.proto | 111 ++ .../protobuf/google/protobuf/wrappers.proto | 119 ++ .../opencontainers/image-spec/LICENSE | 191 +++ .../opencontainers/image-spec/README.md | 167 ++ .../image-spec/specs-go/v1/config.go | 103 ++ .../image-spec/specs-go/v1/descriptor.go | 68 + .../image-spec/specs-go/v1/index.go | 29 + .../image-spec/specs-go/v1/layout.go | 28 + .../image-spec/specs-go/v1/manifest.go | 32 + .../image-spec/specs-go/v1/mediatype.go | 48 + .../image-spec/specs-go/version.go | 32 + .../image-spec/specs-go/versioned.go | 23 + 135 files changed, 16391 insertions(+), 599 deletions(-) create mode 100644 vendor/github.com/docker/docker/api/types/container/waitcondition.go create mode 100644 vendor/github.com/docker/docker/api/types/swarm/config.go create mode 100644 vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go create mode 100644 
vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go create mode 100644 vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto create mode 100644 vendor/github.com/docker/docker/client/build_prune.go create mode 100644 vendor/github.com/docker/docker/client/config_create.go create mode 100644 vendor/github.com/docker/docker/client/config_inspect.go create mode 100644 vendor/github.com/docker/docker/client/config_list.go create mode 100644 vendor/github.com/docker/docker/client/config_remove.go create mode 100644 vendor/github.com/docker/docker/client/config_update.go create mode 100644 vendor/github.com/docker/docker/client/distribution_inspect.go create mode 100644 vendor/github.com/docker/docker/client/parse_logs.go create mode 100644 vendor/github.com/docker/docker/client/session.go create mode 100644 vendor/github.com/docker/docker/hack/README.md delete mode 100644 vendor/github.com/docker/docker/pkg/ioutils/fmt.go delete mode 100644 vendor/github.com/docker/docker/pkg/ioutils/multireader.go create mode 100644 vendor/github.com/docker/docker/pkg/mount/flags.go create mode 100644 vendor/github.com/docker/docker/pkg/mount/flags_freebsd.go create mode 100644 vendor/github.com/docker/docker/pkg/mount/flags_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/mount/flags_unsupported.go create mode 100644 vendor/github.com/docker/docker/pkg/mount/mount.go create mode 100644 vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go create mode 100644 vendor/github.com/docker/docker/pkg/mount/mounter_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/mount/mounter_solaris.go create mode 100644 vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go create mode 100644 vendor/github.com/docker/docker/pkg/mount/mountinfo.go create mode 100644 vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go create mode 100644 vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/mount/mountinfo_solaris.go create mode 100644 vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go create mode 100644 vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/mount/sharedsubtree_solaris.go create mode 100644 vendor/github.com/docker/docker/pkg/system/init.go create mode 100644 vendor/github.com/docker/docker/pkg/system/init_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/system/lcow_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/system/lcow_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/system/path.go create mode 100644 vendor/github.com/docker/docker/pkg/system/rm.go delete mode 100644 vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone_go16.go create mode 100644 vendor/github.com/gogo/protobuf/LICENSE create mode 100644 vendor/github.com/gogo/protobuf/README create mode 100644 vendor/github.com/gogo/protobuf/Readme.md create mode 100644 vendor/github.com/gogo/protobuf/proto/clone.go create mode 100644 vendor/github.com/gogo/protobuf/proto/decode.go create mode 100644 vendor/github.com/gogo/protobuf/proto/decode_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/duration.go create mode 100644 vendor/github.com/gogo/protobuf/proto/duration_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/encode.go create mode 100644 
vendor/github.com/gogo/protobuf/proto/encode_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/equal.go create mode 100644 vendor/github.com/gogo/protobuf/proto/extensions.go create mode 100644 vendor/github.com/gogo/protobuf/proto/extensions_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/lib.go create mode 100644 vendor/github.com/gogo/protobuf/proto/lib_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/message_set.go create mode 100644 vendor/github.com/gogo/protobuf/proto/pointer_reflect.go create mode 100644 vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go create mode 100644 vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/properties.go create mode 100644 vendor/github.com/gogo/protobuf/proto/properties_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/skip_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/text.go create mode 100644 vendor/github.com/gogo/protobuf/proto/text_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/text_parser.go create mode 100644 vendor/github.com/gogo/protobuf/proto/timestamp.go create mode 100644 vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/protobuf/google/protobuf/any.proto create mode 100644 vendor/github.com/gogo/protobuf/protobuf/google/protobuf/compiler/plugin.proto create mode 100644 vendor/github.com/gogo/protobuf/protobuf/google/protobuf/descriptor.proto create mode 100644 vendor/github.com/gogo/protobuf/protobuf/google/protobuf/duration.proto create mode 100644 vendor/github.com/gogo/protobuf/protobuf/google/protobuf/empty.proto create mode 100644 vendor/github.com/gogo/protobuf/protobuf/google/protobuf/field_mask.proto create mode 100644 vendor/github.com/gogo/protobuf/protobuf/google/protobuf/struct.proto create mode 100644 vendor/github.com/gogo/protobuf/protobuf/google/protobuf/timestamp.proto create mode 100644 vendor/github.com/gogo/protobuf/protobuf/google/protobuf/wrappers.proto create mode 100644 vendor/github.com/opencontainers/image-spec/LICENSE create mode 100644 vendor/github.com/opencontainers/image-spec/README.md create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/version.go create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/versioned.go diff --git a/vendor.conf b/vendor.conf index 33f8b6eb9..6d40c8a39 100644 --- a/vendor.conf +++ b/vendor.conf @@ -1,15 +1,17 @@ github.com/Sirupsen/logrus 10f801ebc38b33738c9d17d50860f484a0988ff5 github.com/agl/ed25519 5312a61534124124185d41f09206b9fef1d88403 github.com/docker/distribution b38e5838b7b2f2ad48e06ec4b500011976080621 -github.com/docker/docker 420b67f892d5424be59a788a51e2c4e64bb9cd66 +github.com/docker/docker 6978a6e25a2e6063f280ec842bd0f3eae99426e1 github.com/docker/docker-credential-helpers 71779cf7f595bf47f5a56d0629ce044e59b1cffa 
github.com/docker/go d30aec9fd63c35133f8f79c3412ad91a3b08be06 github.com/docker/go-connections e15c02316c12de00874640cd76311849de2aeed5 github.com/docker/go-units 651fc226e7441360384da338d0fd37f2440ffbe3 github.com/docker/libtrust 9cbd2a1374f46905c68a4eb3694a130610adc62a github.com/docker/notary v0.4.2 +github.com/gogo/protobuf v0.4 github.com/gorilla/mux 4c1c3952b7d9d0a061a3fa7b36fd373ba0398ebc github.com/opencontainers/go-digest a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb +github.com/opencontainers/image-spec f03dbe35d449c54915d235f1a3cf8f585a24babe github.com/opencontainers/runc 429a5387123625040bacfbb60d96b1cbd02293ab github.com/opencontainers/runtime-spec 198f23f827eea397d4331d7eb048d9d4c7ff7bee github.com/pkg/errors ff09b135c25aae272398c51a07235b90a75aa4f0 diff --git a/vendor/github.com/docker/docker/README.md b/vendor/github.com/docker/docker/README.md index f768ff844..533d7717d 100644 --- a/vendor/github.com/docker/docker/README.md +++ b/vendor/github.com/docker/docker/README.md @@ -51,7 +51,7 @@ Moby is NOT recommended for: - Application developers looking for an easy way to run their applications in containers. We recommend Docker CE instead. - Enterprise IT and development teams looking for a ready-to-use, commercially supported container platform. We recommend Docker EE instead. -- Anyone curious about containers and looking for an easy way to learn. We recommend the docker.com website instead. +- Anyone curious about containers and looking for an easy way to learn. We recommend the [docker.com](https://www.docker.com/) website instead. # Transitioning to Moby diff --git a/vendor/github.com/docker/docker/api/README.md b/vendor/github.com/docker/docker/api/README.md index 8954ed017..bb8813252 100644 --- a/vendor/github.com/docker/docker/api/README.md +++ b/vendor/github.com/docker/docker/api/README.md @@ -14,8 +14,8 @@ It consists of various components in this repository: The API is defined by the [Swagger](http://swagger.io/specification/) definition in `api/swagger.yaml`. This definition can be used to: -1. To automatically generate documentation. -2. To automatically generate the Go server and client. (A work-in-progress.) +1. Automatically generate documentation. +2. Automatically generate the Go server and client. (A work-in-progress.) 3. Provide a machine readable version of the API for introspecting what it can do, automatically generating clients for other languages, etc. ## Updating the API documentation diff --git a/vendor/github.com/docker/docker/api/common.go b/vendor/github.com/docker/docker/api/common.go index 9df6255c0..859daf602 100644 --- a/vendor/github.com/docker/docker/api/common.go +++ b/vendor/github.com/docker/docker/api/common.go @@ -4,15 +4,9 @@ import ( "encoding/json" "encoding/pem" "fmt" - "mime" "os" "path/filepath" - "sort" - "strconv" - "strings" - "github.com/Sirupsen/logrus" - "github.com/docker/docker/api/types" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/system" "github.com/docker/libtrust" @@ -21,112 +15,17 @@ import ( // Common constants for daemon and client. const ( // DefaultVersion of Current REST API - DefaultVersion string = "1.30" + DefaultVersion string = "1.31" // NoBaseImageSpecifier is the symbol used by the FROM // command to specify that no base image is to be used. 
NoBaseImageSpecifier string = "scratch" ) -// byPortInfo is a temporary type used to sort types.Port by its fields -type byPortInfo []types.Port - -func (r byPortInfo) Len() int { return len(r) } -func (r byPortInfo) Swap(i, j int) { r[i], r[j] = r[j], r[i] } -func (r byPortInfo) Less(i, j int) bool { - if r[i].PrivatePort != r[j].PrivatePort { - return r[i].PrivatePort < r[j].PrivatePort - } - - if r[i].IP != r[j].IP { - return r[i].IP < r[j].IP - } - - if r[i].PublicPort != r[j].PublicPort { - return r[i].PublicPort < r[j].PublicPort - } - - return r[i].Type < r[j].Type -} - -// DisplayablePorts returns formatted string representing open ports of container -// e.g. "0.0.0.0:80->9090/tcp, 9988/tcp" -// it's used by command 'docker ps' -func DisplayablePorts(ports []types.Port) string { - type portGroup struct { - first uint16 - last uint16 - } - groupMap := make(map[string]*portGroup) - var result []string - var hostMappings []string - var groupMapKeys []string - sort.Sort(byPortInfo(ports)) - for _, port := range ports { - current := port.PrivatePort - portKey := port.Type - if port.IP != "" { - if port.PublicPort != current { - hostMappings = append(hostMappings, fmt.Sprintf("%s:%d->%d/%s", port.IP, port.PublicPort, port.PrivatePort, port.Type)) - continue - } - portKey = fmt.Sprintf("%s/%s", port.IP, port.Type) - } - group := groupMap[portKey] - - if group == nil { - groupMap[portKey] = &portGroup{first: current, last: current} - // record order that groupMap keys are created - groupMapKeys = append(groupMapKeys, portKey) - continue - } - if current == (group.last + 1) { - group.last = current - continue - } - - result = append(result, formGroup(portKey, group.first, group.last)) - groupMap[portKey] = &portGroup{first: current, last: current} - } - for _, portKey := range groupMapKeys { - g := groupMap[portKey] - result = append(result, formGroup(portKey, g.first, g.last)) - } - result = append(result, hostMappings...) 
- return strings.Join(result, ", ") -} - -func formGroup(key string, start, last uint16) string { - parts := strings.Split(key, "/") - groupType := parts[0] - var ip string - if len(parts) > 1 { - ip = parts[0] - groupType = parts[1] - } - group := strconv.Itoa(int(start)) - if start != last { - group = fmt.Sprintf("%s-%d", group, last) - } - if ip != "" { - group = fmt.Sprintf("%s:%s->%s", ip, group, group) - } - return fmt.Sprintf("%s/%s", group, groupType) -} - -// MatchesContentType validates the content type against the expected one -func MatchesContentType(contentType, expectedType string) bool { - mimetype, _, err := mime.ParseMediaType(contentType) - if err != nil { - logrus.Errorf("Error parsing media type: %s error: %v", contentType, err) - } - return err == nil && mimetype == expectedType -} - // LoadOrCreateTrustKey attempts to load the libtrust key at the given path, // otherwise generates a new one func LoadOrCreateTrustKey(trustKeyPath string) (libtrust.PrivateKey, error) { - err := system.MkdirAll(filepath.Dir(trustKeyPath), 0700) + err := system.MkdirAll(filepath.Dir(trustKeyPath), 0700, "") if err != nil { return nil, err } diff --git a/vendor/github.com/docker/docker/api/types/client.go b/vendor/github.com/docker/docker/api/types/client.go index 00255271b..18a1263f1 100644 --- a/vendor/github.com/docker/docker/api/types/client.go +++ b/vendor/github.com/docker/docker/api/types/client.go @@ -7,7 +7,7 @@ import ( "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/filters" - "github.com/docker/go-units" + units "github.com/docker/go-units" ) // CheckpointCreateOptions holds parameters to create a checkpoint from a container @@ -178,6 +178,11 @@ type ImageBuildOptions struct { SecurityOpt []string ExtraHosts []string // List of extra hosts Target string + SessionID string + + // TODO @jhowardmsft LCOW Support: This will require extending to include + // `Platform string`, but is ommited for now as it's hard-coded temporarily + // to avoid API changes. } // ImageBuildResponse holds information @@ -276,6 +281,12 @@ type ServiceCreateOptions struct { // // This field follows the format of the X-Registry-Auth header. EncodedRegistryAuth string + + // QueryRegistry indicates whether the service update requires + // contacting a registry. A registry may be contacted to retrieve + // the image digest and manifest, which in turn can be used to update + // platform or other information about the service. + QueryRegistry bool } // ServiceCreateResponse contains the information returned to a client @@ -315,6 +326,12 @@ type ServiceUpdateOptions struct { // The valid values are "previous" and "none". An empty value is the // same as "none". Rollback string + + // QueryRegistry indicates whether the service update requires + // contacting a registry. A registry may be contacted to retrieve + // the image digest and manifest, which in turn can be used to update + // platform or other information about the service. + QueryRegistry bool } // ServiceListOptions holds parameters to list services with. 
diff --git a/vendor/github.com/docker/docker/api/types/configs.go b/vendor/github.com/docker/docker/api/types/configs.go index 20c19f213..e4d2ce6e3 100644 --- a/vendor/github.com/docker/docker/api/types/configs.go +++ b/vendor/github.com/docker/docker/api/types/configs.go @@ -16,6 +16,7 @@ type ContainerCreateConfig struct { HostConfig *container.HostConfig NetworkingConfig *network.NetworkingConfig AdjustCPUShares bool + Platform string } // ContainerRmConfig holds arguments for the container remove diff --git a/vendor/github.com/docker/docker/api/types/container/config.go b/vendor/github.com/docker/docker/api/types/container/config.go index 02e1b87a7..55a03fc98 100644 --- a/vendor/github.com/docker/docker/api/types/container/config.go +++ b/vendor/github.com/docker/docker/api/types/container/config.go @@ -7,6 +7,12 @@ import ( "github.com/docker/go-connections/nat" ) +// MinimumDuration puts a minimum on user configured duration. +// This is to prevent API error on time unit. For example, API may +// set 3 as healthcheck interval with intention of 3 seconds, but +// Docker interprets it as 3 nanoseconds. +const MinimumDuration = 1 * time.Millisecond + // HealthConfig holds configuration settings for the HEALTHCHECK feature. type HealthConfig struct { // Test is the test to perform to check that the container is healthy. diff --git a/vendor/github.com/docker/docker/api/types/container/waitcondition.go b/vendor/github.com/docker/docker/api/types/container/waitcondition.go new file mode 100644 index 000000000..64820fe35 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/waitcondition.go @@ -0,0 +1,22 @@ +package container + +// WaitCondition is a type used to specify a container state for which +// to wait. +type WaitCondition string + +// Possible WaitCondition Values. +// +// WaitConditionNotRunning (default) is used to wait for any of the non-running +// states: "created", "exited", "dead", "removing", or "removed". +// +// WaitConditionNextExit is used to wait for the next time the state changes +// to a non-running state. If the state is currently "created" or "exited", +// this would cause Wait() to block until either the container runs and exits +// or is removed. +// +// WaitConditionRemoved is used to wait for the container to be removed. +const ( + WaitConditionNotRunning WaitCondition = "not-running" + WaitConditionNextExit WaitCondition = "next-exit" + WaitConditionRemoved WaitCondition = "removed" +) diff --git a/vendor/github.com/docker/docker/api/types/events/events.go b/vendor/github.com/docker/docker/api/types/events/events.go index 7129a65ac..5f5f54034 100644 --- a/vendor/github.com/docker/docker/api/types/events/events.go +++ b/vendor/github.com/docker/docker/api/types/events/events.go @@ -13,6 +13,12 @@ const ( PluginEventType = "plugin" // VolumeEventType is the event type that volumes generate VolumeEventType = "volume" + // ServiceEventType is the event type that services generate + ServiceEventType = "service" + // NodeEventType is the event type that nodes generate + NodeEventType = "node" + // SecretEventType is the event type that secrets generate + SecretEventType = "secret" ) // Actor describes something that generates events, @@ -36,6 +42,8 @@ type Message struct { Type string Action string Actor Actor + // Engine events are local scope. Cluster events are swarm scope. 
+ Scope string `json:"scope,omitempty"` Time int64 `json:"time,omitempty"` TimeNano int64 `json:"timeNano,omitempty"` diff --git a/vendor/github.com/docker/docker/api/types/network/network.go b/vendor/github.com/docker/docker/api/types/network/network.go index 8d15ed21b..7c7dbacc8 100644 --- a/vendor/github.com/docker/docker/api/types/network/network.go +++ b/vendor/github.com/docker/docker/api/types/network/network.go @@ -58,6 +58,7 @@ type EndpointSettings struct { GlobalIPv6Address string GlobalIPv6PrefixLen int MacAddress string + DriverOpts map[string]string } // Task carries the information about one backend task @@ -100,3 +101,8 @@ func (es *EndpointSettings) Copy() *EndpointSettings { type NetworkingConfig struct { EndpointsConfig map[string]*EndpointSettings // Endpoint configs for each connecting network } + +// ConfigReference specifies the source which provides a network's configuration +type ConfigReference struct { + Network string +} diff --git a/vendor/github.com/docker/docker/api/types/registry/registry.go b/vendor/github.com/docker/docker/api/types/registry/registry.go index 28fafab90..b98a943a1 100644 --- a/vendor/github.com/docker/docker/api/types/registry/registry.go +++ b/vendor/github.com/docker/docker/api/types/registry/registry.go @@ -3,13 +3,17 @@ package registry import ( "encoding/json" "net" + + "github.com/opencontainers/image-spec/specs-go/v1" ) // ServiceConfig stores daemon registry services configuration. type ServiceConfig struct { - InsecureRegistryCIDRs []*NetIPNet `json:"InsecureRegistryCIDRs"` - IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"` - Mirrors []string + AllowNondistributableArtifactsCIDRs []*NetIPNet + AllowNondistributableArtifactsHostnames []string + InsecureRegistryCIDRs []*NetIPNet `json:"InsecureRegistryCIDRs"` + IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"` + Mirrors []string } // NetIPNet is the net.IPNet type, which can be marshalled and @@ -102,3 +106,14 @@ type SearchResults struct { // Results is a slice containing the actual results for the search Results []SearchResult `json:"results"` } + +// DistributionInspect describes the result obtained from contacting the +// registry to retrieve image metadata +type DistributionInspect struct { + // Descriptor contains information about the manifest, including + // the content addressable digest + Descriptor v1.Descriptor + // Platforms contains the list of platforms supported by the image, + // obtained by parsing the manifest + Platforms []v1.Platform +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/common.go b/vendor/github.com/docker/docker/api/types/swarm/common.go index dc76a146b..54af82b31 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/common.go +++ b/vendor/github.com/docker/docker/api/types/swarm/common.go @@ -25,3 +25,16 @@ type Driver struct { Name string `json:",omitempty"` Options map[string]string `json:",omitempty"` } + +// TLSInfo represents the TLS information about what CA certificate is trusted, +// and who the issuer for a TLS certificate is +type TLSInfo struct { + // TrustRoot is the trusted CA root certificate in PEM format + TrustRoot string `json:",omitempty"` + + // CertIssuer is the raw subject bytes of the issuer + CertIssuerSubject []byte `json:",omitempty"` + + // CertIssuerPublicKey is the raw public key bytes of the issuer + CertIssuerPublicKey []byte `json:",omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/config.go b/vendor/github.com/docker/docker/api/types/swarm/config.go new 
file mode 100644 index 000000000..0fb021ce9 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/config.go @@ -0,0 +1,31 @@ +package swarm + +import "os" + +// Config represents a config. +type Config struct { + ID string + Meta + Spec ConfigSpec +} + +// ConfigSpec represents a config specification from a config in swarm +type ConfigSpec struct { + Annotations + Data []byte `json:",omitempty"` +} + +// ConfigReferenceFileTarget is a file target in a config reference +type ConfigReferenceFileTarget struct { + Name string + UID string + GID string + Mode os.FileMode +} + +// ConfigReference is a reference to a config in swarm +type ConfigReference struct { + File *ConfigReferenceFileTarget + ConfigID string + ConfigName string +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/container.go b/vendor/github.com/docker/docker/api/types/swarm/container.go index 135f7cbbf..6f8b45f6b 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/container.go +++ b/vendor/github.com/docker/docker/api/types/swarm/container.go @@ -68,4 +68,5 @@ type ContainerSpec struct { Hosts []string `json:",omitempty"` DNSConfig *DNSConfig `json:",omitempty"` Secrets []*SecretReference `json:",omitempty"` + Configs []*ConfigReference `json:",omitempty"` } diff --git a/vendor/github.com/docker/docker/api/types/swarm/network.go b/vendor/github.com/docker/docker/api/types/swarm/network.go index 693f85cce..97c484e14 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/network.go +++ b/vendor/github.com/docker/docker/api/types/swarm/network.go @@ -1,5 +1,9 @@ package swarm +import ( + "github.com/docker/docker/api/types/network" +) + // Endpoint represents an endpoint. type Endpoint struct { Spec EndpointSpec `json:",omitempty"` @@ -78,18 +82,21 @@ type Network struct { // NetworkSpec represents the spec of a network. type NetworkSpec struct { Annotations - DriverConfiguration *Driver `json:",omitempty"` - IPv6Enabled bool `json:",omitempty"` - Internal bool `json:",omitempty"` - Attachable bool `json:",omitempty"` - Ingress bool `json:",omitempty"` - IPAMOptions *IPAMOptions `json:",omitempty"` + DriverConfiguration *Driver `json:",omitempty"` + IPv6Enabled bool `json:",omitempty"` + Internal bool `json:",omitempty"` + Attachable bool `json:",omitempty"` + Ingress bool `json:",omitempty"` + IPAMOptions *IPAMOptions `json:",omitempty"` + ConfigFrom *network.ConfigReference `json:",omitempty"` + Scope string `json:",omitempty"` } // NetworkAttachmentConfig represents the configuration of a network attachment. type NetworkAttachmentConfig struct { - Target string `json:",omitempty"` - Aliases []string `json:",omitempty"` + Target string `json:",omitempty"` + Aliases []string `json:",omitempty"` + DriverOpts map[string]string `json:",omitempty"` } // NetworkAttachment represents a network attachment. diff --git a/vendor/github.com/docker/docker/api/types/swarm/node.go b/vendor/github.com/docker/docker/api/types/swarm/node.go index 379e17a77..28c6851e9 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/node.go +++ b/vendor/github.com/docker/docker/api/types/swarm/node.go @@ -52,6 +52,7 @@ type NodeDescription struct { Platform Platform `json:",omitempty"` Resources Resources `json:",omitempty"` Engine EngineDescription `json:",omitempty"` + TLSInfo TLSInfo `json:",omitempty"` } // Platform represents the platform (Arch/OS). 
diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go b/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go new file mode 100644 index 000000000..47ae234ef --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go @@ -0,0 +1,3 @@ +//go:generate protoc -I . --gogofast_out=import_path=github.com/docker/docker/api/types/swarm/runtime:. plugin.proto + +package runtime diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go new file mode 100644 index 000000000..1fdc9b043 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go @@ -0,0 +1,712 @@ +// Code generated by protoc-gen-gogo. +// source: plugin.proto +// DO NOT EDIT! + +/* + Package runtime is a generated protocol buffer package. + + It is generated from these files: + plugin.proto + + It has these top-level messages: + PluginSpec + PluginPrivilege +*/ +package runtime + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +// PluginSpec defines the base payload which clients can specify for creating +// a service with the plugin runtime. +type PluginSpec struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Remote string `protobuf:"bytes,2,opt,name=remote,proto3" json:"remote,omitempty"` + Privileges []*PluginPrivilege `protobuf:"bytes,3,rep,name=privileges" json:"privileges,omitempty"` + Disabled bool `protobuf:"varint,4,opt,name=disabled,proto3" json:"disabled,omitempty"` +} + +func (m *PluginSpec) Reset() { *m = PluginSpec{} } +func (m *PluginSpec) String() string { return proto.CompactTextString(m) } +func (*PluginSpec) ProtoMessage() {} +func (*PluginSpec) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{0} } + +func (m *PluginSpec) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *PluginSpec) GetRemote() string { + if m != nil { + return m.Remote + } + return "" +} + +func (m *PluginSpec) GetPrivileges() []*PluginPrivilege { + if m != nil { + return m.Privileges + } + return nil +} + +func (m *PluginSpec) GetDisabled() bool { + if m != nil { + return m.Disabled + } + return false +} + +// PluginPrivilege describes a permission the user has to accept +// upon installing a plugin. 
+type PluginPrivilege struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + Value []string `protobuf:"bytes,3,rep,name=value" json:"value,omitempty"` +} + +func (m *PluginPrivilege) Reset() { *m = PluginPrivilege{} } +func (m *PluginPrivilege) String() string { return proto.CompactTextString(m) } +func (*PluginPrivilege) ProtoMessage() {} +func (*PluginPrivilege) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{1} } + +func (m *PluginPrivilege) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *PluginPrivilege) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *PluginPrivilege) GetValue() []string { + if m != nil { + return m.Value + } + return nil +} + +func init() { + proto.RegisterType((*PluginSpec)(nil), "PluginSpec") + proto.RegisterType((*PluginPrivilege)(nil), "PluginPrivilege") +} +func (m *PluginSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PluginSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Remote) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Remote))) + i += copy(dAtA[i:], m.Remote) + } + if len(m.Privileges) > 0 { + for _, msg := range m.Privileges { + dAtA[i] = 0x1a + i++ + i = encodeVarintPlugin(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Disabled { + dAtA[i] = 0x20 + i++ + if m.Disabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *PluginPrivilege) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PluginPrivilege) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Description) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Description))) + i += copy(dAtA[i:], m.Description) + } + if len(m.Value) > 0 { + for _, s := range m.Value { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func encodeFixed64Plugin(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Plugin(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintPlugin(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + 
dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *PluginSpec) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovPlugin(uint64(l)) + } + l = len(m.Remote) + if l > 0 { + n += 1 + l + sovPlugin(uint64(l)) + } + if len(m.Privileges) > 0 { + for _, e := range m.Privileges { + l = e.Size() + n += 1 + l + sovPlugin(uint64(l)) + } + } + if m.Disabled { + n += 2 + } + return n +} + +func (m *PluginPrivilege) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovPlugin(uint64(l)) + } + l = len(m.Description) + if l > 0 { + n += 1 + l + sovPlugin(uint64(l)) + } + if len(m.Value) > 0 { + for _, s := range m.Value { + l = len(s) + n += 1 + l + sovPlugin(uint64(l)) + } + } + return n +} + +func sovPlugin(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozPlugin(x uint64) (n int) { + return sovPlugin(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *PluginSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PluginSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PluginSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Remote", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Remote = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Privileges", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Privileges = append(m.Privileges, &PluginPrivilege{}) + if err := m.Privileges[len(m.Privileges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil 
{ + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Disabled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Disabled = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipPlugin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPlugin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PluginPrivilege) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PluginPrivilege: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PluginPrivilege: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Description = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPlugin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPlugin + } + if (iNdEx + skippy) > l { + return 
io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipPlugin(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthPlugin + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipPlugin(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthPlugin = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowPlugin = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("plugin.proto", fileDescriptorPlugin) } + +var fileDescriptorPlugin = []byte{ + // 196 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0xc8, 0x29, 0x4d, + 0xcf, 0xcc, 0xd3, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x57, 0x6a, 0x63, 0xe4, 0xe2, 0x0a, 0x00, 0x0b, + 0x04, 0x17, 0xa4, 0x26, 0x0b, 0x09, 0x71, 0xb1, 0xe4, 0x25, 0xe6, 0xa6, 0x4a, 0x30, 0x2a, 0x30, + 0x6a, 0x70, 0x06, 0x81, 0xd9, 0x42, 0x62, 0x5c, 0x6c, 0x45, 0xa9, 0xb9, 0xf9, 0x25, 0xa9, 0x12, + 0x4c, 0x60, 0x51, 0x28, 0x4f, 0xc8, 0x80, 0x8b, 0xab, 0xa0, 0x28, 0xb3, 0x2c, 0x33, 0x27, 0x35, + 0x3d, 0xb5, 0x58, 0x82, 0x59, 0x81, 0x59, 0x83, 0xdb, 0x48, 0x40, 0x0f, 0x62, 0x58, 0x00, 0x4c, + 0x22, 0x08, 0x49, 0x8d, 0x90, 0x14, 0x17, 0x47, 0x4a, 0x66, 0x71, 0x62, 0x52, 0x4e, 0x6a, 0x8a, + 0x04, 0x8b, 0x02, 0xa3, 0x06, 0x47, 0x10, 0x9c, 0xaf, 0x14, 0xcb, 0xc5, 0x8f, 0xa6, 0x15, 0xab, + 0x63, 0x14, 0xb8, 0xb8, 0x53, 0x52, 0x8b, 0x93, 0x8b, 0x32, 0x0b, 0x4a, 0x32, 0xf3, 0xf3, 0xa0, + 0x2e, 0x42, 0x16, 0x12, 0x12, 0xe1, 0x62, 0x2d, 0x4b, 0xcc, 0x29, 0x4d, 0x05, 0xbb, 0x88, 0x33, + 0x08, 0xc2, 0x71, 0xe2, 0x39, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, + 0x18, 0x93, 0xd8, 0xc0, 0x9e, 0x37, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xb8, 0x84, 0xad, 0x79, + 0x0c, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto 
b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto new file mode 100644 index 000000000..06eb7ba65 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto @@ -0,0 +1,18 @@ +syntax = "proto3"; + +// PluginSpec defines the base payload which clients can specify for creating +// a service with the plugin runtime. +message PluginSpec { + string name = 1; + string remote = 2; + repeated PluginPrivilege privileges = 3; + bool disabled = 4; +} + +// PluginPrivilege describes a permission the user has to accept +// upon installing a plugin. +message PluginPrivilege { + string name = 1; + string description = 2; + repeated string value = 3; +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/swarm.go b/vendor/github.com/docker/docker/api/types/swarm/swarm.go index 08da33843..b65fa86da 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/swarm.go +++ b/vendor/github.com/docker/docker/api/types/swarm/swarm.go @@ -2,12 +2,14 @@ package swarm import "time" -// ClusterInfo represents info about the cluster for outputing in "info" +// ClusterInfo represents info about the cluster for outputting in "info" // it contains the same information as "Swarm", but without the JoinTokens type ClusterInfo struct { ID string Meta - Spec Spec + Spec Spec + TLSInfo TLSInfo + RootRotationInProgress bool } // Swarm represents a swarm. @@ -107,6 +109,16 @@ type CAConfig struct { // ExternalCAs is a list of CAs to which a manager node will make // certificate signing requests for node certificates. ExternalCAs []*ExternalCA `json:",omitempty"` + + // SigningCACert and SigningCAKey specify the desired signing root CA and + // root CA key for the swarm. When inspecting the cluster, the key will + // be redacted. + SigningCACert string `json:",omitempty"` + SigningCAKey string `json:",omitempty"` + + // If this value changes, and there is no specified signing cert and key, + // then the swarm is forced to generate a new root certificate ane key. + ForceRotate uint64 `json:",omitempty"` } // ExternalCAProtocol represents type of external CA. @@ -126,6 +138,10 @@ type ExternalCA struct { // Options is a set of additional key/value pairs whose interpretation // depends on the specified CA type. Options map[string]string `json:",omitempty"` + + // CACert specifies which root CA is used by this external CA. This certificate must + // be in PEM format. + CACert string } // InitRequest is the request used to init a swarm. diff --git a/vendor/github.com/docker/docker/api/types/swarm/task.go b/vendor/github.com/docker/docker/api/types/swarm/task.go index 99e9a6d58..1712c06cf 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/task.go +++ b/vendor/github.com/docker/docker/api/types/swarm/task.go @@ -1,6 +1,10 @@ package swarm -import "time" +import ( + "time" + + "github.com/docker/docker/api/types/swarm/runtime" +) // TaskState represents the state of a task. type TaskState string @@ -51,7 +55,11 @@ type Task struct { // TaskSpec represents the spec of a task. type TaskSpec struct { - ContainerSpec ContainerSpec `json:",omitempty"` + // ContainerSpec and PluginSpec are mutually exclusive. 
+ // PluginSpec will only be used when the `Runtime` field is set to `plugin` + ContainerSpec *ContainerSpec `json:",omitempty"` + PluginSpec *runtime.PluginSpec `json:",omitempty"` + Resources *ResourceRequirements `json:",omitempty"` RestartPolicy *RestartPolicy `json:",omitempty"` Placement *Placement `json:",omitempty"` @@ -67,9 +75,6 @@ type TaskSpec struct { ForceUpdate uint64 Runtime RuntimeType `json:",omitempty"` - // TODO (ehazlett): this should be removed and instead - // use struct tags (proto) for the runtimes - RuntimeData []byte `json:",omitempty"` } // Resources represents resources (CPU/Memory). @@ -88,6 +93,11 @@ type ResourceRequirements struct { type Placement struct { Constraints []string `json:",omitempty"` Preferences []PlacementPreference `json:",omitempty"` + + // Platforms stores all the platforms that the image can run on. + // This field is used in the platform filter for scheduling. If empty, + // then the platform filter is off, meaning there are no scheduling restrictions. + Platforms []Platform `json:",omitempty"` } // PlacementPreference provides a way to make the scheduler aware of factors diff --git a/vendor/github.com/docker/docker/api/types/types.go b/vendor/github.com/docker/docker/api/types/types.go index 9493bd95e..c96df2733 100644 --- a/vendor/github.com/docker/docker/api/types/types.go +++ b/vendor/github.com/docker/docker/api/types/types.go @@ -45,6 +45,12 @@ type ImageInspect struct { VirtualSize int64 GraphDriver GraphDriverData RootFS RootFS + Metadata ImageMetadata +} + +// ImageMetadata contains engine-local data about the image +type ImageMetadata struct { + LastTagTime time.Time `json:",omitempty"` } // Container contains response of Engine API: @@ -277,7 +283,7 @@ type Health struct { // ContainerState stores container's running state // it's part of ContainerJSONBase and will return by "inspect" command type ContainerState struct { - Status string + Status string // String representation of the container state. Can be one of "created", "running", "paused", "restarting", "removing", "exited", or "dead" Running bool Paused bool Restarting bool @@ -320,6 +326,7 @@ type ContainerJSONBase struct { Name string RestartCount int Driver string + Platform string MountLabel string ProcessLabel string AppArmorProfile string @@ -396,13 +403,15 @@ type NetworkResource struct { Name string // Name is the requested name of the network ID string `json:"Id"` // ID uniquely identifies a network on a single machine Created time.Time // Created is the time the network created - Scope string // Scope describes the level at which the network exists (e.g. `global` for cluster-wide or `local` for machine level) + Scope string // Scope describes the level at which the network exists (e.g. `swarm` for cluster-wide or `local` for machine level) Driver string // Driver is the Driver name used to create the network (e.g. `bridge`, `overlay`) EnableIPv6 bool // EnableIPv6 represents whether to enable IPv6 IPAM network.IPAM // IPAM is the network's IP Address Management Internal bool // Internal represents if the network is used internal only Attachable bool // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode. Ingress bool // Ingress indicates the network is providing the routing-mesh for the swarm cluster. + ConfigFrom network.ConfigReference // ConfigFrom specifies the source which will provide the configuration for this network. 
+ ConfigOnly bool // ConfigOnly networks are place-holder networks for network configurations to be used by other networks. ConfigOnly networks cannot be used directly to run containers or services. Containers map[string]EndpointResource // Containers contains endpoints belonging to the network Options map[string]string // Options holds the network specific options to use for when creating the network Labels map[string]string // Labels holds metadata specific to the network being created @@ -430,11 +439,14 @@ type NetworkCreate struct { // which has the same name but it is not guaranteed to catch all name collisions. CheckDuplicate bool Driver string + Scope string EnableIPv6 bool IPAM *network.IPAM Internal bool Attachable bool Ingress bool + ConfigOnly bool + ConfigFrom *network.ConfigReference Options map[string]string Labels map[string]string } @@ -463,6 +475,12 @@ type NetworkDisconnect struct { Force bool } +// NetworkInspectOptions holds parameters to inspect network +type NetworkInspectOptions struct { + Scope string + Verbose bool +} + // Checkpoint represents the details of a checkpoint type Checkpoint struct { Name string // Name is the name of the checkpoint @@ -477,10 +495,11 @@ type Runtime struct { // DiskUsage contains response of Engine API: // GET "/system/df" type DiskUsage struct { - LayersSize int64 - Images []*ImageSummary - Containers []*Container - Volumes []*Volume + LayersSize int64 + Images []*ImageSummary + Containers []*Container + Volumes []*Volume + BuilderSize int64 } // ContainersPruneReport contains the response for Engine API: @@ -504,6 +523,12 @@ type ImagesPruneReport struct { SpaceReclaimed uint64 } +// BuildCachePruneReport contains the response for Engine API: +// POST "/build/prune" +type BuildCachePruneReport struct { + SpaceReclaimed uint64 +} + // NetworksPruneReport contains the response for Engine API: // POST "/networks/prune" type NetworksPruneReport struct { @@ -522,6 +547,18 @@ type SecretListOptions struct { Filters filters.Args } +// ConfigCreateResponse contains the information returned to a client +// on the creation of a new config. +type ConfigCreateResponse struct { + // ID is the id of the created config. + ID string +} + +// ConfigListOptions holds parameters to list configs +type ConfigListOptions struct { + Filters filters.Args +} + // PushResult contains the tag, manifest digest, and manifest size from the // push. It's used to signal this information to the trust code in the client // so it can sign the manifest if necessary. @@ -530,3 +567,8 @@ type PushResult struct { Digest string Size int } + +// BuildResult contains the image id of a successful build +type BuildResult struct { + ID string +} diff --git a/vendor/github.com/docker/docker/api/types/volume.go b/vendor/github.com/docker/docker/api/types/volume.go index da4f8ebd9..a69b0cfb1 100644 --- a/vendor/github.com/docker/docker/api/types/volume.go +++ b/vendor/github.com/docker/docker/api/types/volume.go @@ -7,6 +7,9 @@ package types // swagger:model Volume type Volume struct { + // Time volume was created. + CreatedAt string `json:"CreatedAt,omitempty"` + // Name of the volume driver used by the volume. 
// Required: true Driver string `json:"Driver"` diff --git a/vendor/github.com/docker/docker/client/build_prune.go b/vendor/github.com/docker/docker/client/build_prune.go new file mode 100644 index 000000000..ccab115d3 --- /dev/null +++ b/vendor/github.com/docker/docker/client/build_prune.go @@ -0,0 +1,30 @@ +package client + +import ( + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// BuildCachePrune requests the daemon to delete unused cache data +func (cli *Client) BuildCachePrune(ctx context.Context) (*types.BuildCachePruneReport, error) { + if err := cli.NewVersionError("1.31", "build prune"); err != nil { + return nil, err + } + + report := types.BuildCachePruneReport{} + + serverResp, err := cli.post(ctx, "/build/prune", nil, nil, nil) + if err != nil { + return nil, err + } + defer ensureReaderClosed(serverResp) + + if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { + return nil, fmt.Errorf("Error retrieving disk usage: %v", err) + } + + return &report, nil +} diff --git a/vendor/github.com/docker/docker/client/client.go b/vendor/github.com/docker/docker/client/client.go index df3698adc..7e1453172 100644 --- a/vendor/github.com/docker/docker/client/client.go +++ b/vendor/github.com/docker/docker/client/client.go @@ -46,6 +46,7 @@ For example, to list running containers (the equivalent of "docker ps"): package client import ( + "errors" "fmt" "net/http" "net/url" @@ -54,10 +55,16 @@ import ( "strings" "github.com/docker/docker/api" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions" "github.com/docker/go-connections/sockets" "github.com/docker/go-connections/tlsconfig" + "golang.org/x/net/context" ) +// ErrRedirect is the error returned by checkRedirect when the request is non-GET. +var ErrRedirect = errors.New("unexpected redirect in response") + // Client is the API client that performs all operations // against a docker server. type Client struct { @@ -81,6 +88,23 @@ type Client struct { manualOverride bool } +// CheckRedirect specifies the policy for dealing with redirect responses: +// If the request is non-GET return `ErrRedirect`. Otherwise use the last response. +// +// Go 1.8 changes behavior for HTTP redirects (specifically 301, 307, and 308) in the client . +// The Docker client (and by extension docker API client) can be made to to send a request +// like POST /containers//start where what would normally be in the name section of the URL is empty. +// This triggers an HTTP 301 from the daemon. +// In go 1.8 this 301 will be converted to a GET request, and ends up getting a 404 from the daemon. +// This behavior change manifests in the client in that before the 301 was not followed and +// the client did not generate an error, but now results in a message like Error response from daemon: page not found. +func CheckRedirect(req *http.Request, via []*http.Request) error { + if via[0].Method == http.MethodGet { + return http.ErrUseLastResponse + } + return ErrRedirect +} + // NewEnvClient initializes a new API client based on environment variables. // Use DOCKER_HOST to set the url to the docker server. // Use DOCKER_API_VERSION to set the version of the API to reach, leave empty for latest. 
@@ -104,6 +128,7 @@ func NewEnvClient() (*Client, error) { Transport: &http.Transport{ TLSClientConfig: tlsc, }, + CheckRedirect: CheckRedirect, } } @@ -147,7 +172,8 @@ func NewClient(host string, version string, client *http.Client, httpHeaders map transport := new(http.Transport) sockets.ConfigureTransport(transport, proto, addr) client = &http.Client{ - Transport: transport, + Transport: transport, + CheckRedirect: CheckRedirect, } } @@ -193,9 +219,9 @@ func (cli *Client) getAPIPath(p string, query url.Values) string { var apiPath string if cli.version != "" { v := strings.TrimPrefix(cli.version, "v") - apiPath = fmt.Sprintf("%s/v%s%s", cli.basePath, v, p) + apiPath = cli.basePath + "/v" + v + p } else { - apiPath = fmt.Sprintf("%s%s", cli.basePath, p) + apiPath = cli.basePath + p } u := &url.URL{ @@ -215,13 +241,40 @@ func (cli *Client) ClientVersion() string { return cli.version } -// UpdateClientVersion updates the version string associated with this -// instance of the Client. This operation doesn't acquire a mutex. -func (cli *Client) UpdateClientVersion(v string) { - if !cli.manualOverride { - cli.version = v +// NegotiateAPIVersion updates the version string associated with this +// instance of the Client to match the latest version the server supports +func (cli *Client) NegotiateAPIVersion(ctx context.Context) { + ping, _ := cli.Ping(ctx) + cli.NegotiateAPIVersionPing(ping) +} + +// NegotiateAPIVersionPing updates the version string associated with this +// instance of the Client to match the latest version the server supports +func (cli *Client) NegotiateAPIVersionPing(p types.Ping) { + if cli.manualOverride { + return } + // try the latest version before versioning headers existed + if p.APIVersion == "" { + p.APIVersion = "1.24" + } + + // if the client is not initialized with a version, start with the latest supported version + if cli.version == "" { + cli.version = api.DefaultVersion + } + + // if server version is lower than the maximum version supported by the Client, downgrade + if versions.LessThan(p.APIVersion, api.DefaultVersion) { + cli.version = p.APIVersion + } +} + +// DaemonHost returns the host associated with this instance of the Client. +// This operation doesn't acquire a mutex. +func (cli *Client) DaemonHost() string { + return cli.host } // ParseHost verifies that the given host strings is valid. diff --git a/vendor/github.com/docker/docker/client/config_create.go b/vendor/github.com/docker/docker/client/config_create.go new file mode 100644 index 000000000..bc4a952b2 --- /dev/null +++ b/vendor/github.com/docker/docker/client/config_create.go @@ -0,0 +1,25 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// ConfigCreate creates a new Config. 
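The NegotiateAPIVersion/NegotiateAPIVersionPing helpers introduced in the hunk above replace UpdateClientVersion; a minimal, illustrative sketch of how a caller might use them, assuming the client is built from the environment and that panicking on setup errors is acceptable for the example:

package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
)

func main() {
	// NewEnvClient honours DOCKER_HOST, DOCKER_API_VERSION, DOCKER_CERT_PATH and DOCKER_TLS_VERIFY.
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}
	// Ping the daemon and downgrade the client version if the daemon is older;
	// an explicit DOCKER_API_VERSION override is left untouched (manualOverride).
	cli.NegotiateAPIVersion(context.Background())
	fmt.Println("using API version", cli.ClientVersion())
}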
+func (cli *Client) ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (types.ConfigCreateResponse, error) { + var response types.ConfigCreateResponse + if err := cli.NewVersionError("1.30", "config create"); err != nil { + return response, err + } + resp, err := cli.post(ctx, "/configs/create", nil, config, nil) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + ensureReaderClosed(resp) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/config_inspect.go b/vendor/github.com/docker/docker/client/config_inspect.go new file mode 100644 index 000000000..ebb6d636c --- /dev/null +++ b/vendor/github.com/docker/docker/client/config_inspect.go @@ -0,0 +1,37 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// ConfigInspectWithRaw returns the config information with raw data +func (cli *Client) ConfigInspectWithRaw(ctx context.Context, id string) (swarm.Config, []byte, error) { + if err := cli.NewVersionError("1.30", "config inspect"); err != nil { + return swarm.Config{}, nil, err + } + resp, err := cli.get(ctx, "/configs/"+id, nil, nil) + if err != nil { + if resp.statusCode == http.StatusNotFound { + return swarm.Config{}, nil, configNotFoundError{id} + } + return swarm.Config{}, nil, err + } + defer ensureReaderClosed(resp) + + body, err := ioutil.ReadAll(resp.body) + if err != nil { + return swarm.Config{}, nil, err + } + + var config swarm.Config + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&config) + + return config, body, err +} diff --git a/vendor/github.com/docker/docker/client/config_list.go b/vendor/github.com/docker/docker/client/config_list.go new file mode 100644 index 000000000..8483ca14d --- /dev/null +++ b/vendor/github.com/docker/docker/client/config_list.go @@ -0,0 +1,38 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// ConfigList returns the list of configs. +func (cli *Client) ConfigList(ctx context.Context, options types.ConfigListOptions) ([]swarm.Config, error) { + if err := cli.NewVersionError("1.30", "config list"); err != nil { + return nil, err + } + query := url.Values{} + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToParam(options.Filters) + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/configs", query, nil) + if err != nil { + return nil, err + } + + var configs []swarm.Config + err = json.NewDecoder(resp.body).Decode(&configs) + ensureReaderClosed(resp) + return configs, err +} diff --git a/vendor/github.com/docker/docker/client/config_remove.go b/vendor/github.com/docker/docker/client/config_remove.go new file mode 100644 index 000000000..726b5c853 --- /dev/null +++ b/vendor/github.com/docker/docker/client/config_remove.go @@ -0,0 +1,13 @@ +package client + +import "golang.org/x/net/context" + +// ConfigRemove removes a Config. 
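A hedged sketch of driving the new config endpoints added above; the config name and payload are made-up placeholders, and it assumes the daemon is a swarm manager speaking API 1.30 or later:

package main

import (
	"context"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/client"
)

func createAndListConfigs(ctx context.Context, cli *client.Client) error {
	// Create a config; Annotations.Name and Data here are hypothetical.
	spec := swarm.ConfigSpec{
		Annotations: swarm.Annotations{Name: "example-config"},
		Data:        []byte("some configuration payload"),
	}
	created, err := cli.ConfigCreate(ctx, spec)
	if err != nil {
		return err
	}

	// Inspect it back by ID, then list all configs with no filters.
	if _, _, err := cli.ConfigInspectWithRaw(ctx, created.ID); err != nil {
		return err
	}
	_, err = cli.ConfigList(ctx, types.ConfigListOptions{})
	return err
}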
+func (cli *Client) ConfigRemove(ctx context.Context, id string) error { + if err := cli.NewVersionError("1.30", "config remove"); err != nil { + return err + } + resp, err := cli.delete(ctx, "/configs/"+id, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/config_update.go b/vendor/github.com/docker/docker/client/config_update.go new file mode 100644 index 000000000..823751bb8 --- /dev/null +++ b/vendor/github.com/docker/docker/client/config_update.go @@ -0,0 +1,21 @@ +package client + +import ( + "net/url" + "strconv" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// ConfigUpdate attempts to update a Config +func (cli *Client) ConfigUpdate(ctx context.Context, id string, version swarm.Version, config swarm.ConfigSpec) error { + if err := cli.NewVersionError("1.30", "config update"); err != nil { + return err + } + query := url.Values{} + query.Set("version", strconv.FormatUint(version.Index, 10)) + resp, err := cli.post(ctx, "/configs/"+id+"/update", query, config, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/container_attach.go b/vendor/github.com/docker/docker/client/container_attach.go index eea468215..0fdf3ed0c 100644 --- a/vendor/github.com/docker/docker/client/container_attach.go +++ b/vendor/github.com/docker/docker/client/container_attach.go @@ -11,6 +11,26 @@ import ( // It returns a types.HijackedConnection with the hijacked connection // and the a reader to get output. It's up to the called to close // the hijacked connection by calling types.HijackedResponse.Close. +// +// The stream format on the response will be in one of two formats: +// +// If the container is using a TTY, there is only a single stream (stdout), and +// data is copied directly from the container output stream, no extra +// multiplexing or headers. +// +// If the container is *not* using a TTY, streams for stdout and stderr are +// multiplexed. +// The format of the multiplexed stream is as follows: +// +// [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}[]byte{OUTPUT} +// +// STREAM_TYPE can be 1 for stdout and 2 for stderr +// +// SIZE1, SIZE2, SIZE3, and SIZE4 are four bytes of uint32 encoded as big endian. +// This is the size of OUTPUT. +// +// You can use github.com/docker/docker/pkg/stdcopy.StdCopy to demultiplex this +// stream. func (cli *Client) ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) { query := url.Values{} if options.Stream { diff --git a/vendor/github.com/docker/docker/client/container_copy.go b/vendor/github.com/docker/docker/client/container_copy.go index 545aa5438..30ba6803f 100644 --- a/vendor/github.com/docker/docker/client/container_copy.go +++ b/vendor/github.com/docker/docker/client/container_copy.go @@ -20,7 +20,7 @@ func (cli *Client) ContainerStatPath(ctx context.Context, containerID, path stri query := url.Values{} query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API. - urlStr := fmt.Sprintf("/containers/%s/archive", containerID) + urlStr := "/containers/" + containerID + "/archive" response, err := cli.head(ctx, urlStr, query, nil) if err != nil { return types.ContainerPathStat{}, err @@ -30,6 +30,7 @@ func (cli *Client) ContainerStatPath(ctx context.Context, containerID, path stri } // CopyToContainer copies content into the container filesystem. 
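The stream-format note on ContainerAttach above (repeated for ContainerLogs below) describes the 8-byte multiplexing header and points at github.com/docker/docker/pkg/stdcopy; a minimal sketch of demultiplexing a non-TTY log stream with it, where the container ID is a placeholder:

package main

import (
	"context"
	"os"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
	"github.com/docker/docker/pkg/stdcopy"
)

func streamLogs(ctx context.Context, cli *client.Client, containerID string) error {
	rd, err := cli.ContainerLogs(ctx, containerID, types.ContainerLogsOptions{
		ShowStdout: true,
		ShowStderr: true,
	})
	if err != nil {
		return err
	}
	defer rd.Close()

	// For non-TTY containers the stream is multiplexed; StdCopy splits it
	// back into stdout and stderr using the header described above.
	_, err = stdcopy.StdCopy(os.Stdout, os.Stderr, rd)
	return err
}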
+// Note that `content` must be a Reader for a TAR func (cli *Client) CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error { query := url.Values{} query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API. @@ -42,7 +43,7 @@ func (cli *Client) CopyToContainer(ctx context.Context, container, path string, query.Set("copyUIDGID", "true") } - apiPath := fmt.Sprintf("/containers/%s/archive", container) + apiPath := "/containers/" + container + "/archive" response, err := cli.putRaw(ctx, apiPath, query, content, nil) if err != nil { @@ -63,7 +64,7 @@ func (cli *Client) CopyFromContainer(ctx context.Context, container, srcPath str query := make(url.Values, 1) query.Set("path", filepath.ToSlash(srcPath)) // Normalize the paths used in the API. - apiPath := fmt.Sprintf("/containers/%s/archive", container) + apiPath := "/containers/" + container + "/archive" response, err := cli.get(ctx, apiPath, query, nil) if err != nil { return nil, types.ContainerPathStat{}, err diff --git a/vendor/github.com/docker/docker/client/container_logs.go b/vendor/github.com/docker/docker/client/container_logs.go index 69056b632..0f32e9f12 100644 --- a/vendor/github.com/docker/docker/client/container_logs.go +++ b/vendor/github.com/docker/docker/client/container_logs.go @@ -13,6 +13,26 @@ import ( // ContainerLogs returns the logs generated by a container in an io.ReadCloser. // It's up to the caller to close the stream. +// +// The stream format on the response will be in one of two formats: +// +// If the container is using a TTY, there is only a single stream (stdout), and +// data is copied directly from the container output stream, no extra +// multiplexing or headers. +// +// If the container is *not* using a TTY, streams for stdout and stderr are +// multiplexed. +// The format of the multiplexed stream is as follows: +// +// [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}[]byte{OUTPUT} +// +// STREAM_TYPE can be 1 for stdout and 2 for stderr +// +// SIZE1, SIZE2, SIZE3, and SIZE4 are four bytes of uint32 encoded as big endian. +// This is the size of OUTPUT. +// +// You can use github.com/docker/docker/pkg/stdcopy.StdCopy to demultiplex this +// stream. func (cli *Client) ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error) { query := url.Values{} if options.ShowStdout { diff --git a/vendor/github.com/docker/docker/client/container_wait.go b/vendor/github.com/docker/docker/client/container_wait.go index 93212c70e..854c6c053 100644 --- a/vendor/github.com/docker/docker/client/container_wait.go +++ b/vendor/github.com/docker/docker/client/container_wait.go @@ -2,25 +2,83 @@ package client import ( "encoding/json" + "net/url" "golang.org/x/net/context" "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/versions" ) -// ContainerWait pauses execution until a container exits. -// It returns the API status code as response of its readiness. -func (cli *Client) ContainerWait(ctx context.Context, containerID string) (int64, error) { - resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", nil, nil, nil) +// ContainerWait waits until the specified container is in a certain state +// indicated by the given condition, either "not-running" (default), +// "next-exit", or "removed". 
+// +// If this client's API version is before 1.30, condition is ignored and +// ContainerWait will return immediately with the two channels, as the server +// will wait as if the condition were "not-running". +// +// If this client's API version is at least 1.30, ContainerWait blocks until +// the request has been acknowledged by the server (with a response header), +// then returns two channels on which the caller can wait for the exit status +// of the container or an error if there was a problem either beginning the +// wait request or in getting the response. This allows the caller to +// synchronize ContainerWait with other calls, such as specifying a +// "next-exit" condition before issuing a ContainerStart request. +func (cli *Client) ContainerWait(ctx context.Context, containerID string, condition container.WaitCondition) (<-chan container.ContainerWaitOKBody, <-chan error) { + if versions.LessThan(cli.ClientVersion(), "1.30") { + return cli.legacyContainerWait(ctx, containerID) + } + + resultC := make(chan container.ContainerWaitOKBody) + errC := make(chan error, 1) + + query := url.Values{} + query.Set("condition", string(condition)) + + resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", query, nil, nil) if err != nil { - return -1, err - } - defer ensureReaderClosed(resp) - - var res container.ContainerWaitOKBody - if err := json.NewDecoder(resp.body).Decode(&res); err != nil { - return -1, err + defer ensureReaderClosed(resp) + errC <- err + return resultC, errC } - return res.StatusCode, nil + go func() { + defer ensureReaderClosed(resp) + var res container.ContainerWaitOKBody + if err := json.NewDecoder(resp.body).Decode(&res); err != nil { + errC <- err + return + } + + resultC <- res + }() + + return resultC, errC +} + +// legacyContainerWait returns immediately and doesn't have an option to wait +// until the container is removed. 
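The synchronization pattern described above, registering the wait before the container is started so even a fast exit is observed, looks roughly like this on the caller side. This is a sketch assuming an API 1.30+ daemon and an existing client and container ID:

```go
package example

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

// runAndWait registers a "next-exit" wait before ContainerStart so that a
// container that exits immediately cannot be missed, then reports its status.
func runAndWait(ctx context.Context, cli *client.Client, containerID string) error {
	waitC, errC := cli.ContainerWait(ctx, containerID, container.WaitConditionNextExit)

	if err := cli.ContainerStart(ctx, containerID, types.ContainerStartOptions{}); err != nil {
		return err
	}

	select {
	case body := <-waitC:
		fmt.Printf("container %s exited with status %d\n", containerID, body.StatusCode)
		return nil
	case err := <-errC:
		return err
	case <-ctx.Done():
		return ctx.Err()
	}
}
```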
+func (cli *Client) legacyContainerWait(ctx context.Context, containerID string) (<-chan container.ContainerWaitOKBody, <-chan error) { + resultC := make(chan container.ContainerWaitOKBody) + errC := make(chan error) + + go func() { + resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", nil, nil, nil) + if err != nil { + errC <- err + return + } + defer ensureReaderClosed(resp) + + var res container.ContainerWaitOKBody + if err := json.NewDecoder(resp.body).Decode(&res); err != nil { + errC <- err + return + } + + resultC <- res + }() + + return resultC, errC } diff --git a/vendor/github.com/docker/docker/client/distribution_inspect.go b/vendor/github.com/docker/docker/client/distribution_inspect.go new file mode 100644 index 000000000..aa5bc6a6c --- /dev/null +++ b/vendor/github.com/docker/docker/client/distribution_inspect.go @@ -0,0 +1,35 @@ +package client + +import ( + "encoding/json" + "net/url" + + registrytypes "github.com/docker/docker/api/types/registry" + "golang.org/x/net/context" +) + +// DistributionInspect returns the image digest with full Manifest +func (cli *Client) DistributionInspect(ctx context.Context, image, encodedRegistryAuth string) (registrytypes.DistributionInspect, error) { + // Contact the registry to retrieve digest and platform information + var distributionInspect registrytypes.DistributionInspect + + if err := cli.NewVersionError("1.30", "distribution inspect"); err != nil { + return distributionInspect, err + } + var headers map[string][]string + + if encodedRegistryAuth != "" { + headers = map[string][]string{ + "X-Registry-Auth": {encodedRegistryAuth}, + } + } + + resp, err := cli.get(ctx, "/distribution/"+image+"/json", url.Values{}, headers) + if err != nil { + return distributionInspect, err + } + + err = json.NewDecoder(resp.body).Decode(&distributionInspect) + ensureReaderClosed(resp) + return distributionInspect, err +} diff --git a/vendor/github.com/docker/docker/client/errors.go b/vendor/github.com/docker/docker/client/errors.go index 4f767bd8d..e0effafc0 100644 --- a/vendor/github.com/docker/docker/client/errors.go +++ b/vendor/github.com/docker/docker/client/errors.go @@ -256,6 +256,28 @@ func IsErrSecretNotFound(err error) bool { return ok } +// configNotFoundError implements an error returned when a config is not found. +type configNotFoundError struct { + name string +} + +// Error returns a string representation of a configNotFoundError +func (e configNotFoundError) Error() string { + return fmt.Sprintf("Error: no such config: %s", e.name) +} + +// NotFound indicates that this error type is of NotFound +func (e configNotFoundError) NotFound() bool { + return true +} + +// IsErrConfigNotFound returns true if the error is caused +// when a config is not found. +func IsErrConfigNotFound(err error) bool { + _, ok := err.(configNotFoundError) + return ok +} + // pluginNotFoundError implements an error returned when a plugin is not in the docker host. 
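A short caller-side sketch of the DistributionInspect method added above (the image reference is a placeholder and registry auth is left empty, so this would only work for images that need no credentials):

```go
package example

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
)

// printDistribution asks the daemon's /distribution endpoint for the manifest
// digest and the platforms the image is available for.
func printDistribution(ctx context.Context, cli *client.Client, image string) error {
	info, err := cli.DistributionInspect(ctx, image, "") // "" = no registry auth
	if err != nil {
		return err
	}
	fmt.Println("digest:", info.Descriptor.Digest)
	for _, p := range info.Platforms {
		fmt.Printf("platform: %s/%s\n", p.OS, p.Architecture)
	}
	return nil
}
```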
type pluginNotFoundError struct { name string diff --git a/vendor/github.com/docker/docker/client/hijack.go b/vendor/github.com/docker/docker/client/hijack.go index 74c53f52b..346c74ae8 100644 --- a/vendor/github.com/docker/docker/client/hijack.go +++ b/vendor/github.com/docker/docker/client/hijack.go @@ -1,8 +1,8 @@ package client import ( + "bufio" "crypto/tls" - "errors" "fmt" "net" "net/http" @@ -14,6 +14,7 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/pkg/tlsconfig" "github.com/docker/go-connections/sockets" + "github.com/pkg/errors" "golang.org/x/net/context" ) @@ -46,37 +47,12 @@ func (cli *Client) postHijacked(ctx context.Context, path string, query url.Valu } req = cli.addHeaders(req, headers) - req.Host = cli.addr - req.Header.Set("Connection", "Upgrade") - req.Header.Set("Upgrade", "tcp") - - conn, err := dial(cli.proto, cli.addr, resolveTLSConfig(cli.client.Transport)) + conn, err := cli.setupHijackConn(req, "tcp") if err != nil { - if strings.Contains(err.Error(), "connection refused") { - return types.HijackedResponse{}, fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker daemon' running on this host?") - } return types.HijackedResponse{}, err } - // When we set up a TCP connection for hijack, there could be long periods - // of inactivity (a long running command with no output) that in certain - // network setups may cause ECONNTIMEOUT, leaving the client in an unknown - // state. Setting TCP KeepAlive on the socket connection will prohibit - // ECONNTIMEOUT unless the socket connection truly is broken - if tcpConn, ok := conn.(*net.TCPConn); ok { - tcpConn.SetKeepAlive(true) - tcpConn.SetKeepAlivePeriod(30 * time.Second) - } - - clientconn := httputil.NewClientConn(conn, nil) - defer clientconn.Close() - - // Server hijacks the connection, error 'connection closed' expected - _, err = clientconn.Do(req) - - rwc, br := clientconn.Hijack() - - return types.HijackedResponse{Conn: rwc, Reader: br}, err + return types.HijackedResponse{Conn: conn, Reader: bufio.NewReader(conn)}, err } func tlsDial(network, addr string, config *tls.Config) (net.Conn, error) { @@ -175,3 +151,56 @@ func dial(proto, addr string, tlsConfig *tls.Config) (net.Conn, error) { } return net.Dial(proto, addr) } + +func (cli *Client) setupHijackConn(req *http.Request, proto string) (net.Conn, error) { + req.Host = cli.addr + req.Header.Set("Connection", "Upgrade") + req.Header.Set("Upgrade", proto) + + conn, err := dial(cli.proto, cli.addr, resolveTLSConfig(cli.client.Transport)) + if err != nil { + return nil, errors.Wrap(err, "cannot connect to the Docker daemon. Is 'docker daemon' running on this host?") + } + + // When we set up a TCP connection for hijack, there could be long periods + // of inactivity (a long running command with no output) that in certain + // network setups may cause ECONNTIMEOUT, leaving the client in an unknown + // state. 
Setting TCP KeepAlive on the socket connection will prohibit + // ECONNTIMEOUT unless the socket connection truly is broken + if tcpConn, ok := conn.(*net.TCPConn); ok { + tcpConn.SetKeepAlive(true) + tcpConn.SetKeepAlivePeriod(30 * time.Second) + } + + clientconn := httputil.NewClientConn(conn, nil) + defer clientconn.Close() + + // Server hijacks the connection, error 'connection closed' expected + resp, err := clientconn.Do(req) + if err != nil { + return nil, err + } + if resp.StatusCode != http.StatusSwitchingProtocols { + resp.Body.Close() + return nil, fmt.Errorf("unable to upgrade to %s, received %d", proto, resp.StatusCode) + } + + c, br := clientconn.Hijack() + if br.Buffered() > 0 { + // If there is buffered content, wrap the connection + c = &hijackedConn{c, br} + } else { + br.Reset(nil) + } + + return c, nil +} + +type hijackedConn struct { + net.Conn + r *bufio.Reader +} + +func (c *hijackedConn) Read(b []byte) (int, error) { + return c.r.Read(b) +} diff --git a/vendor/github.com/docker/docker/client/image_build.go b/vendor/github.com/docker/docker/client/image_build.go index bb69143e9..44a215f90 100644 --- a/vendor/github.com/docker/docker/client/image_build.go +++ b/vendor/github.com/docker/docker/client/image_build.go @@ -120,6 +120,9 @@ func (cli *Client) imageBuildOptionsToQuery(options types.ImageBuildOptions) (ur return query, err } query.Set("cachefrom", string(cacheFromJSON)) + if options.SessionID != "" { + query.Set("session", options.SessionID) + } return query, nil } diff --git a/vendor/github.com/docker/docker/client/interface.go b/vendor/github.com/docker/docker/client/interface.go index 8dbe4300d..acd4de1db 100644 --- a/vendor/github.com/docker/docker/client/interface.go +++ b/vendor/github.com/docker/docker/client/interface.go @@ -2,6 +2,7 @@ package client import ( "io" + "net" "time" "github.com/docker/docker/api/types" @@ -18,7 +19,9 @@ import ( // CommonAPIClient is the common methods between stable and experimental versions of APIClient. 
type CommonAPIClient interface { + ConfigAPIClient ContainerAPIClient + DistributionAPIClient ImageAPIClient NodeAPIClient NetworkAPIClient @@ -29,8 +32,11 @@ type CommonAPIClient interface { SystemAPIClient VolumeAPIClient ClientVersion() string + DaemonHost() string ServerVersion(ctx context.Context) (types.Version, error) - UpdateClientVersion(v string) + NegotiateAPIVersion(ctx context.Context) + NegotiateAPIVersionPing(types.Ping) + DialSession(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error) } // ContainerAPIClient defines API client methods for the containers @@ -62,15 +68,21 @@ type ContainerAPIClient interface { ContainerTop(ctx context.Context, container string, arguments []string) (container.ContainerTopOKBody, error) ContainerUnpause(ctx context.Context, container string) error ContainerUpdate(ctx context.Context, container string, updateConfig container.UpdateConfig) (container.ContainerUpdateOKBody, error) - ContainerWait(ctx context.Context, container string) (int64, error) + ContainerWait(ctx context.Context, container string, condition container.WaitCondition) (<-chan container.ContainerWaitOKBody, <-chan error) CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error ContainersPrune(ctx context.Context, pruneFilters filters.Args) (types.ContainersPruneReport, error) } +// DistributionAPIClient defines API client methods for the registry +type DistributionAPIClient interface { + DistributionInspect(ctx context.Context, image, encodedRegistryAuth string) (registry.DistributionInspect, error) +} + // ImageAPIClient defines API client methods for the images type ImageAPIClient interface { ImageBuild(ctx context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) + BuildCachePrune(ctx context.Context) (*types.BuildCachePruneReport, error) ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) ImageHistory(ctx context.Context, image string) ([]image.HistoryResponseItem, error) ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) @@ -91,8 +103,8 @@ type NetworkAPIClient interface { NetworkConnect(ctx context.Context, networkID, container string, config *network.EndpointSettings) error NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) NetworkDisconnect(ctx context.Context, networkID, container string, force bool) error - NetworkInspect(ctx context.Context, networkID string, verbose bool) (types.NetworkResource, error) - NetworkInspectWithRaw(ctx context.Context, networkID string, verbose bool) (types.NetworkResource, []byte, error) + NetworkInspect(ctx context.Context, networkID string, options types.NetworkInspectOptions) (types.NetworkResource, error) + NetworkInspectWithRaw(ctx context.Context, networkID string, options types.NetworkInspectOptions) (types.NetworkResource, []byte, error) NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) NetworkRemove(ctx context.Context, networkID string) error NetworksPrune(ctx context.Context, pruneFilter filters.Args) (types.NetworksPruneReport, error) @@ -171,3 +183,12 @@ type SecretAPIClient interface { SecretInspectWithRaw(ctx 
context.Context, name string) (swarm.Secret, []byte, error) SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error } + +// ConfigAPIClient defines API client methods for configs +type ConfigAPIClient interface { + ConfigList(ctx context.Context, options types.ConfigListOptions) ([]swarm.Config, error) + ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (types.ConfigCreateResponse, error) + ConfigRemove(ctx context.Context, id string) error + ConfigInspectWithRaw(ctx context.Context, name string) (swarm.Config, []byte, error) + ConfigUpdate(ctx context.Context, id string, version swarm.Version, config swarm.ConfigSpec) error +} diff --git a/vendor/github.com/docker/docker/client/network_inspect.go b/vendor/github.com/docker/docker/client/network_inspect.go index 724230402..848c9799f 100644 --- a/vendor/github.com/docker/docker/client/network_inspect.go +++ b/vendor/github.com/docker/docker/client/network_inspect.go @@ -12,22 +12,25 @@ import ( ) // NetworkInspect returns the information for a specific network configured in the docker host. -func (cli *Client) NetworkInspect(ctx context.Context, networkID string, verbose bool) (types.NetworkResource, error) { - networkResource, _, err := cli.NetworkInspectWithRaw(ctx, networkID, verbose) +func (cli *Client) NetworkInspect(ctx context.Context, networkID string, options types.NetworkInspectOptions) (types.NetworkResource, error) { + networkResource, _, err := cli.NetworkInspectWithRaw(ctx, networkID, options) return networkResource, err } // NetworkInspectWithRaw returns the information for a specific network configured in the docker host and its raw representation. -func (cli *Client) NetworkInspectWithRaw(ctx context.Context, networkID string, verbose bool) (types.NetworkResource, []byte, error) { +func (cli *Client) NetworkInspectWithRaw(ctx context.Context, networkID string, options types.NetworkInspectOptions) (types.NetworkResource, []byte, error) { var ( networkResource types.NetworkResource resp serverResponse err error ) query := url.Values{} - if verbose { + if options.Verbose { query.Set("verbose", "true") } + if options.Scope != "" { + query.Set("scope", options.Scope) + } resp, err = cli.get(ctx, "/networks/"+networkID, query, nil) if err != nil { if resp.statusCode == http.StatusNotFound { diff --git a/vendor/github.com/docker/docker/client/parse_logs.go b/vendor/github.com/docker/docker/client/parse_logs.go new file mode 100644 index 000000000..e427f80a7 --- /dev/null +++ b/vendor/github.com/docker/docker/client/parse_logs.go @@ -0,0 +1,41 @@ +package client + +// parse_logs.go contains utility helpers for getting information out of docker +// log lines. really, it only contains ParseDetails right now. maybe in the +// future there will be some desire to parse log messages back into a struct? +// that would go here if we did + +import ( + "net/url" + "strings" + + "github.com/pkg/errors" +) + +// ParseLogDetails takes a details string of key value pairs in the form +// "k=v,l=w", where the keys and values are url query escaped, and each pair +// is separated by a comma, returns a map. 
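A tiny usage sketch for the ParseLogDetails helper this comment introduces; the details string and its keys are made up:

```go
package example

import (
	"fmt"

	"github.com/docker/docker/client"
)

// decodeDetails shows the "k=v,l=w" encoding in action: keys and values are
// URL-query-escaped and pairs are separated by commas.
func decodeDetails() error {
	details, err := client.ParseLogDetails("com.example.tier=web,env=prod")
	if err != nil {
		return err
	}
	fmt.Println(details["com.example.tier"], details["env"]) // web prod
	return nil
}
```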
returns an error if the details +// string is not in a valid format +// the exact form of details encoding is implemented in +// api/server/httputils/write_log_stream.go +func ParseLogDetails(details string) (map[string]string, error) { + pairs := strings.Split(details, ",") + detailsMap := make(map[string]string, len(pairs)) + for _, pair := range pairs { + p := strings.SplitN(pair, "=", 2) + // if there is no equals sign, we will only get 1 part back + if len(p) != 2 { + return nil, errors.New("invalid details format") + } + k, err := url.QueryUnescape(p[0]) + if err != nil { + return nil, err + } + v, err := url.QueryUnescape(p[1]) + if err != nil { + return nil, err + } + detailsMap[k] = v + } + return detailsMap, nil +} diff --git a/vendor/github.com/docker/docker/client/ping.go b/vendor/github.com/docker/docker/client/ping.go index d6212ef8b..a4c2e2c4d 100644 --- a/vendor/github.com/docker/docker/client/ping.go +++ b/vendor/github.com/docker/docker/client/ping.go @@ -1,8 +1,6 @@ package client import ( - "fmt" - "github.com/docker/docker/api/types" "golang.org/x/net/context" ) @@ -10,7 +8,7 @@ import ( // Ping pings the server and returns the value of the "Docker-Experimental", "OS-Type" & "API-Version" headers func (cli *Client) Ping(ctx context.Context) (types.Ping, error) { var ping types.Ping - req, err := cli.buildRequest("GET", fmt.Sprintf("%s/_ping", cli.basePath), nil, nil) + req, err := cli.buildRequest("GET", cli.basePath+"/_ping", nil, nil) if err != nil { return ping, err } @@ -20,13 +18,15 @@ func (cli *Client) Ping(ctx context.Context) (types.Ping, error) { } defer ensureReaderClosed(serverResp) - ping.APIVersion = serverResp.header.Get("API-Version") + if serverResp.header != nil { + ping.APIVersion = serverResp.header.Get("API-Version") - if serverResp.header.Get("Docker-Experimental") == "true" { - ping.Experimental = true + if serverResp.header.Get("Docker-Experimental") == "true" { + ping.Experimental = true + } + ping.OSType = serverResp.header.Get("OSType") } - ping.OSType = serverResp.header.Get("OSType") - - return ping, nil + err = cli.checkResponseErr(serverResp) + return ping, err } diff --git a/vendor/github.com/docker/docker/client/plugin_upgrade.go b/vendor/github.com/docker/docker/client/plugin_upgrade.go index 24293c507..049ebfa2a 100644 --- a/vendor/github.com/docker/docker/client/plugin_upgrade.go +++ b/vendor/github.com/docker/docker/client/plugin_upgrade.go @@ -1,7 +1,6 @@ package client import ( - "fmt" "io" "net/url" @@ -13,6 +12,9 @@ import ( // PluginUpgrade upgrades a plugin func (cli *Client) PluginUpgrade(ctx context.Context, name string, options types.PluginInstallOptions) (rc io.ReadCloser, err error) { + if err := cli.NewVersionError("1.26", "plugin upgrade"); err != nil { + return nil, err + } query := url.Values{} if _, err := reference.ParseNormalizedNamed(options.RemoteRef); err != nil { return nil, errors.Wrap(err, "invalid remote reference") @@ -33,5 +35,5 @@ func (cli *Client) PluginUpgrade(ctx context.Context, name string, options types func (cli *Client) tryPluginUpgrade(ctx context.Context, query url.Values, privileges types.PluginPrivileges, name, registryAuth string) (serverResponse, error) { headers := map[string][]string{"X-Registry-Auth": {registryAuth}} - return cli.post(ctx, fmt.Sprintf("/plugins/%s/upgrade", name), query, privileges, headers) + return cli.post(ctx, "/plugins/"+name+"/upgrade", query, privileges, headers) } diff --git a/vendor/github.com/docker/docker/client/request.go 
b/vendor/github.com/docker/docker/client/request.go index 6457b316a..3e7d43fea 100644 --- a/vendor/github.com/docker/docker/client/request.go +++ b/vendor/github.com/docker/docker/client/request.go @@ -24,6 +24,7 @@ type serverResponse struct { body io.ReadCloser header http.Header statusCode int + reqURL *url.URL } // head sends an http request to the docker API using the method HEAD. @@ -118,11 +119,18 @@ func (cli *Client) sendRequest(ctx context.Context, method, path string, query u if err != nil { return serverResponse{}, err } - return cli.doRequest(ctx, req) + resp, err := cli.doRequest(ctx, req) + if err != nil { + return resp, err + } + if err := cli.checkResponseErr(resp); err != nil { + return resp, err + } + return resp, nil } func (cli *Client) doRequest(ctx context.Context, req *http.Request) (serverResponse, error) { - serverResp := serverResponse{statusCode: -1} + serverResp := serverResponse{statusCode: -1, reqURL: req.URL} resp, err := ctxhttp.Do(ctx, cli.client, req) if err != nil { @@ -179,37 +187,44 @@ func (cli *Client) doRequest(ctx context.Context, req *http.Request) (serverResp if resp != nil { serverResp.statusCode = resp.StatusCode + serverResp.body = resp.Body + serverResp.header = resp.Header } - - if serverResp.statusCode < 200 || serverResp.statusCode >= 400 { - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return serverResp, err - } - if len(body) == 0 { - return serverResp, fmt.Errorf("Error: request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), req.URL) - } - - var errorMessage string - if (cli.version == "" || versions.GreaterThan(cli.version, "1.23")) && - resp.Header.Get("Content-Type") == "application/json" { - var errorResponse types.ErrorResponse - if err := json.Unmarshal(body, &errorResponse); err != nil { - return serverResp, fmt.Errorf("Error reading JSON: %v", err) - } - errorMessage = errorResponse.Message - } else { - errorMessage = string(body) - } - - return serverResp, fmt.Errorf("Error response from daemon: %s", strings.TrimSpace(errorMessage)) - } - - serverResp.body = resp.Body - serverResp.header = resp.Header return serverResp, nil } +func (cli *Client) checkResponseErr(serverResp serverResponse) error { + if serverResp.statusCode >= 200 && serverResp.statusCode < 400 { + return nil + } + + body, err := ioutil.ReadAll(serverResp.body) + if err != nil { + return err + } + if len(body) == 0 { + return fmt.Errorf("Error: request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), serverResp.reqURL) + } + + var ct string + if serverResp.header != nil { + ct = serverResp.header.Get("Content-Type") + } + + var errorMessage string + if (cli.version == "" || versions.GreaterThan(cli.version, "1.23")) && ct == "application/json" { + var errorResponse types.ErrorResponse + if err := json.Unmarshal(body, &errorResponse); err != nil { + return fmt.Errorf("Error reading JSON: %v", err) + } + errorMessage = errorResponse.Message + } else { + errorMessage = string(body) + } + + return fmt.Errorf("Error response from daemon: %s", strings.TrimSpace(errorMessage)) +} + func (cli *Client) addHeaders(req *http.Request, headers headers) *http.Request { // Add CLI Config's HTTP Headers BEFORE we set the Docker headers // then the user can't change OUR headers @@ -239,9 +254,9 @@ func encodeData(data interface{}) (*bytes.Buffer, error) { } func 
ensureReaderClosed(response serverResponse) { - if body := response.body; body != nil { + if response.body != nil { // Drain up to 512 bytes and close the body to let the Transport reuse the connection - io.CopyN(ioutil.Discard, body, 512) + io.CopyN(ioutil.Discard, response.body, 512) response.body.Close() } } diff --git a/vendor/github.com/docker/docker/client/secret_create.go b/vendor/github.com/docker/docker/client/secret_create.go index b5325a560..4354afea6 100644 --- a/vendor/github.com/docker/docker/client/secret_create.go +++ b/vendor/github.com/docker/docker/client/secret_create.go @@ -11,6 +11,9 @@ import ( // SecretCreate creates a new Secret. func (cli *Client) SecretCreate(ctx context.Context, secret swarm.SecretSpec) (types.SecretCreateResponse, error) { var response types.SecretCreateResponse + if err := cli.NewVersionError("1.25", "secret create"); err != nil { + return response, err + } resp, err := cli.post(ctx, "/secrets/create", nil, secret, nil) if err != nil { return response, err diff --git a/vendor/github.com/docker/docker/client/secret_inspect.go b/vendor/github.com/docker/docker/client/secret_inspect.go index f77457611..9b602972b 100644 --- a/vendor/github.com/docker/docker/client/secret_inspect.go +++ b/vendor/github.com/docker/docker/client/secret_inspect.go @@ -12,6 +12,9 @@ import ( // SecretInspectWithRaw returns the secret information with raw data func (cli *Client) SecretInspectWithRaw(ctx context.Context, id string) (swarm.Secret, []byte, error) { + if err := cli.NewVersionError("1.25", "secret inspect"); err != nil { + return swarm.Secret{}, nil, err + } resp, err := cli.get(ctx, "/secrets/"+id, nil, nil) if err != nil { if resp.statusCode == http.StatusNotFound { diff --git a/vendor/github.com/docker/docker/client/secret_list.go b/vendor/github.com/docker/docker/client/secret_list.go index 7e9d5ec16..0d33ecfbc 100644 --- a/vendor/github.com/docker/docker/client/secret_list.go +++ b/vendor/github.com/docker/docker/client/secret_list.go @@ -12,6 +12,9 @@ import ( // SecretList returns the list of secrets. func (cli *Client) SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error) { + if err := cli.NewVersionError("1.25", "secret list"); err != nil { + return nil, err + } query := url.Values{} if options.Filters.Len() > 0 { diff --git a/vendor/github.com/docker/docker/client/secret_remove.go b/vendor/github.com/docker/docker/client/secret_remove.go index 1955b988a..c5e37af17 100644 --- a/vendor/github.com/docker/docker/client/secret_remove.go +++ b/vendor/github.com/docker/docker/client/secret_remove.go @@ -4,6 +4,9 @@ import "golang.org/x/net/context" // SecretRemove removes a Secret. 
func (cli *Client) SecretRemove(ctx context.Context, id string) error { + if err := cli.NewVersionError("1.25", "secret remove"); err != nil { + return err + } resp, err := cli.delete(ctx, "/secrets/"+id, nil, nil) ensureReaderClosed(resp) return err diff --git a/vendor/github.com/docker/docker/client/secret_update.go b/vendor/github.com/docker/docker/client/secret_update.go index 42cdbbe17..875a4c901 100644 --- a/vendor/github.com/docker/docker/client/secret_update.go +++ b/vendor/github.com/docker/docker/client/secret_update.go @@ -8,8 +8,11 @@ import ( "golang.org/x/net/context" ) -// SecretUpdate attempts to updates a Secret +// SecretUpdate attempts to update a Secret func (cli *Client) SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error { + if err := cli.NewVersionError("1.25", "secret update"); err != nil { + return err + } query := url.Values{} query.Set("version", strconv.FormatUint(version.Index, 10)) resp, err := cli.post(ctx, "/secrets/"+id+"/update", query, secret, nil) diff --git a/vendor/github.com/docker/docker/client/service_create.go b/vendor/github.com/docker/docker/client/service_create.go index 3d1be225b..a36839443 100644 --- a/vendor/github.com/docker/docker/client/service_create.go +++ b/vendor/github.com/docker/docker/client/service_create.go @@ -2,20 +2,71 @@ package client import ( "encoding/json" + "fmt" + "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/swarm" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" "golang.org/x/net/context" ) // ServiceCreate creates a new Service. func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (types.ServiceCreateResponse, error) { - var headers map[string][]string + var distErr error + + headers := map[string][]string{ + "version": {cli.version}, + } if options.EncodedRegistryAuth != "" { - headers = map[string][]string{ - "X-Registry-Auth": {options.EncodedRegistryAuth}, + headers["X-Registry-Auth"] = []string{options.EncodedRegistryAuth} + } + + // Make sure containerSpec is not nil when no runtime is set or the runtime is set to container + if service.TaskTemplate.ContainerSpec == nil && (service.TaskTemplate.Runtime == "" || service.TaskTemplate.Runtime == swarm.RuntimeContainer) { + service.TaskTemplate.ContainerSpec = &swarm.ContainerSpec{} + } + + if err := validateServiceSpec(service); err != nil { + return types.ServiceCreateResponse{}, err + } + + // ensure that the image is tagged + var imgPlatforms []swarm.Platform + if service.TaskTemplate.ContainerSpec != nil { + if taggedImg := imageWithTagString(service.TaskTemplate.ContainerSpec.Image); taggedImg != "" { + service.TaskTemplate.ContainerSpec.Image = taggedImg } + if options.QueryRegistry { + var img string + img, imgPlatforms, distErr = imageDigestAndPlatforms(ctx, cli, service.TaskTemplate.ContainerSpec.Image, options.EncodedRegistryAuth) + if img != "" { + service.TaskTemplate.ContainerSpec.Image = img + } + } + } + + // ensure that the image is tagged + if service.TaskTemplate.PluginSpec != nil { + if taggedImg := imageWithTagString(service.TaskTemplate.PluginSpec.Remote); taggedImg != "" { + service.TaskTemplate.PluginSpec.Remote = taggedImg + } + if options.QueryRegistry { + var img string + img, imgPlatforms, distErr = imageDigestAndPlatforms(ctx, cli, service.TaskTemplate.PluginSpec.Remote, options.EncodedRegistryAuth) + if img != "" { + 
service.TaskTemplate.PluginSpec.Remote = img + } + } + } + + if service.TaskTemplate.Placement == nil && len(imgPlatforms) > 0 { + service.TaskTemplate.Placement = &swarm.Placement{} + } + if len(imgPlatforms) > 0 { + service.TaskTemplate.Placement.Platforms = imgPlatforms } var response types.ServiceCreateResponse @@ -25,6 +76,81 @@ func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec, } err = json.NewDecoder(resp.body).Decode(&response) + + if distErr != nil { + response.Warnings = append(response.Warnings, digestWarning(service.TaskTemplate.ContainerSpec.Image)) + } + ensureReaderClosed(resp) return response, err } + +func imageDigestAndPlatforms(ctx context.Context, cli *Client, image, encodedAuth string) (string, []swarm.Platform, error) { + distributionInspect, err := cli.DistributionInspect(ctx, image, encodedAuth) + imageWithDigest := image + var platforms []swarm.Platform + if err != nil { + return "", nil, err + } + + imageWithDigest = imageWithDigestString(image, distributionInspect.Descriptor.Digest) + + if len(distributionInspect.Platforms) > 0 { + platforms = make([]swarm.Platform, 0, len(distributionInspect.Platforms)) + for _, p := range distributionInspect.Platforms { + platforms = append(platforms, swarm.Platform{ + Architecture: p.Architecture, + OS: p.OS, + }) + } + } + return imageWithDigest, platforms, err +} + +// imageWithDigestString takes an image string and a digest, and updates +// the image string if it didn't originally contain a digest. It returns +// an empty string if there are no updates. +func imageWithDigestString(image string, dgst digest.Digest) string { + namedRef, err := reference.ParseNormalizedNamed(image) + if err == nil { + if _, isCanonical := namedRef.(reference.Canonical); !isCanonical { + // ensure that image gets a default tag if none is provided + img, err := reference.WithDigest(namedRef, dgst) + if err == nil { + return reference.FamiliarString(img) + } + } + } + return "" +} + +// imageWithTagString takes an image string, and returns a tagged image +// string, adding a 'latest' tag if one was not provided. It returns an +// emptry string if a canonical reference was provided +func imageWithTagString(image string) string { + namedRef, err := reference.ParseNormalizedNamed(image) + if err == nil { + return reference.FamiliarString(reference.TagNameOnly(namedRef)) + } + return "" +} + +// digestWarning constructs a formatted warning string using the +// image name that could not be pinned by digest. The formatting +// is hardcoded, but could me made smarter in the future +func digestWarning(image string) string { + return fmt.Sprintf("image %s could not be accessed on a registry to record\nits digest. 
Each node will access %s independently,\npossibly leading to different nodes running different\nversions of the image.\n", image, image) +} + +func validateServiceSpec(s swarm.ServiceSpec) error { + if s.TaskTemplate.ContainerSpec != nil && s.TaskTemplate.PluginSpec != nil { + return errors.New("must not specify both a container spec and a plugin spec in the task template") + } + if s.TaskTemplate.PluginSpec != nil && s.TaskTemplate.Runtime != swarm.RuntimePlugin { + return errors.New("mismatched runtime with plugin spec") + } + if s.TaskTemplate.ContainerSpec != nil && (s.TaskTemplate.Runtime != "" && s.TaskTemplate.Runtime != swarm.RuntimeContainer) { + return errors.New("mismatched runtime with container spec") + } + return nil +} diff --git a/vendor/github.com/docker/docker/client/service_update.go b/vendor/github.com/docker/docker/client/service_update.go index 873a1e055..8764f299a 100644 --- a/vendor/github.com/docker/docker/client/service_update.go +++ b/vendor/github.com/docker/docker/client/service_update.go @@ -13,14 +13,16 @@ import ( // ServiceUpdate updates a Service. func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) { var ( - headers map[string][]string query = url.Values{} + distErr error ) + headers := map[string][]string{ + "version": {cli.version}, + } + if options.EncodedRegistryAuth != "" { - headers = map[string][]string{ - "X-Registry-Auth": {options.EncodedRegistryAuth}, - } + headers["X-Registry-Auth"] = []string{options.EncodedRegistryAuth} } if options.RegistryAuthFrom != "" { @@ -33,6 +35,46 @@ func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version query.Set("version", strconv.FormatUint(version.Index, 10)) + if err := validateServiceSpec(service); err != nil { + return types.ServiceUpdateResponse{}, err + } + + var imgPlatforms []swarm.Platform + // ensure that the image is tagged + if service.TaskTemplate.ContainerSpec != nil { + if taggedImg := imageWithTagString(service.TaskTemplate.ContainerSpec.Image); taggedImg != "" { + service.TaskTemplate.ContainerSpec.Image = taggedImg + } + if options.QueryRegistry { + var img string + img, imgPlatforms, distErr = imageDigestAndPlatforms(ctx, cli, service.TaskTemplate.ContainerSpec.Image, options.EncodedRegistryAuth) + if img != "" { + service.TaskTemplate.ContainerSpec.Image = img + } + } + } + + // ensure that the image is tagged + if service.TaskTemplate.PluginSpec != nil { + if taggedImg := imageWithTagString(service.TaskTemplate.PluginSpec.Remote); taggedImg != "" { + service.TaskTemplate.PluginSpec.Remote = taggedImg + } + if options.QueryRegistry { + var img string + img, imgPlatforms, distErr = imageDigestAndPlatforms(ctx, cli, service.TaskTemplate.PluginSpec.Remote, options.EncodedRegistryAuth) + if img != "" { + service.TaskTemplate.PluginSpec.Remote = img + } + } + } + + if service.TaskTemplate.Placement == nil && len(imgPlatforms) > 0 { + service.TaskTemplate.Placement = &swarm.Placement{} + } + if len(imgPlatforms) > 0 { + service.TaskTemplate.Placement.Platforms = imgPlatforms + } + var response types.ServiceUpdateResponse resp, err := cli.post(ctx, "/services/"+serviceID+"/update", query, service, headers) if err != nil { @@ -40,6 +82,11 @@ func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version } err = json.NewDecoder(resp.body).Decode(&response) + + if distErr != nil { + response.Warnings = 
append(response.Warnings, digestWarning(service.TaskTemplate.ContainerSpec.Image)) + } + ensureReaderClosed(resp) return response, err } diff --git a/vendor/github.com/docker/docker/client/session.go b/vendor/github.com/docker/docker/client/session.go new file mode 100644 index 000000000..8ee916213 --- /dev/null +++ b/vendor/github.com/docker/docker/client/session.go @@ -0,0 +1,19 @@ +package client + +import ( + "net" + "net/http" + + "golang.org/x/net/context" +) + +// DialSession returns a connection that can be used communication with daemon +func (cli *Client) DialSession(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error) { + req, err := http.NewRequest("POST", "/session", nil) + if err != nil { + return nil, err + } + req = cli.addHeaders(req, meta) + + return cli.setupHijackConn(req, proto) +} diff --git a/vendor/github.com/docker/docker/client/utils.go b/vendor/github.com/docker/docker/client/utils.go index 23d520ecb..f3d8877df 100644 --- a/vendor/github.com/docker/docker/client/utils.go +++ b/vendor/github.com/docker/docker/client/utils.go @@ -1,9 +1,10 @@ package client import ( - "github.com/docker/docker/api/types/filters" "net/url" "regexp" + + "github.com/docker/docker/api/types/filters" ) var headerRegexp = regexp.MustCompile(`\ADocker/.+\s\((.+)\)\z`) diff --git a/vendor/github.com/docker/docker/hack/README.md b/vendor/github.com/docker/docker/hack/README.md new file mode 100644 index 000000000..802395d53 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/README.md @@ -0,0 +1,60 @@ +## About + +This directory contains a collection of scripts used to build and manage this +repository. If there are any issues regarding the intention of a particular +script (or even part of a certain script), please reach out to us. +It may help us either refine our current scripts, or add on new ones +that are appropriate for a given use case. + +## DinD (dind.sh) + +DinD is a wrapper script which allows Docker to be run inside a Docker +container. DinD requires the container to +be run with privileged mode enabled. + +## Generate Authors (generate-authors.sh) + +Generates AUTHORS; a file with all the names and corresponding emails of +individual contributors. AUTHORS can be found in the home directory of +this repository. + +## Make + +There are two make files, each with different extensions. Neither are supposed +to be called directly; only invoke `make`. Both scripts run inside a Docker +container. + +### make.ps1 + +- The Windows native build script that uses PowerShell semantics; it is limited +unlike `hack\make.sh` since it does not provide support for the full set of +operations provided by the Linux counterpart, `make.sh`. However, `make.ps1` +does provide support for local Windows development and Windows to Windows CI. +More information is found within `make.ps1` by the author, @jhowardmsft + +### make.sh + +- Referenced via `make test` when running tests on a local machine, +or directly referenced when running tests inside a Docker development container. +- When running on a local machine, `make test` to run all tests found in +`test`, `test-unit`, `test-integration-cli`, and `test-docker-py` on +your local machine. The default timeout is set in `make.sh` to 60 minutes +(`${TIMEOUT:=60m}`), since it currently takes up to an hour to run +all of the tests. +- When running inside a Docker development container, `hack/make.sh` does +not have a single target that runs all the tests. 
You need to provide a +single command line with multiple targets that performs the same thing. +An example referenced from [Run targets inside a development container](https://docs.docker.com/opensource/project/test-and-docs/#run-targets-inside-a-development-container): `root@5f8630b873fe:/go/src/github.com/moby/moby# hack/make.sh dynbinary binary cross test-unit test-integration-cli test-docker-py` +- For more information related to testing outside the scope of this README, +refer to +[Run tests and test documentation](https://docs.docker.com/opensource/project/test-and-docs/) + +## Release (release.sh) + +Releases any bundles built by `make` on a public AWS S3 bucket. +For information regarding configuration, please view `release.sh`. + +## Vendor (vendor.sh) + +A shell script that is a wrapper around Vndr. For information on how to use +this, please refer to [vndr's README](https://github.com/LK4D4/vndr/blob/master/README.md) diff --git a/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/README.md b/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/README.md index 7366f7233..1cea52526 100644 --- a/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/README.md +++ b/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/README.md @@ -38,6 +38,8 @@ Following environment variables are known to work in this step: - `BUILDFLAGS` - `DOCKER_INCREMENTAL_BINARY` +Note: during the transition into Moby Project, you might need to create a symbolic link `$GOPATH/src/github.com/docker/docker` to `$GOPATH/src/github.com/moby/moby`. + ### Step 2: Execute tests $ ./hack/integration-cli-on-swarm/integration-cli-on-swarm -replicas 40 -push-worker-image YOUR_REGISTRY.EXAMPLE.COM/integration-cli-worker:latest diff --git a/vendor/github.com/docker/docker/pkg/ioutils/fmt.go b/vendor/github.com/docker/docker/pkg/ioutils/fmt.go deleted file mode 100644 index 0b04b0ba3..000000000 --- a/vendor/github.com/docker/docker/pkg/ioutils/fmt.go +++ /dev/null @@ -1,22 +0,0 @@ -package ioutils - -import ( - "fmt" - "io" -) - -// FprintfIfNotEmpty prints the string value if it's not empty -func FprintfIfNotEmpty(w io.Writer, format, value string) (int, error) { - if value != "" { - return fmt.Fprintf(w, format, value) - } - return 0, nil -} - -// FprintfIfTrue prints the boolean value if it's true -func FprintfIfTrue(w io.Writer, format string, ok bool) (int, error) { - if ok { - return fmt.Fprintf(w, format, ok) - } - return 0, nil -} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/multireader.go b/vendor/github.com/docker/docker/pkg/ioutils/multireader.go deleted file mode 100644 index edb043ddc..000000000 --- a/vendor/github.com/docker/docker/pkg/ioutils/multireader.go +++ /dev/null @@ -1,224 +0,0 @@ -package ioutils - -import ( - "bytes" - "fmt" - "io" - "os" -) - -type pos struct { - idx int - offset int64 -} - -type multiReadSeeker struct { - readers []io.ReadSeeker - pos *pos - posIdx map[io.ReadSeeker]int -} - -func (r *multiReadSeeker) Seek(offset int64, whence int) (int64, error) { - var tmpOffset int64 - switch whence { - case os.SEEK_SET: - for i, rdr := range r.readers { - // get size of the current reader - s, err := rdr.Seek(0, os.SEEK_END) - if err != nil { - return -1, err - } - - if offset > tmpOffset+s { - if i == len(r.readers)-1 { - rdrOffset := s + (offset - tmpOffset) - if _, err := rdr.Seek(rdrOffset, os.SEEK_SET); err != nil { - return -1, err - } - r.pos = &pos{i, rdrOffset} - return offset, nil - } - - tmpOffset += s - continue - } - - rdrOffset := 
offset - tmpOffset - idx := i - - rdr.Seek(rdrOffset, os.SEEK_SET) - // make sure all following readers are at 0 - for _, rdr := range r.readers[i+1:] { - rdr.Seek(0, os.SEEK_SET) - } - - if rdrOffset == s && i != len(r.readers)-1 { - idx++ - rdrOffset = 0 - } - r.pos = &pos{idx, rdrOffset} - return offset, nil - } - case os.SEEK_END: - for _, rdr := range r.readers { - s, err := rdr.Seek(0, os.SEEK_END) - if err != nil { - return -1, err - } - tmpOffset += s - } - r.Seek(tmpOffset+offset, os.SEEK_SET) - return tmpOffset + offset, nil - case os.SEEK_CUR: - if r.pos == nil { - return r.Seek(offset, os.SEEK_SET) - } - // Just return the current offset - if offset == 0 { - return r.getCurOffset() - } - - curOffset, err := r.getCurOffset() - if err != nil { - return -1, err - } - rdr, rdrOffset, err := r.getReaderForOffset(curOffset + offset) - if err != nil { - return -1, err - } - - r.pos = &pos{r.posIdx[rdr], rdrOffset} - return curOffset + offset, nil - default: - return -1, fmt.Errorf("Invalid whence: %d", whence) - } - - return -1, fmt.Errorf("Error seeking for whence: %d, offset: %d", whence, offset) -} - -func (r *multiReadSeeker) getReaderForOffset(offset int64) (io.ReadSeeker, int64, error) { - - var offsetTo int64 - - for _, rdr := range r.readers { - size, err := getReadSeekerSize(rdr) - if err != nil { - return nil, -1, err - } - if offsetTo+size > offset { - return rdr, offset - offsetTo, nil - } - if rdr == r.readers[len(r.readers)-1] { - return rdr, offsetTo + offset, nil - } - offsetTo += size - } - - return nil, 0, nil -} - -func (r *multiReadSeeker) getCurOffset() (int64, error) { - var totalSize int64 - for _, rdr := range r.readers[:r.pos.idx+1] { - if r.posIdx[rdr] == r.pos.idx { - totalSize += r.pos.offset - break - } - - size, err := getReadSeekerSize(rdr) - if err != nil { - return -1, fmt.Errorf("error getting seeker size: %v", err) - } - totalSize += size - } - return totalSize, nil -} - -func (r *multiReadSeeker) getOffsetToReader(rdr io.ReadSeeker) (int64, error) { - var offset int64 - for _, r := range r.readers { - if r == rdr { - break - } - - size, err := getReadSeekerSize(rdr) - if err != nil { - return -1, err - } - offset += size - } - return offset, nil -} - -func (r *multiReadSeeker) Read(b []byte) (int, error) { - if r.pos == nil { - // make sure all readers are at 0 - r.Seek(0, os.SEEK_SET) - } - - bLen := int64(len(b)) - buf := bytes.NewBuffer(nil) - var rdr io.ReadSeeker - - for _, rdr = range r.readers[r.pos.idx:] { - readBytes, err := io.CopyN(buf, rdr, bLen) - if err != nil && err != io.EOF { - return -1, err - } - bLen -= readBytes - - if bLen == 0 { - break - } - } - - rdrPos, err := rdr.Seek(0, os.SEEK_CUR) - if err != nil { - return -1, err - } - r.pos = &pos{r.posIdx[rdr], rdrPos} - return buf.Read(b) -} - -func getReadSeekerSize(rdr io.ReadSeeker) (int64, error) { - // save the current position - pos, err := rdr.Seek(0, os.SEEK_CUR) - if err != nil { - return -1, err - } - - // get the size - size, err := rdr.Seek(0, os.SEEK_END) - if err != nil { - return -1, err - } - - // reset the position - if _, err := rdr.Seek(pos, os.SEEK_SET); err != nil { - return -1, err - } - return size, nil -} - -// MultiReadSeeker returns a ReadSeeker that's the logical concatenation of the provided -// input readseekers. After calling this method the initial position is set to the -// beginning of the first ReadSeeker. At the end of a ReadSeeker, Read always advances -// to the beginning of the next ReadSeeker and returns EOF at the end of the last ReadSeeker. 
-// Seek can be used over the sum of lengths of all readseekers. -// -// When a MultiReadSeeker is used, no Read and Seek operations should be made on -// its ReadSeeker components. Also, users should make no assumption on the state -// of individual readseekers while the MultiReadSeeker is used. -func MultiReadSeeker(readers ...io.ReadSeeker) io.ReadSeeker { - if len(readers) == 1 { - return readers[0] - } - idx := make(map[io.ReadSeeker]int) - for i, rdr := range readers { - idx[rdr] = i - } - return &multiReadSeeker{ - readers: readers, - posIdx: idx, - } -} diff --git a/vendor/github.com/docker/docker/pkg/mount/flags.go b/vendor/github.com/docker/docker/pkg/mount/flags.go new file mode 100644 index 000000000..607dbed43 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/mount/flags.go @@ -0,0 +1,149 @@ +package mount + +import ( + "fmt" + "strings" +) + +var flags = map[string]struct { + clear bool + flag int +}{ + "defaults": {false, 0}, + "ro": {false, RDONLY}, + "rw": {true, RDONLY}, + "suid": {true, NOSUID}, + "nosuid": {false, NOSUID}, + "dev": {true, NODEV}, + "nodev": {false, NODEV}, + "exec": {true, NOEXEC}, + "noexec": {false, NOEXEC}, + "sync": {false, SYNCHRONOUS}, + "async": {true, SYNCHRONOUS}, + "dirsync": {false, DIRSYNC}, + "remount": {false, REMOUNT}, + "mand": {false, MANDLOCK}, + "nomand": {true, MANDLOCK}, + "atime": {true, NOATIME}, + "noatime": {false, NOATIME}, + "diratime": {true, NODIRATIME}, + "nodiratime": {false, NODIRATIME}, + "bind": {false, BIND}, + "rbind": {false, RBIND}, + "unbindable": {false, UNBINDABLE}, + "runbindable": {false, RUNBINDABLE}, + "private": {false, PRIVATE}, + "rprivate": {false, RPRIVATE}, + "shared": {false, SHARED}, + "rshared": {false, RSHARED}, + "slave": {false, SLAVE}, + "rslave": {false, RSLAVE}, + "relatime": {false, RELATIME}, + "norelatime": {true, RELATIME}, + "strictatime": {false, STRICTATIME}, + "nostrictatime": {true, STRICTATIME}, +} + +var validFlags = map[string]bool{ + "": true, + "size": true, + "mode": true, + "uid": true, + "gid": true, + "nr_inodes": true, + "nr_blocks": true, + "mpol": true, +} + +var propagationFlags = map[string]bool{ + "bind": true, + "rbind": true, + "unbindable": true, + "runbindable": true, + "private": true, + "rprivate": true, + "shared": true, + "rshared": true, + "slave": true, + "rslave": true, +} + +// MergeTmpfsOptions merge mount options to make sure there is no duplicate. +func MergeTmpfsOptions(options []string) ([]string, error) { + // We use collisions maps to remove duplicates. + // For flag, the key is the flag value (the key for propagation flag is -1) + // For data=value, the key is the data + flagCollisions := map[int]bool{} + dataCollisions := map[string]bool{} + + var newOptions []string + // We process in reverse order + for i := len(options) - 1; i >= 0; i-- { + option := options[i] + if option == "defaults" { + continue + } + if f, ok := flags[option]; ok && f.flag != 0 { + // There is only one propagation mode + key := f.flag + if propagationFlags[option] { + key = -1 + } + // Check to see if there is collision for flag + if !flagCollisions[key] { + // We prepend the option and add to collision map + newOptions = append([]string{option}, newOptions...) 
+ flagCollisions[key] = true + } + continue + } + opt := strings.SplitN(option, "=", 2) + if len(opt) != 2 || !validFlags[opt[0]] { + return nil, fmt.Errorf("Invalid tmpfs option %q", opt) + } + if !dataCollisions[opt[0]] { + // We prepend the option and add to collision map + newOptions = append([]string{option}, newOptions...) + dataCollisions[opt[0]] = true + } + } + + return newOptions, nil +} + +// Parse fstab type mount options into mount() flags +// and device specific data +func parseOptions(options string) (int, string) { + var ( + flag int + data []string + ) + + for _, o := range strings.Split(options, ",") { + // If the option does not exist in the flags table or the flag + // is not supported on the platform, + // then it is a data value for a specific fs type + if f, exists := flags[o]; exists && f.flag != 0 { + if f.clear { + flag &= ^f.flag + } else { + flag |= f.flag + } + } else { + data = append(data, o) + } + } + return flag, strings.Join(data, ",") +} + +// ParseTmpfsOptions parse fstab type mount options into flags and data +func ParseTmpfsOptions(options string) (int, string, error) { + flags, data := parseOptions(options) + for _, o := range strings.Split(data, ",") { + opt := strings.SplitN(o, "=", 2) + if !validFlags[opt[0]] { + return 0, "", fmt.Errorf("Invalid tmpfs option %q", opt) + } + } + return flags, data, nil +} diff --git a/vendor/github.com/docker/docker/pkg/mount/flags_freebsd.go b/vendor/github.com/docker/docker/pkg/mount/flags_freebsd.go new file mode 100644 index 000000000..5f76f331b --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/mount/flags_freebsd.go @@ -0,0 +1,49 @@ +// +build freebsd,cgo + +package mount + +/* +#include +*/ +import "C" + +const ( + // RDONLY will mount the filesystem as read-only. + RDONLY = C.MNT_RDONLY + + // NOSUID will not allow set-user-identifier or set-group-identifier bits to + // take effect. + NOSUID = C.MNT_NOSUID + + // NOEXEC will not allow execution of any binaries on the mounted file system. + NOEXEC = C.MNT_NOEXEC + + // SYNCHRONOUS will allow any I/O to the file system to be done synchronously. + SYNCHRONOUS = C.MNT_SYNCHRONOUS + + // NOATIME will not update the file access time when reading from a file. + NOATIME = C.MNT_NOATIME +) + +// These flags are unsupported. +const ( + BIND = 0 + DIRSYNC = 0 + MANDLOCK = 0 + NODEV = 0 + NODIRATIME = 0 + UNBINDABLE = 0 + RUNBINDABLE = 0 + PRIVATE = 0 + RPRIVATE = 0 + SHARED = 0 + RSHARED = 0 + SLAVE = 0 + RSLAVE = 0 + RBIND = 0 + RELATIVE = 0 + RELATIME = 0 + REMOUNT = 0 + STRICTATIME = 0 + mntDetach = 0 +) diff --git a/vendor/github.com/docker/docker/pkg/mount/flags_linux.go b/vendor/github.com/docker/docker/pkg/mount/flags_linux.go new file mode 100644 index 000000000..25f466183 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/mount/flags_linux.go @@ -0,0 +1,87 @@ +package mount + +import ( + "syscall" +) + +const ( + // RDONLY will mount the file system read-only. + RDONLY = syscall.MS_RDONLY + + // NOSUID will not allow set-user-identifier or set-group-identifier bits to + // take effect. + NOSUID = syscall.MS_NOSUID + + // NODEV will not interpret character or block special devices on the file + // system. + NODEV = syscall.MS_NODEV + + // NOEXEC will not allow execution of any binaries on the mounted file system. + NOEXEC = syscall.MS_NOEXEC + + // SYNCHRONOUS will allow I/O to the file system to be done synchronously. 
+ SYNCHRONOUS = syscall.MS_SYNCHRONOUS + + // DIRSYNC will force all directory updates within the file system to be done + // synchronously. This affects the following system calls: create, link, + // unlink, symlink, mkdir, rmdir, mknod and rename. + DIRSYNC = syscall.MS_DIRSYNC + + // REMOUNT will attempt to remount an already-mounted file system. This is + // commonly used to change the mount flags for a file system, especially to + // make a readonly file system writeable. It does not change device or mount + // point. + REMOUNT = syscall.MS_REMOUNT + + // MANDLOCK will force mandatory locks on a filesystem. + MANDLOCK = syscall.MS_MANDLOCK + + // NOATIME will not update the file access time when reading from a file. + NOATIME = syscall.MS_NOATIME + + // NODIRATIME will not update the directory access time. + NODIRATIME = syscall.MS_NODIRATIME + + // BIND remounts a subtree somewhere else. + BIND = syscall.MS_BIND + + // RBIND remounts a subtree and all possible submounts somewhere else. + RBIND = syscall.MS_BIND | syscall.MS_REC + + // UNBINDABLE creates a mount which cannot be cloned through a bind operation. + UNBINDABLE = syscall.MS_UNBINDABLE + + // RUNBINDABLE marks the entire mount tree as UNBINDABLE. + RUNBINDABLE = syscall.MS_UNBINDABLE | syscall.MS_REC + + // PRIVATE creates a mount which carries no propagation abilities. + PRIVATE = syscall.MS_PRIVATE + + // RPRIVATE marks the entire mount tree as PRIVATE. + RPRIVATE = syscall.MS_PRIVATE | syscall.MS_REC + + // SLAVE creates a mount which receives propagation from its master, but not + // vice versa. + SLAVE = syscall.MS_SLAVE + + // RSLAVE marks the entire mount tree as SLAVE. + RSLAVE = syscall.MS_SLAVE | syscall.MS_REC + + // SHARED creates a mount which provides the ability to create mirrors of + // that mount such that mounts and unmounts within any of the mirrors + // propagate to the other mirrors. + SHARED = syscall.MS_SHARED + + // RSHARED marks the entire mount tree as SHARED. + RSHARED = syscall.MS_SHARED | syscall.MS_REC + + // RELATIME updates inode access times relative to modify or change time. + RELATIME = syscall.MS_RELATIME + + // STRICTATIME allows to explicitly request full atime updates. This makes + // it possible for the kernel to default to relatime or noatime but still + // allow userspace to override it. + STRICTATIME = syscall.MS_STRICTATIME + + mntDetach = syscall.MNT_DETACH +) diff --git a/vendor/github.com/docker/docker/pkg/mount/flags_unsupported.go b/vendor/github.com/docker/docker/pkg/mount/flags_unsupported.go new file mode 100644 index 000000000..9ed741e3f --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/mount/flags_unsupported.go @@ -0,0 +1,31 @@ +// +build !linux,!freebsd freebsd,!cgo solaris,!cgo + +package mount + +// These flags are unsupported. +const ( + BIND = 0 + DIRSYNC = 0 + MANDLOCK = 0 + NOATIME = 0 + NODEV = 0 + NODIRATIME = 0 + NOEXEC = 0 + NOSUID = 0 + UNBINDABLE = 0 + RUNBINDABLE = 0 + PRIVATE = 0 + RPRIVATE = 0 + SHARED = 0 + RSHARED = 0 + SLAVE = 0 + RSLAVE = 0 + RBIND = 0 + RELATIME = 0 + RELATIVE = 0 + REMOUNT = 0 + STRICTATIME = 0 + SYNCHRONOUS = 0 + RDONLY = 0 + mntDetach = 0 +) diff --git a/vendor/github.com/docker/docker/pkg/mount/mount.go b/vendor/github.com/docker/docker/pkg/mount/mount.go new file mode 100644 index 000000000..c9fdfd694 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/mount/mount.go @@ -0,0 +1,86 @@ +package mount + +import ( + "sort" + "strings" +) + +// GetMounts retrieves a list of mounts for the current running process. 
+func GetMounts() ([]*Info, error) { + return parseMountTable() +} + +// Mounted determines if a specified mountpoint has been mounted. +// On Linux it looks at /proc/self/mountinfo and on Solaris at mnttab. +func Mounted(mountpoint string) (bool, error) { + entries, err := parseMountTable() + if err != nil { + return false, err + } + + // Search the table for the mountpoint + for _, e := range entries { + if e.Mountpoint == mountpoint { + return true, nil + } + } + return false, nil +} + +// Mount will mount filesystem according to the specified configuration, on the +// condition that the target path is *not* already mounted. Options must be +// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See +// flags.go for supported option flags. +func Mount(device, target, mType, options string) error { + flag, _ := parseOptions(options) + if flag&REMOUNT != REMOUNT { + if mounted, err := Mounted(target); err != nil || mounted { + return err + } + } + return ForceMount(device, target, mType, options) +} + +// ForceMount will mount a filesystem according to the specified configuration, +// *regardless* if the target path is not already mounted. Options must be +// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See +// flags.go for supported option flags. +func ForceMount(device, target, mType, options string) error { + flag, data := parseOptions(options) + return mount(device, target, mType, uintptr(flag), data) +} + +// Unmount lazily unmounts a filesystem on supported platforms, otherwise +// does a normal unmount. +func Unmount(target string) error { + if mounted, err := Mounted(target); err != nil || !mounted { + return err + } + return unmount(target, mntDetach) +} + +// RecursiveUnmount unmounts the target and all mounts underneath, starting with +// the deepsest mount first. 
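A caller-side sketch of the Mount/Unmount helpers defined above; the target path, filesystem type, and option string are illustrative:

```go
package example

import "github.com/docker/docker/pkg/mount"

// withTmpfs mounts a small tmpfs at target, runs fn, and lazily unmounts the
// target again. Options use the same "opt1,opt2=val" syntax as mount(8)/fstab;
// Mount itself is a no-op when target is already a mountpoint.
func withTmpfs(target string, fn func() error) error {
	if err := mount.Mount("tmpfs", target, "tmpfs", "nosuid,size=64m"); err != nil {
		return err
	}
	defer mount.Unmount(target)
	return fn()
}
```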
+func RecursiveUnmount(target string) error { + mounts, err := GetMounts() + if err != nil { + return err + } + + // Make the deepest mount be first + sort.Sort(sort.Reverse(byMountpoint(mounts))) + + for i, m := range mounts { + if !strings.HasPrefix(m.Mountpoint, target) { + continue + } + if err := Unmount(m.Mountpoint); err != nil && i == len(mounts)-1 { + if mounted, err := Mounted(m.Mountpoint); err != nil || mounted { + return err + } + // Ignore errors for submounts and continue trying to unmount others + // The final unmount should fail if there ane any submounts remaining + } + } + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go b/vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go new file mode 100644 index 000000000..bb870e6f5 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go @@ -0,0 +1,59 @@ +package mount + +/* +#include +#include +#include +#include +#include +#include +*/ +import "C" + +import ( + "fmt" + "strings" + "syscall" + "unsafe" +) + +func allocateIOVecs(options []string) []C.struct_iovec { + out := make([]C.struct_iovec, len(options)) + for i, option := range options { + out[i].iov_base = unsafe.Pointer(C.CString(option)) + out[i].iov_len = C.size_t(len(option) + 1) + } + return out +} + +func mount(device, target, mType string, flag uintptr, data string) error { + isNullFS := false + + xs := strings.Split(data, ",") + for _, x := range xs { + if x == "bind" { + isNullFS = true + } + } + + options := []string{"fspath", target} + if isNullFS { + options = append(options, "fstype", "nullfs", "target", device) + } else { + options = append(options, "fstype", mType, "from", device) + } + rawOptions := allocateIOVecs(options) + for _, rawOption := range rawOptions { + defer C.free(rawOption.iov_base) + } + + if errno := C.nmount(&rawOptions[0], C.uint(len(options)), C.int(flag)); errno != 0 { + reason := C.GoString(C.strerror(*C.__error())) + return fmt.Errorf("Failed to call nmount: %s", reason) + } + return nil +} + +func unmount(target string, flag int) error { + return syscall.Unmount(target, flag) +} diff --git a/vendor/github.com/docker/docker/pkg/mount/mounter_linux.go b/vendor/github.com/docker/docker/pkg/mount/mounter_linux.go new file mode 100644 index 000000000..3ef2ce6f0 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/mount/mounter_linux.go @@ -0,0 +1,56 @@ +package mount + +import ( + "syscall" +) + +const ( + // ptypes is the set propagation types. + ptypes = syscall.MS_SHARED | syscall.MS_PRIVATE | syscall.MS_SLAVE | syscall.MS_UNBINDABLE + + // pflags is the full set valid flags for a change propagation call. + pflags = ptypes | syscall.MS_REC | syscall.MS_SILENT + + // broflags is the combination of bind and read only + broflags = syscall.MS_BIND | syscall.MS_RDONLY +) + +// isremount returns true if either device name or flags identify a remount request, false otherwise. +func isremount(device string, flags uintptr) bool { + switch { + // We treat device "" and "none" as a remount request to provide compatibility with + // requests that don't explicitly set MS_REMOUNT such as those manipulating bind mounts. + case flags&syscall.MS_REMOUNT != 0, device == "", device == "none": + return true + default: + return false + } +} + +func mount(device, target, mType string, flags uintptr, data string) error { + oflags := flags &^ ptypes + if !isremount(device, flags) { + // Initial call applying all non-propagation flags. 
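As a quick illustration of how the mount helpers vendored above (Mount, Mounted and RecursiveUnmount from docker/docker/pkg/mount) fit together on Linux, here is a minimal sketch; the /var/data source and /mnt/scratch target are made-up paths, and the option string follows the "opt1=val1,opt2=val2" fstab-style syntax described in the Mount doc comment.

package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/mount"
)

func main() {
	target := "/mnt/scratch" // hypothetical mount point

	// Bind-mount a directory read-only. Options use the fstab-style
	// syntax; see flags.go for the supported flags.
	if err := mount.Mount("/var/data", target, "none", "bind,ro"); err != nil {
		log.Fatal(err)
	}

	mounted, err := mount.Mounted(target)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("mounted:", mounted)

	// Lazily unmount the target and anything mounted beneath it.
	if err := mount.RecursiveUnmount(target); err != nil {
		log.Fatal(err)
	}
}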
+ if err := syscall.Mount(device, target, mType, oflags, data); err != nil { + return err + } + } + + if flags&ptypes != 0 { + // Change the propagation type. + if err := syscall.Mount("", target, "", flags&pflags, ""); err != nil { + return err + } + } + + if oflags&broflags == broflags { + // Remount the bind to apply read only. + return syscall.Mount("", target, "", oflags|syscall.MS_REMOUNT, "") + } + + return nil +} + +func unmount(target string, flag int) error { + return syscall.Unmount(target, flag) +} diff --git a/vendor/github.com/docker/docker/pkg/mount/mounter_solaris.go b/vendor/github.com/docker/docker/pkg/mount/mounter_solaris.go new file mode 100644 index 000000000..c684aa81f --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/mount/mounter_solaris.go @@ -0,0 +1,33 @@ +// +build solaris,cgo + +package mount + +import ( + "golang.org/x/sys/unix" + "unsafe" +) + +// #include +// #include +// #include +// int Mount(const char *spec, const char *dir, int mflag, +// char *fstype, char *dataptr, int datalen, char *optptr, int optlen) { +// return mount(spec, dir, mflag, fstype, dataptr, datalen, optptr, optlen); +// } +import "C" + +func mount(device, target, mType string, flag uintptr, data string) error { + spec := C.CString(device) + dir := C.CString(target) + fstype := C.CString(mType) + _, err := C.Mount(spec, dir, C.int(flag), fstype, nil, 0, nil, 0) + C.free(unsafe.Pointer(spec)) + C.free(unsafe.Pointer(dir)) + C.free(unsafe.Pointer(fstype)) + return err +} + +func unmount(target string, flag int) error { + err := unix.Unmount(target, flag) + return err +} diff --git a/vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go b/vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go new file mode 100644 index 000000000..a2a3bb457 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go @@ -0,0 +1,11 @@ +// +build !linux,!freebsd,!solaris freebsd,!cgo solaris,!cgo + +package mount + +func mount(device, target, mType string, flag uintptr, data string) error { + panic("Not implemented") +} + +func unmount(target string, flag int) error { + panic("Not implemented") +} diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo.go new file mode 100644 index 000000000..ff4cc1d86 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/mount/mountinfo.go @@ -0,0 +1,54 @@ +package mount + +// Info reveals information about a particular mounted filesystem. This +// struct is populated from the content in the /proc//mountinfo file. +type Info struct { + // ID is a unique identifier of the mount (may be reused after umount). + ID int + + // Parent indicates the ID of the mount parent (or of self for the top of the + // mount tree). + Parent int + + // Major indicates one half of the device ID which identifies the device class. + Major int + + // Minor indicates one half of the device ID which identifies a specific + // instance of device. + Minor int + + // Root of the mount within the filesystem. + Root string + + // Mountpoint indicates the mount point relative to the process's root. + Mountpoint string + + // Opts represents mount-specific options. + Opts string + + // Optional represents optional fields. + Optional string + + // Fstype indicates the type of filesystem, such as EXT3. + Fstype string + + // Source indicates filesystem specific information or "none". + Source string + + // VfsOpts represents per super block options. 
+ VfsOpts string +} + +type byMountpoint []*Info + +func (by byMountpoint) Len() int { + return len(by) +} + +func (by byMountpoint) Less(i, j int) bool { + return by[i].Mountpoint < by[j].Mountpoint +} + +func (by byMountpoint) Swap(i, j int) { + by[i], by[j] = by[j], by[i] +} diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go new file mode 100644 index 000000000..4f32edcd9 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go @@ -0,0 +1,41 @@ +package mount + +/* +#include +#include +#include +*/ +import "C" + +import ( + "fmt" + "reflect" + "unsafe" +) + +// Parse /proc/self/mountinfo because comparing Dev and ino does not work from +// bind mounts. +func parseMountTable() ([]*Info, error) { + var rawEntries *C.struct_statfs + + count := int(C.getmntinfo(&rawEntries, C.MNT_WAIT)) + if count == 0 { + return nil, fmt.Errorf("Failed to call getmntinfo") + } + + var entries []C.struct_statfs + header := (*reflect.SliceHeader)(unsafe.Pointer(&entries)) + header.Cap = count + header.Len = count + header.Data = uintptr(unsafe.Pointer(rawEntries)) + + var out []*Info + for _, entry := range entries { + var mountinfo Info + mountinfo.Mountpoint = C.GoString(&entry.f_mntonname[0]) + mountinfo.Source = C.GoString(&entry.f_mntfromname[0]) + mountinfo.Fstype = C.GoString(&entry.f_fstypename[0]) + out = append(out, &mountinfo) + } + return out, nil +} diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go new file mode 100644 index 000000000..be69fee1d --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go @@ -0,0 +1,95 @@ +// +build linux + +package mount + +import ( + "bufio" + "fmt" + "io" + "os" + "strings" +) + +const ( + /* 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue + (1)(2)(3) (4) (5) (6) (7) (8) (9) (10) (11) + + (1) mount ID: unique identifier of the mount (may be reused after umount) + (2) parent ID: ID of parent (or of self for the top of the mount tree) + (3) major:minor: value of st_dev for files on filesystem + (4) root: root of the mount within the filesystem + (5) mount point: mount point relative to the process's root + (6) mount options: per mount options + (7) optional fields: zero or more fields of the form "tag[:value]" + (8) separator: marks the end of the optional fields + (9) filesystem type: name of filesystem of the form "type[.subtype]" + (10) mount source: filesystem specific information or "none" + (11) super options: per super block options*/ + mountinfoFormat = "%d %d %d:%d %s %s %s %s" +) + +// Parse /proc/self/mountinfo because comparing Dev and ino does not work from +// bind mounts +func parseMountTable() ([]*Info, error) { + f, err := os.Open("/proc/self/mountinfo") + if err != nil { + return nil, err + } + defer f.Close() + + return parseInfoFile(f) +} + +func parseInfoFile(r io.Reader) ([]*Info, error) { + var ( + s = bufio.NewScanner(r) + out = []*Info{} + ) + + for s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + + var ( + p = &Info{} + text = s.Text() + optionalFields string + ) + + if _, err := fmt.Sscanf(text, mountinfoFormat, + &p.ID, &p.Parent, &p.Major, &p.Minor, + &p.Root, &p.Mountpoint, &p.Opts, &optionalFields); err != nil { + return nil, fmt.Errorf("Scanning '%s' failed: %s", text, err) + } + // Safe as mountinfo encodes mountpoints with spaces as \040. 
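To show what the Info records above carry in practice, a minimal sketch that lists the current mount table via GetMounts; the field names come from the struct above, the output formatting is only illustrative.

package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/mount"
)

func main() {
	// On Linux this parses /proc/self/mountinfo into Info records.
	infos, err := mount.GetMounts()
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range infos {
		fmt.Printf("%-30s %-10s %s\n", m.Mountpoint, m.Fstype, m.Opts)
	}
}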
+ index := strings.Index(text, " - ") + postSeparatorFields := strings.Fields(text[index+3:]) + if len(postSeparatorFields) < 3 { + return nil, fmt.Errorf("Error found less than 3 fields post '-' in %q", text) + } + + if optionalFields != "-" { + p.Optional = optionalFields + } + + p.Fstype = postSeparatorFields[0] + p.Source = postSeparatorFields[1] + p.VfsOpts = strings.Join(postSeparatorFields[2:], " ") + out = append(out, p) + } + return out, nil +} + +// PidMountInfo collects the mounts for a specific process ID. If the process +// ID is unknown, it is better to use `GetMounts` which will inspect +// "/proc/self/mountinfo" instead. +func PidMountInfo(pid int) ([]*Info, error) { + f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid)) + if err != nil { + return nil, err + } + defer f.Close() + + return parseInfoFile(f) +} diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_solaris.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_solaris.go new file mode 100644 index 000000000..ad9ab57f8 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/mount/mountinfo_solaris.go @@ -0,0 +1,37 @@ +// +build solaris,cgo + +package mount + +/* +#include +#include +*/ +import "C" + +import ( + "fmt" +) + +func parseMountTable() ([]*Info, error) { + mnttab := C.fopen(C.CString(C.MNTTAB), C.CString("r")) + if mnttab == nil { + return nil, fmt.Errorf("Failed to open %s", C.MNTTAB) + } + + var out []*Info + var mp C.struct_mnttab + + ret := C.getmntent(mnttab, &mp) + for ret == 0 { + var mountinfo Info + mountinfo.Mountpoint = C.GoString(mp.mnt_mountp) + mountinfo.Source = C.GoString(mp.mnt_special) + mountinfo.Fstype = C.GoString(mp.mnt_fstype) + mountinfo.Opts = C.GoString(mp.mnt_mntopts) + out = append(out, &mountinfo) + ret = C.getmntent(mnttab, &mp) + } + + C.fclose(mnttab) + return out, nil +} diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go new file mode 100644 index 000000000..7fbcf1921 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go @@ -0,0 +1,12 @@ +// +build !windows,!linux,!freebsd,!solaris freebsd,!cgo solaris,!cgo + +package mount + +import ( + "fmt" + "runtime" +) + +func parseMountTable() ([]*Info, error) { + return nil, fmt.Errorf("mount.parseMountTable is not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) +} diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go new file mode 100644 index 000000000..dab8a37ed --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go @@ -0,0 +1,6 @@ +package mount + +func parseMountTable() ([]*Info, error) { + // Do NOT return an error! + return nil, nil +} diff --git a/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go b/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go new file mode 100644 index 000000000..8ceec84bc --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go @@ -0,0 +1,69 @@ +// +build linux + +package mount + +// MakeShared ensures a mounted filesystem has the SHARED mount option enabled. +// See the supported options in flags.go for further reference. +func MakeShared(mountPoint string) error { + return ensureMountedAs(mountPoint, "shared") +} + +// MakeRShared ensures a mounted filesystem has the RSHARED mount option enabled. +// See the supported options in flags.go for further reference. 
+func MakeRShared(mountPoint string) error { + return ensureMountedAs(mountPoint, "rshared") +} + +// MakePrivate ensures a mounted filesystem has the PRIVATE mount option enabled. +// See the supported options in flags.go for further reference. +func MakePrivate(mountPoint string) error { + return ensureMountedAs(mountPoint, "private") +} + +// MakeRPrivate ensures a mounted filesystem has the RPRIVATE mount option +// enabled. See the supported options in flags.go for further reference. +func MakeRPrivate(mountPoint string) error { + return ensureMountedAs(mountPoint, "rprivate") +} + +// MakeSlave ensures a mounted filesystem has the SLAVE mount option enabled. +// See the supported options in flags.go for further reference. +func MakeSlave(mountPoint string) error { + return ensureMountedAs(mountPoint, "slave") +} + +// MakeRSlave ensures a mounted filesystem has the RSLAVE mount option enabled. +// See the supported options in flags.go for further reference. +func MakeRSlave(mountPoint string) error { + return ensureMountedAs(mountPoint, "rslave") +} + +// MakeUnbindable ensures a mounted filesystem has the UNBINDABLE mount option +// enabled. See the supported options in flags.go for further reference. +func MakeUnbindable(mountPoint string) error { + return ensureMountedAs(mountPoint, "unbindable") +} + +// MakeRUnbindable ensures a mounted filesystem has the RUNBINDABLE mount +// option enabled. See the supported options in flags.go for further reference. +func MakeRUnbindable(mountPoint string) error { + return ensureMountedAs(mountPoint, "runbindable") +} + +func ensureMountedAs(mountPoint, options string) error { + mounted, err := Mounted(mountPoint) + if err != nil { + return err + } + + if !mounted { + if err := Mount(mountPoint, mountPoint, "none", "bind,rw"); err != nil { + return err + } + } + if _, err = Mounted(mountPoint); err != nil { + return err + } + + return ForceMount("", mountPoint, "none", options) +} diff --git a/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_solaris.go b/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_solaris.go new file mode 100644 index 000000000..09f6b03cb --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_solaris.go @@ -0,0 +1,58 @@ +// +build solaris + +package mount + +// MakeShared ensures a mounted filesystem has the SHARED mount option enabled. +// See the supported options in flags.go for further reference. +func MakeShared(mountPoint string) error { + return ensureMountedAs(mountPoint, "shared") +} + +// MakeRShared ensures a mounted filesystem has the RSHARED mount option enabled. +// See the supported options in flags.go for further reference. +func MakeRShared(mountPoint string) error { + return ensureMountedAs(mountPoint, "rshared") +} + +// MakePrivate ensures a mounted filesystem has the PRIVATE mount option enabled. +// See the supported options in flags.go for further reference. +func MakePrivate(mountPoint string) error { + return ensureMountedAs(mountPoint, "private") +} + +// MakeRPrivate ensures a mounted filesystem has the RPRIVATE mount option +// enabled. See the supported options in flags.go for further reference. +func MakeRPrivate(mountPoint string) error { + return ensureMountedAs(mountPoint, "rprivate") +} + +// MakeSlave ensures a mounted filesystem has the SLAVE mount option enabled. +// See the supported options in flags.go for further reference. 
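A small sketch (Linux only, path hypothetical) of the shared-subtree helpers defined above, which wrap ForceMount with the propagation options described in flags.go.

package main

import (
	"log"

	"github.com/docker/docker/pkg/mount"
)

func main() {
	// Mark the tree under /var/lib/example as recursively private so mounts
	// created inside it do not propagate to the rest of the namespace.
	if err := mount.MakeRPrivate("/var/lib/example"); err != nil {
		log.Fatal(err)
	}

	// A daemon that instead wants its mounts visible elsewhere could mark
	// the same tree as recursively shared.
	if err := mount.MakeRShared("/var/lib/example"); err != nil {
		log.Fatal(err)
	}
}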
+func MakeSlave(mountPoint string) error { + return ensureMountedAs(mountPoint, "slave") +} + +// MakeRSlave ensures a mounted filesystem has the RSLAVE mount option enabled. +// See the supported options in flags.go for further reference. +func MakeRSlave(mountPoint string) error { + return ensureMountedAs(mountPoint, "rslave") +} + +// MakeUnbindable ensures a mounted filesystem has the UNBINDABLE mount option +// enabled. See the supported options in flags.go for further reference. +func MakeUnbindable(mountPoint string) error { + return ensureMountedAs(mountPoint, "unbindable") +} + +// MakeRUnbindable ensures a mounted filesystem has the RUNBINDABLE mount +// option enabled. See the supported options in flags.go for further reference. +func MakeRUnbindable(mountPoint string) error { + return ensureMountedAs(mountPoint, "runbindable") +} + +func ensureMountedAs(mountPoint, options string) error { + // TODO: Solaris does not support bind mounts. + // Evaluate lofs and also look at the relevant + // mount flags to be supported. + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes.go b/vendor/github.com/docker/docker/pkg/system/chtimes.go index 7637f12e1..056d19954 100644 --- a/vendor/github.com/docker/docker/pkg/system/chtimes.go +++ b/vendor/github.com/docker/docker/pkg/system/chtimes.go @@ -2,26 +2,9 @@ package system import ( "os" - "syscall" "time" - "unsafe" ) -var ( - maxTime time.Time -) - -func init() { - if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 { - // This is a 64 bit timespec - // os.Chtimes limits time to the following - maxTime = time.Unix(0, 1<<63-1) - } else { - // This is a 32 bit timespec - maxTime = time.Unix(1<<31-1, 0) - } -} - // Chtimes changes the access time and modified time of a file at the given path func Chtimes(name string, atime time.Time, mtime time.Time) error { unixMinTime := time.Unix(0, 0) diff --git a/vendor/github.com/docker/docker/pkg/system/filesys.go b/vendor/github.com/docker/docker/pkg/system/filesys.go index 7aa920de1..102565f76 100644 --- a/vendor/github.com/docker/docker/pkg/system/filesys.go +++ b/vendor/github.com/docker/docker/pkg/system/filesys.go @@ -8,15 +8,14 @@ import ( "path/filepath" ) -// MkdirAllWithACL is a wrapper for MkdirAll that creates a directory -// ACL'd for Builtin Administrators and Local System. -func MkdirAllWithACL(path string, perm os.FileMode) error { - return MkdirAll(path, perm) +// MkdirAllWithACL is a wrapper for MkdirAll on unix systems. +func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error { + return MkdirAll(path, perm, sddl) } // MkdirAll creates a directory named path along with any necessary parents, // with permission specified by attribute perm for all dir created. 
-func MkdirAll(path string, perm os.FileMode) error { +func MkdirAll(path string, perm os.FileMode, sddl string) error { return os.MkdirAll(path, perm) } diff --git a/vendor/github.com/docker/docker/pkg/system/filesys_windows.go b/vendor/github.com/docker/docker/pkg/system/filesys_windows.go index 626d2ad88..20117db91 100644 --- a/vendor/github.com/docker/docker/pkg/system/filesys_windows.go +++ b/vendor/github.com/docker/docker/pkg/system/filesys_windows.go @@ -16,21 +16,28 @@ import ( winio "github.com/Microsoft/go-winio" ) +const ( + // SddlAdministratorsLocalSystem is local administrators plus NT AUTHORITY\System + SddlAdministratorsLocalSystem = "D:P(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)" + // SddlNtvmAdministratorsLocalSystem is NT VIRTUAL MACHINE\Virtual Machines plus local administrators plus NT AUTHORITY\System + SddlNtvmAdministratorsLocalSystem = "D:P(A;OICI;GA;;;S-1-5-83-0)(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)" +) + // MkdirAllWithACL is a wrapper for MkdirAll that creates a directory -// ACL'd for Builtin Administrators and Local System. -func MkdirAllWithACL(path string, perm os.FileMode) error { - return mkdirall(path, true) +// with an appropriate SDDL defined ACL. +func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error { + return mkdirall(path, true, sddl) } // MkdirAll implementation that is volume path aware for Windows. -func MkdirAll(path string, _ os.FileMode) error { - return mkdirall(path, false) +func MkdirAll(path string, _ os.FileMode, sddl string) error { + return mkdirall(path, false, sddl) } // mkdirall is a custom version of os.MkdirAll modified for use on Windows // so that it is both volume path aware, and can create a directory with // a DACL. -func mkdirall(path string, adminAndLocalSystem bool) error { +func mkdirall(path string, applyACL bool, sddl string) error { if re := regexp.MustCompile(`^\\\\\?\\Volume{[a-z0-9-]+}$`); re.MatchString(path) { return nil } @@ -64,15 +71,15 @@ func mkdirall(path string, adminAndLocalSystem bool) error { if j > 1 { // Create parent - err = mkdirall(path[0:j-1], false) + err = mkdirall(path[0:j-1], false, sddl) if err != nil { return err } } // Parent now exists; invoke os.Mkdir or mkdirWithACL and use its result. - if adminAndLocalSystem { - err = mkdirWithACL(path) + if applyACL { + err = mkdirWithACL(path, sddl) } else { err = os.Mkdir(path, 0) } @@ -96,9 +103,9 @@ func mkdirall(path string, adminAndLocalSystem bool) error { // in golang to cater for creating a directory am ACL permitting full // access, with inheritance, to any subfolder/file for Built-in Administrators // and Local System. 
-func mkdirWithACL(name string) error { +func mkdirWithACL(name string, sddl string) error { sa := syscall.SecurityAttributes{Length: 0} - sddl := "D:P(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)" + sd, err := winio.SddlToSecurityDescriptor(sddl) if err != nil { return &os.PathError{Op: "mkdir", Path: name, Err: err} diff --git a/vendor/github.com/docker/docker/pkg/system/init.go b/vendor/github.com/docker/docker/pkg/system/init.go new file mode 100644 index 000000000..17935088d --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/init.go @@ -0,0 +1,22 @@ +package system + +import ( + "syscall" + "time" + "unsafe" +) + +// Used by chtimes +var maxTime time.Time + +func init() { + // chtimes initialization + if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 { + // This is a 64 bit timespec + // os.Chtimes limits time to the following + maxTime = time.Unix(0, 1<<63-1) + } else { + // This is a 32 bit timespec + maxTime = time.Unix(1<<31-1, 0) + } +} diff --git a/vendor/github.com/docker/docker/pkg/system/init_windows.go b/vendor/github.com/docker/docker/pkg/system/init_windows.go new file mode 100644 index 000000000..019c66441 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/init_windows.go @@ -0,0 +1,17 @@ +package system + +import "os" + +// LCOWSupported determines if Linux Containers on Windows are supported. +// Note: This feature is in development (06/17) and enabled through an +// environment variable. At a future time, it will be enabled based +// on build number. @jhowardmsft +var lcowSupported = false + +func init() { + // LCOW initialization + if os.Getenv("LCOW_SUPPORTED") != "" { + lcowSupported = true + } + +} diff --git a/vendor/github.com/docker/docker/pkg/system/lcow_unix.go b/vendor/github.com/docker/docker/pkg/system/lcow_unix.go new file mode 100644 index 000000000..cff33bb40 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/lcow_unix.go @@ -0,0 +1,8 @@ +// +build !windows + +package system + +// LCOWSupported returns true if Linux containers on Windows are supported. +func LCOWSupported() bool { + return false +} diff --git a/vendor/github.com/docker/docker/pkg/system/lcow_windows.go b/vendor/github.com/docker/docker/pkg/system/lcow_windows.go new file mode 100644 index 000000000..e54d01e69 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/lcow_windows.go @@ -0,0 +1,6 @@ +package system + +// LCOWSupported returns true if Linux containers on Windows are supported. +func LCOWSupported() bool { + return lcowSupported +} diff --git a/vendor/github.com/docker/docker/pkg/system/path.go b/vendor/github.com/docker/docker/pkg/system/path.go new file mode 100644 index 000000000..f634a6be6 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/path.go @@ -0,0 +1,21 @@ +package system + +import "runtime" + +const defaultUnixPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + +// DefaultPathEnv is unix style list of directories to search for +// executables. Each directory is separated from the next by a colon +// ':' character . +func DefaultPathEnv(platform string) string { + if runtime.GOOS == "windows" { + if platform != runtime.GOOS && LCOWSupported() { + return defaultUnixPathEnv + } + // Deliberately empty on Windows containers on Windows as the default path will be set by + // the container. Docker has no context of what the default path should be. 
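A sketch of the new sddl parameter in use (Windows only; the directory path is invented): callers that previously relied on the implicit administrators-plus-SYSTEM ACL now pass SddlAdministratorsLocalSystem explicitly.

package main

import (
	"log"

	"github.com/docker/docker/pkg/system"
)

func main() {
	// Creates the directory with a DACL granting full access to Builtin
	// Administrators and Local System; the perm argument is ignored on
	// Windows and the path below is purely illustrative.
	err := system.MkdirAllWithACL(`C:\ProgramData\example`, 0700,
		system.SddlAdministratorsLocalSystem)
	if err != nil {
		log.Fatal(err)
	}
}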
+ return "" + } + return defaultUnixPathEnv + +} diff --git a/vendor/github.com/docker/docker/pkg/system/path_unix.go b/vendor/github.com/docker/docker/pkg/system/path_unix.go index c607c4db0..f3762e69d 100644 --- a/vendor/github.com/docker/docker/pkg/system/path_unix.go +++ b/vendor/github.com/docker/docker/pkg/system/path_unix.go @@ -2,11 +2,6 @@ package system -// DefaultPathEnv is unix style list of directories to search for -// executables. Each directory is separated from the next by a colon -// ':' character . -const DefaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" - // CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter, // is the system drive. This is a no-op on Linux. func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) { diff --git a/vendor/github.com/docker/docker/pkg/system/path_windows.go b/vendor/github.com/docker/docker/pkg/system/path_windows.go index cbfe2c157..aab891522 100644 --- a/vendor/github.com/docker/docker/pkg/system/path_windows.go +++ b/vendor/github.com/docker/docker/pkg/system/path_windows.go @@ -8,15 +8,11 @@ import ( "strings" ) -// DefaultPathEnv is deliberately empty on Windows as the default path will be set by -// the container. Docker has no context of what the default path should be. -const DefaultPathEnv = "" - // CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path. // This is used, for example, when validating a user provided path in docker cp. // If a drive letter is supplied, it must be the system drive. The drive letter // is always removed. Also, it translates it to OS semantics (IOW / to \). We -// need the path in this syntax so that it can ultimately be contatenated with +// need the path in this syntax so that it can ultimately be concatenated with // a Windows long-path which doesn't support drive-letters. Examples: // C: --> Fail // C:\ --> \ diff --git a/vendor/github.com/docker/docker/pkg/system/rm.go b/vendor/github.com/docker/docker/pkg/system/rm.go new file mode 100644 index 000000000..101b569a5 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/rm.go @@ -0,0 +1,80 @@ +package system + +import ( + "os" + "syscall" + "time" + + "github.com/docker/docker/pkg/mount" + "github.com/pkg/errors" +) + +// EnsureRemoveAll wraps `os.RemoveAll` to check for specific errors that can +// often be remedied. +// Only use `EnsureRemoveAll` if you really want to make every effort to remove +// a directory. +// +// Because of the way `os.Remove` (and by extension `os.RemoveAll`) works, there +// can be a race between reading directory entries and then actually attempting +// to remove everything in the directory. +// These types of errors do not need to be returned since it's ok for the dir to +// be gone we can just retry the remove operation. +// +// This should not return a `os.ErrNotExist` kind of error under any circumstances +func EnsureRemoveAll(dir string) error { + notExistErr := make(map[string]bool) + + // track retries + exitOnErr := make(map[string]int) + maxRetry := 5 + + // Attempt to unmount anything beneath this dir first + mount.RecursiveUnmount(dir) + + for { + err := os.RemoveAll(dir) + if err == nil { + return err + } + + pe, ok := err.(*os.PathError) + if !ok { + return err + } + + if os.IsNotExist(err) { + if notExistErr[pe.Path] { + return err + } + notExistErr[pe.Path] = true + + // There is a race where some subdir can be removed but after the parent + // dir entries have been read. 
+ // So the path could be from `os.Remove(subdir)` + // If the reported non-existent path is not the passed in `dir` we + // should just retry, but otherwise return with no error. + if pe.Path == dir { + return nil + } + continue + } + + if pe.Err != syscall.EBUSY { + return err + } + + if mounted, _ := mount.Mounted(pe.Path); mounted { + if e := mount.Unmount(pe.Path); e != nil { + if mounted, _ := mount.Mounted(pe.Path); mounted { + return errors.Wrapf(e, "error while removing %s", dir) + } + } + } + + if exitOnErr[pe.Path] == maxRetry { + return err + } + exitOnErr[pe.Path]++ + time.Sleep(100 * time.Millisecond) + } +} diff --git a/vendor/github.com/docker/docker/pkg/system/syscall_windows.go b/vendor/github.com/docker/docker/pkg/system/syscall_windows.go index 1f311874f..c328a6fb8 100644 --- a/vendor/github.com/docker/docker/pkg/system/syscall_windows.go +++ b/vendor/github.com/docker/docker/pkg/system/syscall_windows.go @@ -8,8 +8,9 @@ import ( ) var ( - ntuserApiset = syscall.NewLazyDLL("ext-ms-win-ntuser-window-l1-1-0") - procGetVersionExW = modkernel32.NewProc("GetVersionExW") + ntuserApiset = syscall.NewLazyDLL("ext-ms-win-ntuser-window-l1-1-0") + procGetVersionExW = modkernel32.NewProc("GetVersionExW") + procGetProductInfo = modkernel32.NewProc("GetProductInfo") ) // OSVersion is a wrapper for Windows version information @@ -66,6 +67,22 @@ func IsWindowsClient() bool { return osviex.ProductType == verNTWorkstation } +// IsIoTCore returns true if the currently running image is based off of +// Windows 10 IoT Core. +// @engine maintainers - this function should not be removed or modified as it +// is used to enforce licensing restrictions on Windows. +func IsIoTCore() bool { + var returnedProductType uint32 + r1, _, err := procGetProductInfo.Call(6, 1, 0, 0, uintptr(unsafe.Pointer(&returnedProductType))) + if r1 == 0 { + logrus.Warnf("GetProductInfo failed - assuming this is not IoT: %v", err) + return false + } + const productIoTUAP = 0x0000007B + const productIoTUAPCommercial = 0x00000083 + return returnedProductType == productIoTUAP || returnedProductType == productIoTUAPCommercial +} + // Unmount is a platform-specific helper function to call // the unmount syscall. Not supported on Windows func Unmount(dest string) error { diff --git a/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone_go16.go b/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone_go16.go deleted file mode 100644 index 0b816650e..000000000 --- a/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone_go16.go +++ /dev/null @@ -1,31 +0,0 @@ -// +build go1.6,!go1.7 - -package tlsconfig - -import "crypto/tls" - -// Clone returns a clone of tls.Config. This function is provided for -// compatibility for go1.6 that doesn't include this method in stdlib. 
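A minimal sketch of EnsureRemoveAll as described above, with an invented directory path; the point is that, unlike a bare os.RemoveAll, it unmounts anything under the directory and retries EBUSY before giving up.

package main

import (
	"log"

	"github.com/docker/docker/pkg/system"
)

func main() {
	// Attempts RecursiveUnmount of anything below the directory first,
	// then removes it, retrying paths that return EBUSY a few times.
	if err := system.EnsureRemoveAll("/var/lib/example/container-123"); err != nil {
		log.Fatal(err)
	}
}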
-func Clone(c *tls.Config) *tls.Config { - return &tls.Config{ - Rand: c.Rand, - Time: c.Time, - Certificates: c.Certificates, - NameToCertificate: c.NameToCertificate, - GetCertificate: c.GetCertificate, - RootCAs: c.RootCAs, - NextProtos: c.NextProtos, - ServerName: c.ServerName, - ClientAuth: c.ClientAuth, - ClientCAs: c.ClientCAs, - InsecureSkipVerify: c.InsecureSkipVerify, - CipherSuites: c.CipherSuites, - PreferServerCipherSuites: c.PreferServerCipherSuites, - SessionTicketsDisabled: c.SessionTicketsDisabled, - SessionTicketKey: c.SessionTicketKey, - ClientSessionCache: c.ClientSessionCache, - MinVersion: c.MinVersion, - MaxVersion: c.MaxVersion, - CurvePreferences: c.CurvePreferences, - } -} diff --git a/vendor/github.com/docker/docker/vendor.conf b/vendor/github.com/docker/docker/vendor.conf index 9632ed26a..98db7a977 100644 --- a/vendor/github.com/docker/docker/vendor.conf +++ b/vendor/github.com/docker/docker/vendor.conf @@ -1,44 +1,49 @@ # the following lines are in sorted order, FYI github.com/Azure/go-ansiterm 388960b655244e76e24c75f48631564eaefade62 -github.com/Microsoft/hcsshim v0.5.13 -github.com/Microsoft/go-winio v0.3.9 +github.com/Microsoft/hcsshim v0.5.25 +github.com/Microsoft/go-winio v0.4.2 github.com/Sirupsen/logrus v0.11.0 github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76 github.com/docker/libtrust 9cbd2a1374f46905c68a4eb3694a130610adc62a github.com/go-check/check 4ed411733c5785b40214c70bce814c3a3a689609 https://github.com/cpuguy83/check.git github.com/gorilla/context v1.1 github.com/gorilla/mux v1.1 +github.com/jhowardmsft/opengcs v0.0.9 github.com/kr/pty 5cf931ef8f github.com/mattn/go-shellwords v1.0.3 github.com/tchap/go-patricia v2.2.6 github.com/vdemeester/shakers 24d7f1d6a71aa5d9cbe7390e4afb66b7eef9e1b3 -# forked golang.org/x/net package includes a patch for lazy loading trace templates -golang.org/x/net c427ad74c6d7a814201695e9ffde0c5d400a7674 +golang.org/x/net 7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6 golang.org/x/sys 8f0908ab3b2457e2e15403d3697c9ef5cb4b57a9 github.com/docker/go-units 9e638d38cf6977a37a8ea0078f3ee75a7cdb2dd1 -github.com/docker/go-connections e15c02316c12de00874640cd76311849de2aeed5 +github.com/docker/go-connections 3ede32e2033de7505e6500d6c868c2b9ed9f169d golang.org/x/text f72d8390a633d5dfb0cc84043294db9f6c935756 github.com/stretchr/testify 4d4bfba8f1d1027c4fdbe371823030df51419987 +github.com/pmezard/go-difflib v1.0.0 github.com/RackSec/srslog 456df3a81436d29ba874f3590eeeee25d666f8a5 github.com/imdario/mergo 0.2.1 +golang.org/x/sync de49d9dcd27d4f764488181bea099dfe6179bcf0 #get libnetwork packages -github.com/docker/libnetwork 5dc95a3f9ce4b70bf08492ca37ec55c5b6d84975 +github.com/docker/libnetwork 6426d1e66f33c0b0c8bb135b7ee547447f54d043 github.com/docker/go-events 18b43f1bc85d9cdd42c05a6cd2d444c7a200a894 github.com/armon/go-radix e39d623f12e8e41c7b5529e9a9dd67a1e2261f80 github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec github.com/hashicorp/go-msgpack 71c2886f5a673a35f909803f38ece5810165097b -github.com/hashicorp/memberlist 88ac4de0d1a0ca6def284b571342db3b777a4c37 +github.com/hashicorp/memberlist v0.1.0 +github.com/sean-/seed e2103e2c35297fb7e17febb81e49b312087a2372 +github.com/hashicorp/go-sockaddr acd314c5781ea706c710d9ea70069fd2e110d61d github.com/hashicorp/go-multierror fcdddc395df1ddf4247c69bd436e84cfa0733f7e github.com/hashicorp/serf 598c54895cc5a7b1a24a398d635e8c0ea0959870 github.com/docker/libkv 1d8431073ae03cdaedb198a89722f3aab6d418ef github.com/vishvananda/netns 
604eaf189ee867d8c147fafc28def2394e878d25 -github.com/vishvananda/netlink 1e86b2bee5b6a7d377e4c02bb7f98209d6a7297c +github.com/vishvananda/netlink bd6d5de5ccef2d66b0a26177928d0d8895d7f969 github.com/BurntSushi/toml f706d00e3de6abe700c994cdd545a1a4915af060 github.com/samuel/go-zookeeper d0e0d8e11f318e000a8cc434616d69e329edc374 github.com/deckarep/golang-set ef32fa3046d9f249d399f98ebaf9be944430fd1d -github.com/coreos/etcd 824277cb3a577a0e8c829ca9ec557b973fe06d20 +github.com/coreos/etcd v3.2.1 +github.com/coreos/go-semver v0.2.0 github.com/ugorji/go f1f1a805ed361a0e078bb537e4ea78cd37dcf065 github.com/hashicorp/consul v0.5.2 github.com/boltdb/bolt fff57c100f4dea1905678da7e90d92429dff2904 @@ -53,23 +58,20 @@ github.com/opencontainers/go-digest a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb github.com/mistifyio/go-zfs 22c9b32c84eb0d0c6f4043b6e90fc94073de92fa github.com/pborman/uuid v1.0 -# get desired notary commit, might also need to be updated in Dockerfile -github.com/docker/notary v0.4.2 - -google.golang.org/grpc v1.0.4 -github.com/miekg/pkcs11 df8ae6ca730422dba20c768ff38ef7d79077a59f -github.com/docker/go v1.5.1-1-1-gbaf439e -github.com/agl/ed25519 d2b94fd789ea21d12fac1a4443dd3a3f79cda72c +google.golang.org/grpc v1.3.0 # When updating, also update RUNC_COMMIT in hack/dockerfile/binaries-commits accordingly -github.com/opencontainers/runc 9c2d8d184e5da67c95d601382adf14862e4f2228 https://github.com/docker/runc.git # libcontainer -github.com/opencontainers/runtime-spec 1c7c27d043c2a5e513a44084d2b10d77d1402b8c # specs -github.com/seccomp/libseccomp-golang v0.9.0 +github.com/opencontainers/runc 2d41c047c83e09a6d61d464906feb2a2f3c52aa4 https://github.com/docker/runc +github.com/opencontainers/image-spec f03dbe35d449c54915d235f1a3cf8f585a24babe +github.com/opencontainers/runtime-spec d42f1eb741e6361e858d83fc75aa6893b66292c4 # specs + +github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb237f3ce0 + # libcontainer deps (see src/github.com/opencontainers/runc/Godeps/Godeps.json) github.com/coreos/go-systemd v4 github.com/godbus/dbus v4.0.0 github.com/syndtr/gocapability 2c00daeb6c3b45114c80ac44119e7b8801fdd852 -github.com/golang/protobuf 8ee79997227bf9b34611aee7946ae64735e6fd93 +github.com/golang/protobuf 7a211bcf3bce0e3f1d74f9894916e6f116ae83b4 # gelf logging driver deps github.com/Graylog2/go-gelf 7029da823dad4ef3a876df61065156acb703b2ea @@ -95,18 +97,17 @@ golang.org/x/oauth2 96382aa079b72d8c014eb0c50f6c223d1e6a2de0 google.golang.org/api 3cc2e591b550923a2c5f0ab5a803feda924d5823 cloud.google.com/go 9d965e63e8cceb1b5d7977a202f0fcb8866d6525 github.com/googleapis/gax-go da06d194a00e19ce00d9011a13931c3f6f6887c7 -google.golang.org/genproto b3e7c2fb04031add52c4817f53f43757ccbf9c18 - -# native credentials -github.com/docker/docker-credential-helpers v0.5.0 +google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944 # containerd -github.com/docker/containerd 9048e5e50717ea4497b757314bad98ea3763c145 +github.com/containerd/containerd 3addd840653146c90a254301d6c3a663c7fd6429 github.com/tonistiigi/fifo 1405643975692217d6720f8b54aeee1bf2cd5cf4 +github.com/stevvooe/continuity cd7a8e21e2b6f84799f5dd4b65faf49c8d3ee02d +github.com/tonistiigi/fsutil 0ac4c11b053b9c5c7c47558f81f96c7100ce50fb # cluster -github.com/docker/swarmkit 61a92e8ec074df5769decda985df4a3ab43c77eb -github.com/gogo/protobuf 8d70fb3182befc465c4a1eac8ad4d38ff49778e2 +github.com/docker/swarmkit 79381d0840be27f8b3f5c667b348a4467d866eeb +github.com/gogo/protobuf v0.4 github.com/cloudflare/cfssl 7fb22c8cba7ecaf98e4082d22d65800cf45e042a 
github.com/google/certificate-transparency d90e65c3a07988180c5b1ece71791c0b6506826e golang.org/x/crypto 3fbbcd23f1cb824e69491a5930cfeff09b12f4d2 @@ -129,15 +130,9 @@ github.com/grpc-ecosystem/go-grpc-prometheus 6b7015e65d366bf3f19b2b2a000a831940f github.com/spf13/cobra v1.5.1 https://github.com/dnephin/cobra.git github.com/spf13/pflag 9ff6c6923cfffbcd502984b8e0c80539a94968b7 github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 -github.com/flynn-archive/go-shlex 3f9db97f856818214da2e1057f8ad84803971cff github.com/Nvveen/Gotty a8b993ba6abdb0e0c12b0125c603323a71c7790c https://github.com/ijc25/Gotty # metrics -github.com/docker/go-metrics 86138d05f285fd9737a99bee2d9be30866b59d72 +github.com/docker/go-metrics d466d4f6fd960e01820085bd7e1a24426ee7ef18 -# composefile -github.com/mitchellh/mapstructure f3009df150dadf309fdee4a54ed65c124afad715 -github.com/xeipuuv/gojsonpointer e0fe6f68307607d540ed8eac07a342c33fa1b54a -github.com/xeipuuv/gojsonreference e02fc20de94c78484cd5ffb007f8af96be030a45 -github.com/xeipuuv/gojsonschema 93e72a773fade158921402d6a24c819b48aba29d -gopkg.in/yaml.v2 4c78c975fe7c825c6d1466c42be594d1d6f3aba6 +github.com/opencontainers/selinux v1.0.0-rc1 diff --git a/vendor/github.com/gogo/protobuf/LICENSE b/vendor/github.com/gogo/protobuf/LICENSE new file mode 100644 index 000000000..7be0cc7b6 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/LICENSE @@ -0,0 +1,36 @@ +Protocol Buffers for Go with Gadgets + +Copyright (c) 2013, The GoGo Authors. All rights reserved. +http://github.com/gogo/protobuf + +Go support for Protocol Buffers - Google's data interchange format + +Copyright 2010 The Go Authors. All rights reserved. +https://github.com/golang/protobuf + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/vendor/github.com/gogo/protobuf/README b/vendor/github.com/gogo/protobuf/README new file mode 100644 index 000000000..0ad513633 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/README @@ -0,0 +1,258 @@ +GoGoProtobuf http://github.com/gogo/protobuf extends +GoProtobuf http://github.com/golang/protobuf + +# Go support for Protocol Buffers + +Google's data interchange format. 
+Copyright 2010 The Go Authors. +https://github.com/golang/protobuf + +This package and the code it generates requires at least Go 1.4. + +This software implements Go bindings for protocol buffers. For +information about protocol buffers themselves, see + https://developers.google.com/protocol-buffers/ + +## Installation ## + +To use this software, you must: +- Install the standard C++ implementation of protocol buffers from + https://developers.google.com/protocol-buffers/ +- Of course, install the Go compiler and tools from + https://golang.org/ + See + https://golang.org/doc/install + for details or, if you are using gccgo, follow the instructions at + https://golang.org/doc/install/gccgo +- Grab the code from the repository and install the proto package. + The simplest way is to run `go get -u github.com/golang/protobuf/{proto,protoc-gen-go}`. + The compiler plugin, protoc-gen-go, will be installed in $GOBIN, + defaulting to $GOPATH/bin. It must be in your $PATH for the protocol + compiler, protoc, to find it. + +This software has two parts: a 'protocol compiler plugin' that +generates Go source files that, once compiled, can access and manage +protocol buffers; and a library that implements run-time support for +encoding (marshaling), decoding (unmarshaling), and accessing protocol +buffers. + +There is support for gRPC in Go using protocol buffers. +See the note at the bottom of this file for details. + +There are no insertion points in the plugin. + +GoGoProtobuf provides extensions for protocol buffers and GoProtobuf +see http://github.com/gogo/protobuf/gogoproto/doc.go + +## Using protocol buffers with Go ## + +Once the software is installed, there are two steps to using it. +First you must compile the protocol buffer definitions and then import +them, with the support library, into your program. + +To compile the protocol buffer definition, run protoc with the --gogo_out +parameter set to the directory you want to output the Go code to. + + protoc --gogo_out=. *.proto + +The generated files will be suffixed .pb.go. See the Test code below +for an example using such a file. + +The package comment for the proto library contains text describing +the interface provided in Go for protocol buffers. Here is an edited +version. + +If you are using any gogo.proto extensions you will need to specify the +proto_path to include the descriptor.proto and gogo.proto. +gogo.proto is located in github.com/gogo/protobuf/gogoproto +This should be fine, since your import is the same. +descriptor.proto is located in either github.com/gogo/protobuf/protobuf +or code.google.com/p/protobuf/trunk/src/ +Its import is google/protobuf/descriptor.proto so it might need some help. + + protoc --gogo_out=. -I=.:github.com/gogo/protobuf/protobuf *.proto + +========== + +The proto package converts data structures to and from the +wire format of protocol buffers. It works in concert with the +Go source code generated for .proto files by the protocol compiler. + +A summary of the properties of the protocol buffer interface +for a protocol buffer variable v: + + - Names are turned from camel_case to CamelCase for export. + - There are no methods on v to set fields; just treat + them as structure fields. + - There are getters that return a field's value if set, + and return the field's default value if unset. + The getters work even if the receiver is a nil message. + - The zero value for a struct is its correct initialization state. + All desired fields must be set before marshaling. 
+ - A Reset() method will restore a protobuf struct to its zero state. + - Non-repeated fields are pointers to the values; nil means unset. + That is, optional or required field int32 f becomes F *int32. + - Repeated fields are slices. + - Helper functions are available to aid the setting of fields. + Helpers for getting values are superseded by the + GetFoo methods and their use is deprecated. + msg.Foo = proto.String("hello") // set field + - Constants are defined to hold the default values of all fields that + have them. They have the form Default_StructName_FieldName. + Because the getter methods handle defaulted values, + direct use of these constants should be rare. + - Enums are given type names and maps from names to values. + Enum values are prefixed with the enum's type name. Enum types have + a String method, and a Enum method to assist in message construction. + - Nested groups and enums have type names prefixed with the name of + the surrounding message type. + - Extensions are given descriptor names that start with E_, + followed by an underscore-delimited list of the nested messages + that contain it (if any) followed by the CamelCased name of the + extension field itself. HasExtension, ClearExtension, GetExtension + and SetExtension are functions for manipulating extensions. + - Oneof field sets are given a single field in their message, + with distinguished wrapper types for each possible field value. + - Marshal and Unmarshal are functions to encode and decode the wire format. + +When the .proto file specifies `syntax="proto3"`, there are some differences: + + - Non-repeated fields of non-message type are values instead of pointers. + - Getters are only generated for message and oneof fields. + - Enum types do not get an Enum method. + +Consider file test.proto, containing + +```proto + package example; + + enum FOO { X = 17; }; + + message Test { + required string label = 1; + optional int32 type = 2 [default=77]; + repeated int64 reps = 3; + optional group OptionalGroup = 4 { + required string RequiredField = 5; + } + } +``` + +To create and play with a Test object from the example package, + +```go + package main + + import ( + "log" + + "github.com/gogo/protobuf/proto" + "path/to/example" + ) + + func main() { + test := &example.Test { + Label: proto.String("hello"), + Type: proto.Int32(17), + Reps: []int64{1, 2, 3}, + Optionalgroup: &example.Test_OptionalGroup { + RequiredField: proto.String("good bye"), + }, + } + data, err := proto.Marshal(test) + if err != nil { + log.Fatal("marshaling error: ", err) + } + newTest := &example.Test{} + err = proto.Unmarshal(data, newTest) + if err != nil { + log.Fatal("unmarshaling error: ", err) + } + // Now test and newTest contain the same data. + if test.GetLabel() != newTest.GetLabel() { + log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) + } + // etc. + } +``` + + +## Parameters ## + +To pass extra parameters to the plugin, use a comma-separated +parameter list separated from the output directory by a colon: + + + protoc --gogo_out=plugins=grpc,import_path=mypackage:. *.proto + + +- `import_prefix=xxx` - a prefix that is added onto the beginning of + all imports. Useful for things like generating protos in a + subdirectory, or regenerating vendored protobufs in-place. +- `import_path=foo/bar` - used as the package if no input files + declare `go_package`. If it contains slashes, everything up to the + rightmost slash is ignored. 
+- `plugins=plugin1+plugin2` - specifies the list of sub-plugins to + load. The only plugin in this repo is `grpc`. +- `Mfoo/bar.proto=quux/shme` - declares that foo/bar.proto is + associated with Go package quux/shme. This is subject to the + import_prefix parameter. + +## gRPC Support ## + +If a proto file specifies RPC services, protoc-gen-go can be instructed to +generate code compatible with gRPC (http://www.grpc.io/). To do this, pass +the `plugins` parameter to protoc-gen-go; the usual way is to insert it into +the --go_out argument to protoc: + + protoc --gogo_out=plugins=grpc:. *.proto + +## Compatibility ## + +The library and the generated code are expected to be stable over time. +However, we reserve the right to make breaking changes without notice for the +following reasons: + +- Security. A security issue in the specification or implementation may come to + light whose resolution requires breaking compatibility. We reserve the right + to address such security issues. +- Unspecified behavior. There are some aspects of the Protocol Buffers + specification that are undefined. Programs that depend on such unspecified + behavior may break in future releases. +- Specification errors or changes. If it becomes necessary to address an + inconsistency, incompleteness, or change in the Protocol Buffers + specification, resolving the issue could affect the meaning or legality of + existing programs. We reserve the right to address such issues, including + updating the implementations. +- Bugs. If the library has a bug that violates the specification, a program + that depends on the buggy behavior may break if the bug is fixed. We reserve + the right to fix such bugs. +- Adding methods or fields to generated structs. These may conflict with field + names that already exist in a schema, causing applications to break. When the + code generator encounters a field in the schema that would collide with a + generated field or method name, the code generator will append an underscore + to the generated field or method name. +- Adding, removing, or changing methods or fields in generated structs that + start with `XXX`. These parts of the generated code are exported out of + necessity, but should not be considered part of the public API. +- Adding, removing, or changing unexported symbols in generated code. + +Any breaking changes outside of these will be announced 6 months in advance to +protobuf@googlegroups.com. + +You should, whenever possible, use generated code created by the `protoc-gen-go` +tool built at the same commit as the `proto` package. The `proto` package +declares package-level constants in the form `ProtoPackageIsVersionX`. +Application code and generated code may depend on one of these constants to +ensure that compilation will fail if the available version of the proto library +is too old. Whenever we make a change to the generated code that requires newer +library support, in the same commit we will increment the version number of the +generated code and declare a new package-level constant whose name incorporates +the latest version number. Removing a compatibility constant is considered a +breaking change and would be subject to the announcement policy stated above. + +## Plugins ## + +The `protoc-gen-go/generator` package exposes a plugin interface, +which is used by the gRPC code generation. This interface is not +supported and is subject to incompatible changes without notice. 
diff --git a/vendor/github.com/gogo/protobuf/Readme.md b/vendor/github.com/gogo/protobuf/Readme.md new file mode 100644 index 000000000..d31c13f83 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/Readme.md @@ -0,0 +1,117 @@ +# Protocol Buffers for Go with Gadgets + +[![Build Status](https://travis-ci.org/gogo/protobuf.svg?branch=master)](https://travis-ci.org/gogo/protobuf) + +gogoprotobuf is a fork of golang/protobuf with extra code generation features. + +This code generation is used to achieve: + + - fast marshalling and unmarshalling + - more canonical Go structures + - goprotobuf compatibility + - less typing by optionally generating extra helper code + - peace of mind by optionally generating test and benchmark code + - other serialization formats + +Keeping track of how up to date gogoprotobuf is relative to golang/protobuf is done in this +issue + +## Users + +These projects use gogoprotobuf: + + - etcd - blog - sample proto file + - spacemonkey - blog + - badoo - sample proto file + - mesos-go - sample proto file + - heka - the switch from golang/protobuf to gogo/protobuf when it was still on code.google.com + - cockroachdb - sample proto file + - go-ipfs - sample proto file + - rkive-go - sample proto file + - dropbox + - srclib - sample proto file + - adyoulike + - cloudfoundry - sample proto file + - kubernetes - go2idl built on top of gogoprotobuf + - dgraph - release notes - benchmarks + - centrifugo - release notes - blog + - docker swarmkit - sample proto file + - nats.io - go-nats-streaming + - tidb - Communication between tidb and tikv + - protoactor-go - vanity command that also generates actors from service definitions + +Please lets us know if you are using gogoprotobuf by posting on our GoogleGroup. + +### Mentioned + + - Cloudflare - go serialization talk - Albert Strasheim + - gophercon + - alecthomas' go serialization benchmarks + +## Getting Started + +There are several ways to use gogoprotobuf, but for all you need to install go and protoc. +After that you can choose: + + - Speed + - More Speed and more generated code + - Most Speed and most customization + +### Installation + +To install it, you must first have Go (at least version 1.6.3) installed (see [http://golang.org/doc/install](http://golang.org/doc/install)). Go 1.7.1 and 1.8 is continuously tested. + +Next, install the standard protocol buffer implementation from [https://github.com/google/protobuf](https://github.com/google/protobuf). +Most versions from 2.3.1 should not give any problems, but 2.6.1, 3.0.2 and 3.1.0 are continuously tested. + +### Speed + +Install the protoc-gen-gofast binary + + go get github.com/gogo/protobuf/protoc-gen-gofast + +Use it to generate faster marshaling and unmarshaling go code for your protocol buffers. + + protoc --gofast_out=. myproto.proto + +This does not allow you to use any of the other gogoprotobuf [extensions](https://github.com/gogo/protobuf/blob/master/extensions.md). + +### More Speed and more generated code + +Fields without pointers cause less time in the garbage collector. +More code generation results in more convenient methods. + +Other binaries are also included: + + protoc-gen-gogofast (same as gofast, but imports gogoprotobuf) + protoc-gen-gogofaster (same as gogofast, without XXX_unrecognized, less pointer fields) + protoc-gen-gogoslick (same as gogofaster, but with generated string, gostring and equal methods) + +Installing any of these binaries is easy. 
+Simply run:
+
+    go get github.com/gogo/protobuf/proto
+    go get github.com/gogo/protobuf/{binary}
+    go get github.com/gogo/protobuf/gogoproto
+
+These binaries allow you to use gogoprotobuf [extensions](https://github.com/gogo/protobuf/blob/master/extensions.md).
+
+### Most Speed and most customization
+
+Customizing the fields of the messages to be the fields that you actually want to use removes the need to copy between the structs you use and the structs you use to serialize.
+gogoprotobuf also offers more serialization formats and generation of tests and even more methods.
+
+Please visit the [extensions](https://github.com/gogo/protobuf/blob/master/extensions.md) page for more documentation.
+
+Install protoc-gen-gogo:
+
+    go get github.com/gogo/protobuf/proto
+    go get github.com/gogo/protobuf/jsonpb
+    go get github.com/gogo/protobuf/protoc-gen-gogo
+    go get github.com/gogo/protobuf/gogoproto
+
+## GRPC
+
+It works the same as golang/protobuf: simply specify the plugin.
+Here is an example using gofast:
+
+    protoc --gofast_out=plugins=grpc:. my.proto
diff --git a/vendor/github.com/gogo/protobuf/proto/clone.go b/vendor/github.com/gogo/protobuf/proto/clone.go
new file mode 100644
index 000000000..5d4cba4b5
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/clone.go
@@ -0,0 +1,234 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//   * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//   * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//   * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer deep copy and merge.
+// TODO: RawMessage.
+
+package proto
+
+import (
+	"log"
+	"reflect"
+	"strings"
+)
+
+// Clone returns a deep copy of a protocol buffer.
+func Clone(pb Message) Message {
+	in := reflect.ValueOf(pb)
+	if in.IsNil() {
+		return pb
+	}
+
+	out := reflect.New(in.Type().Elem())
+	// out is empty so a merge is a deep copy.
+	mergeStruct(out.Elem(), in.Elem())
+	return out.Interface().(Message)
+}
+
+// Merge merges src into dst.
+// Required and optional fields that are set in src will be set to that value in dst. +// Elements of repeated fields will be appended. +// Merge panics if src and dst are not the same type, or if dst is nil. +func Merge(dst, src Message) { + in := reflect.ValueOf(src) + out := reflect.ValueOf(dst) + if out.IsNil() { + panic("proto: nil destination") + } + if in.Type() != out.Type() { + // Explicit test prior to mergeStruct so that mistyped nils will fail + panic("proto: type mismatch") + } + if in.IsNil() { + // Merging nil into non-nil is a quiet no-op + return + } + mergeStruct(out.Elem(), in.Elem()) +} + +func mergeStruct(out, in reflect.Value) { + sprop := GetProperties(in.Type()) + for i := 0; i < in.NumField(); i++ { + f := in.Type().Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) + } + + if emIn, ok := in.Addr().Interface().(extensionsBytes); ok { + emOut := out.Addr().Interface().(extensionsBytes) + bIn := emIn.GetExtensions() + bOut := emOut.GetExtensions() + *bOut = append(*bOut, *bIn...) + } else if emIn, ok := extendable(in.Addr().Interface()); ok { + emOut, _ := extendable(out.Addr().Interface()) + mIn, muIn := emIn.extensionsRead() + if mIn != nil { + mOut := emOut.extensionsWrite() + muIn.Lock() + mergeExtension(mOut, mIn) + muIn.Unlock() + } + } + + uf := in.FieldByName("XXX_unrecognized") + if !uf.IsValid() { + return + } + uin := uf.Bytes() + if len(uin) > 0 { + out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...)) + } +} + +// mergeAny performs a merge between two values of the same type. +// viaPtr indicates whether the values were indirected through a pointer (implying proto2). +// prop is set if this is a struct field (it may be nil). +func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) { + if in.Type() == protoMessageType { + if !in.IsNil() { + if out.IsNil() { + out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) + } else { + Merge(out.Interface().(Message), in.Interface().(Message)) + } + } + return + } + switch in.Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + if !viaPtr && isProto3Zero(in) { + return + } + out.Set(in) + case reflect.Interface: + // Probably a oneof field; copy non-nil values. + if in.IsNil() { + return + } + // Allocate destination if it is not set, or set to a different type. + // Otherwise we will merge as normal. + if out.IsNil() || out.Elem().Type() != in.Elem().Type() { + out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T) + } + mergeAny(out.Elem(), in.Elem(), false, nil) + case reflect.Map: + if in.Len() == 0 { + return + } + if out.IsNil() { + out.Set(reflect.MakeMap(in.Type())) + } + // For maps with value types of *T or []byte we need to deep copy each value. 
+ elemKind := in.Type().Elem().Kind() + for _, key := range in.MapKeys() { + var val reflect.Value + switch elemKind { + case reflect.Ptr: + val = reflect.New(in.Type().Elem().Elem()) + mergeAny(val, in.MapIndex(key), false, nil) + case reflect.Slice: + val = in.MapIndex(key) + val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) + default: + val = in.MapIndex(key) + } + out.SetMapIndex(key, val) + } + case reflect.Ptr: + if in.IsNil() { + return + } + if out.IsNil() { + out.Set(reflect.New(in.Elem().Type())) + } + mergeAny(out.Elem(), in.Elem(), true, nil) + case reflect.Slice: + if in.IsNil() { + return + } + if in.Type().Elem().Kind() == reflect.Uint8 { + // []byte is a scalar bytes field, not a repeated field. + + // Edge case: if this is in a proto3 message, a zero length + // bytes field is considered the zero value, and should not + // be merged. + if prop != nil && prop.proto3 && in.Len() == 0 { + return + } + + // Make a deep copy. + // Append to []byte{} instead of []byte(nil) so that we never end up + // with a nil result. + out.SetBytes(append([]byte{}, in.Bytes()...)) + return + } + n := in.Len() + if out.IsNil() { + out.Set(reflect.MakeSlice(in.Type(), 0, n)) + } + switch in.Type().Elem().Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + out.Set(reflect.AppendSlice(out, in)) + default: + for i := 0; i < n; i++ { + x := reflect.Indirect(reflect.New(in.Type().Elem())) + mergeAny(x, in.Index(i), false, nil) + out.Set(reflect.Append(out, x)) + } + } + case reflect.Struct: + mergeStruct(out, in) + default: + // unknown type, so not a protocol buffer + log.Printf("proto: don't know how to copy %v", in) + } +} + +func mergeExtension(out, in map[int32]Extension) { + for extNum, eIn := range in { + eOut := Extension{desc: eIn.desc} + if eIn.value != nil { + v := reflect.New(reflect.TypeOf(eIn.value)).Elem() + mergeAny(v, reflect.ValueOf(eIn.value), false, nil) + eOut.value = v.Interface() + } + if eIn.enc != nil { + eOut.enc = make([]byte, len(eIn.enc)) + copy(eOut.enc, eIn.enc) + } + + out[extNum] = eOut + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/decode.go b/vendor/github.com/gogo/protobuf/proto/decode.go new file mode 100644 index 000000000..737f2731d --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/decode.go @@ -0,0 +1,978 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for decoding protocol buffer data to construct in-memory representations. + */ + +import ( + "errors" + "fmt" + "io" + "os" + "reflect" +) + +// errOverflow is returned when an integer is too large to be represented. +var errOverflow = errors.New("proto: integer overflow") + +// ErrInternalBadWireType is returned by generated code when an incorrect +// wire type is encountered. It does not get returned to user code. +var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") + +// The fundamental decoders that interpret bytes on the wire. +// Those that take integer types all return uint64 and are +// therefore of type valueDecoder. + +// DecodeVarint reads a varint-encoded integer from the slice. +// It returns the integer and the number of bytes consumed, or +// zero if there is not enough. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func DecodeVarint(buf []byte) (x uint64, n int) { + for shift := uint(0); shift < 64; shift += 7 { + if n >= len(buf) { + return 0, 0 + } + b := uint64(buf[n]) + n++ + x |= (b & 0x7F) << shift + if (b & 0x80) == 0 { + return x, n + } + } + + // The number is too large to represent in a 64-bit value. + return 0, 0 +} + +func (p *Buffer) decodeVarintSlow() (x uint64, err error) { + i := p.index + l := len(p.buf) + + for shift := uint(0); shift < 64; shift += 7 { + if i >= l { + err = io.ErrUnexpectedEOF + return + } + b := p.buf[i] + i++ + x |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + p.index = i + return + } + } + + // The number is too large to represent in a 64-bit value. + err = errOverflow + return +} + +// DecodeVarint reads a varint-encoded integer from the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. 
+func (p *Buffer) DecodeVarint() (x uint64, err error) { + i := p.index + buf := p.buf + + if i >= len(buf) { + return 0, io.ErrUnexpectedEOF + } else if buf[i] < 0x80 { + p.index++ + return uint64(buf[i]), nil + } else if len(buf)-i < 10 { + return p.decodeVarintSlow() + } + + var b uint64 + // we already checked the first byte + x = uint64(buf[i]) - 0x80 + i++ + + b = uint64(buf[i]) + i++ + x += b << 7 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 7 + + b = uint64(buf[i]) + i++ + x += b << 14 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 14 + + b = uint64(buf[i]) + i++ + x += b << 21 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 21 + + b = uint64(buf[i]) + i++ + x += b << 28 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 28 + + b = uint64(buf[i]) + i++ + x += b << 35 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 35 + + b = uint64(buf[i]) + i++ + x += b << 42 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 42 + + b = uint64(buf[i]) + i++ + x += b << 49 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 49 + + b = uint64(buf[i]) + i++ + x += b << 56 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 56 + + b = uint64(buf[i]) + i++ + x += b << 63 + if b&0x80 == 0 { + goto done + } + // x -= 0x80 << 63 // Always zero. + + return 0, errOverflow + +done: + p.index = i + return x, nil +} + +// DecodeFixed64 reads a 64-bit integer from the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (p *Buffer) DecodeFixed64() (x uint64, err error) { + // x, err already 0 + i := p.index + 8 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-8]) + x |= uint64(p.buf[i-7]) << 8 + x |= uint64(p.buf[i-6]) << 16 + x |= uint64(p.buf[i-5]) << 24 + x |= uint64(p.buf[i-4]) << 32 + x |= uint64(p.buf[i-3]) << 40 + x |= uint64(p.buf[i-2]) << 48 + x |= uint64(p.buf[i-1]) << 56 + return +} + +// DecodeFixed32 reads a 32-bit integer from the Buffer. +// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. +func (p *Buffer) DecodeFixed32() (x uint64, err error) { + // x, err already 0 + i := p.index + 4 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-4]) + x |= uint64(p.buf[i-3]) << 8 + x |= uint64(p.buf[i-2]) << 16 + x |= uint64(p.buf[i-1]) << 24 + return +} + +// DecodeZigzag64 reads a zigzag-encoded 64-bit integer +// from the Buffer. +// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) DecodeZigzag64() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) + return +} + +// DecodeZigzag32 reads a zigzag-encoded 32-bit integer +// from the Buffer. +// This is the format used for the sint32 protocol buffer type. +func (p *Buffer) DecodeZigzag32() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) + return +} + +// These are not ValueDecoders: they produce an array of bytes or a string. +// bytes, embedded messages + +// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. 
+func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { + n, err := p.DecodeVarint() + if err != nil { + return nil, err + } + + nb := int(n) + if nb < 0 { + return nil, fmt.Errorf("proto: bad byte length %d", nb) + } + end := p.index + nb + if end < p.index || end > len(p.buf) { + return nil, io.ErrUnexpectedEOF + } + + if !alloc { + // todo: check if can get more uses of alloc=false + buf = p.buf[p.index:end] + p.index += nb + return + } + + buf = make([]byte, nb) + copy(buf, p.buf[p.index:]) + p.index += nb + return +} + +// DecodeStringBytes reads an encoded string from the Buffer. +// This is the format used for the proto2 string type. +func (p *Buffer) DecodeStringBytes() (s string, err error) { + buf, err := p.DecodeRawBytes(false) + if err != nil { + return + } + return string(buf), nil +} + +// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. +// If the protocol buffer has extensions, and the field matches, add it as an extension. +// Otherwise, if the XXX_unrecognized field exists, append the skipped data there. +func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error { + oi := o.index + + err := o.skip(t, tag, wire) + if err != nil { + return err + } + + if !unrecField.IsValid() { + return nil + } + + ptr := structPointer_Bytes(base, unrecField) + + // Add the skipped field to struct field + obuf := o.buf + + o.buf = *ptr + o.EncodeVarint(uint64(tag<<3 | wire)) + *ptr = append(o.buf, obuf[oi:o.index]...) + + o.buf = obuf + + return nil +} + +// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. +func (o *Buffer) skip(t reflect.Type, tag, wire int) error { + + var u uint64 + var err error + + switch wire { + case WireVarint: + _, err = o.DecodeVarint() + case WireFixed64: + _, err = o.DecodeFixed64() + case WireBytes: + _, err = o.DecodeRawBytes(false) + case WireFixed32: + _, err = o.DecodeFixed32() + case WireStartGroup: + for { + u, err = o.DecodeVarint() + if err != nil { + break + } + fwire := int(u & 0x7) + if fwire == WireEndGroup { + break + } + ftag := int(u >> 3) + err = o.skip(t, ftag, fwire) + if err != nil { + break + } + } + default: + err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t) + } + return err +} + +// Unmarshaler is the interface representing objects that can +// unmarshal themselves. The method should reset the receiver before +// decoding starts. The argument points to data that may be +// overwritten, so implementations should not keep references to the +// buffer. +type Unmarshaler interface { + Unmarshal([]byte) error +} + +// Unmarshal parses the protocol buffer representation in buf and places the +// decoded result in pb. If the struct underlying pb does not match +// the data in buf, the results can be unpredictable. +// +// Unmarshal resets pb before starting to unmarshal, so any +// existing data in pb is always removed. Use UnmarshalMerge +// to preserve and append to existing data. +func Unmarshal(buf []byte, pb Message) error { + pb.Reset() + return UnmarshalMerge(buf, pb) +} + +// UnmarshalMerge parses the protocol buffer representation in buf and +// writes the decoded result to pb. If the struct underlying pb does not match +// the data in buf, the results can be unpredictable. +// +// UnmarshalMerge merges into existing data in pb. +// Most code should use Unmarshal instead. +func UnmarshalMerge(buf []byte, pb Message) error { + // If the object can unmarshal itself, let it. 
+ if u, ok := pb.(Unmarshaler); ok { + return u.Unmarshal(buf) + } + return NewBuffer(buf).Unmarshal(pb) +} + +// DecodeMessage reads a count-delimited message from the Buffer. +func (p *Buffer) DecodeMessage(pb Message) error { + enc, err := p.DecodeRawBytes(false) + if err != nil { + return err + } + return NewBuffer(enc).Unmarshal(pb) +} + +// DecodeGroup reads a tag-delimited group from the Buffer. +func (p *Buffer) DecodeGroup(pb Message) error { + typ, base, err := getbase(pb) + if err != nil { + return err + } + return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base) +} + +// Unmarshal parses the protocol buffer representation in the +// Buffer and places the decoded result in pb. If the struct +// underlying pb does not match the data in the buffer, the results can be +// unpredictable. +// +// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. +func (p *Buffer) Unmarshal(pb Message) error { + // If the object can unmarshal itself, let it. + if u, ok := pb.(Unmarshaler); ok { + err := u.Unmarshal(p.buf[p.index:]) + p.index = len(p.buf) + return err + } + + typ, base, err := getbase(pb) + if err != nil { + return err + } + + err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base) + + if collectStats { + stats.Decode++ + } + + return err +} + +// unmarshalType does the work of unmarshaling a structure. +func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error { + var state errorState + required, reqFields := prop.reqCount, uint64(0) + + var err error + for err == nil && o.index < len(o.buf) { + oi := o.index + var u uint64 + u, err = o.DecodeVarint() + if err != nil { + break + } + wire := int(u & 0x7) + if wire == WireEndGroup { + if is_group { + if required > 0 { + // Not enough information to determine the exact field. + // (See below.) + return &RequiredNotSetError{"{Unknown}"} + } + return nil // input is satisfied + } + return fmt.Errorf("proto: %s: wiretype end group for non-group", st) + } + tag := int(u >> 3) + if tag <= 0 { + return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire) + } + fieldnum, ok := prop.decoderTags.get(tag) + if !ok { + // Maybe it's an extension? + if prop.extendable { + if e, eok := structPointer_Interface(base, st).(extensionsBytes); eok { + if isExtensionField(e, int32(tag)) { + if err = o.skip(st, tag, wire); err == nil { + ext := e.GetExtensions() + *ext = append(*ext, o.buf[oi:o.index]...) + } + continue + } + } else if e, _ := extendable(structPointer_Interface(base, st)); isExtensionField(e, int32(tag)) { + if err = o.skip(st, tag, wire); err == nil { + extmap := e.extensionsWrite() + ext := extmap[int32(tag)] // may be missing + ext.enc = append(ext.enc, o.buf[oi:o.index]...) + extmap[int32(tag)] = ext + } + continue + } + } + // Maybe it's a oneof? + if prop.oneofUnmarshaler != nil { + m := structPointer_Interface(base, st).(Message) + // First return value indicates whether tag is a oneof field. + ok, err = prop.oneofUnmarshaler(m, tag, wire, o) + if err == ErrInternalBadWireType { + // Map the error to something more descriptive. + // Do the formatting here to save generated code space. 
+ err = fmt.Errorf("bad wiretype for oneof field in %T", m) + } + if ok { + continue + } + } + err = o.skipAndSave(st, tag, wire, base, prop.unrecField) + continue + } + p := prop.Prop[fieldnum] + + if p.dec == nil { + fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name) + continue + } + dec := p.dec + if wire != WireStartGroup && wire != p.WireType { + if wire == WireBytes && p.packedDec != nil { + // a packable field + dec = p.packedDec + } else { + err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType) + continue + } + } + decErr := dec(o, p, base) + if decErr != nil && !state.shouldContinue(decErr, p) { + err = decErr + } + if err == nil && p.Required { + // Successfully decoded a required field. + if tag <= 64 { + // use bitmap for fields 1-64 to catch field reuse. + var mask uint64 = 1 << uint64(tag-1) + if reqFields&mask == 0 { + // new required field + reqFields |= mask + required-- + } + } else { + // This is imprecise. It can be fooled by a required field + // with a tag > 64 that is encoded twice; that's very rare. + // A fully correct implementation would require allocating + // a data structure, which we would like to avoid. + required-- + } + } + } + if err == nil { + if is_group { + return io.ErrUnexpectedEOF + } + if state.err != nil { + return state.err + } + if required > 0 { + // Not enough information to determine the exact field. If we use extra + // CPU, we could determine the field only if the missing required field + // has a tag <= 64 and we check reqFields. + return &RequiredNotSetError{"{Unknown}"} + } + } + return err +} + +// Individual type decoders +// For each, +// u is the decoded value, +// v is a pointer to the field (pointer) in the struct + +// Sizes of the pools to allocate inside the Buffer. +// The goal is modest amortization and allocation +// on at least 16-byte boundaries. +const ( + boolPoolSize = 16 + uint32PoolSize = 8 + uint64PoolSize = 4 +) + +// Decode a bool. +func (o *Buffer) dec_bool(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + if len(o.bools) == 0 { + o.bools = make([]bool, boolPoolSize) + } + o.bools[0] = u != 0 + *structPointer_Bool(base, p.field) = &o.bools[0] + o.bools = o.bools[1:] + return nil +} + +func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + *structPointer_BoolVal(base, p.field) = u != 0 + return nil +} + +// Decode an int32. +func (o *Buffer) dec_int32(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word32_Set(structPointer_Word32(base, p.field), o, uint32(u)) + return nil +} + +func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u)) + return nil +} + +// Decode an int64. +func (o *Buffer) dec_int64(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word64_Set(structPointer_Word64(base, p.field), o, u) + return nil +} + +func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word64Val_Set(structPointer_Word64Val(base, p.field), o, u) + return nil +} + +// Decode a string. 
+func (o *Buffer) dec_string(p *Properties, base structPointer) error { + s, err := o.DecodeStringBytes() + if err != nil { + return err + } + *structPointer_String(base, p.field) = &s + return nil +} + +func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error { + s, err := o.DecodeStringBytes() + if err != nil { + return err + } + *structPointer_StringVal(base, p.field) = s + return nil +} + +// Decode a slice of bytes ([]byte). +func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error { + b, err := o.DecodeRawBytes(true) + if err != nil { + return err + } + *structPointer_Bytes(base, p.field) = b + return nil +} + +// Decode a slice of bools ([]bool). +func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + v := structPointer_BoolSlice(base, p.field) + *v = append(*v, u != 0) + return nil +} + +// Decode a slice of bools ([]bool) in packed format. +func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error { + v := structPointer_BoolSlice(base, p.field) + + nn, err := o.DecodeVarint() + if err != nil { + return err + } + nb := int(nn) // number of bytes of encoded bools + fin := o.index + nb + if fin < o.index { + return errOverflow + } + + y := *v + for o.index < fin { + u, err := p.valDec(o) + if err != nil { + return err + } + y = append(y, u != 0) + } + + *v = y + return nil +} + +// Decode a slice of int32s ([]int32). +func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + structPointer_Word32Slice(base, p.field).Append(uint32(u)) + return nil +} + +// Decode a slice of int32s ([]int32) in packed format. +func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error { + v := structPointer_Word32Slice(base, p.field) + + nn, err := o.DecodeVarint() + if err != nil { + return err + } + nb := int(nn) // number of bytes of encoded int32s + + fin := o.index + nb + if fin < o.index { + return errOverflow + } + for o.index < fin { + u, err := p.valDec(o) + if err != nil { + return err + } + v.Append(uint32(u)) + } + return nil +} + +// Decode a slice of int64s ([]int64). +func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + + structPointer_Word64Slice(base, p.field).Append(u) + return nil +} + +// Decode a slice of int64s ([]int64) in packed format. +func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error { + v := structPointer_Word64Slice(base, p.field) + + nn, err := o.DecodeVarint() + if err != nil { + return err + } + nb := int(nn) // number of bytes of encoded int64s + + fin := o.index + nb + if fin < o.index { + return errOverflow + } + for o.index < fin { + u, err := p.valDec(o) + if err != nil { + return err + } + v.Append(u) + } + return nil +} + +// Decode a slice of strings ([]string). +func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error { + s, err := o.DecodeStringBytes() + if err != nil { + return err + } + v := structPointer_StringSlice(base, p.field) + *v = append(*v, s) + return nil +} + +// Decode a slice of slice of bytes ([][]byte). +func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error { + b, err := o.DecodeRawBytes(true) + if err != nil { + return err + } + v := structPointer_BytesSlice(base, p.field) + *v = append(*v, b) + return nil +} + +// Decode a map field. 
+func (o *Buffer) dec_new_map(p *Properties, base structPointer) error { + raw, err := o.DecodeRawBytes(false) + if err != nil { + return err + } + oi := o.index // index at the end of this map entry + o.index -= len(raw) // move buffer back to start of map entry + + mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V + if mptr.Elem().IsNil() { + mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem())) + } + v := mptr.Elem() // map[K]V + + // Prepare addressable doubly-indirect placeholders for the key and value types. + // See enc_new_map for why. + keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K + keybase := toStructPointer(keyptr.Addr()) // **K + + var valbase structPointer + var valptr reflect.Value + switch p.mtype.Elem().Kind() { + case reflect.Slice: + // []byte + var dummy []byte + valptr = reflect.ValueOf(&dummy) // *[]byte + valbase = toStructPointer(valptr) // *[]byte + case reflect.Ptr: + // message; valptr is **Msg; need to allocate the intermediate pointer + valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V + valptr.Set(reflect.New(valptr.Type().Elem())) + valbase = toStructPointer(valptr) + default: + // everything else + valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V + valbase = toStructPointer(valptr.Addr()) // **V + } + + // Decode. + // This parses a restricted wire format, namely the encoding of a message + // with two fields. See enc_new_map for the format. + for o.index < oi { + // tagcode for key and value properties are always a single byte + // because they have tags 1 and 2. + tagcode := o.buf[o.index] + o.index++ + switch tagcode { + case p.mkeyprop.tagcode[0]: + if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil { + return err + } + case p.mvalprop.tagcode[0]: + if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil { + return err + } + default: + // TODO: Should we silently skip this instead? + return fmt.Errorf("proto: bad map data tag %d", raw[0]) + } + } + keyelem, valelem := keyptr.Elem(), valptr.Elem() + if !keyelem.IsValid() { + keyelem = reflect.Zero(p.mtype.Key()) + } + if !valelem.IsValid() { + valelem = reflect.Zero(p.mtype.Elem()) + } + + v.SetMapIndex(keyelem, valelem) + return nil +} + +// Decode a group. +func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error { + bas := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(bas) { + // allocate new nested message + bas = toStructPointer(reflect.New(p.stype)) + structPointer_SetStructPointer(base, p.field, bas) + } + return o.unmarshalType(p.stype, p.sprop, true, bas) +} + +// Decode an embedded message. +func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) { + raw, e := o.DecodeRawBytes(false) + if e != nil { + return e + } + + bas := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(bas) { + // allocate new nested message + bas = toStructPointer(reflect.New(p.stype)) + structPointer_SetStructPointer(base, p.field, bas) + } + + // If the object can unmarshal itself, let it. + if p.isUnmarshaler { + iv := structPointer_Interface(bas, p.stype) + return iv.(Unmarshaler).Unmarshal(raw) + } + + obuf := o.buf + oi := o.index + o.buf = raw + o.index = 0 + + err = o.unmarshalType(p.stype, p.sprop, false, bas) + o.buf = obuf + o.index = oi + + return err +} + +// Decode a slice of embedded messages. 
+func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error { + return o.dec_slice_struct(p, false, base) +} + +// Decode a slice of embedded groups. +func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error { + return o.dec_slice_struct(p, true, base) +} + +// Decode a slice of structs ([]*struct). +func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error { + v := reflect.New(p.stype) + bas := toStructPointer(v) + structPointer_StructPointerSlice(base, p.field).Append(bas) + + if is_group { + err := o.unmarshalType(p.stype, p.sprop, is_group, bas) + return err + } + + raw, err := o.DecodeRawBytes(false) + if err != nil { + return err + } + + // If the object can unmarshal itself, let it. + if p.isUnmarshaler { + iv := v.Interface() + return iv.(Unmarshaler).Unmarshal(raw) + } + + obuf := o.buf + oi := o.index + o.buf = raw + o.index = 0 + + err = o.unmarshalType(p.stype, p.sprop, is_group, bas) + + o.buf = obuf + o.index = oi + + return err +} diff --git a/vendor/github.com/gogo/protobuf/proto/decode_gogo.go b/vendor/github.com/gogo/protobuf/proto/decode_gogo.go new file mode 100644 index 000000000..6fb74de4c --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/decode_gogo.go @@ -0,0 +1,172 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "reflect" +) + +// Decode a reference to a struct pointer. +func (o *Buffer) dec_ref_struct_message(p *Properties, base structPointer) (err error) { + raw, e := o.DecodeRawBytes(false) + if e != nil { + return e + } + + // If the object can unmarshal itself, let it. + if p.isUnmarshaler { + panic("not supported, since this is a pointer receiver") + } + + obuf := o.buf + oi := o.index + o.buf = raw + o.index = 0 + + bas := structPointer_FieldPointer(base, p.field) + + err = o.unmarshalType(p.stype, p.sprop, false, bas) + o.buf = obuf + o.index = oi + + return err +} + +// Decode a slice of references to struct pointers ([]struct). 
+func (o *Buffer) dec_slice_ref_struct(p *Properties, is_group bool, base structPointer) error { + newBas := appendStructPointer(base, p.field, p.sstype) + + if is_group { + panic("not supported, maybe in future, if requested.") + } + + raw, err := o.DecodeRawBytes(false) + if err != nil { + return err + } + + // If the object can unmarshal itself, let it. + if p.isUnmarshaler { + panic("not supported, since this is not a pointer receiver.") + } + + obuf := o.buf + oi := o.index + o.buf = raw + o.index = 0 + + err = o.unmarshalType(p.stype, p.sprop, is_group, newBas) + + o.buf = obuf + o.index = oi + + return err +} + +// Decode a slice of references to struct pointers. +func (o *Buffer) dec_slice_ref_struct_message(p *Properties, base structPointer) error { + return o.dec_slice_ref_struct(p, false, base) +} + +func setPtrCustomType(base structPointer, f field, v interface{}) { + if v == nil { + return + } + structPointer_SetStructPointer(base, f, toStructPointer(reflect.ValueOf(v))) +} + +func setCustomType(base structPointer, f field, value interface{}) { + if value == nil { + return + } + v := reflect.ValueOf(value).Elem() + t := reflect.TypeOf(value).Elem() + kind := t.Kind() + switch kind { + case reflect.Slice: + slice := reflect.MakeSlice(t, v.Len(), v.Cap()) + reflect.Copy(slice, v) + oldHeader := structPointer_GetSliceHeader(base, f) + oldHeader.Data = slice.Pointer() + oldHeader.Len = v.Len() + oldHeader.Cap = v.Cap() + default: + size := reflect.TypeOf(value).Elem().Size() + structPointer_Copy(toStructPointer(reflect.ValueOf(value)), structPointer_Add(base, f), int(size)) + } +} + +func (o *Buffer) dec_custom_bytes(p *Properties, base structPointer) error { + b, err := o.DecodeRawBytes(true) + if err != nil { + return err + } + i := reflect.New(p.ctype.Elem()).Interface() + custom := (i).(Unmarshaler) + if err := custom.Unmarshal(b); err != nil { + return err + } + setPtrCustomType(base, p.field, custom) + return nil +} + +func (o *Buffer) dec_custom_ref_bytes(p *Properties, base structPointer) error { + b, err := o.DecodeRawBytes(true) + if err != nil { + return err + } + i := reflect.New(p.ctype).Interface() + custom := (i).(Unmarshaler) + if err := custom.Unmarshal(b); err != nil { + return err + } + if custom != nil { + setCustomType(base, p.field, custom) + } + return nil +} + +// Decode a slice of bytes ([]byte) into a slice of custom types. +func (o *Buffer) dec_custom_slice_bytes(p *Properties, base structPointer) error { + b, err := o.DecodeRawBytes(true) + if err != nil { + return err + } + i := reflect.New(p.ctype.Elem()).Interface() + custom := (i).(Unmarshaler) + if err := custom.Unmarshal(b); err != nil { + return err + } + newBas := appendStructPointer(base, p.field, p.ctype) + + var zero field + setCustomType(newBas, zero, custom) + + return nil +} diff --git a/vendor/github.com/gogo/protobuf/proto/duration.go b/vendor/github.com/gogo/protobuf/proto/duration.go new file mode 100644 index 000000000..93464c91c --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/duration.go @@ -0,0 +1,100 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// This file implements conversions between google.protobuf.Duration +// and time.Duration. + +import ( + "errors" + "fmt" + "time" +) + +const ( + // Range of a Duration in seconds, as specified in + // google/protobuf/duration.proto. This is about 10,000 years in seconds. + maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) + minSeconds = -maxSeconds +) + +// validateDuration determines whether the Duration is valid according to the +// definition in google/protobuf/duration.proto. A valid Duration +// may still be too large to fit into a time.Duration (the range of Duration +// is about 10,000 years, and the range of time.Duration is about 290). +func validateDuration(d *duration) error { + if d == nil { + return errors.New("duration: nil Duration") + } + if d.Seconds < minSeconds || d.Seconds > maxSeconds { + return fmt.Errorf("duration: %#v: seconds out of range", d) + } + if d.Nanos <= -1e9 || d.Nanos >= 1e9 { + return fmt.Errorf("duration: %#v: nanos out of range", d) + } + // Seconds and Nanos must have the same sign, unless d.Nanos is zero. + if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) { + return fmt.Errorf("duration: %#v: seconds and nanos have different signs", d) + } + return nil +} + +// DurationFromProto converts a Duration to a time.Duration. DurationFromProto +// returns an error if the Duration is invalid or is too large to be +// represented in a time.Duration. +func durationFromProto(p *duration) (time.Duration, error) { + if err := validateDuration(p); err != nil { + return 0, err + } + d := time.Duration(p.Seconds) * time.Second + if int64(d/time.Second) != p.Seconds { + return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p) + } + if p.Nanos != 0 { + d += time.Duration(p.Nanos) + if (d < 0) != (p.Nanos < 0) { + return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p) + } + } + return d, nil +} + +// DurationProto converts a time.Duration to a Duration. 
+func durationProto(d time.Duration) *duration { + nanos := d.Nanoseconds() + secs := nanos / 1e9 + nanos -= secs * 1e9 + return &duration{ + Seconds: secs, + Nanos: int32(nanos), + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/duration_gogo.go b/vendor/github.com/gogo/protobuf/proto/duration_gogo.go new file mode 100644 index 000000000..18e2a5f77 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/duration_gogo.go @@ -0,0 +1,203 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2016, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package proto + +import ( + "reflect" + "time" +) + +var durationType = reflect.TypeOf((*time.Duration)(nil)).Elem() + +type duration struct { + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` +} + +func (m *duration) Reset() { *m = duration{} } +func (*duration) ProtoMessage() {} +func (*duration) String() string { return "duration" } + +func init() { + RegisterType((*duration)(nil), "gogo.protobuf.proto.duration") +} + +func (o *Buffer) decDuration() (time.Duration, error) { + b, err := o.DecodeRawBytes(true) + if err != nil { + return 0, err + } + dproto := &duration{} + if err := Unmarshal(b, dproto); err != nil { + return 0, err + } + return durationFromProto(dproto) +} + +func (o *Buffer) dec_duration(p *Properties, base structPointer) error { + d, err := o.decDuration() + if err != nil { + return err + } + word64_Set(structPointer_Word64(base, p.field), o, uint64(d)) + return nil +} + +func (o *Buffer) dec_ref_duration(p *Properties, base structPointer) error { + d, err := o.decDuration() + if err != nil { + return err + } + word64Val_Set(structPointer_Word64Val(base, p.field), o, uint64(d)) + return nil +} + +func (o *Buffer) dec_slice_duration(p *Properties, base structPointer) error { + d, err := o.decDuration() + if err != nil { + return err + } + newBas := appendStructPointer(base, p.field, reflect.SliceOf(reflect.PtrTo(durationType))) + var zero field + setPtrCustomType(newBas, zero, &d) + return nil +} + +func (o *Buffer) dec_slice_ref_duration(p *Properties, base structPointer) error { + d, err := o.decDuration() + if err != nil { + return err + } + structPointer_Word64Slice(base, p.field).Append(uint64(d)) + return nil +} + +func size_duration(p *Properties, base structPointer) (n int) { + structp := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return 0 + } + dur := structPointer_Interface(structp, durationType).(*time.Duration) + d := durationProto(*dur) + size := Size(d) + return size + sizeVarint(uint64(size)) + len(p.tagcode) +} + +func (o *Buffer) enc_duration(p *Properties, base structPointer) error { + structp := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return ErrNil + } + dur := structPointer_Interface(structp, durationType).(*time.Duration) + d := durationProto(*dur) + data, err := Marshal(d) + if err != nil { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + return nil +} + +func size_ref_duration(p *Properties, base structPointer) (n int) { + dur := structPointer_InterfaceAt(base, p.field, durationType).(*time.Duration) + d := durationProto(*dur) + size := Size(d) + return size + sizeVarint(uint64(size)) + len(p.tagcode) +} + +func (o *Buffer) enc_ref_duration(p *Properties, base structPointer) error { + dur := structPointer_InterfaceAt(base, p.field, durationType).(*time.Duration) + d := durationProto(*dur) + data, err := Marshal(d) + if err != nil { + return err + } + o.buf = append(o.buf, p.tagcode...) 
+ o.EncodeRawBytes(data) + return nil +} + +func size_slice_duration(p *Properties, base structPointer) (n int) { + pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(durationType))).(*[]*time.Duration) + durs := *pdurs + for i := 0; i < len(durs); i++ { + if durs[i] == nil { + return 0 + } + dproto := durationProto(*durs[i]) + size := Size(dproto) + n += len(p.tagcode) + size + sizeVarint(uint64(size)) + } + return n +} + +func (o *Buffer) enc_slice_duration(p *Properties, base structPointer) error { + pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(durationType))).(*[]*time.Duration) + durs := *pdurs + for i := 0; i < len(durs); i++ { + if durs[i] == nil { + return errRepeatedHasNil + } + dproto := durationProto(*durs[i]) + data, err := Marshal(dproto) + if err != nil { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + } + return nil +} + +func size_slice_ref_duration(p *Properties, base structPointer) (n int) { + pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(durationType)).(*[]time.Duration) + durs := *pdurs + for i := 0; i < len(durs); i++ { + dproto := durationProto(durs[i]) + size := Size(dproto) + n += len(p.tagcode) + size + sizeVarint(uint64(size)) + } + return n +} + +func (o *Buffer) enc_slice_ref_duration(p *Properties, base structPointer) error { + pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(durationType)).(*[]time.Duration) + durs := *pdurs + for i := 0; i < len(durs); i++ { + dproto := durationProto(durs[i]) + data, err := Marshal(dproto) + if err != nil { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + } + return nil +} diff --git a/vendor/github.com/gogo/protobuf/proto/encode.go b/vendor/github.com/gogo/protobuf/proto/encode.go new file mode 100644 index 000000000..2b30f8462 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/encode.go @@ -0,0 +1,1362 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. + */ + +import ( + "errors" + "fmt" + "reflect" + "sort" +) + +// RequiredNotSetError is the error returned if Marshal is called with +// a protocol buffer struct whose required fields have not +// all been initialized. It is also the error returned if Unmarshal is +// called with an encoded protocol buffer that does not include all the +// required fields. +// +// When printed, RequiredNotSetError reports the first unset required field in a +// message. If the field cannot be precisely determined, it is reported as +// "{Unknown}". +type RequiredNotSetError struct { + field string +} + +func (e *RequiredNotSetError) Error() string { + return fmt.Sprintf("proto: required field %q not set", e.field) +} + +var ( + // errRepeatedHasNil is the error returned if Marshal is called with + // a struct with a repeated field containing a nil element. + errRepeatedHasNil = errors.New("proto: repeated field has nil element") + + // errOneofHasNil is the error returned if Marshal is called with + // a struct with a oneof field containing a nil element. + errOneofHasNil = errors.New("proto: oneof field has nil value") + + // ErrNil is the error returned if Marshal is called with nil. + ErrNil = errors.New("proto: Marshal called with nil") + + // ErrTooLarge is the error returned if Marshal is called with a + // message that encodes to >2GB. + ErrTooLarge = errors.New("proto: message encodes to over 2 GB") +) + +// The fundamental encoders that put bytes on the wire. +// Those that take integer types all accept uint64 and are +// therefore of type valueEncoder. + +const maxVarintBytes = 10 // maximum length of a varint + +// maxMarshalSize is the largest allowed size of an encoded protobuf, +// since C++ and Java use signed int32s for the size. +const maxMarshalSize = 1<<31 - 1 + +// EncodeVarint returns the varint encoding of x. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +// Not used by the package itself, but helpful to clients +// wishing to use the same encoding. +func EncodeVarint(x uint64) []byte { + var buf [maxVarintBytes]byte + var n int + for n = 0; x > 127; n++ { + buf[n] = 0x80 | uint8(x&0x7F) + x >>= 7 + } + buf[n] = uint8(x) + n++ + return buf[0:n] +} + +// EncodeVarint writes a varint-encoded integer to the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func (p *Buffer) EncodeVarint(x uint64) error { + for x >= 1<<7 { + p.buf = append(p.buf, uint8(x&0x7f|0x80)) + x >>= 7 + } + p.buf = append(p.buf, uint8(x)) + return nil +} + +// SizeVarint returns the varint encoding size of an integer. +func SizeVarint(x uint64) int { + return sizeVarint(x) +} + +func sizeVarint(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} + +// EncodeFixed64 writes a 64-bit integer to the Buffer. 
+// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (p *Buffer) EncodeFixed64(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24), + uint8(x>>32), + uint8(x>>40), + uint8(x>>48), + uint8(x>>56)) + return nil +} + +func sizeFixed64(x uint64) int { + return 8 +} + +// EncodeFixed32 writes a 32-bit integer to the Buffer. +// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. +func (p *Buffer) EncodeFixed32(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24)) + return nil +} + +func sizeFixed32(x uint64) int { + return 4 +} + +// EncodeZigzag64 writes a zigzag-encoded 64-bit integer +// to the Buffer. +// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) EncodeZigzag64(x uint64) error { + // use signed number to get arithmetic right shift. + return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} + +func sizeZigzag64(x uint64) int { + return sizeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} + +// EncodeZigzag32 writes a zigzag-encoded 32-bit integer +// to the Buffer. +// This is the format used for the sint32 protocol buffer type. +func (p *Buffer) EncodeZigzag32(x uint64) error { + // use signed number to get arithmetic right shift. + return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) +} + +func sizeZigzag32(x uint64) int { + return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) +} + +// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. +func (p *Buffer) EncodeRawBytes(b []byte) error { + p.EncodeVarint(uint64(len(b))) + p.buf = append(p.buf, b...) + return nil +} + +func sizeRawBytes(b []byte) int { + return sizeVarint(uint64(len(b))) + + len(b) +} + +// EncodeStringBytes writes an encoded string to the Buffer. +// This is the format used for the proto2 string type. +func (p *Buffer) EncodeStringBytes(s string) error { + p.EncodeVarint(uint64(len(s))) + p.buf = append(p.buf, s...) + return nil +} + +func sizeStringBytes(s string) int { + return sizeVarint(uint64(len(s))) + + len(s) +} + +// Marshaler is the interface representing objects that can marshal themselves. +type Marshaler interface { + Marshal() ([]byte, error) +} + +// Marshal takes the protocol buffer +// and encodes it into the wire format, returning the data. +func Marshal(pb Message) ([]byte, error) { + // Can the object marshal itself? + if m, ok := pb.(Marshaler); ok { + return m.Marshal() + } + p := NewBuffer(nil) + err := p.Marshal(pb) + if p.buf == nil && err == nil { + // Return a non-nil slice on success. + return []byte{}, nil + } + return p.buf, err +} + +// EncodeMessage writes the protocol buffer to the Buffer, +// prefixed by a varint-encoded length. +func (p *Buffer) EncodeMessage(pb Message) error { + t, base, err := getbase(pb) + if structPointer_IsNil(base) { + return ErrNil + } + if err == nil { + var state errorState + err = p.enc_len_struct(GetProperties(t.Elem()), base, &state) + } + return err +} + +// Marshal takes the protocol buffer +// and encodes it into the wire format, writing the result to the +// Buffer. +func (p *Buffer) Marshal(pb Message) error { + // Can the object marshal itself? + if m, ok := pb.(Marshaler); ok { + data, err := m.Marshal() + p.buf = append(p.buf, data...) 
+ return err + } + + t, base, err := getbase(pb) + if structPointer_IsNil(base) { + return ErrNil + } + if err == nil { + err = p.enc_struct(GetProperties(t.Elem()), base) + } + + if collectStats { + (stats).Encode++ // Parens are to work around a goimports bug. + } + + if len(p.buf) > maxMarshalSize { + return ErrTooLarge + } + return err +} + +// Size returns the encoded size of a protocol buffer. +func Size(pb Message) (n int) { + // Can the object marshal itself? If so, Size is slow. + // TODO: add Size to Marshaler, or add a Sizer interface. + if m, ok := pb.(Marshaler); ok { + b, _ := m.Marshal() + return len(b) + } + + t, base, err := getbase(pb) + if structPointer_IsNil(base) { + return 0 + } + if err == nil { + n = size_struct(GetProperties(t.Elem()), base) + } + + if collectStats { + (stats).Size++ // Parens are to work around a goimports bug. + } + + return +} + +// Individual type encoders. + +// Encode a bool. +func (o *Buffer) enc_bool(p *Properties, base structPointer) error { + v := *structPointer_Bool(base, p.field) + if v == nil { + return ErrNil + } + x := 0 + if *v { + x = 1 + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error { + v := *structPointer_BoolVal(base, p.field) + if !v { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, 1) + return nil +} + +func size_bool(p *Properties, base structPointer) int { + v := *structPointer_Bool(base, p.field) + if v == nil { + return 0 + } + return len(p.tagcode) + 1 // each bool takes exactly one byte +} + +func size_proto3_bool(p *Properties, base structPointer) int { + v := *structPointer_BoolVal(base, p.field) + if !v && !p.oneof { + return 0 + } + return len(p.tagcode) + 1 // each bool takes exactly one byte +} + +// Encode an int32. +func (o *Buffer) enc_int32(p *Properties, base structPointer) error { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return ErrNil + } + x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error { + v := structPointer_Word32Val(base, p.field) + x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range + if x == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func size_int32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return 0 + } + x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +func size_proto3_int32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32Val(base, p.field) + x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range + if x == 0 && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +// Encode a uint32. +// Exactly the same as int32, except for no sign extension. +func (o *Buffer) enc_uint32(p *Properties, base structPointer) error { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return ErrNil + } + x := word32_Get(v) + o.buf = append(o.buf, p.tagcode...) 
+ p.valEnc(o, uint64(x)) + return nil +} + +func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error { + v := structPointer_Word32Val(base, p.field) + x := word32Val_Get(v) + if x == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func size_uint32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return 0 + } + x := word32_Get(v) + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +func size_proto3_uint32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32Val(base, p.field) + x := word32Val_Get(v) + if x == 0 && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +// Encode an int64. +func (o *Buffer) enc_int64(p *Properties, base structPointer) error { + v := structPointer_Word64(base, p.field) + if word64_IsNil(v) { + return ErrNil + } + x := word64_Get(v) + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, x) + return nil +} + +func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error { + v := structPointer_Word64Val(base, p.field) + x := word64Val_Get(v) + if x == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, x) + return nil +} + +func size_int64(p *Properties, base structPointer) (n int) { + v := structPointer_Word64(base, p.field) + if word64_IsNil(v) { + return 0 + } + x := word64_Get(v) + n += len(p.tagcode) + n += p.valSize(x) + return +} + +func size_proto3_int64(p *Properties, base structPointer) (n int) { + v := structPointer_Word64Val(base, p.field) + x := word64Val_Get(v) + if x == 0 && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += p.valSize(x) + return +} + +// Encode a string. +func (o *Buffer) enc_string(p *Properties, base structPointer) error { + v := *structPointer_String(base, p.field) + if v == nil { + return ErrNil + } + x := *v + o.buf = append(o.buf, p.tagcode...) + o.EncodeStringBytes(x) + return nil +} + +func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error { + v := *structPointer_StringVal(base, p.field) + if v == "" { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeStringBytes(v) + return nil +} + +func size_string(p *Properties, base structPointer) (n int) { + v := *structPointer_String(base, p.field) + if v == nil { + return 0 + } + x := *v + n += len(p.tagcode) + n += sizeStringBytes(x) + return +} + +func size_proto3_string(p *Properties, base structPointer) (n int) { + v := *structPointer_StringVal(base, p.field) + if v == "" && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += sizeStringBytes(v) + return +} + +// All protocol buffer fields are nillable, but be careful. +func isNil(v reflect.Value) bool { + switch v.Kind() { + case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return v.IsNil() + } + return false +} + +// Encode a message struct. +func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error { + var state errorState + structp := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return ErrNil + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, err := m.Marshal() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + return state.err + } + + o.buf = append(o.buf, p.tagcode...) 
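The enc_proto3_* encoders above return ErrNil for zero values, which is how proto3 scalar fields are dropped from the wire entirely. A hedged sketch of the visible effect; mypb.Blob is a hypothetical generated proto3 message with a single field, int32 count = 1.

package main

import (
	"fmt"

	proto "github.com/gogo/protobuf/proto"
	mypb "example.com/mypb" // hypothetical generated code
)

func main() {
	// The zero value is skipped, so the encoding is empty.
	data, err := proto.Marshal(&mypb.Blob{Count: 0})
	if err != nil {
		panic(err)
	}
	fmt.Println(len(data)) // 0

	// A non-zero value gets the field 1 varint tag (0x08) followed by the value.
	data, _ = proto.Marshal(&mypb.Blob{Count: 1})
	fmt.Printf("% x\n", data) // 08 01
}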
+ return o.enc_len_struct(p.sprop, structp, &state) +} + +func size_struct_message(p *Properties, base structPointer) int { + structp := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return 0 + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, _ := m.Marshal() + n0 := len(p.tagcode) + n1 := sizeRawBytes(data) + return n0 + n1 + } + + n0 := len(p.tagcode) + n1 := size_struct(p.sprop, structp) + n2 := sizeVarint(uint64(n1)) // size of encoded length + return n0 + n1 + n2 +} + +// Encode a group struct. +func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error { + var state errorState + b := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(b) { + return ErrNil + } + + o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) + err := o.enc_struct(p.sprop, b) + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) + return state.err +} + +func size_struct_group(p *Properties, base structPointer) (n int) { + b := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(b) { + return 0 + } + + n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup)) + n += size_struct(p.sprop, b) + n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup)) + return +} + +// Encode a slice of bools ([]bool). +func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return ErrNil + } + for _, x := range s { + o.buf = append(o.buf, p.tagcode...) + v := uint64(0) + if x { + v = 1 + } + p.valEnc(o, v) + } + return nil +} + +func size_slice_bool(p *Properties, base structPointer) int { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return 0 + } + return l * (len(p.tagcode) + 1) // each bool takes exactly one byte +} + +// Encode a slice of bools ([]bool) in packed format. +func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeVarint(uint64(l)) // each bool takes exactly one byte + for _, x := range s { + v := uint64(0) + if x { + v = 1 + } + p.valEnc(o, v) + } + return nil +} + +func size_slice_packed_bool(p *Properties, base structPointer) (n int) { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return 0 + } + n += len(p.tagcode) + n += sizeVarint(uint64(l)) + n += l // each bool takes exactly one byte + return +} + +// Encode a slice of bytes ([]byte). +func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error { + s := *structPointer_Bytes(base, p.field) + if s == nil { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(s) + return nil +} + +func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error { + s := *structPointer_Bytes(base, p.field) + if len(s) == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) 
+ o.EncodeRawBytes(s) + return nil +} + +func size_slice_byte(p *Properties, base structPointer) (n int) { + s := *structPointer_Bytes(base, p.field) + if s == nil && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += sizeRawBytes(s) + return +} + +func size_proto3_slice_byte(p *Properties, base structPointer) (n int) { + s := *structPointer_Bytes(base, p.field) + if len(s) == 0 && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += sizeRawBytes(s) + return +} + +// Encode a slice of int32s ([]int32). +func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + p.valEnc(o, uint64(x)) + } + return nil +} + +func size_slice_int32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + for i := 0; i < l; i++ { + n += len(p.tagcode) + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + n += p.valSize(uint64(x)) + } + return +} + +// Encode a slice of int32s ([]int32) in packed format. +func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + // TODO: Reuse a Buffer. + buf := NewBuffer(nil) + for i := 0; i < l; i++ { + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + p.valEnc(buf, uint64(x)) + } + + o.buf = append(o.buf, p.tagcode...) + o.EncodeVarint(uint64(len(buf.buf))) + o.buf = append(o.buf, buf.buf...) + return nil +} + +func size_slice_packed_int32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + var bufSize int + for i := 0; i < l; i++ { + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + bufSize += p.valSize(uint64(x)) + } + + n += len(p.tagcode) + n += sizeVarint(uint64(bufSize)) + n += bufSize + return +} + +// Encode a slice of uint32s ([]uint32). +// Exactly the same as int32, except for no sign extension. +func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + x := s.Index(i) + p.valEnc(o, uint64(x)) + } + return nil +} + +func size_slice_uint32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + for i := 0; i < l; i++ { + n += len(p.tagcode) + x := s.Index(i) + n += p.valSize(uint64(x)) + } + return +} + +// Encode a slice of uint32s ([]uint32) in packed format. +// Exactly the same as int32, except for no sign extension. +func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + // TODO: Reuse a Buffer. + buf := NewBuffer(nil) + for i := 0; i < l; i++ { + p.valEnc(buf, uint64(s.Index(i))) + } + + o.buf = append(o.buf, p.tagcode...) + o.EncodeVarint(uint64(len(buf.buf))) + o.buf = append(o.buf, buf.buf...) 
+ return nil +} + +func size_slice_packed_uint32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + var bufSize int + for i := 0; i < l; i++ { + bufSize += p.valSize(uint64(s.Index(i))) + } + + n += len(p.tagcode) + n += sizeVarint(uint64(bufSize)) + n += bufSize + return +} + +// Encode a slice of int64s ([]int64). +func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, s.Index(i)) + } + return nil +} + +func size_slice_int64(p *Properties, base structPointer) (n int) { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + for i := 0; i < l; i++ { + n += len(p.tagcode) + n += p.valSize(s.Index(i)) + } + return +} + +// Encode a slice of int64s ([]int64) in packed format. +func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + // TODO: Reuse a Buffer. + buf := NewBuffer(nil) + for i := 0; i < l; i++ { + p.valEnc(buf, s.Index(i)) + } + + o.buf = append(o.buf, p.tagcode...) + o.EncodeVarint(uint64(len(buf.buf))) + o.buf = append(o.buf, buf.buf...) + return nil +} + +func size_slice_packed_int64(p *Properties, base structPointer) (n int) { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + var bufSize int + for i := 0; i < l; i++ { + bufSize += p.valSize(s.Index(i)) + } + + n += len(p.tagcode) + n += sizeVarint(uint64(bufSize)) + n += bufSize + return +} + +// Encode a slice of slice of bytes ([][]byte). +func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error { + ss := *structPointer_BytesSlice(base, p.field) + l := len(ss) + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(ss[i]) + } + return nil +} + +func size_slice_slice_byte(p *Properties, base structPointer) (n int) { + ss := *structPointer_BytesSlice(base, p.field) + l := len(ss) + if l == 0 { + return 0 + } + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + n += sizeRawBytes(ss[i]) + } + return +} + +// Encode a slice of strings ([]string). +func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error { + ss := *structPointer_StringSlice(base, p.field) + l := len(ss) + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + o.EncodeStringBytes(ss[i]) + } + return nil +} + +func size_slice_string(p *Properties, base structPointer) (n int) { + ss := *structPointer_StringSlice(base, p.field) + l := len(ss) + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + n += sizeStringBytes(ss[i]) + } + return +} + +// Encode a slice of message structs ([]*struct). +func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error { + var state errorState + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + + for i := 0; i < l; i++ { + structp := s.Index(i) + if structPointer_IsNil(structp) { + return errRepeatedHasNil + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, err := m.Marshal() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.buf = append(o.buf, p.tagcode...) 
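A minimal sketch of what the packed encoders above emit, the whole slice written as a single length-delimited record. Field number 5 is arbitrary, and proto.WireBytes and Buffer.Bytes are assumed from elsewhere in this package (lib.go).

package main

import (
	"fmt"

	proto "github.com/gogo/protobuf/proto"
)

func main() {
	// Packed repeated int32, field number 5, values {3, 270}.
	payload := proto.NewBuffer(nil)
	payload.EncodeVarint(3)   // 03
	payload.EncodeVarint(270) // 8e 02

	b := proto.NewBuffer(nil)
	b.EncodeVarint(uint64(5<<3 | proto.WireBytes)) // one tag for the whole slice
	b.EncodeRawBytes(payload.Bytes())              // length prefix, then the packed varints

	fmt.Printf("% x\n", b.Bytes()) // 2a 03 03 8e 02
}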
+ o.EncodeRawBytes(data) + continue + } + + o.buf = append(o.buf, p.tagcode...) + err := o.enc_len_struct(p.sprop, structp, &state) + if err != nil && !state.shouldContinue(err, nil) { + if err == ErrNil { + return errRepeatedHasNil + } + return err + } + } + return state.err +} + +func size_slice_struct_message(p *Properties, base structPointer) (n int) { + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + structp := s.Index(i) + if structPointer_IsNil(structp) { + return // return the size up to this point + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, _ := m.Marshal() + n += sizeRawBytes(data) + continue + } + + n0 := size_struct(p.sprop, structp) + n1 := sizeVarint(uint64(n0)) // size of encoded length + n += n0 + n1 + } + return +} + +// Encode a slice of group structs ([]*struct). +func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error { + var state errorState + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + + for i := 0; i < l; i++ { + b := s.Index(i) + if structPointer_IsNil(b) { + return errRepeatedHasNil + } + + o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) + + err := o.enc_struct(p.sprop, b) + + if err != nil && !state.shouldContinue(err, nil) { + if err == ErrNil { + return errRepeatedHasNil + } + return err + } + + o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) + } + return state.err +} + +func size_slice_struct_group(p *Properties, base structPointer) (n int) { + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + + n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup)) + n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup)) + for i := 0; i < l; i++ { + b := s.Index(i) + if structPointer_IsNil(b) { + return // return size up to this point + } + + n += size_struct(p.sprop, b) + } + return +} + +// Encode an extension map. +func (o *Buffer) enc_map(p *Properties, base structPointer) error { + exts := structPointer_ExtMap(base, p.field) + if err := encodeExtensionsMap(*exts); err != nil { + return err + } + + return o.enc_map_body(*exts) +} + +func (o *Buffer) enc_exts(p *Properties, base structPointer) error { + exts := structPointer_Extensions(base, p.field) + + v, mu := exts.extensionsRead() + if v == nil { + return nil + } + + mu.Lock() + defer mu.Unlock() + if err := encodeExtensionsMap(v); err != nil { + return err + } + + return o.enc_map_body(v) +} + +func (o *Buffer) enc_map_body(v map[int32]Extension) error { + // Fast-path for common cases: zero or one extensions. + if len(v) <= 1 { + for _, e := range v { + o.buf = append(o.buf, e.enc...) + } + return nil + } + + // Sort keys to provide a deterministic encoding. + keys := make([]int, 0, len(v)) + for k := range v { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, k := range keys { + o.buf = append(o.buf, v[int32(k)].enc...) + } + return nil +} + +func size_map(p *Properties, base structPointer) int { + v := structPointer_ExtMap(base, p.field) + return extensionsMapSize(*v) +} + +func size_exts(p *Properties, base structPointer) int { + v := structPointer_Extensions(base, p.field) + return extensionsSize(v) +} + +// Encode a map field. +func (o *Buffer) enc_new_map(p *Properties, base structPointer) error { + var state errorState // XXX: or do we need to plumb this through? 
+ + /* + A map defined as + map map_field = N; + is encoded in the same way as + message MapFieldEntry { + key_type key = 1; + value_type value = 2; + } + repeated MapFieldEntry map_field = N; + */ + + v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V + if v.Len() == 0 { + return nil + } + + keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) + + enc := func() error { + if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil { + return err + } + if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil && err != ErrNil { + return err + } + return nil + } + + // Don't sort map keys. It is not required by the spec, and C++ doesn't do it. + for _, key := range v.MapKeys() { + val := v.MapIndex(key) + + keycopy.Set(key) + valcopy.Set(val) + + o.buf = append(o.buf, p.tagcode...) + if err := o.enc_len_thing(enc, &state); err != nil { + return err + } + } + return nil +} + +func size_new_map(p *Properties, base structPointer) int { + v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V + + keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) + + n := 0 + for _, key := range v.MapKeys() { + val := v.MapIndex(key) + keycopy.Set(key) + valcopy.Set(val) + + // Tag codes for key and val are the responsibility of the sub-sizer. + keysize := p.mkeyprop.size(p.mkeyprop, keybase) + valsize := p.mvalprop.size(p.mvalprop, valbase) + entry := keysize + valsize + // Add on tag code and length of map entry itself. + n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry + } + return n +} + +// mapEncodeScratch returns a new reflect.Value matching the map's value type, +// and a structPointer suitable for passing to an encoder or sizer. +func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) { + // Prepare addressable doubly-indirect placeholders for the key and value types. + // This is needed because the element-type encoders expect **T, but the map iteration produces T. + + keycopy = reflect.New(mapType.Key()).Elem() // addressable K + keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K + keyptr.Set(keycopy.Addr()) // + keybase = toStructPointer(keyptr.Addr()) // **K + + // Value types are more varied and require special handling. + switch mapType.Elem().Kind() { + case reflect.Slice: + // []byte + var dummy []byte + valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte + valbase = toStructPointer(valcopy.Addr()) + case reflect.Ptr: + // message; the generated field type is map[K]*Msg (so V is *Msg), + // so we only need one level of indirection. + valcopy = reflect.New(mapType.Elem()).Elem() // addressable V + valbase = toStructPointer(valcopy.Addr()) + default: + // everything else + valcopy = reflect.New(mapType.Elem()).Elem() // addressable V + valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V + valptr.Set(valcopy.Addr()) // + valbase = toStructPointer(valptr.Addr()) // **V + } + return +} + +// Encode a struct. +func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error { + var state errorState + // Encode fields in tag order so that decoders may use optimizations + // that depend on the ordering. 
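To make the MapFieldEntry equivalence described above concrete, a by-hand sketch for map<string, int32> map_field = 4 holding the single entry "a": 1. The field number is arbitrary, and proto.WireBytes, proto.WireVarint and Buffer.Bytes are assumed from elsewhere in this package.

package main

import (
	"fmt"

	proto "github.com/gogo/protobuf/proto"
)

func main() {
	// One MapFieldEntry{key: "a", value: 1}.
	entry := proto.NewBuffer(nil)
	entry.EncodeVarint(uint64(1<<3 | proto.WireBytes)) // key, field 1, length-delimited
	entry.EncodeStringBytes("a")
	entry.EncodeVarint(uint64(2<<3 | proto.WireVarint)) // value, field 2, varint
	entry.EncodeVarint(1)

	// The map field itself is encoded exactly like a repeated message field.
	b := proto.NewBuffer(nil)
	b.EncodeVarint(uint64(4<<3 | proto.WireBytes))
	b.EncodeRawBytes(entry.Bytes())

	fmt.Printf("% x\n", b.Bytes()) // 22 05 0a 01 61 10 01
}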
+ // https://developers.google.com/protocol-buffers/docs/encoding#order + for _, i := range prop.order { + p := prop.Prop[i] + if p.enc != nil { + err := p.enc(o, p, base) + if err != nil { + if err == ErrNil { + if p.Required && state.err == nil { + state.err = &RequiredNotSetError{p.Name} + } + } else if err == errRepeatedHasNil { + // Give more context to nil values in repeated fields. + return errors.New("repeated field " + p.OrigName + " has nil element") + } else if !state.shouldContinue(err, p) { + return err + } + } + if len(o.buf) > maxMarshalSize { + return ErrTooLarge + } + } + } + + // Do oneof fields. + if prop.oneofMarshaler != nil { + m := structPointer_Interface(base, prop.stype).(Message) + if err := prop.oneofMarshaler(m, o); err == ErrNil { + return errOneofHasNil + } else if err != nil { + return err + } + } + + // Add unrecognized fields at the end. + if prop.unrecField.IsValid() { + v := *structPointer_Bytes(base, prop.unrecField) + if len(o.buf)+len(v) > maxMarshalSize { + return ErrTooLarge + } + if len(v) > 0 { + o.buf = append(o.buf, v...) + } + } + + return state.err +} + +func size_struct(prop *StructProperties, base structPointer) (n int) { + for _, i := range prop.order { + p := prop.Prop[i] + if p.size != nil { + n += p.size(p, base) + } + } + + // Add unrecognized fields at the end. + if prop.unrecField.IsValid() { + v := *structPointer_Bytes(base, prop.unrecField) + n += len(v) + } + + // Factor in any oneof fields. + if prop.oneofSizer != nil { + m := structPointer_Interface(base, prop.stype).(Message) + n += prop.oneofSizer(m) + } + + return +} + +var zeroes [20]byte // longer than any conceivable sizeVarint + +// Encode a struct, preceded by its encoded length (as a varint). +func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error { + return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state) +} + +// Encode something, preceded by its encoded length (as a varint). +func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error { + iLen := len(o.buf) + o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length + iMsg := len(o.buf) + err := enc() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + lMsg := len(o.buf) - iMsg + lLen := sizeVarint(uint64(lMsg)) + switch x := lLen - (iMsg - iLen); { + case x > 0: // actual length is x bytes larger than the space we reserved + // Move msg x bytes right. + o.buf = append(o.buf, zeroes[:x]...) + copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) + case x < 0: // actual length is x bytes smaller than the space we reserved + // Move msg x bytes left. + copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) + o.buf = o.buf[:len(o.buf)+x] // x is negative + } + // Encode the length in the reserved space. + o.buf = o.buf[:iLen] + o.EncodeVarint(uint64(lMsg)) + o.buf = o.buf[:len(o.buf)+lMsg] + return state.err +} + +// errorState maintains the first error that occurs and updates that error +// with additional context. +type errorState struct { + err error +} + +// shouldContinue reports whether encoding should continue upon encountering the +// given error. If the error is RequiredNotSetError, shouldContinue returns true +// and, if this is the first appearance of that error, remembers it for future +// reporting. +// +// If prop is not nil, it may update any error with additional context about the +// field with the error. +func (s *errorState) shouldContinue(err error, prop *Properties) bool { + // Ignore unset required fields. 
+ reqNotSet, ok := err.(*RequiredNotSetError) + if !ok { + return false + } + if s.err == nil { + if prop != nil { + err = &RequiredNotSetError{prop.Name + "." + reqNotSet.field} + } + s.err = err + } + return true +} diff --git a/vendor/github.com/gogo/protobuf/proto/encode_gogo.go b/vendor/github.com/gogo/protobuf/proto/encode_gogo.go new file mode 100644 index 000000000..32111b7f4 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/encode_gogo.go @@ -0,0 +1,350 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// http://github.com/golang/protobuf/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "reflect" +) + +func NewRequiredNotSetError(field string) *RequiredNotSetError { + return &RequiredNotSetError{field} +} + +type Sizer interface { + Size() int +} + +func (o *Buffer) enc_ext_slice_byte(p *Properties, base structPointer) error { + s := *structPointer_Bytes(base, p.field) + if s == nil { + return ErrNil + } + o.buf = append(o.buf, s...) + return nil +} + +func size_ext_slice_byte(p *Properties, base structPointer) (n int) { + s := *structPointer_Bytes(base, p.field) + if s == nil { + return 0 + } + n += len(s) + return +} + +// Encode a reference to bool pointer. +func (o *Buffer) enc_ref_bool(p *Properties, base structPointer) error { + v := *structPointer_BoolVal(base, p.field) + x := 0 + if v { + x = 1 + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func size_ref_bool(p *Properties, base structPointer) int { + return len(p.tagcode) + 1 // each bool takes exactly one byte +} + +// Encode a reference to int32 pointer. +func (o *Buffer) enc_ref_int32(p *Properties, base structPointer) error { + v := structPointer_Word32Val(base, p.field) + x := int32(word32Val_Get(v)) + o.buf = append(o.buf, p.tagcode...) 
+ p.valEnc(o, uint64(x)) + return nil +} + +func size_ref_int32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32Val(base, p.field) + x := int32(word32Val_Get(v)) + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +func (o *Buffer) enc_ref_uint32(p *Properties, base structPointer) error { + v := structPointer_Word32Val(base, p.field) + x := word32Val_Get(v) + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func size_ref_uint32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32Val(base, p.field) + x := word32Val_Get(v) + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +// Encode a reference to an int64 pointer. +func (o *Buffer) enc_ref_int64(p *Properties, base structPointer) error { + v := structPointer_Word64Val(base, p.field) + x := word64Val_Get(v) + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, x) + return nil +} + +func size_ref_int64(p *Properties, base structPointer) (n int) { + v := structPointer_Word64Val(base, p.field) + x := word64Val_Get(v) + n += len(p.tagcode) + n += p.valSize(x) + return +} + +// Encode a reference to a string pointer. +func (o *Buffer) enc_ref_string(p *Properties, base structPointer) error { + v := *structPointer_StringVal(base, p.field) + o.buf = append(o.buf, p.tagcode...) + o.EncodeStringBytes(v) + return nil +} + +func size_ref_string(p *Properties, base structPointer) (n int) { + v := *structPointer_StringVal(base, p.field) + n += len(p.tagcode) + n += sizeStringBytes(v) + return +} + +// Encode a reference to a message struct. +func (o *Buffer) enc_ref_struct_message(p *Properties, base structPointer) error { + var state errorState + structp := structPointer_GetRefStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return ErrNil + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, err := m.Marshal() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + return nil + } + + o.buf = append(o.buf, p.tagcode...) + return o.enc_len_struct(p.sprop, structp, &state) +} + +//TODO this is only copied, please fix this +func size_ref_struct_message(p *Properties, base structPointer) int { + structp := structPointer_GetRefStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return 0 + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, _ := m.Marshal() + n0 := len(p.tagcode) + n1 := sizeRawBytes(data) + return n0 + n1 + } + + n0 := len(p.tagcode) + n1 := size_struct(p.sprop, structp) + n2 := sizeVarint(uint64(n1)) // size of encoded length + return n0 + n1 + n2 +} + +// Encode a slice of references to message struct pointers ([]struct). +func (o *Buffer) enc_slice_ref_struct_message(p *Properties, base structPointer) error { + var state errorState + ss := structPointer_StructRefSlice(base, p.field, p.stype.Size()) + l := ss.Len() + for i := 0; i < l; i++ { + structp := ss.Index(i) + if structPointer_IsNil(structp) { + return errRepeatedHasNil + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, err := m.Marshal() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + continue + } + + o.buf = append(o.buf, p.tagcode...) 
+ err := o.enc_len_struct(p.sprop, structp, &state) + if err != nil && !state.shouldContinue(err, nil) { + if err == ErrNil { + return errRepeatedHasNil + } + return err + } + + } + return state.err +} + +//TODO this is only copied, please fix this +func size_slice_ref_struct_message(p *Properties, base structPointer) (n int) { + ss := structPointer_StructRefSlice(base, p.field, p.stype.Size()) + l := ss.Len() + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + structp := ss.Index(i) + if structPointer_IsNil(structp) { + return // return the size up to this point + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, _ := m.Marshal() + n += len(p.tagcode) + n += sizeRawBytes(data) + continue + } + + n0 := size_struct(p.sprop, structp) + n1 := sizeVarint(uint64(n0)) // size of encoded length + n += n0 + n1 + } + return +} + +func (o *Buffer) enc_custom_bytes(p *Properties, base structPointer) error { + i := structPointer_InterfaceRef(base, p.field, p.ctype) + if i == nil { + return ErrNil + } + custom := i.(Marshaler) + data, err := custom.Marshal() + if err != nil { + return err + } + if data == nil { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + return nil +} + +func size_custom_bytes(p *Properties, base structPointer) (n int) { + n += len(p.tagcode) + i := structPointer_InterfaceRef(base, p.field, p.ctype) + if i == nil { + return 0 + } + custom := i.(Marshaler) + data, _ := custom.Marshal() + n += sizeRawBytes(data) + return +} + +func (o *Buffer) enc_custom_ref_bytes(p *Properties, base structPointer) error { + custom := structPointer_InterfaceAt(base, p.field, p.ctype).(Marshaler) + data, err := custom.Marshal() + if err != nil { + return err + } + if data == nil { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + return nil +} + +func size_custom_ref_bytes(p *Properties, base structPointer) (n int) { + n += len(p.tagcode) + i := structPointer_InterfaceAt(base, p.field, p.ctype) + if i == nil { + return 0 + } + custom := i.(Marshaler) + data, _ := custom.Marshal() + n += sizeRawBytes(data) + return +} + +func (o *Buffer) enc_custom_slice_bytes(p *Properties, base structPointer) error { + inter := structPointer_InterfaceRef(base, p.field, p.ctype) + if inter == nil { + return ErrNil + } + slice := reflect.ValueOf(inter) + l := slice.Len() + for i := 0; i < l; i++ { + v := slice.Index(i) + custom := v.Interface().(Marshaler) + data, err := custom.Marshal() + if err != nil { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + } + return nil +} + +func size_custom_slice_bytes(p *Properties, base structPointer) (n int) { + inter := structPointer_InterfaceRef(base, p.field, p.ctype) + if inter == nil { + return 0 + } + slice := reflect.ValueOf(inter) + l := slice.Len() + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + v := slice.Index(i) + custom := v.Interface().(Marshaler) + data, _ := custom.Marshal() + n += sizeRawBytes(data) + } + return +} diff --git a/vendor/github.com/gogo/protobuf/proto/equal.go b/vendor/github.com/gogo/protobuf/proto/equal.go new file mode 100644 index 000000000..2ed1cf596 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/equal.go @@ -0,0 +1,300 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Protocol buffer comparison. + +package proto + +import ( + "bytes" + "log" + "reflect" + "strings" +) + +/* +Equal returns true iff protocol buffers a and b are equal. +The arguments must both be pointers to protocol buffer structs. + +Equality is defined in this way: + - Two messages are equal iff they are the same type, + corresponding fields are equal, unknown field sets + are equal, and extensions sets are equal. + - Two set scalar fields are equal iff their values are equal. + If the fields are of a floating-point type, remember that + NaN != x for all x, including NaN. If the message is defined + in a proto3 .proto file, fields are not "set"; specifically, + zero length proto3 "bytes" fields are equal (nil == {}). + - Two repeated fields are equal iff their lengths are the same, + and their corresponding elements are equal. Note a "bytes" field, + although represented by []byte, is not a repeated field and the + rule for the scalar fields described above applies. + - Two unset fields are equal. + - Two unknown field sets are equal if their current + encoded state is equal. + - Two extension sets are equal iff they have corresponding + elements that are pairwise equal. + - Two map fields are equal iff their lengths are the same, + and they contain the same set of elements. Zero-length map + fields are equal. + - Every other combination of things are not equal. + +The return value is undefined if a and b are not protocol buffers. +*/ +func Equal(a, b Message) bool { + if a == nil || b == nil { + return a == b + } + v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b) + if v1.Type() != v2.Type() { + return false + } + if v1.Kind() == reflect.Ptr { + if v1.IsNil() { + return v2.IsNil() + } + if v2.IsNil() { + return false + } + v1, v2 = v1.Elem(), v2.Elem() + } + if v1.Kind() != reflect.Struct { + return false + } + return equalStruct(v1, v2) +} + +// v1 and v2 are known to have the same type. 
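A short usage sketch of the rules documented above; mypb.Blob is a hypothetical generated proto3 message with a bytes field Data, so nil and zero-length values fall under the proto3 bytes rule (nil == {}).

package main

import (
	"fmt"

	proto "github.com/gogo/protobuf/proto"
	mypb "example.com/mypb" // hypothetical generated code
)

func main() {
	a := &mypb.Blob{Data: nil}
	b := &mypb.Blob{Data: []byte{}}
	fmt.Println(proto.Equal(a, b)) // true: zero-length proto3 bytes are not "set"

	c := &mypb.Blob{Data: []byte("x")}
	fmt.Println(proto.Equal(a, c)) // false
}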
+func equalStruct(v1, v2 reflect.Value) bool { + sprop := GetProperties(v1.Type()) + for i := 0; i < v1.NumField(); i++ { + f := v1.Type().Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + f1, f2 := v1.Field(i), v2.Field(i) + if f.Type.Kind() == reflect.Ptr { + if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { + // both unset + continue + } else if n1 != n2 { + // set/unset mismatch + return false + } + b1, ok := f1.Interface().(raw) + if ok { + b2 := f2.Interface().(raw) + // RawMessage + if !bytes.Equal(b1.Bytes(), b2.Bytes()) { + return false + } + continue + } + f1, f2 = f1.Elem(), f2.Elem() + } + if !equalAny(f1, f2, sprop.Prop[i]) { + return false + } + } + + if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() { + em2 := v2.FieldByName("XXX_InternalExtensions") + if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) { + return false + } + } + + if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { + em2 := v2.FieldByName("XXX_extensions") + if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { + return false + } + } + + uf := v1.FieldByName("XXX_unrecognized") + if !uf.IsValid() { + return true + } + + u1 := uf.Bytes() + u2 := v2.FieldByName("XXX_unrecognized").Bytes() + if !bytes.Equal(u1, u2) { + return false + } + + return true +} + +// v1 and v2 are known to have the same type. +// prop may be nil. +func equalAny(v1, v2 reflect.Value, prop *Properties) bool { + if v1.Type() == protoMessageType { + m1, _ := v1.Interface().(Message) + m2, _ := v2.Interface().(Message) + return Equal(m1, m2) + } + switch v1.Kind() { + case reflect.Bool: + return v1.Bool() == v2.Bool() + case reflect.Float32, reflect.Float64: + return v1.Float() == v2.Float() + case reflect.Int32, reflect.Int64: + return v1.Int() == v2.Int() + case reflect.Interface: + // Probably a oneof field; compare the inner values. + n1, n2 := v1.IsNil(), v2.IsNil() + if n1 || n2 { + return n1 == n2 + } + e1, e2 := v1.Elem(), v2.Elem() + if e1.Type() != e2.Type() { + return false + } + return equalAny(e1, e2, nil) + case reflect.Map: + if v1.Len() != v2.Len() { + return false + } + for _, key := range v1.MapKeys() { + val2 := v2.MapIndex(key) + if !val2.IsValid() { + // This key was not found in the second map. + return false + } + if !equalAny(v1.MapIndex(key), val2, nil) { + return false + } + } + return true + case reflect.Ptr: + // Maps may have nil values in them, so check for nil. + if v1.IsNil() && v2.IsNil() { + return true + } + if v1.IsNil() != v2.IsNil() { + return false + } + return equalAny(v1.Elem(), v2.Elem(), prop) + case reflect.Slice: + if v1.Type().Elem().Kind() == reflect.Uint8 { + // short circuit: []byte + + // Edge case: if this is in a proto3 message, a zero length + // bytes field is considered the zero value. 
+ if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 { + return true + } + if v1.IsNil() != v2.IsNil() { + return false + } + return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte)) + } + + if v1.Len() != v2.Len() { + return false + } + for i := 0; i < v1.Len(); i++ { + if !equalAny(v1.Index(i), v2.Index(i), prop) { + return false + } + } + return true + case reflect.String: + return v1.Interface().(string) == v2.Interface().(string) + case reflect.Struct: + return equalStruct(v1, v2) + case reflect.Uint32, reflect.Uint64: + return v1.Uint() == v2.Uint() + } + + // unknown type, so not a protocol buffer + log.Printf("proto: don't know how to compare %v", v1) + return false +} + +// base is the struct type that the extensions are based on. +// x1 and x2 are InternalExtensions. +func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool { + em1, _ := x1.extensionsRead() + em2, _ := x2.extensionsRead() + return equalExtMap(base, em1, em2) +} + +func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { + if len(em1) != len(em2) { + return false + } + + for extNum, e1 := range em1 { + e2, ok := em2[extNum] + if !ok { + return false + } + + m1, m2 := e1.value, e2.value + + if m1 != nil && m2 != nil { + // Both are unencoded. + if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { + return false + } + continue + } + + // At least one is encoded. To do a semantically correct comparison + // we need to unmarshal them first. + var desc *ExtensionDesc + if m := extensionMaps[base]; m != nil { + desc = m[extNum] + } + if desc == nil { + log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) + continue + } + var err error + if m1 == nil { + m1, err = decodeExtension(e1.enc, desc) + } + if m2 == nil && err == nil { + m2, err = decodeExtension(e2.enc, desc) + } + if err != nil { + // The encoded form is invalid. + log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) + return false + } + if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { + return false + } + } + + return true +} diff --git a/vendor/github.com/gogo/protobuf/proto/extensions.go b/vendor/github.com/gogo/protobuf/proto/extensions.go new file mode 100644 index 000000000..0dfcb538e --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/extensions.go @@ -0,0 +1,693 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Types and routines for supporting protocol buffer extensions. + */ + +import ( + "errors" + "fmt" + "reflect" + "strconv" + "sync" +) + +// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. +var ErrMissingExtension = errors.New("proto: missing extension") + +// ExtensionRange represents a range of message extensions for a protocol buffer. +// Used in code generated by the protocol compiler. +type ExtensionRange struct { + Start, End int32 // both inclusive +} + +// extendableProto is an interface implemented by any protocol buffer generated by the current +// proto compiler that may be extended. +type extendableProto interface { + Message + ExtensionRangeArray() []ExtensionRange + extensionsWrite() map[int32]Extension + extensionsRead() (map[int32]Extension, sync.Locker) +} + +// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous +// version of the proto compiler that may be extended. +type extendableProtoV1 interface { + Message + ExtensionRangeArray() []ExtensionRange + ExtensionMap() map[int32]Extension +} + +type extensionsBytes interface { + Message + ExtensionRangeArray() []ExtensionRange + GetExtensions() *[]byte +} + +// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto. +type extensionAdapter struct { + extendableProtoV1 +} + +func (e extensionAdapter) extensionsWrite() map[int32]Extension { + return e.ExtensionMap() +} + +func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { + return e.ExtensionMap(), notLocker{} +} + +// notLocker is a sync.Locker whose Lock and Unlock methods are nops. +type notLocker struct{} + +func (n notLocker) Lock() {} +func (n notLocker) Unlock() {} + +// extendable returns the extendableProto interface for the given generated proto message. +// If the proto message has the old extension format, it returns a wrapper that implements +// the extendableProto interface. +func extendable(p interface{}) (extendableProto, bool) { + if ep, ok := p.(extendableProto); ok { + return ep, ok + } + if ep, ok := p.(extendableProtoV1); ok { + return extensionAdapter{ep}, ok + } + return nil, false +} + +// XXX_InternalExtensions is an internal representation of proto extensions. +// +// Each generated message struct type embeds an anonymous XXX_InternalExtensions field, +// thus gaining the unexported 'extensions' method, which can be called only from the proto package. +// +// The methods of XXX_InternalExtensions are not concurrency safe in general, +// but calls to logically read-only methods such as has and get may be executed concurrently. +type XXX_InternalExtensions struct { + // The struct must be indirect so that if a user inadvertently copies a + // generated message and its embedded XXX_InternalExtensions, they + // avoid the mayhem of a copied mutex. + // + // The mutex serializes all logically read-only operations to p.extensionMap. 
+ // It is up to the client to ensure that write operations to p.extensionMap are + // mutually exclusive with other accesses. + p *struct { + mu sync.Mutex + extensionMap map[int32]Extension + } +} + +// extensionsWrite returns the extension map, creating it on first use. +func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension { + if e.p == nil { + e.p = new(struct { + mu sync.Mutex + extensionMap map[int32]Extension + }) + e.p.extensionMap = make(map[int32]Extension) + } + return e.p.extensionMap +} + +// extensionsRead returns the extensions map for read-only use. It may be nil. +// The caller must hold the returned mutex's lock when accessing Elements within the map. +func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) { + if e.p == nil { + return nil, nil + } + return e.p.extensionMap, &e.p.mu +} + +type extensionRange interface { + Message + ExtensionRangeArray() []ExtensionRange +} + +var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem() +var extendableProtoV1Type = reflect.TypeOf((*extendableProtoV1)(nil)).Elem() +var extendableBytesType = reflect.TypeOf((*extensionsBytes)(nil)).Elem() +var extensionRangeType = reflect.TypeOf((*extensionRange)(nil)).Elem() + +// ExtensionDesc represents an extension specification. +// Used in generated code from the protocol compiler. +type ExtensionDesc struct { + ExtendedType Message // nil pointer to the type that is being extended + ExtensionType interface{} // nil pointer to the extension type + Field int32 // field number + Name string // fully-qualified name of extension, for text formatting + Tag string // protobuf tag style + Filename string // name of the file in which the extension is defined +} + +func (ed *ExtensionDesc) repeated() bool { + t := reflect.TypeOf(ed.ExtensionType) + return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 +} + +// Extension represents an extension in a message. +type Extension struct { + // When an extension is stored in a message using SetExtension + // only desc and value are set. When the message is marshaled + // enc will be set to the encoded form of the message. + // + // When a message is unmarshaled and contains extensions, each + // extension will have only enc set. When such an extension is + // accessed using GetExtension (or GetExtensions) desc and value + // will be set. + desc *ExtensionDesc + value interface{} + enc []byte +} + +// SetRawExtension is for testing only. +func SetRawExtension(base Message, id int32, b []byte) { + if ebase, ok := base.(extensionsBytes); ok { + clearExtension(base, id) + ext := ebase.GetExtensions() + *ext = append(*ext, b...) + return + } + epb, ok := extendable(base) + if !ok { + return + } + extmap := epb.extensionsWrite() + extmap[id] = Extension{enc: b} +} + +// isExtensionField returns true iff the given field number is in an extension range. +func isExtensionField(pb extensionRange, field int32) bool { + for _, er := range pb.ExtensionRangeArray() { + if er.Start <= field && field <= er.End { + return true + } + } + return false +} + +// checkExtensionTypes checks that the given extension is valid for pb. +func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { + var pbi interface{} = pb + // Check the extended type. 
+ if ea, ok := pbi.(extensionAdapter); ok { + pbi = ea.extendableProtoV1 + } + if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b { + return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String()) + } + // Check the range. + if !isExtensionField(pb, extension.Field) { + return errors.New("proto: bad extension number; not in declared ranges") + } + return nil +} + +// extPropKey is sufficient to uniquely identify an extension. +type extPropKey struct { + base reflect.Type + field int32 +} + +var extProp = struct { + sync.RWMutex + m map[extPropKey]*Properties +}{ + m: make(map[extPropKey]*Properties), +} + +func extensionProperties(ed *ExtensionDesc) *Properties { + key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} + + extProp.RLock() + if prop, ok := extProp.m[key]; ok { + extProp.RUnlock() + return prop + } + extProp.RUnlock() + + extProp.Lock() + defer extProp.Unlock() + // Check again. + if prop, ok := extProp.m[key]; ok { + return prop + } + + prop := new(Properties) + prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) + extProp.m[key] = prop + return prop +} + +// encode encodes any unmarshaled (unencoded) extensions in e. +func encodeExtensions(e *XXX_InternalExtensions) error { + m, mu := e.extensionsRead() + if m == nil { + return nil // fast path + } + mu.Lock() + defer mu.Unlock() + return encodeExtensionsMap(m) +} + +// encode encodes any unmarshaled (unencoded) extensions in e. +func encodeExtensionsMap(m map[int32]Extension) error { + for k, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + et := reflect.TypeOf(e.desc.ExtensionType) + props := extensionProperties(e.desc) + + p := NewBuffer(nil) + // If e.value has type T, the encoder expects a *struct{ X T }. + // Pass a *T with a zero field and hope it all works out. + x := reflect.New(et) + x.Elem().Set(reflect.ValueOf(e.value)) + if err := props.enc(p, props, toStructPointer(x)); err != nil { + return err + } + e.enc = p.buf + m[k] = e + } + return nil +} + +func extensionsSize(e *XXX_InternalExtensions) (n int) { + m, mu := e.extensionsRead() + if m == nil { + return 0 + } + mu.Lock() + defer mu.Unlock() + return extensionsMapSize(m) +} + +func extensionsMapSize(m map[int32]Extension) (n int) { + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + et := reflect.TypeOf(e.desc.ExtensionType) + props := extensionProperties(e.desc) + + // If e.value has type T, the encoder expects a *struct{ X T }. + // Pass a *T with a zero field and hope it all works out. + x := reflect.New(et) + x.Elem().Set(reflect.ValueOf(e.value)) + n += props.size(props, toStructPointer(x)) + } + return +} + +// HasExtension returns whether the given extension is present in pb. 
+func HasExtension(pb Message, extension *ExtensionDesc) bool { + if epb, doki := pb.(extensionsBytes); doki { + ext := epb.GetExtensions() + buf := *ext + o := 0 + for o < len(buf) { + tag, n := DecodeVarint(buf[o:]) + fieldNum := int32(tag >> 3) + if int32(fieldNum) == extension.Field { + return true + } + wireType := int(tag & 0x7) + o += n + l, err := size(buf[o:], wireType) + if err != nil { + return false + } + o += l + } + return false + } + // TODO: Check types, field numbers, etc.? + epb, ok := extendable(pb) + if !ok { + return false + } + extmap, mu := epb.extensionsRead() + if extmap == nil { + return false + } + mu.Lock() + _, ok = extmap[extension.Field] + mu.Unlock() + return ok +} + +func deleteExtension(pb extensionsBytes, theFieldNum int32, offset int) int { + ext := pb.GetExtensions() + for offset < len(*ext) { + tag, n1 := DecodeVarint((*ext)[offset:]) + fieldNum := int32(tag >> 3) + wireType := int(tag & 0x7) + n2, err := size((*ext)[offset+n1:], wireType) + if err != nil { + panic(err) + } + newOffset := offset + n1 + n2 + if fieldNum == theFieldNum { + *ext = append((*ext)[:offset], (*ext)[newOffset:]...) + return offset + } + offset = newOffset + } + return -1 +} + +// ClearExtension removes the given extension from pb. +func ClearExtension(pb Message, extension *ExtensionDesc) { + clearExtension(pb, extension.Field) +} + +func clearExtension(pb Message, fieldNum int32) { + if epb, doki := pb.(extensionsBytes); doki { + offset := 0 + for offset != -1 { + offset = deleteExtension(epb, fieldNum, offset) + } + return + } + epb, ok := extendable(pb) + if !ok { + return + } + // TODO: Check types, field numbers, etc.? + extmap := epb.extensionsWrite() + delete(extmap, fieldNum) +} + +// GetExtension parses and returns the given extension of pb. +// If the extension is not present and has no default value it returns ErrMissingExtension. +func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { + if epb, doki := pb.(extensionsBytes); doki { + ext := epb.GetExtensions() + o := 0 + for o < len(*ext) { + tag, n := DecodeVarint((*ext)[o:]) + fieldNum := int32(tag >> 3) + wireType := int(tag & 0x7) + l, err := size((*ext)[o+n:], wireType) + if err != nil { + return nil, err + } + if int32(fieldNum) == extension.Field { + v, err := decodeExtension((*ext)[o:o+n+l], extension) + if err != nil { + return nil, err + } + return v, nil + } + o += n + l + } + return defaultExtensionValue(extension) + } + epb, ok := extendable(pb) + if !ok { + return nil, errors.New("proto: not an extendable proto") + } + if err := checkExtensionTypes(epb, extension); err != nil { + return nil, err + } + + emap, mu := epb.extensionsRead() + if emap == nil { + return defaultExtensionValue(extension) + } + mu.Lock() + defer mu.Unlock() + e, ok := emap[extension.Field] + if !ok { + // defaultExtensionValue returns the default value or + // ErrMissingExtension if there is no default. + return defaultExtensionValue(extension) + } + + if e.value != nil { + // Already decoded. Check the descriptor, though. + if e.desc != extension { + // This shouldn't happen. If it does, it means that + // GetExtension was called twice with two different + // descriptors with the same field number. + return nil, errors.New("proto: descriptor conflict") + } + return e.value, nil + } + + v, err := decodeExtension(e.enc, extension) + if err != nil { + return nil, err + } + + // Remember the decoded version and drop the encoded version. + // That way it is safe to mutate what we return. 
+ e.value = v + e.desc = extension + e.enc = nil + emap[extension.Field] = e + return e.value, nil +} + +// defaultExtensionValue returns the default value for extension. +// If no default for an extension is defined ErrMissingExtension is returned. +func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { + t := reflect.TypeOf(extension.ExtensionType) + props := extensionProperties(extension) + + sf, _, err := fieldDefault(t, props) + if err != nil { + return nil, err + } + + if sf == nil || sf.value == nil { + // There is no default value. + return nil, ErrMissingExtension + } + + if t.Kind() != reflect.Ptr { + // We do not need to return a Ptr, we can directly return sf.value. + return sf.value, nil + } + + // We need to return an interface{} that is a pointer to sf.value. + value := reflect.New(t).Elem() + value.Set(reflect.New(value.Type().Elem())) + if sf.kind == reflect.Int32 { + // We may have an int32 or an enum, but the underlying data is int32. + // Since we can't set an int32 into a non int32 reflect.value directly + // set it as a int32. + value.Elem().SetInt(int64(sf.value.(int32))) + } else { + value.Elem().Set(reflect.ValueOf(sf.value)) + } + return value.Interface(), nil +} + +// decodeExtension decodes an extension encoded in b. +func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { + o := NewBuffer(b) + + t := reflect.TypeOf(extension.ExtensionType) + + props := extensionProperties(extension) + + // t is a pointer to a struct, pointer to basic type or a slice. + // Allocate a "field" to store the pointer/slice itself; the + // pointer/slice will be stored here. We pass + // the address of this field to props.dec. + // This passes a zero field and a *t and lets props.dec + // interpret it as a *struct{ x t }. + value := reflect.New(t).Elem() + + for { + // Discard wire type and field number varint. It isn't needed. + if _, err := o.DecodeVarint(); err != nil { + return nil, err + } + + if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil { + return nil, err + } + + if o.index >= len(o.buf) { + break + } + } + return value.Interface(), nil +} + +// GetExtensions returns a slice of the extensions present in pb that are also listed in es. +// The returned slice has the same length as es; missing extensions will appear as nil elements. +func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { + extensions = make([]interface{}, len(es)) + for i, e := range es { + extensions[i], err = GetExtension(pb, e) + if err == ErrMissingExtension { + err = nil + } + if err != nil { + return + } + } + return +} + +// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order. +// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing +// just the Field field, which defines the extension's field number. 
+func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { + epb, ok := extendable(pb) + if !ok { + return nil, fmt.Errorf("proto: %T is not an extendable proto.Message", pb) + } + registeredExtensions := RegisteredExtensions(pb) + + emap, mu := epb.extensionsRead() + if emap == nil { + return nil, nil + } + mu.Lock() + defer mu.Unlock() + extensions := make([]*ExtensionDesc, 0, len(emap)) + for extid, e := range emap { + desc := e.desc + if desc == nil { + desc = registeredExtensions[extid] + if desc == nil { + desc = &ExtensionDesc{Field: extid} + } + } + + extensions = append(extensions, desc) + } + return extensions, nil +} + +// SetExtension sets the specified extension of pb to the specified value. +func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { + if epb, doki := pb.(extensionsBytes); doki { + ClearExtension(pb, extension) + ext := epb.GetExtensions() + et := reflect.TypeOf(extension.ExtensionType) + props := extensionProperties(extension) + p := NewBuffer(nil) + x := reflect.New(et) + x.Elem().Set(reflect.ValueOf(value)) + if err := props.enc(p, props, toStructPointer(x)); err != nil { + return err + } + *ext = append(*ext, p.buf...) + return nil + } + epb, ok := extendable(pb) + if !ok { + return errors.New("proto: not an extendable proto") + } + if err := checkExtensionTypes(epb, extension); err != nil { + return err + } + typ := reflect.TypeOf(extension.ExtensionType) + if typ != reflect.TypeOf(value) { + return errors.New("proto: bad extension value type") + } + // nil extension values need to be caught early, because the + // encoder can't distinguish an ErrNil due to a nil extension + // from an ErrNil due to a missing field. Extensions are + // always optional, so the encoder would just swallow the error + // and drop all the extensions from the encoded message. + if reflect.ValueOf(value).IsNil() { + return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) + } + + extmap := epb.extensionsWrite() + extmap[extension.Field] = Extension{desc: extension, value: value} + return nil +} + +// ClearAllExtensions clears all extensions from pb. +func ClearAllExtensions(pb Message) { + if epb, doki := pb.(extensionsBytes); doki { + ext := epb.GetExtensions() + *ext = []byte{} + return + } + epb, ok := extendable(pb) + if !ok { + return + } + m := epb.extensionsWrite() + for k := range m { + delete(m, k) + } +} + +// A global registry of extensions. +// The generated code will register the generated descriptors by calling RegisterExtension. + +var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) + +// RegisterExtension is called from the generated code. +func RegisterExtension(desc *ExtensionDesc) { + st := reflect.TypeOf(desc.ExtendedType).Elem() + m := extensionMaps[st] + if m == nil { + m = make(map[int32]*ExtensionDesc) + extensionMaps[st] = m + } + if _, ok := m[desc.Field]; ok { + panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) + } + m[desc.Field] = desc +} + +// RegisteredExtensions returns a map of the registered extensions of a +// protocol buffer struct, indexed by the extension number. +// The argument pb should be a nil pointer to the struct type. 
+func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { + return extensionMaps[reflect.TypeOf(pb).Elem()] +} diff --git a/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go new file mode 100644 index 000000000..ea6478f00 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go @@ -0,0 +1,294 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package proto + +import ( + "bytes" + "errors" + "fmt" + "reflect" + "sort" + "strings" + "sync" +) + +func GetBoolExtension(pb Message, extension *ExtensionDesc, ifnotset bool) bool { + if reflect.ValueOf(pb).IsNil() { + return ifnotset + } + value, err := GetExtension(pb, extension) + if err != nil { + return ifnotset + } + if value == nil { + return ifnotset + } + if value.(*bool) == nil { + return ifnotset + } + return *(value.(*bool)) +} + +func (this *Extension) Equal(that *Extension) bool { + return bytes.Equal(this.enc, that.enc) +} + +func (this *Extension) Compare(that *Extension) int { + return bytes.Compare(this.enc, that.enc) +} + +func SizeOfInternalExtension(m extendableProto) (n int) { + return SizeOfExtensionMap(m.extensionsWrite()) +} + +func SizeOfExtensionMap(m map[int32]Extension) (n int) { + return extensionsMapSize(m) +} + +type sortableMapElem struct { + field int32 + ext Extension +} + +func newSortableExtensionsFromMap(m map[int32]Extension) sortableExtensions { + s := make(sortableExtensions, 0, len(m)) + for k, v := range m { + s = append(s, &sortableMapElem{field: k, ext: v}) + } + return s +} + +type sortableExtensions []*sortableMapElem + +func (this sortableExtensions) Len() int { return len(this) } + +func (this sortableExtensions) Swap(i, j int) { this[i], this[j] = this[j], this[i] } + +func (this sortableExtensions) Less(i, j int) bool { return this[i].field < this[j].field } + +func (this sortableExtensions) String() string { + sort.Sort(this) + ss := make([]string, len(this)) + for i := range this { + ss[i] = fmt.Sprintf("%d: %v", this[i].field, this[i].ext) + } + return "map[" + strings.Join(ss, ",") + "]" +} + +func StringFromInternalExtension(m extendableProto) string { + return StringFromExtensionsMap(m.extensionsWrite()) +} + +func StringFromExtensionsMap(m map[int32]Extension) string { + return newSortableExtensionsFromMap(m).String() +} + +func StringFromExtensionsBytes(ext []byte) string { + m, err := BytesToExtensionsMap(ext) + if err != nil { + panic(err) + } + return StringFromExtensionsMap(m) +} + +func EncodeInternalExtension(m extendableProto, data []byte) (n int, err error) { + return EncodeExtensionMap(m.extensionsWrite(), data) +} + +func EncodeExtensionMap(m map[int32]Extension, data []byte) (n int, err error) { + if err := encodeExtensionsMap(m); err != nil { + return 0, err + } + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + for _, k := range keys { + n += copy(data[n:], m[int32(k)].enc) + } + return n, nil +} + +func GetRawExtension(m map[int32]Extension, id int32) ([]byte, error) { + if m[id].value == nil || m[id].desc == nil { + return m[id].enc, nil + } + if err := encodeExtensionsMap(m); err != nil { + return nil, err + } + return m[id].enc, nil +} + +func size(buf []byte, wire int) (int, error) { + switch wire { + case WireVarint: + _, n := DecodeVarint(buf) + return n, nil + case WireFixed64: + return 8, nil + case WireBytes: + v, n := DecodeVarint(buf) + return int(v) + n, nil + case WireFixed32: + return 4, nil + case WireStartGroup: + offset := 0 + for { + u, n := DecodeVarint(buf[offset:]) + fwire := int(u & 0x7) + offset += n + if fwire == WireEndGroup { + return offset, nil + } + s, err := size(buf[offset:], wire) + if err != nil { + return 0, err + } + offset += s + } + } + return 0, fmt.Errorf("proto: can't get size for unknown wire type %d", wire) +} + +func BytesToExtensionsMap(buf []byte) (map[int32]Extension, error) { + m := make(map[int32]Extension) + 
i := 0 + for i < len(buf) { + tag, n := DecodeVarint(buf[i:]) + if n <= 0 { + return nil, fmt.Errorf("unable to decode varint") + } + fieldNum := int32(tag >> 3) + wireType := int(tag & 0x7) + l, err := size(buf[i+n:], wireType) + if err != nil { + return nil, err + } + end := i + int(l) + n + m[int32(fieldNum)] = Extension{enc: buf[i:end]} + i = end + } + return m, nil +} + +func NewExtension(e []byte) Extension { + ee := Extension{enc: make([]byte, len(e))} + copy(ee.enc, e) + return ee +} + +func AppendExtension(e Message, tag int32, buf []byte) { + if ee, eok := e.(extensionsBytes); eok { + ext := ee.GetExtensions() + *ext = append(*ext, buf...) + return + } + if ee, eok := e.(extendableProto); eok { + m := ee.extensionsWrite() + ext := m[int32(tag)] // may be missing + ext.enc = append(ext.enc, buf...) + m[int32(tag)] = ext + } +} + +func encodeExtension(e *Extension) error { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + return nil + } + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + et := reflect.TypeOf(e.desc.ExtensionType) + props := extensionProperties(e.desc) + + p := NewBuffer(nil) + // If e.value has type T, the encoder expects a *struct{ X T }. + // Pass a *T with a zero field and hope it all works out. + x := reflect.New(et) + x.Elem().Set(reflect.ValueOf(e.value)) + if err := props.enc(p, props, toStructPointer(x)); err != nil { + return err + } + e.enc = p.buf + return nil +} + +func (this Extension) GoString() string { + if this.enc == nil { + if err := encodeExtension(&this); err != nil { + panic(err) + } + } + return fmt.Sprintf("proto.NewExtension(%#v)", this.enc) +} + +func SetUnsafeExtension(pb Message, fieldNum int32, value interface{}) error { + typ := reflect.TypeOf(pb).Elem() + ext, ok := extensionMaps[typ] + if !ok { + return fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String()) + } + desc, ok := ext[fieldNum] + if !ok { + return errors.New("proto: bad extension number; not in declared ranges") + } + return SetExtension(pb, desc, value) +} + +func GetUnsafeExtension(pb Message, fieldNum int32) (interface{}, error) { + typ := reflect.TypeOf(pb).Elem() + ext, ok := extensionMaps[typ] + if !ok { + return nil, fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String()) + } + desc, ok := ext[fieldNum] + if !ok { + return nil, fmt.Errorf("unregistered field number %d", fieldNum) + } + return GetExtension(pb, desc) +} + +func NewUnsafeXXX_InternalExtensions(m map[int32]Extension) XXX_InternalExtensions { + x := &XXX_InternalExtensions{ + p: new(struct { + mu sync.Mutex + extensionMap map[int32]Extension + }), + } + x.p.extensionMap = m + return *x +} + +func GetUnsafeExtensionsMap(extendable Message) map[int32]Extension { + pb := extendable.(extendableProto) + return pb.extensionsWrite() +} diff --git a/vendor/github.com/gogo/protobuf/proto/lib.go b/vendor/github.com/gogo/protobuf/proto/lib.go new file mode 100644 index 000000000..7580bb45c --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/lib.go @@ -0,0 +1,898 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package proto converts data structures to and from the wire format of +protocol buffers. It works in concert with the Go source code generated +for .proto files by the protocol compiler. + +A summary of the properties of the protocol buffer interface +for a protocol buffer variable v: + + - Names are turned from camel_case to CamelCase for export. + - There are no methods on v to set fields; just treat + them as structure fields. + - There are getters that return a field's value if set, + and return the field's default value if unset. + The getters work even if the receiver is a nil message. + - The zero value for a struct is its correct initialization state. + All desired fields must be set before marshaling. + - A Reset() method will restore a protobuf struct to its zero state. + - Non-repeated fields are pointers to the values; nil means unset. + That is, optional or required field int32 f becomes F *int32. + - Repeated fields are slices. + - Helper functions are available to aid the setting of fields. + msg.Foo = proto.String("hello") // set field + - Constants are defined to hold the default values of all fields that + have them. They have the form Default_StructName_FieldName. + Because the getter methods handle defaulted values, + direct use of these constants should be rare. + - Enums are given type names and maps from names to values. + Enum values are prefixed by the enclosing message's name, or by the + enum's type name if it is a top-level enum. Enum types have a String + method, and a Enum method to assist in message construction. + - Nested messages, groups and enums have type names prefixed with the name of + the surrounding message type. + - Extensions are given descriptor names that start with E_, + followed by an underscore-delimited list of the nested messages + that contain it (if any) followed by the CamelCased name of the + extension field itself. 
HasExtension, ClearExtension, GetExtension + and SetExtension are functions for manipulating extensions. + - Oneof field sets are given a single field in their message, + with distinguished wrapper types for each possible field value. + - Marshal and Unmarshal are functions to encode and decode the wire format. + +When the .proto file specifies `syntax="proto3"`, there are some differences: + + - Non-repeated fields of non-message type are values instead of pointers. + - Getters are only generated for message and oneof fields. + - Enum types do not get an Enum method. + +The simplest way to describe this is to see an example. +Given file test.proto, containing + + package example; + + enum FOO { X = 17; } + + message Test { + required string label = 1; + optional int32 type = 2 [default=77]; + repeated int64 reps = 3; + optional group OptionalGroup = 4 { + required string RequiredField = 5; + } + oneof union { + int32 number = 6; + string name = 7; + } + } + +The resulting file, test.pb.go, is: + + package example + + import proto "github.com/gogo/protobuf/proto" + import math "math" + + type FOO int32 + const ( + FOO_X FOO = 17 + ) + var FOO_name = map[int32]string{ + 17: "X", + } + var FOO_value = map[string]int32{ + "X": 17, + } + + func (x FOO) Enum() *FOO { + p := new(FOO) + *p = x + return p + } + func (x FOO) String() string { + return proto.EnumName(FOO_name, int32(x)) + } + func (x *FOO) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FOO_value, data) + if err != nil { + return err + } + *x = FOO(value) + return nil + } + + type Test struct { + Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` + Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` + Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` + Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` + // Types that are valid to be assigned to Union: + // *Test_Number + // *Test_Name + Union isTest_Union `protobuf_oneof:"union"` + XXX_unrecognized []byte `json:"-"` + } + func (m *Test) Reset() { *m = Test{} } + func (m *Test) String() string { return proto.CompactTextString(m) } + func (*Test) ProtoMessage() {} + + type isTest_Union interface { + isTest_Union() + } + + type Test_Number struct { + Number int32 `protobuf:"varint,6,opt,name=number"` + } + type Test_Name struct { + Name string `protobuf:"bytes,7,opt,name=name"` + } + + func (*Test_Number) isTest_Union() {} + func (*Test_Name) isTest_Union() {} + + func (m *Test) GetUnion() isTest_Union { + if m != nil { + return m.Union + } + return nil + } + const Default_Test_Type int32 = 77 + + func (m *Test) GetLabel() string { + if m != nil && m.Label != nil { + return *m.Label + } + return "" + } + + func (m *Test) GetType() int32 { + if m != nil && m.Type != nil { + return *m.Type + } + return Default_Test_Type + } + + func (m *Test) GetOptionalgroup() *Test_OptionalGroup { + if m != nil { + return m.Optionalgroup + } + return nil + } + + type Test_OptionalGroup struct { + RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` + } + func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } + func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } + + func (m *Test_OptionalGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" + } + + func (m *Test) GetNumber() int32 { + if x, ok := 
m.GetUnion().(*Test_Number); ok { + return x.Number + } + return 0 + } + + func (m *Test) GetName() string { + if x, ok := m.GetUnion().(*Test_Name); ok { + return x.Name + } + return "" + } + + func init() { + proto.RegisterEnum("example.FOO", FOO_name, FOO_value) + } + +To create and play with a Test object: + + package main + + import ( + "log" + + "github.com/gogo/protobuf/proto" + pb "./example.pb" + ) + + func main() { + test := &pb.Test{ + Label: proto.String("hello"), + Type: proto.Int32(17), + Reps: []int64{1, 2, 3}, + Optionalgroup: &pb.Test_OptionalGroup{ + RequiredField: proto.String("good bye"), + }, + Union: &pb.Test_Name{"fred"}, + } + data, err := proto.Marshal(test) + if err != nil { + log.Fatal("marshaling error: ", err) + } + newTest := &pb.Test{} + err = proto.Unmarshal(data, newTest) + if err != nil { + log.Fatal("unmarshaling error: ", err) + } + // Now test and newTest contain the same data. + if test.GetLabel() != newTest.GetLabel() { + log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) + } + // Use a type switch to determine which oneof was set. + switch u := test.Union.(type) { + case *pb.Test_Number: // u.Number contains the number. + case *pb.Test_Name: // u.Name contains the string. + } + // etc. + } +*/ +package proto + +import ( + "encoding/json" + "fmt" + "log" + "reflect" + "sort" + "strconv" + "sync" +) + +// Message is implemented by generated protocol buffer messages. +type Message interface { + Reset() + String() string + ProtoMessage() +} + +// Stats records allocation details about the protocol buffer encoders +// and decoders. Useful for tuning the library itself. +type Stats struct { + Emalloc uint64 // mallocs in encode + Dmalloc uint64 // mallocs in decode + Encode uint64 // number of encodes + Decode uint64 // number of decodes + Chit uint64 // number of cache hits + Cmiss uint64 // number of cache misses + Size uint64 // number of sizes +} + +// Set to true to enable stats collection. +const collectStats = false + +var stats Stats + +// GetStats returns a copy of the global Stats structure. +func GetStats() Stats { return stats } + +// A Buffer is a buffer manager for marshaling and unmarshaling +// protocol buffers. It may be reused between invocations to +// reduce memory usage. It is not necessary to use a Buffer; +// the global functions Marshal and Unmarshal create a +// temporary Buffer and are fine for most applications. +type Buffer struct { + buf []byte // encode/decode byte stream + index int // read point + + // pools of basic types to amortize allocation. + bools []bool + uint32s []uint32 + uint64s []uint64 + + // extra pools, only used with pointer_reflect.go + int32s []int32 + int64s []int64 + float32s []float32 + float64s []float64 +} + +// NewBuffer allocates a new Buffer and initializes its internal data to +// the contents of the argument slice. +func NewBuffer(e []byte) *Buffer { + return &Buffer{buf: e} +} + +// Reset resets the Buffer, ready for marshaling a new protocol buffer. +func (p *Buffer) Reset() { + p.buf = p.buf[0:0] // for reading/writing + p.index = 0 // for reading +} + +// SetBuf replaces the internal buffer with the slice, +// ready for unmarshaling the contents of the slice. +func (p *Buffer) SetBuf(s []byte) { + p.buf = s + p.index = 0 +} + +// Bytes returns the contents of the Buffer. +func (p *Buffer) Bytes() []byte { return p.buf } + +/* + * Helper routines for simplifying the creation of optional fields of basic type. 
+ */ + +// Bool is a helper routine that allocates a new bool value +// to store v and returns a pointer to it. +func Bool(v bool) *bool { + return &v +} + +// Int32 is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it. +func Int32(v int32) *int32 { + return &v +} + +// Int is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it, but unlike Int32 +// its argument value is an int. +func Int(v int) *int32 { + p := new(int32) + *p = int32(v) + return p +} + +// Int64 is a helper routine that allocates a new int64 value +// to store v and returns a pointer to it. +func Int64(v int64) *int64 { + return &v +} + +// Float32 is a helper routine that allocates a new float32 value +// to store v and returns a pointer to it. +func Float32(v float32) *float32 { + return &v +} + +// Float64 is a helper routine that allocates a new float64 value +// to store v and returns a pointer to it. +func Float64(v float64) *float64 { + return &v +} + +// Uint32 is a helper routine that allocates a new uint32 value +// to store v and returns a pointer to it. +func Uint32(v uint32) *uint32 { + return &v +} + +// Uint64 is a helper routine that allocates a new uint64 value +// to store v and returns a pointer to it. +func Uint64(v uint64) *uint64 { + return &v +} + +// String is a helper routine that allocates a new string value +// to store v and returns a pointer to it. +func String(v string) *string { + return &v +} + +// EnumName is a helper function to simplify printing protocol buffer enums +// by name. Given an enum map and a value, it returns a useful string. +func EnumName(m map[int32]string, v int32) string { + s, ok := m[v] + if ok { + return s + } + return strconv.Itoa(int(v)) +} + +// UnmarshalJSONEnum is a helper function to simplify recovering enum int values +// from their JSON-encoded representation. Given a map from the enum's symbolic +// names to its int values, and a byte buffer containing the JSON-encoded +// value, it returns an int32 that can be cast to the enum type by the caller. +// +// The function can deal with both JSON representations, numeric and symbolic. +func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { + if data[0] == '"' { + // New style: enums are strings. + var repr string + if err := json.Unmarshal(data, &repr); err != nil { + return -1, err + } + val, ok := m[repr] + if !ok { + return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) + } + return val, nil + } + // Old style: enums are ints. + var val int32 + if err := json.Unmarshal(data, &val); err != nil { + return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) + } + return val, nil +} + +// DebugPrint dumps the encoded data in b in a debugging format with a header +// including the string s. Used in testing but made available for general debugging. 
+func (p *Buffer) DebugPrint(s string, b []byte) { + var u uint64 + + obuf := p.buf + sindex := p.index + p.buf = b + p.index = 0 + depth := 0 + + fmt.Printf("\n--- %s ---\n", s) + +out: + for { + for i := 0; i < depth; i++ { + fmt.Print(" ") + } + + index := p.index + if index == len(p.buf) { + break + } + + op, err := p.DecodeVarint() + if err != nil { + fmt.Printf("%3d: fetching op err %v\n", index, err) + break out + } + tag := op >> 3 + wire := op & 7 + + switch wire { + default: + fmt.Printf("%3d: t=%3d unknown wire=%d\n", + index, tag, wire) + break out + + case WireBytes: + var r []byte + + r, err = p.DecodeRawBytes(false) + if err != nil { + break out + } + fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) + if len(r) <= 6 { + for i := 0; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } else { + for i := 0; i < 3; i++ { + fmt.Printf(" %.2x", r[i]) + } + fmt.Printf(" ..") + for i := len(r) - 3; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } + fmt.Printf("\n") + + case WireFixed32: + u, err = p.DecodeFixed32() + if err != nil { + fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) + + case WireFixed64: + u, err = p.DecodeFixed64() + if err != nil { + fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) + + case WireVarint: + u, err = p.DecodeVarint() + if err != nil { + fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) + + case WireStartGroup: + fmt.Printf("%3d: t=%3d start\n", index, tag) + depth++ + + case WireEndGroup: + depth-- + fmt.Printf("%3d: t=%3d end\n", index, tag) + } + } + + if depth != 0 { + fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth) + } + fmt.Printf("\n") + + p.buf = obuf + p.index = sindex +} + +// SetDefaults sets unset protocol buffer fields to their default values. +// It only modifies fields that are both unset and have defined defaults. +// It recursively sets default values in any non-nil sub-messages. +func SetDefaults(pb Message) { + setDefaults(reflect.ValueOf(pb), true, false) +} + +// v is a pointer to a struct. +func setDefaults(v reflect.Value, recur, zeros bool) { + v = v.Elem() + + defaultMu.RLock() + dm, ok := defaults[v.Type()] + defaultMu.RUnlock() + if !ok { + dm = buildDefaultMessage(v.Type()) + defaultMu.Lock() + defaults[v.Type()] = dm + defaultMu.Unlock() + } + + for _, sf := range dm.scalars { + f := v.Field(sf.index) + if !f.IsNil() { + // field already set + continue + } + dv := sf.value + if dv == nil && !zeros { + // no explicit default, and don't want to set zeros + continue + } + fptr := f.Addr().Interface() // **T + // TODO: Consider batching the allocations we do here. 
+ switch sf.kind { + case reflect.Bool: + b := new(bool) + if dv != nil { + *b = dv.(bool) + } + *(fptr.(**bool)) = b + case reflect.Float32: + f := new(float32) + if dv != nil { + *f = dv.(float32) + } + *(fptr.(**float32)) = f + case reflect.Float64: + f := new(float64) + if dv != nil { + *f = dv.(float64) + } + *(fptr.(**float64)) = f + case reflect.Int32: + // might be an enum + if ft := f.Type(); ft != int32PtrType { + // enum + f.Set(reflect.New(ft.Elem())) + if dv != nil { + f.Elem().SetInt(int64(dv.(int32))) + } + } else { + // int32 field + i := new(int32) + if dv != nil { + *i = dv.(int32) + } + *(fptr.(**int32)) = i + } + case reflect.Int64: + i := new(int64) + if dv != nil { + *i = dv.(int64) + } + *(fptr.(**int64)) = i + case reflect.String: + s := new(string) + if dv != nil { + *s = dv.(string) + } + *(fptr.(**string)) = s + case reflect.Uint8: + // exceptional case: []byte + var b []byte + if dv != nil { + db := dv.([]byte) + b = make([]byte, len(db)) + copy(b, db) + } else { + b = []byte{} + } + *(fptr.(*[]byte)) = b + case reflect.Uint32: + u := new(uint32) + if dv != nil { + *u = dv.(uint32) + } + *(fptr.(**uint32)) = u + case reflect.Uint64: + u := new(uint64) + if dv != nil { + *u = dv.(uint64) + } + *(fptr.(**uint64)) = u + default: + log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) + } + } + + for _, ni := range dm.nested { + f := v.Field(ni) + // f is *T or []*T or map[T]*T + switch f.Kind() { + case reflect.Ptr: + if f.IsNil() { + continue + } + setDefaults(f, recur, zeros) + + case reflect.Slice: + for i := 0; i < f.Len(); i++ { + e := f.Index(i) + if e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } + + case reflect.Map: + for _, k := range f.MapKeys() { + e := f.MapIndex(k) + if e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } + } + } +} + +var ( + // defaults maps a protocol buffer struct type to a slice of the fields, + // with its scalar fields set to their proto-declared non-zero default values. + defaultMu sync.RWMutex + defaults = make(map[reflect.Type]defaultMessage) + + int32PtrType = reflect.TypeOf((*int32)(nil)) +) + +// defaultMessage represents information about the default values of a message. +type defaultMessage struct { + scalars []scalarField + nested []int // struct field index of nested messages +} + +type scalarField struct { + index int // struct field index + kind reflect.Kind // element type (the T in *T or []T) + value interface{} // the proto-declared default value, or nil +} + +// t is a struct type. +func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { + sprop := GetProperties(t) + for _, prop := range sprop.Prop { + fi, ok := sprop.decoderTags.get(prop.Tag) + if !ok { + // XXX_unrecognized + continue + } + ft := t.Field(fi).Type + + sf, nested, err := fieldDefault(ft, prop) + switch { + case err != nil: + log.Print(err) + case nested: + dm.nested = append(dm.nested, fi) + case sf != nil: + sf.index = fi + dm.scalars = append(dm.scalars, *sf) + } + } + + return dm +} + +// fieldDefault returns the scalarField for field type ft. +// sf will be nil if the field can not have a default. +// nestedMessage will be true if this is a nested message. +// Note that sf.index is not set on return. 
+func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { + var canHaveDefault bool + switch ft.Kind() { + case reflect.Ptr: + if ft.Elem().Kind() == reflect.Struct { + nestedMessage = true + } else { + canHaveDefault = true // proto2 scalar field + } + + case reflect.Slice: + switch ft.Elem().Kind() { + case reflect.Ptr: + nestedMessage = true // repeated message + case reflect.Uint8: + canHaveDefault = true // bytes field + } + + case reflect.Map: + if ft.Elem().Kind() == reflect.Ptr { + nestedMessage = true // map with message values + } + } + + if !canHaveDefault { + if nestedMessage { + return nil, true, nil + } + return nil, false, nil + } + + // We now know that ft is a pointer or slice. + sf = &scalarField{kind: ft.Elem().Kind()} + + // scalar fields without defaults + if !prop.HasDefault { + return sf, false, nil + } + + // a scalar field: either *T or []byte + switch ft.Elem().Kind() { + case reflect.Bool: + x, err := strconv.ParseBool(prop.Default) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Float32: + x, err := strconv.ParseFloat(prop.Default, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) + } + sf.value = float32(x) + case reflect.Float64: + x, err := strconv.ParseFloat(prop.Default, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Int32: + x, err := strconv.ParseInt(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) + } + sf.value = int32(x) + case reflect.Int64: + x, err := strconv.ParseInt(prop.Default, 10, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) + } + sf.value = x + case reflect.String: + sf.value = prop.Default + case reflect.Uint8: + // []byte (not *uint8) + sf.value = []byte(prop.Default) + case reflect.Uint32: + x, err := strconv.ParseUint(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) + } + sf.value = uint32(x) + case reflect.Uint64: + x, err := strconv.ParseUint(prop.Default, 10, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) + } + sf.value = x + default: + return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) + } + + return sf, false, nil +} + +// Map fields may have key types of non-float scalars, strings and enums. +// The easiest way to sort them in some deterministic order is to use fmt. +// If this turns out to be inefficient we can always consider other options, +// such as doing a Schwartzian transform. + +func mapKeys(vs []reflect.Value) sort.Interface { + s := mapKeySorter{ + vs: vs, + // default Less function: textual comparison + less: func(a, b reflect.Value) bool { + return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface()) + }, + } + + // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps; + // numeric keys are sorted numerically. 
+ if len(vs) == 0 { + return s + } + switch vs[0].Kind() { + case reflect.Int32, reflect.Int64: + s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } + case reflect.Uint32, reflect.Uint64: + s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } + } + + return s +} + +type mapKeySorter struct { + vs []reflect.Value + less func(a, b reflect.Value) bool +} + +func (s mapKeySorter) Len() int { return len(s.vs) } +func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } +func (s mapKeySorter) Less(i, j int) bool { + return s.less(s.vs[i], s.vs[j]) +} + +// isProto3Zero reports whether v is a zero proto3 value. +func isProto3Zero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Bool: + return !v.Bool() + case reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint32, reflect.Uint64: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.String: + return v.String() == "" + } + return false +} + +// ProtoPackageIsVersion2 is referenced from generated protocol buffer files +// to assert that that code is compatible with this version of the proto package. +const GoGoProtoPackageIsVersion2 = true + +// ProtoPackageIsVersion1 is referenced from generated protocol buffer files +// to assert that that code is compatible with this version of the proto package. +const GoGoProtoPackageIsVersion1 = true diff --git a/vendor/github.com/gogo/protobuf/proto/lib_gogo.go b/vendor/github.com/gogo/protobuf/proto/lib_gogo.go new file mode 100644 index 000000000..4b4f7c909 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/lib_gogo.go @@ -0,0 +1,42 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package proto + +import ( + "encoding/json" + "strconv" +) + +func MarshalJSONEnum(m map[int32]string, value int32) ([]byte, error) { + s, ok := m[value] + if !ok { + s = strconv.Itoa(int(value)) + } + return json.Marshal(s) +} diff --git a/vendor/github.com/gogo/protobuf/proto/message_set.go b/vendor/github.com/gogo/protobuf/proto/message_set.go new file mode 100644 index 000000000..fd982decd --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/message_set.go @@ -0,0 +1,311 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Support for message sets. + */ + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "reflect" + "sort" +) + +// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. +// A message type ID is required for storing a protocol buffer in a message set. +var errNoMessageTypeID = errors.New("proto does not have a message type ID") + +// The first two types (_MessageSet_Item and messageSet) +// model what the protocol compiler produces for the following protocol message: +// message MessageSet { +// repeated group Item = 1 { +// required int32 type_id = 2; +// required string message = 3; +// }; +// } +// That is the MessageSet wire format. We can't use a proto to generate these +// because that would introduce a circular dependency between it and this package. + +type _MessageSet_Item struct { + TypeId *int32 `protobuf:"varint,2,req,name=type_id"` + Message []byte `protobuf:"bytes,3,req,name=message"` +} + +type messageSet struct { + Item []*_MessageSet_Item `protobuf:"group,1,rep"` + XXX_unrecognized []byte + // TODO: caching? +} + +// Make sure messageSet is a Message. +var _ Message = (*messageSet)(nil) + +// messageTypeIder is an interface satisfied by a protocol buffer type +// that may be stored in a MessageSet. 
+type messageTypeIder interface { + MessageTypeId() int32 +} + +func (ms *messageSet) find(pb Message) *_MessageSet_Item { + mti, ok := pb.(messageTypeIder) + if !ok { + return nil + } + id := mti.MessageTypeId() + for _, item := range ms.Item { + if *item.TypeId == id { + return item + } + } + return nil +} + +func (ms *messageSet) Has(pb Message) bool { + if ms.find(pb) != nil { + return true + } + return false +} + +func (ms *messageSet) Unmarshal(pb Message) error { + if item := ms.find(pb); item != nil { + return Unmarshal(item.Message, pb) + } + if _, ok := pb.(messageTypeIder); !ok { + return errNoMessageTypeID + } + return nil // TODO: return error instead? +} + +func (ms *messageSet) Marshal(pb Message) error { + msg, err := Marshal(pb) + if err != nil { + return err + } + if item := ms.find(pb); item != nil { + // reuse existing item + item.Message = msg + return nil + } + + mti, ok := pb.(messageTypeIder) + if !ok { + return errNoMessageTypeID + } + + mtid := mti.MessageTypeId() + ms.Item = append(ms.Item, &_MessageSet_Item{ + TypeId: &mtid, + Message: msg, + }) + return nil +} + +func (ms *messageSet) Reset() { *ms = messageSet{} } +func (ms *messageSet) String() string { return CompactTextString(ms) } +func (*messageSet) ProtoMessage() {} + +// Support for the message_set_wire_format message option. + +func skipVarint(buf []byte) []byte { + i := 0 + for ; buf[i]&0x80 != 0; i++ { + } + return buf[i+1:] +} + +// MarshalMessageSet encodes the extension map represented by m in the message set wire format. +// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option. +func MarshalMessageSet(exts interface{}) ([]byte, error) { + var m map[int32]Extension + switch exts := exts.(type) { + case *XXX_InternalExtensions: + if err := encodeExtensions(exts); err != nil { + return nil, err + } + m, _ = exts.extensionsRead() + case map[int32]Extension: + if err := encodeExtensionsMap(exts); err != nil { + return nil, err + } + m = exts + default: + return nil, errors.New("proto: not an extension map") + } + + // Sort extension IDs to provide a deterministic encoding. + // See also enc_map in encode.go. + ids := make([]int, 0, len(m)) + for id := range m { + ids = append(ids, int(id)) + } + sort.Ints(ids) + + ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))} + for _, id := range ids { + e := m[int32(id)] + // Remove the wire type and field number varint, as well as the length varint. + msg := skipVarint(skipVarint(e.enc)) + + ms.Item = append(ms.Item, &_MessageSet_Item{ + TypeId: Int32(int32(id)), + Message: msg, + }) + } + return Marshal(ms) +} + +// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. +// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option. +func UnmarshalMessageSet(buf []byte, exts interface{}) error { + var m map[int32]Extension + switch exts := exts.(type) { + case *XXX_InternalExtensions: + m = exts.extensionsWrite() + case map[int32]Extension: + m = exts + default: + return errors.New("proto: not an extension map") + } + + ms := new(messageSet) + if err := Unmarshal(buf, ms); err != nil { + return err + } + for _, item := range ms.Item { + id := *item.TypeId + msg := item.Message + + // Restore wire type and field number varint, plus length varint. + // Be careful to preserve duplicate items. 
+ b := EncodeVarint(uint64(id)<<3 | WireBytes) + if ext, ok := m[id]; ok { + // Existing data; rip off the tag and length varint + // so we join the new data correctly. + // We can assume that ext.enc is set because we are unmarshaling. + o := ext.enc[len(b):] // skip wire type and field number + _, n := DecodeVarint(o) // calculate length of length varint + o = o[n:] // skip length varint + msg = append(o, msg...) // join old data and new data + } + b = append(b, EncodeVarint(uint64(len(msg)))...) + b = append(b, msg...) + + m[id] = Extension{enc: b} + } + return nil +} + +// MarshalMessageSetJSON encodes the extension map represented by m in JSON format. +// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option. +func MarshalMessageSetJSON(exts interface{}) ([]byte, error) { + var m map[int32]Extension + switch exts := exts.(type) { + case *XXX_InternalExtensions: + m, _ = exts.extensionsRead() + case map[int32]Extension: + m = exts + default: + return nil, errors.New("proto: not an extension map") + } + var b bytes.Buffer + b.WriteByte('{') + + // Process the map in key order for deterministic output. + ids := make([]int32, 0, len(m)) + for id := range m { + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) // int32Slice defined in text.go + + for i, id := range ids { + ext := m[id] + if i > 0 { + b.WriteByte(',') + } + + msd, ok := messageSetMap[id] + if !ok { + // Unknown type; we can't render it, so skip it. + continue + } + fmt.Fprintf(&b, `"[%s]":`, msd.name) + + x := ext.value + if x == nil { + x = reflect.New(msd.t.Elem()).Interface() + if err := Unmarshal(ext.enc, x.(Message)); err != nil { + return nil, err + } + } + d, err := json.Marshal(x) + if err != nil { + return nil, err + } + b.Write(d) + } + b.WriteByte('}') + return b.Bytes(), nil +} + +// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format. +// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option. +func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error { + // Common-case fast path. + if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) { + return nil + } + + // This is fairly tricky, and it's not clear that it is needed. + return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented") +} + +// A global registry of types that can be used in a MessageSet. + +var messageSetMap = make(map[int32]messageSetDesc) + +type messageSetDesc struct { + t reflect.Type // pointer to struct + name string +} + +// RegisterMessageSetType is called from the generated code. +func RegisterMessageSetType(m Message, fieldNum int32, name string) { + messageSetMap[fieldNum] = messageSetDesc{ + t: reflect.TypeOf(m), + name: name, + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go new file mode 100644 index 000000000..fb512e2e1 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go @@ -0,0 +1,484 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build appengine js + +// This file contains an implementation of proto field accesses using package reflect. +// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can +// be used on App Engine. + +package proto + +import ( + "math" + "reflect" +) + +// A structPointer is a pointer to a struct. +type structPointer struct { + v reflect.Value +} + +// toStructPointer returns a structPointer equivalent to the given reflect value. +// The reflect value must itself be a pointer to a struct. +func toStructPointer(v reflect.Value) structPointer { + return structPointer{v} +} + +// IsNil reports whether p is nil. +func structPointer_IsNil(p structPointer) bool { + return p.v.IsNil() +} + +// Interface returns the struct pointer as an interface value. +func structPointer_Interface(p structPointer, _ reflect.Type) interface{} { + return p.v.Interface() +} + +// A field identifies a field in a struct, accessible from a structPointer. +// In this implementation, a field is identified by the sequence of field indices +// passed to reflect's FieldByIndex. +type field []int + +// toField returns a field equivalent to the given reflect field. +func toField(f *reflect.StructField) field { + return f.Index +} + +// invalidField is an invalid field identifier. +var invalidField = field(nil) + +// IsValid reports whether the field identifier is valid. +func (f field) IsValid() bool { return f != nil } + +// field returns the given field in the struct as a reflect value. +func structPointer_field(p structPointer, f field) reflect.Value { + // Special case: an extension map entry with a value of type T + // passes a *T to the struct-handling code with a zero field, + // expecting that it will be treated as equivalent to *struct{ X T }, + // which has the same memory layout. We have to handle that case + // specially, because reflect will panic if we call FieldByIndex on a + // non-struct. + if f == nil { + return p.v.Elem() + } + + return p.v.Elem().FieldByIndex(f) +} + +// ifield returns the given field in the struct as an interface value. +func structPointer_ifield(p structPointer, f field) interface{} { + return structPointer_field(p, f).Addr().Interface() +} + +// Bytes returns the address of a []byte field in the struct. 
+func structPointer_Bytes(p structPointer, f field) *[]byte { + return structPointer_ifield(p, f).(*[]byte) +} + +// BytesSlice returns the address of a [][]byte field in the struct. +func structPointer_BytesSlice(p structPointer, f field) *[][]byte { + return structPointer_ifield(p, f).(*[][]byte) +} + +// Bool returns the address of a *bool field in the struct. +func structPointer_Bool(p structPointer, f field) **bool { + return structPointer_ifield(p, f).(**bool) +} + +// BoolVal returns the address of a bool field in the struct. +func structPointer_BoolVal(p structPointer, f field) *bool { + return structPointer_ifield(p, f).(*bool) +} + +// BoolSlice returns the address of a []bool field in the struct. +func structPointer_BoolSlice(p structPointer, f field) *[]bool { + return structPointer_ifield(p, f).(*[]bool) +} + +// String returns the address of a *string field in the struct. +func structPointer_String(p structPointer, f field) **string { + return structPointer_ifield(p, f).(**string) +} + +// StringVal returns the address of a string field in the struct. +func structPointer_StringVal(p structPointer, f field) *string { + return structPointer_ifield(p, f).(*string) +} + +// StringSlice returns the address of a []string field in the struct. +func structPointer_StringSlice(p structPointer, f field) *[]string { + return structPointer_ifield(p, f).(*[]string) +} + +// Extensions returns the address of an extension map field in the struct. +func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions { + return structPointer_ifield(p, f).(*XXX_InternalExtensions) +} + +// ExtMap returns the address of an extension map field in the struct. +func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { + return structPointer_ifield(p, f).(*map[int32]Extension) +} + +// NewAt returns the reflect.Value for a pointer to a field in the struct. +func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { + return structPointer_field(p, f).Addr() +} + +// SetStructPointer writes a *struct field in the struct. +func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { + structPointer_field(p, f).Set(q.v) +} + +// GetStructPointer reads a *struct field in the struct. +func structPointer_GetStructPointer(p structPointer, f field) structPointer { + return structPointer{structPointer_field(p, f)} +} + +// StructPointerSlice the address of a []*struct field in the struct. +func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice { + return structPointerSlice{structPointer_field(p, f)} +} + +// A structPointerSlice represents the address of a slice of pointers to structs +// (themselves messages or groups). That is, v.Type() is *[]*struct{...}. +type structPointerSlice struct { + v reflect.Value +} + +func (p structPointerSlice) Len() int { return p.v.Len() } +func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} } +func (p structPointerSlice) Append(q structPointer) { + p.v.Set(reflect.Append(p.v, q.v)) +} + +var ( + int32Type = reflect.TypeOf(int32(0)) + uint32Type = reflect.TypeOf(uint32(0)) + float32Type = reflect.TypeOf(float32(0)) + int64Type = reflect.TypeOf(int64(0)) + uint64Type = reflect.TypeOf(uint64(0)) + float64Type = reflect.TypeOf(float64(0)) +) + +// A word32 represents a field of type *int32, *uint32, *float32, or *enum. +// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable. 
+type word32 struct { + v reflect.Value +} + +// IsNil reports whether p is nil. +func word32_IsNil(p word32) bool { + return p.v.IsNil() +} + +// Set sets p to point at a newly allocated word with bits set to x. +func word32_Set(p word32, o *Buffer, x uint32) { + t := p.v.Type().Elem() + switch t { + case int32Type: + if len(o.int32s) == 0 { + o.int32s = make([]int32, uint32PoolSize) + } + o.int32s[0] = int32(x) + p.v.Set(reflect.ValueOf(&o.int32s[0])) + o.int32s = o.int32s[1:] + return + case uint32Type: + if len(o.uint32s) == 0 { + o.uint32s = make([]uint32, uint32PoolSize) + } + o.uint32s[0] = x + p.v.Set(reflect.ValueOf(&o.uint32s[0])) + o.uint32s = o.uint32s[1:] + return + case float32Type: + if len(o.float32s) == 0 { + o.float32s = make([]float32, uint32PoolSize) + } + o.float32s[0] = math.Float32frombits(x) + p.v.Set(reflect.ValueOf(&o.float32s[0])) + o.float32s = o.float32s[1:] + return + } + + // must be enum + p.v.Set(reflect.New(t)) + p.v.Elem().SetInt(int64(int32(x))) +} + +// Get gets the bits pointed at by p, as a uint32. +func word32_Get(p word32) uint32 { + elem := p.v.Elem() + switch elem.Kind() { + case reflect.Int32: + return uint32(elem.Int()) + case reflect.Uint32: + return uint32(elem.Uint()) + case reflect.Float32: + return math.Float32bits(float32(elem.Float())) + } + panic("unreachable") +} + +// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct. +func structPointer_Word32(p structPointer, f field) word32 { + return word32{structPointer_field(p, f)} +} + +// A word32Val represents a field of type int32, uint32, float32, or enum. +// That is, v.Type() is int32, uint32, float32, or enum and v is assignable. +type word32Val struct { + v reflect.Value +} + +// Set sets *p to x. +func word32Val_Set(p word32Val, x uint32) { + switch p.v.Type() { + case int32Type: + p.v.SetInt(int64(x)) + return + case uint32Type: + p.v.SetUint(uint64(x)) + return + case float32Type: + p.v.SetFloat(float64(math.Float32frombits(x))) + return + } + + // must be enum + p.v.SetInt(int64(int32(x))) +} + +// Get gets the bits pointed at by p, as a uint32. +func word32Val_Get(p word32Val) uint32 { + elem := p.v + switch elem.Kind() { + case reflect.Int32: + return uint32(elem.Int()) + case reflect.Uint32: + return uint32(elem.Uint()) + case reflect.Float32: + return math.Float32bits(float32(elem.Float())) + } + panic("unreachable") +} + +// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct. +func structPointer_Word32Val(p structPointer, f field) word32Val { + return word32Val{structPointer_field(p, f)} +} + +// A word32Slice is a slice of 32-bit values. +// That is, v.Type() is []int32, []uint32, []float32, or []enum. 
+type word32Slice struct { + v reflect.Value +} + +func (p word32Slice) Append(x uint32) { + n, m := p.v.Len(), p.v.Cap() + if n < m { + p.v.SetLen(n + 1) + } else { + t := p.v.Type().Elem() + p.v.Set(reflect.Append(p.v, reflect.Zero(t))) + } + elem := p.v.Index(n) + switch elem.Kind() { + case reflect.Int32: + elem.SetInt(int64(int32(x))) + case reflect.Uint32: + elem.SetUint(uint64(x)) + case reflect.Float32: + elem.SetFloat(float64(math.Float32frombits(x))) + } +} + +func (p word32Slice) Len() int { + return p.v.Len() +} + +func (p word32Slice) Index(i int) uint32 { + elem := p.v.Index(i) + switch elem.Kind() { + case reflect.Int32: + return uint32(elem.Int()) + case reflect.Uint32: + return uint32(elem.Uint()) + case reflect.Float32: + return math.Float32bits(float32(elem.Float())) + } + panic("unreachable") +} + +// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct. +func structPointer_Word32Slice(p structPointer, f field) word32Slice { + return word32Slice{structPointer_field(p, f)} +} + +// word64 is like word32 but for 64-bit values. +type word64 struct { + v reflect.Value +} + +func word64_Set(p word64, o *Buffer, x uint64) { + t := p.v.Type().Elem() + switch t { + case int64Type: + if len(o.int64s) == 0 { + o.int64s = make([]int64, uint64PoolSize) + } + o.int64s[0] = int64(x) + p.v.Set(reflect.ValueOf(&o.int64s[0])) + o.int64s = o.int64s[1:] + return + case uint64Type: + if len(o.uint64s) == 0 { + o.uint64s = make([]uint64, uint64PoolSize) + } + o.uint64s[0] = x + p.v.Set(reflect.ValueOf(&o.uint64s[0])) + o.uint64s = o.uint64s[1:] + return + case float64Type: + if len(o.float64s) == 0 { + o.float64s = make([]float64, uint64PoolSize) + } + o.float64s[0] = math.Float64frombits(x) + p.v.Set(reflect.ValueOf(&o.float64s[0])) + o.float64s = o.float64s[1:] + return + } + panic("unreachable") +} + +func word64_IsNil(p word64) bool { + return p.v.IsNil() +} + +func word64_Get(p word64) uint64 { + elem := p.v.Elem() + switch elem.Kind() { + case reflect.Int64: + return uint64(elem.Int()) + case reflect.Uint64: + return elem.Uint() + case reflect.Float64: + return math.Float64bits(elem.Float()) + } + panic("unreachable") +} + +func structPointer_Word64(p structPointer, f field) word64 { + return word64{structPointer_field(p, f)} +} + +// word64Val is like word32Val but for 64-bit values. 
+type word64Val struct { + v reflect.Value +} + +func word64Val_Set(p word64Val, o *Buffer, x uint64) { + switch p.v.Type() { + case int64Type: + p.v.SetInt(int64(x)) + return + case uint64Type: + p.v.SetUint(x) + return + case float64Type: + p.v.SetFloat(math.Float64frombits(x)) + return + } + panic("unreachable") +} + +func word64Val_Get(p word64Val) uint64 { + elem := p.v + switch elem.Kind() { + case reflect.Int64: + return uint64(elem.Int()) + case reflect.Uint64: + return elem.Uint() + case reflect.Float64: + return math.Float64bits(elem.Float()) + } + panic("unreachable") +} + +func structPointer_Word64Val(p structPointer, f field) word64Val { + return word64Val{structPointer_field(p, f)} +} + +type word64Slice struct { + v reflect.Value +} + +func (p word64Slice) Append(x uint64) { + n, m := p.v.Len(), p.v.Cap() + if n < m { + p.v.SetLen(n + 1) + } else { + t := p.v.Type().Elem() + p.v.Set(reflect.Append(p.v, reflect.Zero(t))) + } + elem := p.v.Index(n) + switch elem.Kind() { + case reflect.Int64: + elem.SetInt(int64(int64(x))) + case reflect.Uint64: + elem.SetUint(uint64(x)) + case reflect.Float64: + elem.SetFloat(float64(math.Float64frombits(x))) + } +} + +func (p word64Slice) Len() int { + return p.v.Len() +} + +func (p word64Slice) Index(i int) uint64 { + elem := p.v.Index(i) + switch elem.Kind() { + case reflect.Int64: + return uint64(elem.Int()) + case reflect.Uint64: + return uint64(elem.Uint()) + case reflect.Float64: + return math.Float64bits(float64(elem.Float())) + } + panic("unreachable") +} + +func structPointer_Word64Slice(p structPointer, f field) word64Slice { + return word64Slice{structPointer_field(p, f)} +} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go new file mode 100644 index 000000000..1763a5f22 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go @@ -0,0 +1,85 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2016, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +// +build appengine js + +package proto + +import ( + "reflect" +) + +func structPointer_FieldPointer(p structPointer, f field) structPointer { + panic("not implemented") +} + +func appendStructPointer(base structPointer, f field, typ reflect.Type) structPointer { + panic("not implemented") +} + +func structPointer_InterfaceAt(p structPointer, f field, t reflect.Type) interface{} { + panic("not implemented") +} + +func structPointer_InterfaceRef(p structPointer, f field, t reflect.Type) interface{} { + panic("not implemented") +} + +func structPointer_GetRefStructPointer(p structPointer, f field) structPointer { + panic("not implemented") +} + +func structPointer_Add(p structPointer, size field) structPointer { + panic("not implemented") +} + +func structPointer_Len(p structPointer, f field) int { + panic("not implemented") +} + +func structPointer_GetSliceHeader(p structPointer, f field) *reflect.SliceHeader { + panic("not implemented") +} + +func structPointer_Copy(oldptr structPointer, newptr structPointer, size int) { + panic("not implemented") +} + +func structPointer_StructRefSlice(p structPointer, f field, size uintptr) *structRefSlice { + panic("not implemented") +} + +type structRefSlice struct{} + +func (v *structRefSlice) Len() int { + panic("not implemented") +} + +func (v *structRefSlice) Index(i int) structPointer { + panic("not implemented") +} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go new file mode 100644 index 000000000..6b5567d47 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go @@ -0,0 +1,270 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build !appengine,!js + +// This file contains the implementation of the proto field accesses using package unsafe. 
+ +package proto + +import ( + "reflect" + "unsafe" +) + +// NOTE: These type_Foo functions would more idiomatically be methods, +// but Go does not allow methods on pointer types, and we must preserve +// some pointer type for the garbage collector. We use these +// funcs with clunky names as our poor approximation to methods. +// +// An alternative would be +// type structPointer struct { p unsafe.Pointer } +// but that does not registerize as well. + +// A structPointer is a pointer to a struct. +type structPointer unsafe.Pointer + +// toStructPointer returns a structPointer equivalent to the given reflect value. +func toStructPointer(v reflect.Value) structPointer { + return structPointer(unsafe.Pointer(v.Pointer())) +} + +// IsNil reports whether p is nil. +func structPointer_IsNil(p structPointer) bool { + return p == nil +} + +// Interface returns the struct pointer, assumed to have element type t, +// as an interface value. +func structPointer_Interface(p structPointer, t reflect.Type) interface{} { + return reflect.NewAt(t, unsafe.Pointer(p)).Interface() +} + +// A field identifies a field in a struct, accessible from a structPointer. +// In this implementation, a field is identified by its byte offset from the start of the struct. +type field uintptr + +// toField returns a field equivalent to the given reflect field. +func toField(f *reflect.StructField) field { + return field(f.Offset) +} + +// invalidField is an invalid field identifier. +const invalidField = ^field(0) + +// IsValid reports whether the field identifier is valid. +func (f field) IsValid() bool { + return f != ^field(0) +} + +// Bytes returns the address of a []byte field in the struct. +func structPointer_Bytes(p structPointer, f field) *[]byte { + return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// BytesSlice returns the address of a [][]byte field in the struct. +func structPointer_BytesSlice(p structPointer, f field) *[][]byte { + return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// Bool returns the address of a *bool field in the struct. +func structPointer_Bool(p structPointer, f field) **bool { + return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// BoolVal returns the address of a bool field in the struct. +func structPointer_BoolVal(p structPointer, f field) *bool { + return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// BoolSlice returns the address of a []bool field in the struct. +func structPointer_BoolSlice(p structPointer, f field) *[]bool { + return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// String returns the address of a *string field in the struct. +func structPointer_String(p structPointer, f field) **string { + return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// StringVal returns the address of a string field in the struct. +func structPointer_StringVal(p structPointer, f field) *string { + return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// StringSlice returns the address of a []string field in the struct. +func structPointer_StringSlice(p structPointer, f field) *[]string { + return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// ExtMap returns the address of an extension map field in the struct. 
+func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions { + return (*XXX_InternalExtensions)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { + return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// NewAt returns the reflect.Value for a pointer to a field in the struct. +func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { + return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f))) +} + +// SetStructPointer writes a *struct field in the struct. +func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { + *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q +} + +// GetStructPointer reads a *struct field in the struct. +func structPointer_GetStructPointer(p structPointer, f field) structPointer { + return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// StructPointerSlice the address of a []*struct field in the struct. +func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice { + return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups). +type structPointerSlice []structPointer + +func (v *structPointerSlice) Len() int { return len(*v) } +func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] } +func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) } + +// A word32 is the address of a "pointer to 32-bit value" field. +type word32 **uint32 + +// IsNil reports whether *v is nil. +func word32_IsNil(p word32) bool { + return *p == nil +} + +// Set sets *v to point at a newly allocated word set to x. +func word32_Set(p word32, o *Buffer, x uint32) { + if len(o.uint32s) == 0 { + o.uint32s = make([]uint32, uint32PoolSize) + } + o.uint32s[0] = x + *p = &o.uint32s[0] + o.uint32s = o.uint32s[1:] +} + +// Get gets the value pointed at by *v. +func word32_Get(p word32) uint32 { + return **p +} + +// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct. +func structPointer_Word32(p structPointer, f field) word32 { + return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +// A word32Val is the address of a 32-bit value field. +type word32Val *uint32 + +// Set sets *p to x. +func word32Val_Set(p word32Val, x uint32) { + *p = x +} + +// Get gets the value pointed at by p. +func word32Val_Get(p word32Val) uint32 { + return *p +} + +// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct. +func structPointer_Word32Val(p structPointer, f field) word32Val { + return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +// A word32Slice is a slice of 32-bit values. +type word32Slice []uint32 + +func (v *word32Slice) Append(x uint32) { *v = append(*v, x) } +func (v *word32Slice) Len() int { return len(*v) } +func (v *word32Slice) Index(i int) uint32 { return (*v)[i] } + +// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct. +func structPointer_Word32Slice(p structPointer, f field) *word32Slice { + return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// word64 is like word32 but for 64-bit values. 
+type word64 **uint64 + +func word64_Set(p word64, o *Buffer, x uint64) { + if len(o.uint64s) == 0 { + o.uint64s = make([]uint64, uint64PoolSize) + } + o.uint64s[0] = x + *p = &o.uint64s[0] + o.uint64s = o.uint64s[1:] +} + +func word64_IsNil(p word64) bool { + return *p == nil +} + +func word64_Get(p word64) uint64 { + return **p +} + +func structPointer_Word64(p structPointer, f field) word64 { + return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +// word64Val is like word32Val but for 64-bit values. +type word64Val *uint64 + +func word64Val_Set(p word64Val, o *Buffer, x uint64) { + *p = x +} + +func word64Val_Get(p word64Val) uint64 { + return *p +} + +func structPointer_Word64Val(p structPointer, f field) word64Val { + return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +// word64Slice is like word32Slice but for 64-bit values. +type word64Slice []uint64 + +func (v *word64Slice) Append(x uint64) { *v = append(*v, x) } +func (v *word64Slice) Len() int { return len(*v) } +func (v *word64Slice) Index(i int) uint64 { return (*v)[i] } + +func structPointer_Word64Slice(p structPointer, f field) *word64Slice { + return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go new file mode 100644 index 000000000..f156a29f0 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go @@ -0,0 +1,128 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build !appengine,!js + +// This file contains the implementation of the proto field accesses using package unsafe. 
+ +package proto + +import ( + "reflect" + "unsafe" +) + +func structPointer_InterfaceAt(p structPointer, f field, t reflect.Type) interface{} { + point := unsafe.Pointer(uintptr(p) + uintptr(f)) + r := reflect.NewAt(t, point) + return r.Interface() +} + +func structPointer_InterfaceRef(p structPointer, f field, t reflect.Type) interface{} { + point := unsafe.Pointer(uintptr(p) + uintptr(f)) + r := reflect.NewAt(t, point) + if r.Elem().IsNil() { + return nil + } + return r.Elem().Interface() +} + +func copyUintPtr(oldptr, newptr uintptr, size int) { + oldbytes := make([]byte, 0) + oldslice := (*reflect.SliceHeader)(unsafe.Pointer(&oldbytes)) + oldslice.Data = oldptr + oldslice.Len = size + oldslice.Cap = size + newbytes := make([]byte, 0) + newslice := (*reflect.SliceHeader)(unsafe.Pointer(&newbytes)) + newslice.Data = newptr + newslice.Len = size + newslice.Cap = size + copy(newbytes, oldbytes) +} + +func structPointer_Copy(oldptr structPointer, newptr structPointer, size int) { + copyUintPtr(uintptr(oldptr), uintptr(newptr), size) +} + +func appendStructPointer(base structPointer, f field, typ reflect.Type) structPointer { + size := typ.Elem().Size() + + oldHeader := structPointer_GetSliceHeader(base, f) + oldSlice := reflect.NewAt(typ, unsafe.Pointer(oldHeader)).Elem() + newLen := oldHeader.Len + 1 + newSlice := reflect.MakeSlice(typ, newLen, newLen) + reflect.Copy(newSlice, oldSlice) + bas := toStructPointer(newSlice) + oldHeader.Data = uintptr(bas) + oldHeader.Len = newLen + oldHeader.Cap = newLen + + return structPointer(unsafe.Pointer(uintptr(unsafe.Pointer(bas)) + uintptr(uintptr(newLen-1)*size))) +} + +func structPointer_FieldPointer(p structPointer, f field) structPointer { + return structPointer(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +func structPointer_GetRefStructPointer(p structPointer, f field) structPointer { + return structPointer((*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +func structPointer_GetSliceHeader(p structPointer, f field) *reflect.SliceHeader { + return (*reflect.SliceHeader)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +func structPointer_Add(p structPointer, size field) structPointer { + return structPointer(unsafe.Pointer(uintptr(p) + uintptr(size))) +} + +func structPointer_Len(p structPointer, f field) int { + return len(*(*[]interface{})(unsafe.Pointer(structPointer_GetRefStructPointer(p, f)))) +} + +func structPointer_StructRefSlice(p structPointer, f field, size uintptr) *structRefSlice { + return &structRefSlice{p: p, f: f, size: size} +} + +// A structRefSlice represents a slice of structs (themselves submessages or groups). +type structRefSlice struct { + p structPointer + f field + size uintptr +} + +func (v *structRefSlice) Len() int { + return structPointer_Len(v.p, v.f) +} + +func (v *structRefSlice) Index(i int) structPointer { + ss := structPointer_GetStructPointer(v.p, v.f) + ss1 := structPointer_GetRefStructPointer(ss, 0) + return structPointer_Add(ss1, field(uintptr(i)*v.size)) +} diff --git a/vendor/github.com/gogo/protobuf/proto/properties.go b/vendor/github.com/gogo/protobuf/proto/properties.go new file mode 100644 index 000000000..44b332052 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/properties.go @@ -0,0 +1,968 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. + */ + +import ( + "fmt" + "log" + "os" + "reflect" + "sort" + "strconv" + "strings" + "sync" +) + +const debug bool = false + +// Constants that identify the encoding of a value on the wire. +const ( + WireVarint = 0 + WireFixed64 = 1 + WireBytes = 2 + WireStartGroup = 3 + WireEndGroup = 4 + WireFixed32 = 5 +) + +const startSize = 10 // initial slice/string sizes + +// Encoders are defined in encode.go +// An encoder outputs the full representation of a field, including its +// tag and encoder type. +type encoder func(p *Buffer, prop *Properties, base structPointer) error + +// A valueEncoder encodes a single integer in a particular encoding. +type valueEncoder func(o *Buffer, x uint64) error + +// Sizers are defined in encode.go +// A sizer returns the encoded size of a field, including its tag and encoder +// type. +type sizer func(prop *Properties, base structPointer) int + +// A valueSizer returns the encoded size of a single integer in a particular +// encoding. +type valueSizer func(x uint64) int + +// Decoders are defined in decode.go +// A decoder creates a value from its wire representation. +// Unrecognized subelements are saved in unrec. +type decoder func(p *Buffer, prop *Properties, base structPointer) error + +// A valueDecoder decodes a single integer in a particular encoding. +type valueDecoder func(o *Buffer) (x uint64, err error) + +// A oneofMarshaler does the marshaling for all oneof fields in a message. +type oneofMarshaler func(Message, *Buffer) error + +// A oneofUnmarshaler does the unmarshaling for a oneof field in a message. +type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error) + +// A oneofSizer does the sizing for all oneof fields in a message. +type oneofSizer func(Message) int + +// tagMap is an optimization over map[int]int for typical protocol buffer +// use-cases. 
Encoded protocol buffers are often in tag order with small tag +// numbers. +type tagMap struct { + fastTags []int + slowTags map[int]int +} + +// tagMapFastLimit is the upper bound on the tag number that will be stored in +// the tagMap slice rather than its map. +const tagMapFastLimit = 1024 + +func (p *tagMap) get(t int) (int, bool) { + if t > 0 && t < tagMapFastLimit { + if t >= len(p.fastTags) { + return 0, false + } + fi := p.fastTags[t] + return fi, fi >= 0 + } + fi, ok := p.slowTags[t] + return fi, ok +} + +func (p *tagMap) put(t int, fi int) { + if t > 0 && t < tagMapFastLimit { + for len(p.fastTags) < t+1 { + p.fastTags = append(p.fastTags, -1) + } + p.fastTags[t] = fi + return + } + if p.slowTags == nil { + p.slowTags = make(map[int]int) + } + p.slowTags[t] = fi +} + +// StructProperties represents properties for all the fields of a struct. +// decoderTags and decoderOrigNames should only be used by the decoder. +type StructProperties struct { + Prop []*Properties // properties for each field + reqCount int // required count + decoderTags tagMap // map from proto tag to struct field number + decoderOrigNames map[string]int // map from original name to struct field number + order []int // list of struct field numbers in tag order + unrecField field // field id of the XXX_unrecognized []byte field + extendable bool // is this an extendable proto + + oneofMarshaler oneofMarshaler + oneofUnmarshaler oneofUnmarshaler + oneofSizer oneofSizer + stype reflect.Type + + // OneofTypes contains information about the oneof fields in this message. + // It is keyed by the original name of a field. + OneofTypes map[string]*OneofProperties +} + +// OneofProperties represents information about a specific field in a oneof. +type OneofProperties struct { + Type reflect.Type // pointer to generated struct type for this oneof field + Field int // struct field number of the containing oneof in the message + Prop *Properties +} + +// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. +// See encode.go, (*Buffer).enc_struct. + +func (sp *StructProperties) Len() int { return len(sp.order) } +func (sp *StructProperties) Less(i, j int) bool { + return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag +} +func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } + +// Properties represents the protocol-specific behavior of a single struct field. 
+type Properties struct { + Name string // name of the field, for error messages + OrigName string // original name before protocol compiler (always set) + JSONName string // name to use for JSON; determined by protoc + Wire string + WireType int + Tag int + Required bool + Optional bool + Repeated bool + Packed bool // relevant for repeated primitives only + Enum string // set for enum types only + proto3 bool // whether this is known to be a proto3 field; set for []byte only + oneof bool // whether this is a oneof field + + Default string // default value + HasDefault bool // whether an explicit default was provided + CustomType string + StdTime bool + StdDuration bool + + enc encoder + valEnc valueEncoder // set for bool and numeric types only + field field + tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType) + tagbuf [8]byte + stype reflect.Type // set for struct types only + sstype reflect.Type // set for slices of structs types only + ctype reflect.Type // set for custom types only + sprop *StructProperties // set for struct types only + isMarshaler bool + isUnmarshaler bool + + mtype reflect.Type // set for map types only + mkeyprop *Properties // set for map types only + mvalprop *Properties // set for map types only + + size sizer + valSize valueSizer // set for bool and numeric types only + + dec decoder + valDec valueDecoder // set for bool and numeric types only + + // If this is a packable field, this will be the decoder for the packed version of the field. + packedDec decoder +} + +// String formats the properties in the protobuf struct field tag style. +func (p *Properties) String() string { + s := p.Wire + s = "," + s += strconv.Itoa(p.Tag) + if p.Required { + s += ",req" + } + if p.Optional { + s += ",opt" + } + if p.Repeated { + s += ",rep" + } + if p.Packed { + s += ",packed" + } + s += ",name=" + p.OrigName + if p.JSONName != p.OrigName { + s += ",json=" + p.JSONName + } + if p.proto3 { + s += ",proto3" + } + if p.oneof { + s += ",oneof" + } + if len(p.Enum) > 0 { + s += ",enum=" + p.Enum + } + if p.HasDefault { + s += ",def=" + p.Default + } + return s +} + +// Parse populates p by parsing a string in the protobuf struct field tag style. +func (p *Properties) Parse(s string) { + // "bytes,49,opt,name=foo,def=hello!" + fields := strings.Split(s, ",") // breaks def=, but handled below. 
+ if len(fields) < 2 { + fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s) + return + } + + p.Wire = fields[0] + switch p.Wire { + case "varint": + p.WireType = WireVarint + p.valEnc = (*Buffer).EncodeVarint + p.valDec = (*Buffer).DecodeVarint + p.valSize = sizeVarint + case "fixed32": + p.WireType = WireFixed32 + p.valEnc = (*Buffer).EncodeFixed32 + p.valDec = (*Buffer).DecodeFixed32 + p.valSize = sizeFixed32 + case "fixed64": + p.WireType = WireFixed64 + p.valEnc = (*Buffer).EncodeFixed64 + p.valDec = (*Buffer).DecodeFixed64 + p.valSize = sizeFixed64 + case "zigzag32": + p.WireType = WireVarint + p.valEnc = (*Buffer).EncodeZigzag32 + p.valDec = (*Buffer).DecodeZigzag32 + p.valSize = sizeZigzag32 + case "zigzag64": + p.WireType = WireVarint + p.valEnc = (*Buffer).EncodeZigzag64 + p.valDec = (*Buffer).DecodeZigzag64 + p.valSize = sizeZigzag64 + case "bytes", "group": + p.WireType = WireBytes + // no numeric converter for non-numeric types + default: + fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s) + return + } + + var err error + p.Tag, err = strconv.Atoi(fields[1]) + if err != nil { + return + } + + for i := 2; i < len(fields); i++ { + f := fields[i] + switch { + case f == "req": + p.Required = true + case f == "opt": + p.Optional = true + case f == "rep": + p.Repeated = true + case f == "packed": + p.Packed = true + case strings.HasPrefix(f, "name="): + p.OrigName = f[5:] + case strings.HasPrefix(f, "json="): + p.JSONName = f[5:] + case strings.HasPrefix(f, "enum="): + p.Enum = f[5:] + case f == "proto3": + p.proto3 = true + case f == "oneof": + p.oneof = true + case strings.HasPrefix(f, "def="): + p.HasDefault = true + p.Default = f[4:] // rest of string + if i+1 < len(fields) { + // Commas aren't escaped, and def is always last. + p.Default += "," + strings.Join(fields[i+1:], ",") + break + } + case strings.HasPrefix(f, "embedded="): + p.OrigName = strings.Split(f, "=")[1] + case strings.HasPrefix(f, "customtype="): + p.CustomType = strings.Split(f, "=")[1] + case f == "stdtime": + p.StdTime = true + case f == "stdduration": + p.StdDuration = true + } + } +} + +func logNoSliceEnc(t1, t2 reflect.Type) { + fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2) +} + +var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() + +// Initialize the fields for encoding and decoding. 
+func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { + p.enc = nil + p.dec = nil + p.size = nil + isMap := typ.Kind() == reflect.Map + if len(p.CustomType) > 0 && !isMap { + p.setCustomEncAndDec(typ) + p.setTag(lockGetProp) + return + } + if p.StdTime && !isMap { + p.setTimeEncAndDec(typ) + p.setTag(lockGetProp) + return + } + if p.StdDuration && !isMap { + p.setDurationEncAndDec(typ) + p.setTag(lockGetProp) + return + } + switch t1 := typ; t1.Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1) + + // proto3 scalar types + + case reflect.Bool: + if p.proto3 { + p.enc = (*Buffer).enc_proto3_bool + p.dec = (*Buffer).dec_proto3_bool + p.size = size_proto3_bool + } else { + p.enc = (*Buffer).enc_ref_bool + p.dec = (*Buffer).dec_proto3_bool + p.size = size_ref_bool + } + case reflect.Int32: + if p.proto3 { + p.enc = (*Buffer).enc_proto3_int32 + p.dec = (*Buffer).dec_proto3_int32 + p.size = size_proto3_int32 + } else { + p.enc = (*Buffer).enc_ref_int32 + p.dec = (*Buffer).dec_proto3_int32 + p.size = size_ref_int32 + } + case reflect.Uint32: + if p.proto3 { + p.enc = (*Buffer).enc_proto3_uint32 + p.dec = (*Buffer).dec_proto3_int32 // can reuse + p.size = size_proto3_uint32 + } else { + p.enc = (*Buffer).enc_ref_uint32 + p.dec = (*Buffer).dec_proto3_int32 // can reuse + p.size = size_ref_uint32 + } + case reflect.Int64, reflect.Uint64: + if p.proto3 { + p.enc = (*Buffer).enc_proto3_int64 + p.dec = (*Buffer).dec_proto3_int64 + p.size = size_proto3_int64 + } else { + p.enc = (*Buffer).enc_ref_int64 + p.dec = (*Buffer).dec_proto3_int64 + p.size = size_ref_int64 + } + case reflect.Float32: + if p.proto3 { + p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits + p.dec = (*Buffer).dec_proto3_int32 + p.size = size_proto3_uint32 + } else { + p.enc = (*Buffer).enc_ref_uint32 // can just treat them as bits + p.dec = (*Buffer).dec_proto3_int32 + p.size = size_ref_uint32 + } + case reflect.Float64: + if p.proto3 { + p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits + p.dec = (*Buffer).dec_proto3_int64 + p.size = size_proto3_int64 + } else { + p.enc = (*Buffer).enc_ref_int64 // can just treat them as bits + p.dec = (*Buffer).dec_proto3_int64 + p.size = size_ref_int64 + } + case reflect.String: + if p.proto3 { + p.enc = (*Buffer).enc_proto3_string + p.dec = (*Buffer).dec_proto3_string + p.size = size_proto3_string + } else { + p.enc = (*Buffer).enc_ref_string + p.dec = (*Buffer).dec_proto3_string + p.size = size_ref_string + } + case reflect.Struct: + p.stype = typ + p.isMarshaler = isMarshaler(typ) + p.isUnmarshaler = isUnmarshaler(typ) + if p.Wire == "bytes" { + p.enc = (*Buffer).enc_ref_struct_message + p.dec = (*Buffer).dec_ref_struct_message + p.size = size_ref_struct_message + } else { + fmt.Fprintf(os.Stderr, "proto: no coders for struct %T\n", typ) + } + + case reflect.Ptr: + switch t2 := t1.Elem(); t2.Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2) + break + case reflect.Bool: + p.enc = (*Buffer).enc_bool + p.dec = (*Buffer).dec_bool + p.size = size_bool + case reflect.Int32: + p.enc = (*Buffer).enc_int32 + p.dec = (*Buffer).dec_int32 + p.size = size_int32 + case reflect.Uint32: + p.enc = (*Buffer).enc_uint32 + p.dec = (*Buffer).dec_int32 // can reuse + p.size = size_uint32 + case reflect.Int64, reflect.Uint64: + p.enc = (*Buffer).enc_int64 + p.dec = (*Buffer).dec_int64 + p.size = size_int64 + case reflect.Float32: + p.enc = (*Buffer).enc_uint32 // can 
just treat them as bits + p.dec = (*Buffer).dec_int32 + p.size = size_uint32 + case reflect.Float64: + p.enc = (*Buffer).enc_int64 // can just treat them as bits + p.dec = (*Buffer).dec_int64 + p.size = size_int64 + case reflect.String: + p.enc = (*Buffer).enc_string + p.dec = (*Buffer).dec_string + p.size = size_string + case reflect.Struct: + p.stype = t1.Elem() + p.isMarshaler = isMarshaler(t1) + p.isUnmarshaler = isUnmarshaler(t1) + if p.Wire == "bytes" { + p.enc = (*Buffer).enc_struct_message + p.dec = (*Buffer).dec_struct_message + p.size = size_struct_message + } else { + p.enc = (*Buffer).enc_struct_group + p.dec = (*Buffer).dec_struct_group + p.size = size_struct_group + } + } + + case reflect.Slice: + switch t2 := t1.Elem(); t2.Kind() { + default: + logNoSliceEnc(t1, t2) + break + case reflect.Bool: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_bool + p.size = size_slice_packed_bool + } else { + p.enc = (*Buffer).enc_slice_bool + p.size = size_slice_bool + } + p.dec = (*Buffer).dec_slice_bool + p.packedDec = (*Buffer).dec_slice_packed_bool + case reflect.Int32: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_int32 + p.size = size_slice_packed_int32 + } else { + p.enc = (*Buffer).enc_slice_int32 + p.size = size_slice_int32 + } + p.dec = (*Buffer).dec_slice_int32 + p.packedDec = (*Buffer).dec_slice_packed_int32 + case reflect.Uint32: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_uint32 + p.size = size_slice_packed_uint32 + } else { + p.enc = (*Buffer).enc_slice_uint32 + p.size = size_slice_uint32 + } + p.dec = (*Buffer).dec_slice_int32 + p.packedDec = (*Buffer).dec_slice_packed_int32 + case reflect.Int64, reflect.Uint64: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_int64 + p.size = size_slice_packed_int64 + } else { + p.enc = (*Buffer).enc_slice_int64 + p.size = size_slice_int64 + } + p.dec = (*Buffer).dec_slice_int64 + p.packedDec = (*Buffer).dec_slice_packed_int64 + case reflect.Uint8: + p.dec = (*Buffer).dec_slice_byte + if p.proto3 { + p.enc = (*Buffer).enc_proto3_slice_byte + p.size = size_proto3_slice_byte + } else { + p.enc = (*Buffer).enc_slice_byte + p.size = size_slice_byte + } + case reflect.Float32, reflect.Float64: + switch t2.Bits() { + case 32: + // can just treat them as bits + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_uint32 + p.size = size_slice_packed_uint32 + } else { + p.enc = (*Buffer).enc_slice_uint32 + p.size = size_slice_uint32 + } + p.dec = (*Buffer).dec_slice_int32 + p.packedDec = (*Buffer).dec_slice_packed_int32 + case 64: + // can just treat them as bits + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_int64 + p.size = size_slice_packed_int64 + } else { + p.enc = (*Buffer).enc_slice_int64 + p.size = size_slice_int64 + } + p.dec = (*Buffer).dec_slice_int64 + p.packedDec = (*Buffer).dec_slice_packed_int64 + default: + logNoSliceEnc(t1, t2) + break + } + case reflect.String: + p.enc = (*Buffer).enc_slice_string + p.dec = (*Buffer).dec_slice_string + p.size = size_slice_string + case reflect.Ptr: + switch t3 := t2.Elem(); t3.Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3) + break + case reflect.Struct: + p.stype = t2.Elem() + p.isMarshaler = isMarshaler(t2) + p.isUnmarshaler = isUnmarshaler(t2) + if p.Wire == "bytes" { + p.enc = (*Buffer).enc_slice_struct_message + p.dec = (*Buffer).dec_slice_struct_message + p.size = size_slice_struct_message + } else { + p.enc = (*Buffer).enc_slice_struct_group + p.dec = (*Buffer).dec_slice_struct_group + p.size = 
size_slice_struct_group + } + } + case reflect.Slice: + switch t2.Elem().Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem()) + break + case reflect.Uint8: + p.enc = (*Buffer).enc_slice_slice_byte + p.dec = (*Buffer).dec_slice_slice_byte + p.size = size_slice_slice_byte + } + case reflect.Struct: + p.setSliceOfNonPointerStructs(t1) + } + + case reflect.Map: + p.enc = (*Buffer).enc_new_map + p.dec = (*Buffer).dec_new_map + p.size = size_new_map + + p.mtype = t1 + p.mkeyprop = &Properties{} + p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) + p.mvalprop = &Properties{} + vtype := p.mtype.Elem() + if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { + // The value type is not a message (*T) or bytes ([]byte), + // so we need encoders for the pointer to this type. + vtype = reflect.PtrTo(vtype) + } + + p.mvalprop.CustomType = p.CustomType + p.mvalprop.StdDuration = p.StdDuration + p.mvalprop.StdTime = p.StdTime + p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) + } + p.setTag(lockGetProp) +} + +func (p *Properties) setTag(lockGetProp bool) { + // precalculate tag code + wire := p.WireType + if p.Packed { + wire = WireBytes + } + x := uint32(p.Tag)<<3 | uint32(wire) + i := 0 + for i = 0; x > 127; i++ { + p.tagbuf[i] = 0x80 | uint8(x&0x7F) + x >>= 7 + } + p.tagbuf[i] = uint8(x) + p.tagcode = p.tagbuf[0 : i+1] + + if p.stype != nil { + if lockGetProp { + p.sprop = GetProperties(p.stype) + } else { + p.sprop = getPropertiesLocked(p.stype) + } + } +} + +var ( + marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() + unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() +) + +// isMarshaler reports whether type t implements Marshaler. +func isMarshaler(t reflect.Type) bool { + return t.Implements(marshalerType) +} + +// isUnmarshaler reports whether type t implements Unmarshaler. +func isUnmarshaler(t reflect.Type) bool { + return t.Implements(unmarshalerType) +} + +// Init populates the properties from a protocol buffer struct tag. +func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { + p.init(typ, name, tag, f, true) +} + +func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { + // "bytes,49,opt,def=hello!" + p.Name = name + p.OrigName = name + if f != nil { + p.field = toField(f) + } + if tag == "" { + return + } + p.Parse(tag) + p.setEncAndDec(typ, f, lockGetProp) +} + +var ( + propertiesMu sync.RWMutex + propertiesMap = make(map[reflect.Type]*StructProperties) +) + +// GetProperties returns the list of properties for the type represented by t. +// t must represent a generated struct type of a protocol message. +func GetProperties(t reflect.Type) *StructProperties { + if t.Kind() != reflect.Struct { + panic("proto: type must have kind struct") + } + + // Most calls to GetProperties in a long-running program will be + // retrieving details for types we have seen before. + propertiesMu.RLock() + sprop, ok := propertiesMap[t] + propertiesMu.RUnlock() + if ok { + if collectStats { + stats.Chit++ + } + return sprop + } + + propertiesMu.Lock() + sprop = getPropertiesLocked(t) + propertiesMu.Unlock() + return sprop +} + +// getPropertiesLocked requires that propertiesMu is held. 
+func getPropertiesLocked(t reflect.Type) *StructProperties { + if prop, ok := propertiesMap[t]; ok { + if collectStats { + stats.Chit++ + } + return prop + } + if collectStats { + stats.Cmiss++ + } + + prop := new(StructProperties) + // in case of recursive protos, fill this in now. + propertiesMap[t] = prop + + // build properties + prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) || + reflect.PtrTo(t).Implements(extendableProtoV1Type) || + reflect.PtrTo(t).Implements(extendableBytesType) + prop.unrecField = invalidField + prop.Prop = make([]*Properties, t.NumField()) + prop.order = make([]int, t.NumField()) + + isOneofMessage := false + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + p := new(Properties) + name := f.Name + p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) + + if f.Name == "XXX_InternalExtensions" { // special case + p.enc = (*Buffer).enc_exts + p.dec = nil // not needed + p.size = size_exts + } else if f.Name == "XXX_extensions" { // special case + if len(f.Tag.Get("protobuf")) > 0 { + p.enc = (*Buffer).enc_ext_slice_byte + p.dec = nil // not needed + p.size = size_ext_slice_byte + } else { + p.enc = (*Buffer).enc_map + p.dec = nil // not needed + p.size = size_map + } + } else if f.Name == "XXX_unrecognized" { // special case + prop.unrecField = toField(&f) + } + oneof := f.Tag.Get("protobuf_oneof") // special case + if oneof != "" { + isOneofMessage = true + // Oneof fields don't use the traditional protobuf tag. + p.OrigName = oneof + } + prop.Prop[i] = p + prop.order[i] = i + if debug { + print(i, " ", f.Name, " ", t.String(), " ") + if p.Tag > 0 { + print(p.String()) + } + print("\n") + } + if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && oneof == "" { + fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]") + } + } + + // Re-order prop.order. + sort.Sort(prop) + + type oneofMessage interface { + XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) + } + if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); isOneofMessage && ok { + var oots []interface{} + prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs() + prop.stype = t + + // Interpret oneof metadata. + prop.OneofTypes = make(map[string]*OneofProperties) + for _, oot := range oots { + oop := &OneofProperties{ + Type: reflect.ValueOf(oot).Type(), // *T + Prop: new(Properties), + } + sft := oop.Type.Elem().Field(0) + oop.Prop.Name = sft.Name + oop.Prop.Parse(sft.Tag.Get("protobuf")) + // There will be exactly one interface field that + // this new value is assignable to. + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if f.Type.Kind() != reflect.Interface { + continue + } + if !oop.Type.AssignableTo(f.Type) { + continue + } + oop.Field = i + break + } + prop.OneofTypes[oop.Prop.OrigName] = oop + } + } + + // build required counts + // build tags + reqCount := 0 + prop.decoderOrigNames = make(map[string]int) + for i, p := range prop.Prop { + if strings.HasPrefix(p.Name, "XXX_") { + // Internal fields should not appear in tags/origNames maps. + // They are handled specially when encoding and decoding. + continue + } + if p.Required { + reqCount++ + } + prop.decoderTags.put(p.Tag, i) + prop.decoderOrigNames[p.OrigName] = i + } + prop.reqCount = reqCount + + return prop +} + +// Return the Properties object for the x[0]'th field of the structure. 
+func propByIndex(t reflect.Type, x []int) *Properties { + if len(x) != 1 { + fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t) + return nil + } + prop := GetProperties(t) + return prop.Prop[x[0]] +} + +// Get the address and type of a pointer to a struct from an interface. +func getbase(pb Message) (t reflect.Type, b structPointer, err error) { + if pb == nil { + err = ErrNil + return + } + // get the reflect type of the pointer to the struct. + t = reflect.TypeOf(pb) + // get the address of the struct. + value := reflect.ValueOf(pb) + b = toStructPointer(value) + return +} + +// A global registry of enum types. +// The generated code will register the generated maps by calling RegisterEnum. + +var enumValueMaps = make(map[string]map[string]int32) +var enumStringMaps = make(map[string]map[int32]string) + +// RegisterEnum is called from the generated code to install the enum descriptor +// maps into the global table to aid parsing text format protocol buffers. +func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { + if _, ok := enumValueMaps[typeName]; ok { + panic("proto: duplicate enum registered: " + typeName) + } + enumValueMaps[typeName] = valueMap + if _, ok := enumStringMaps[typeName]; ok { + panic("proto: duplicate enum registered: " + typeName) + } + enumStringMaps[typeName] = unusedNameMap +} + +// EnumValueMap returns the mapping from names to integers of the +// enum type enumType, or a nil if not found. +func EnumValueMap(enumType string) map[string]int32 { + return enumValueMaps[enumType] +} + +// A registry of all linked message types. +// The string is a fully-qualified proto name ("pkg.Message"). +var ( + protoTypes = make(map[string]reflect.Type) + revProtoTypes = make(map[reflect.Type]string) +) + +// RegisterType is called from generated code and maps from the fully qualified +// proto name to the type (pointer to struct) of the protocol buffer. +func RegisterType(x Message, name string) { + if _, ok := protoTypes[name]; ok { + // TODO: Some day, make this a panic. + log.Printf("proto: duplicate proto type registered: %s", name) + return + } + t := reflect.TypeOf(x) + protoTypes[name] = t + revProtoTypes[t] = name +} + +// MessageName returns the fully-qualified proto name for the given message type. +func MessageName(x Message) string { + type xname interface { + XXX_MessageName() string + } + if m, ok := x.(xname); ok { + return m.XXX_MessageName() + } + return revProtoTypes[reflect.TypeOf(x)] +} + +// MessageType returns the message type (pointer to struct) for a named message. +func MessageType(name string) reflect.Type { return protoTypes[name] } + +// A registry of all linked proto files. +var ( + protoFiles = make(map[string][]byte) // file name => fileDescriptor +) + +// RegisterFile is called from generated code and maps from the +// full file name of a .proto file to its compressed FileDescriptorProto. +func RegisterFile(filename string, fileDescriptor []byte) { + protoFiles[filename] = fileDescriptor +} + +// FileDescriptor returns the compressed FileDescriptorProto for a .proto file. 
+func FileDescriptor(filename string) []byte { return protoFiles[filename] } diff --git a/vendor/github.com/gogo/protobuf/proto/properties_gogo.go b/vendor/github.com/gogo/protobuf/proto/properties_gogo.go new file mode 100644 index 000000000..b6b7176c5 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/properties_gogo.go @@ -0,0 +1,111 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package proto + +import ( + "fmt" + "os" + "reflect" +) + +func (p *Properties) setCustomEncAndDec(typ reflect.Type) { + p.ctype = typ + if p.Repeated { + p.enc = (*Buffer).enc_custom_slice_bytes + p.dec = (*Buffer).dec_custom_slice_bytes + p.size = size_custom_slice_bytes + } else if typ.Kind() == reflect.Ptr { + p.enc = (*Buffer).enc_custom_bytes + p.dec = (*Buffer).dec_custom_bytes + p.size = size_custom_bytes + } else { + p.enc = (*Buffer).enc_custom_ref_bytes + p.dec = (*Buffer).dec_custom_ref_bytes + p.size = size_custom_ref_bytes + } +} + +func (p *Properties) setDurationEncAndDec(typ reflect.Type) { + if p.Repeated { + if typ.Elem().Kind() == reflect.Ptr { + p.enc = (*Buffer).enc_slice_duration + p.dec = (*Buffer).dec_slice_duration + p.size = size_slice_duration + } else { + p.enc = (*Buffer).enc_slice_ref_duration + p.dec = (*Buffer).dec_slice_ref_duration + p.size = size_slice_ref_duration + } + } else if typ.Kind() == reflect.Ptr { + p.enc = (*Buffer).enc_duration + p.dec = (*Buffer).dec_duration + p.size = size_duration + } else { + p.enc = (*Buffer).enc_ref_duration + p.dec = (*Buffer).dec_ref_duration + p.size = size_ref_duration + } +} + +func (p *Properties) setTimeEncAndDec(typ reflect.Type) { + if p.Repeated { + if typ.Elem().Kind() == reflect.Ptr { + p.enc = (*Buffer).enc_slice_time + p.dec = (*Buffer).dec_slice_time + p.size = size_slice_time + } else { + p.enc = (*Buffer).enc_slice_ref_time + p.dec = (*Buffer).dec_slice_ref_time + p.size = size_slice_ref_time + } + } else if typ.Kind() == reflect.Ptr { + p.enc = (*Buffer).enc_time + p.dec = (*Buffer).dec_time + p.size = size_time + } else { + p.enc = (*Buffer).enc_ref_time + p.dec = (*Buffer).dec_ref_time + p.size = size_ref_time + } + +} + +func (p *Properties) setSliceOfNonPointerStructs(typ reflect.Type) { + t2 := typ.Elem() + p.sstype = typ + p.stype = t2 + p.isMarshaler = isMarshaler(t2) + p.isUnmarshaler = isUnmarshaler(t2) + p.enc = (*Buffer).enc_slice_ref_struct_message + p.dec = (*Buffer).dec_slice_ref_struct_message + p.size = size_slice_ref_struct_message + if p.Wire != "bytes" { + fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T \n", typ, t2) + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/skip_gogo.go b/vendor/github.com/gogo/protobuf/proto/skip_gogo.go new file mode 100644 index 000000000..5a5fd93f7 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/skip_gogo.go @@ -0,0 +1,119 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "fmt" + "io" +) + +func Skip(data []byte) (n int, err error) { + l := len(data) + index := 0 + for index < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[index] + index++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + index++ + if data[index-1] < 0x80 { + break + } + } + return index, nil + case 1: + index += 8 + return index, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[index] + index++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + index += length + return index, nil + case 3: + for { + var innerWire uint64 + var start int = index + for shift := uint(0); ; shift += 7 { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[index] + index++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := Skip(data[start:]) + if err != nil { + return 0, err + } + index = start + next + } + return index, nil + case 4: + return index, nil + case 5: + index += 4 + return index, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} diff --git a/vendor/github.com/gogo/protobuf/proto/text.go b/vendor/github.com/gogo/protobuf/proto/text.go new file mode 100644 index 000000000..d63732fcb --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/text.go @@ -0,0 +1,928 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// Functions for writing the text protocol buffer format. + +import ( + "bufio" + "bytes" + "encoding" + "errors" + "fmt" + "io" + "log" + "math" + "reflect" + "sort" + "strings" + "sync" + "time" +) + +var ( + newline = []byte("\n") + spaces = []byte(" ") + gtNewline = []byte(">\n") + endBraceNewline = []byte("}\n") + backslashN = []byte{'\\', 'n'} + backslashR = []byte{'\\', 'r'} + backslashT = []byte{'\\', 't'} + backslashDQ = []byte{'\\', '"'} + backslashBS = []byte{'\\', '\\'} + posInf = []byte("inf") + negInf = []byte("-inf") + nan = []byte("nan") +) + +type writer interface { + io.Writer + WriteByte(byte) error +} + +// textWriter is an io.Writer that tracks its indentation level. +type textWriter struct { + ind int + complete bool // if the current position is a complete line + compact bool // whether to write out as a one-liner + w writer +} + +func (w *textWriter) WriteString(s string) (n int, err error) { + if !strings.Contains(s, "\n") { + if !w.compact && w.complete { + w.writeIndent() + } + w.complete = false + return io.WriteString(w.w, s) + } + // WriteString is typically called without newlines, so this + // codepath and its copy are rare. We copy to avoid + // duplicating all of Write's logic here. 
+ return w.Write([]byte(s)) +} + +func (w *textWriter) Write(p []byte) (n int, err error) { + newlines := bytes.Count(p, newline) + if newlines == 0 { + if !w.compact && w.complete { + w.writeIndent() + } + n, err = w.w.Write(p) + w.complete = false + return n, err + } + + frags := bytes.SplitN(p, newline, newlines+1) + if w.compact { + for i, frag := range frags { + if i > 0 { + if err := w.w.WriteByte(' '); err != nil { + return n, err + } + n++ + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + } + return n, nil + } + + for i, frag := range frags { + if w.complete { + w.writeIndent() + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + if i+1 < len(frags) { + if err := w.w.WriteByte('\n'); err != nil { + return n, err + } + n++ + } + } + w.complete = len(frags[len(frags)-1]) == 0 + return n, nil +} + +func (w *textWriter) WriteByte(c byte) error { + if w.compact && c == '\n' { + c = ' ' + } + if !w.compact && w.complete { + w.writeIndent() + } + err := w.w.WriteByte(c) + w.complete = c == '\n' + return err +} + +func (w *textWriter) indent() { w.ind++ } + +func (w *textWriter) unindent() { + if w.ind == 0 { + log.Print("proto: textWriter unindented too far") + return + } + w.ind-- +} + +func writeName(w *textWriter, props *Properties) error { + if _, err := w.WriteString(props.OrigName); err != nil { + return err + } + if props.Wire != "group" { + return w.WriteByte(':') + } + return nil +} + +// raw is the interface satisfied by RawMessage. +type raw interface { + Bytes() []byte +} + +func requiresQuotes(u string) bool { + // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. + for _, ch := range u { + switch { + case ch == '.' || ch == '/' || ch == '_': + continue + case '0' <= ch && ch <= '9': + continue + case 'A' <= ch && ch <= 'Z': + continue + case 'a' <= ch && ch <= 'z': + continue + default: + return true + } + } + return false +} + +// isAny reports whether sv is a google.protobuf.Any message +func isAny(sv reflect.Value) bool { + type wkt interface { + XXX_WellKnownType() string + } + t, ok := sv.Addr().Interface().(wkt) + return ok && t.XXX_WellKnownType() == "Any" +} + +// writeProto3Any writes an expanded google.protobuf.Any message. +// +// It returns (false, nil) if sv value can't be unmarshaled (e.g. because +// required messages are not linked in). +// +// It returns (true, error) when sv was written in expanded format or an error +// was encountered. 
+func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) { + turl := sv.FieldByName("TypeUrl") + val := sv.FieldByName("Value") + if !turl.IsValid() || !val.IsValid() { + return true, errors.New("proto: invalid google.protobuf.Any message") + } + + b, ok := val.Interface().([]byte) + if !ok { + return true, errors.New("proto: invalid google.protobuf.Any message") + } + + parts := strings.Split(turl.String(), "/") + mt := MessageType(parts[len(parts)-1]) + if mt == nil { + return false, nil + } + m := reflect.New(mt.Elem()) + if err := Unmarshal(b, m.Interface().(Message)); err != nil { + return false, nil + } + w.Write([]byte("[")) + u := turl.String() + if requiresQuotes(u) { + writeString(w, u) + } else { + w.Write([]byte(u)) + } + if w.compact { + w.Write([]byte("]:<")) + } else { + w.Write([]byte("]: <\n")) + w.ind++ + } + if err := tm.writeStruct(w, m.Elem()); err != nil { + return true, err + } + if w.compact { + w.Write([]byte("> ")) + } else { + w.ind-- + w.Write([]byte(">\n")) + } + return true, nil +} + +func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { + if tm.ExpandAny && isAny(sv) { + if canExpand, err := tm.writeProto3Any(w, sv); canExpand { + return err + } + } + st := sv.Type() + sprops := GetProperties(st) + for i := 0; i < sv.NumField(); i++ { + fv := sv.Field(i) + props := sprops.Prop[i] + name := st.Field(i).Name + + if strings.HasPrefix(name, "XXX_") { + // There are two XXX_ fields: + // XXX_unrecognized []byte + // XXX_extensions map[int32]proto.Extension + // The first is handled here; + // the second is handled at the bottom of this function. + if name == "XXX_unrecognized" && !fv.IsNil() { + if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { + return err + } + } + continue + } + if fv.Kind() == reflect.Ptr && fv.IsNil() { + // Field not filled in. This could be an optional field or + // a required field that wasn't filled in. Either way, there + // isn't anything we can show for it. + continue + } + if fv.Kind() == reflect.Slice && fv.IsNil() { + // Repeated field that is empty, or a bytes field that is unused. + continue + } + + if props.Repeated && fv.Kind() == reflect.Slice { + // Repeated field. + for j := 0; j < fv.Len(); j++ { + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + v := fv.Index(j) + if v.Kind() == reflect.Ptr && v.IsNil() { + // A nil message in a repeated field is not valid, + // but we can handle that more gracefully than panicking. + if _, err := w.Write([]byte("\n")); err != nil { + return err + } + continue + } + if len(props.Enum) > 0 { + if err := tm.writeEnum(w, v, props); err != nil { + return err + } + } else if err := tm.writeAny(w, v, props); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + continue + } + if fv.Kind() == reflect.Map { + // Map fields are rendered as a repeated struct with key/value fields. 
+ keys := fv.MapKeys() + sort.Sort(mapKeys(keys)) + for _, key := range keys { + val := fv.MapIndex(key) + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + // open struct + if err := w.WriteByte('<'); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + // key + if _, err := w.WriteString("key:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, key, props.mkeyprop); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + // nil values aren't legal, but we can avoid panicking because of them. + if val.Kind() != reflect.Ptr || !val.IsNil() { + // value + if _, err := w.WriteString("value:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, val, props.mvalprop); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + // close struct + w.unindent() + if err := w.WriteByte('>'); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + continue + } + if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { + // empty bytes field + continue + } + if props.proto3 && fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { + // proto3 non-repeated scalar field; skip if zero value + if isProto3Zero(fv) { + continue + } + } + + if fv.Kind() == reflect.Interface { + // Check if it is a oneof. + if st.Field(i).Tag.Get("protobuf_oneof") != "" { + // fv is nil, or holds a pointer to generated struct. + // That generated struct has exactly one field, + // which has a protobuf struct tag. + if fv.IsNil() { + continue + } + inner := fv.Elem().Elem() // interface -> *T -> T + tag := inner.Type().Field(0).Tag.Get("protobuf") + props = new(Properties) // Overwrite the outer props var, but not its pointee. + props.Parse(tag) + // Write the value in the oneof, not the oneof itself. + fv = inner.Field(0) + + // Special case to cope with malformed messages gracefully: + // If the value in the oneof is a nil pointer, don't panic + // in writeAny. + if fv.Kind() == reflect.Ptr && fv.IsNil() { + // Use errors.New so writeAny won't render quotes. + msg := errors.New("/* nil */") + fv = reflect.ValueOf(&msg).Elem() + } + } + } + + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if b, ok := fv.Interface().(raw); ok { + if err := writeRaw(w, b.Bytes()); err != nil { + return err + } + continue + } + + if len(props.Enum) > 0 { + if err := tm.writeEnum(w, fv, props); err != nil { + return err + } + } else if err := tm.writeAny(w, fv, props); err != nil { + return err + } + + if err := w.WriteByte('\n'); err != nil { + return err + } + } + + // Extensions (the XXX_extensions field). + pv := sv + if pv.CanAddr() { + pv = sv.Addr() + } else { + pv = reflect.New(sv.Type()) + pv.Elem().Set(sv) + } + if pv.Type().Implements(extensionRangeType) { + if err := tm.writeExtensions(w, pv); err != nil { + return err + } + } + + return nil +} + +// writeRaw writes an uninterpreted raw message. 
+func writeRaw(w *textWriter, b []byte) error { + if err := w.WriteByte('<'); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + if err := writeUnknownStruct(w, b); err != nil { + return err + } + w.unindent() + if err := w.WriteByte('>'); err != nil { + return err + } + return nil +} + +// writeAny writes an arbitrary field. +func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { + v = reflect.Indirect(v) + + if props != nil { + if len(props.CustomType) > 0 { + custom, ok := v.Interface().(Marshaler) + if ok { + data, err := custom.Marshal() + if err != nil { + return err + } + if err := writeString(w, string(data)); err != nil { + return err + } + return nil + } + } else if props.StdTime { + t, ok := v.Interface().(time.Time) + if !ok { + return fmt.Errorf("stdtime is not time.Time, but %T", v.Interface()) + } + tproto, err := timestampProto(t) + if err != nil { + return err + } + props.StdTime = false + err = tm.writeAny(w, reflect.ValueOf(tproto), props) + props.StdTime = true + return err + } else if props.StdDuration { + d, ok := v.Interface().(time.Duration) + if !ok { + return fmt.Errorf("stdtime is not time.Duration, but %T", v.Interface()) + } + dproto := durationProto(d) + props.StdDuration = false + err := tm.writeAny(w, reflect.ValueOf(dproto), props) + props.StdDuration = true + return err + } + } + + // Floats have special cases. + if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { + x := v.Float() + var b []byte + switch { + case math.IsInf(x, 1): + b = posInf + case math.IsInf(x, -1): + b = negInf + case math.IsNaN(x): + b = nan + } + if b != nil { + _, err := w.Write(b) + return err + } + // Other values are handled below. + } + + // We don't attempt to serialise every possible value type; only those + // that can occur in protocol buffers. + switch v.Kind() { + case reflect.Slice: + // Should only be a []byte; repeated fields are handled in writeStruct. + if err := writeString(w, string(v.Bytes())); err != nil { + return err + } + case reflect.String: + if err := writeString(w, v.String()); err != nil { + return err + } + case reflect.Struct: + // Required/optional group/message. + var bra, ket byte = '<', '>' + if props != nil && props.Wire == "group" { + bra, ket = '{', '}' + } + if err := w.WriteByte(bra); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + if etm, ok := v.Interface().(encoding.TextMarshaler); ok { + text, err := etm.MarshalText() + if err != nil { + return err + } + if _, err = w.Write(text); err != nil { + return err + } + } else if err := tm.writeStruct(w, v); err != nil { + return err + } + w.unindent() + if err := w.WriteByte(ket); err != nil { + return err + } + default: + _, err := fmt.Fprint(w, v.Interface()) + return err + } + return nil +} + +// equivalent to C's isprint. +func isprint(c byte) bool { + return c >= 0x20 && c < 0x7f +} + +// writeString writes a string in the protocol buffer text format. +// It is similar to strconv.Quote except we don't use Go escape sequences, +// we treat the string as a byte sequence, and we use octal escapes. +// These differences are to maintain interoperability with the other +// languages' implementations of the text format. 
+func writeString(w *textWriter, s string) error { + // use WriteByte here to get any needed indent + if err := w.WriteByte('"'); err != nil { + return err + } + // Loop over the bytes, not the runes. + for i := 0; i < len(s); i++ { + var err error + // Divergence from C++: we don't escape apostrophes. + // There's no need to escape them, and the C++ parser + // copes with a naked apostrophe. + switch c := s[i]; c { + case '\n': + _, err = w.w.Write(backslashN) + case '\r': + _, err = w.w.Write(backslashR) + case '\t': + _, err = w.w.Write(backslashT) + case '"': + _, err = w.w.Write(backslashDQ) + case '\\': + _, err = w.w.Write(backslashBS) + default: + if isprint(c) { + err = w.w.WriteByte(c) + } else { + _, err = fmt.Fprintf(w.w, "\\%03o", c) + } + } + if err != nil { + return err + } + } + return w.WriteByte('"') +} + +func writeUnknownStruct(w *textWriter, data []byte) (err error) { + if !w.compact { + if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { + return err + } + } + b := NewBuffer(data) + for b.index < len(b.buf) { + x, err := b.DecodeVarint() + if err != nil { + _, ferr := fmt.Fprintf(w, "/* %v */\n", err) + return ferr + } + wire, tag := x&7, x>>3 + if wire == WireEndGroup { + w.unindent() + if _, werr := w.Write(endBraceNewline); werr != nil { + return werr + } + continue + } + if _, ferr := fmt.Fprint(w, tag); ferr != nil { + return ferr + } + if wire != WireStartGroup { + if err = w.WriteByte(':'); err != nil { + return err + } + } + if !w.compact || wire == WireStartGroup { + if err = w.WriteByte(' '); err != nil { + return err + } + } + switch wire { + case WireBytes: + buf, e := b.DecodeRawBytes(false) + if e == nil { + _, err = fmt.Fprintf(w, "%q", buf) + } else { + _, err = fmt.Fprintf(w, "/* %v */", e) + } + case WireFixed32: + x, err = b.DecodeFixed32() + err = writeUnknownInt(w, x, err) + case WireFixed64: + x, err = b.DecodeFixed64() + err = writeUnknownInt(w, x, err) + case WireStartGroup: + err = w.WriteByte('{') + w.indent() + case WireVarint: + x, err = b.DecodeVarint() + err = writeUnknownInt(w, x, err) + default: + _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) + } + if err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + return nil +} + +func writeUnknownInt(w *textWriter, x uint64, err error) error { + if err == nil { + _, err = fmt.Fprint(w, x) + } else { + _, err = fmt.Fprintf(w, "/* %v */", err) + } + return err +} + +type int32Slice []int32 + +func (s int32Slice) Len() int { return len(s) } +func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } +func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// writeExtensions writes all the extensions in pv. +// pv is assumed to be a pointer to a protocol message struct that is extendable. +func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error { + emap := extensionMaps[pv.Type().Elem()] + e := pv.Interface().(Message) + + var m map[int32]Extension + var mu sync.Locker + if em, ok := e.(extensionsBytes); ok { + eb := em.GetExtensions() + var err error + m, err = BytesToExtensionsMap(*eb) + if err != nil { + return err + } + mu = notLocker{} + } else if _, ok := e.(extendableProto); ok { + ep, _ := extendable(e) + m, mu = ep.extensionsRead() + if m == nil { + return nil + } + } + + // Order the extensions by ID. + // This isn't strictly necessary, but it will give us + // canonical output, which will also make testing easier. 
+ + mu.Lock() + ids := make([]int32, 0, len(m)) + for id := range m { + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) + mu.Unlock() + + for _, extNum := range ids { + ext := m[extNum] + var desc *ExtensionDesc + if emap != nil { + desc = emap[extNum] + } + if desc == nil { + // Unknown extension. + if err := writeUnknownStruct(w, ext.enc); err != nil { + return err + } + continue + } + + pb, err := GetExtension(e, desc) + if err != nil { + return fmt.Errorf("failed getting extension: %v", err) + } + + // Repeated extensions will appear as a slice. + if !desc.repeated() { + if err := tm.writeExtension(w, desc.Name, pb); err != nil { + return err + } + } else { + v := reflect.ValueOf(pb) + for i := 0; i < v.Len(); i++ { + if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { + return err + } + } + } + } + return nil +} + +func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error { + if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + return nil +} + +func (w *textWriter) writeIndent() { + if !w.complete { + return + } + remain := w.ind * 2 + for remain > 0 { + n := remain + if n > len(spaces) { + n = len(spaces) + } + w.w.Write(spaces[:n]) + remain -= n + } + w.complete = false +} + +// TextMarshaler is a configurable text format marshaler. +type TextMarshaler struct { + Compact bool // use compact text format (one line). + ExpandAny bool // expand google.protobuf.Any messages of known types +} + +// Marshal writes a given protocol buffer in text format. +// The only errors returned are from w. +func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error { + val := reflect.ValueOf(pb) + if pb == nil || val.IsNil() { + w.Write([]byte("")) + return nil + } + var bw *bufio.Writer + ww, ok := w.(writer) + if !ok { + bw = bufio.NewWriter(w) + ww = bw + } + aw := &textWriter{ + w: ww, + complete: true, + compact: tm.Compact, + } + + if etm, ok := pb.(encoding.TextMarshaler); ok { + text, err := etm.MarshalText() + if err != nil { + return err + } + if _, err = aw.Write(text); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil + } + // Dereference the received pointer so we don't have outer < and >. + v := reflect.Indirect(val) + if err := tm.writeStruct(aw, v); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil +} + +// Text is the same as Marshal, but returns the string directly. +func (tm *TextMarshaler) Text(pb Message) string { + var buf bytes.Buffer + tm.Marshal(&buf, pb) + return buf.String() +} + +var ( + defaultTextMarshaler = TextMarshaler{} + compactTextMarshaler = TextMarshaler{Compact: true} +) + +// TODO: consider removing some of the Marshal functions below. + +// MarshalText writes a given protocol buffer in text format. +// The only errors returned are from w. +func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) } + +// MarshalTextString is the same as MarshalText, but returns the string directly. +func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) } + +// CompactText writes a given protocol buffer in compact text format (one line). 
+func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) } + +// CompactTextString is the same as CompactText, but returns the string directly. +func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) } diff --git a/vendor/github.com/gogo/protobuf/proto/text_gogo.go b/vendor/github.com/gogo/protobuf/proto/text_gogo.go new file mode 100644 index 000000000..1d6c6aa0e --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/text_gogo.go @@ -0,0 +1,57 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "fmt" + "reflect" +) + +func (tm *TextMarshaler) writeEnum(w *textWriter, v reflect.Value, props *Properties) error { + m, ok := enumStringMaps[props.Enum] + if !ok { + if err := tm.writeAny(w, v, props); err != nil { + return err + } + } + key := int32(0) + if v.Kind() == reflect.Ptr { + key = int32(v.Elem().Int()) + } else { + key = int32(v.Int()) + } + s, ok := m[key] + if !ok { + if err := tm.writeAny(w, v, props); err != nil { + return err + } + } + _, err := fmt.Fprint(w, s) + return err +} diff --git a/vendor/github.com/gogo/protobuf/proto/text_parser.go b/vendor/github.com/gogo/protobuf/proto/text_parser.go new file mode 100644 index 000000000..9db12e960 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/text_parser.go @@ -0,0 +1,1013 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// Functions for parsing the Text protocol buffer format. +// TODO: message sets. + +import ( + "encoding" + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +// Error string emitted when deserializing Any and fields are already set +const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set" + +type ParseError struct { + Message string + Line int // 1-based line number + Offset int // 0-based byte offset from start of input +} + +func (p *ParseError) Error() string { + if p.Line == 1 { + // show offset only for first line + return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) + } + return fmt.Sprintf("line %d: %v", p.Line, p.Message) +} + +type token struct { + value string + err *ParseError + line int // line number + offset int // byte number from start of input, not start of line + unquoted string // the unquoted version of value, if it was a quoted string +} + +func (t *token) String() string { + if t.err == nil { + return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) + } + return fmt.Sprintf("parse error: %v", t.err) +} + +type textParser struct { + s string // remaining input + done bool // whether the parsing is finished (success or error) + backed bool // whether back() was called + offset, line int + cur token +} + +func newTextParser(s string) *textParser { + p := new(textParser) + p.s = s + p.line = 1 + p.cur.line = 1 + return p +} + +func (p *textParser) errorf(format string, a ...interface{}) *ParseError { + pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} + p.cur.err = pe + p.done = true + return pe +} + +// Numbers and identifiers are matched by [-+._A-Za-z0-9] +func isIdentOrNumberChar(c byte) bool { + switch { + case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': + return true + case '0' <= c && c <= '9': + return true + } + switch c { + case '-', '+', '.', '_': + return true + } + return false +} + +func isWhitespace(c byte) bool { + switch c { + case ' ', '\t', '\n', '\r': + return true + } + return false +} + +func isQuote(c byte) bool { + switch c { + case '"', '\'': + return true + } + return false +} + +func (p *textParser) skipWhitespace() { + i := 0 + for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { + if p.s[i] == '#' { + // comment; 
skip to end of line or input + for i < len(p.s) && p.s[i] != '\n' { + i++ + } + if i == len(p.s) { + break + } + } + if p.s[i] == '\n' { + p.line++ + } + i++ + } + p.offset += i + p.s = p.s[i:len(p.s)] + if len(p.s) == 0 { + p.done = true + } +} + +func (p *textParser) advance() { + // Skip whitespace + p.skipWhitespace() + if p.done { + return + } + + // Start of non-whitespace + p.cur.err = nil + p.cur.offset, p.cur.line = p.offset, p.line + p.cur.unquoted = "" + switch p.s[0] { + case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': + // Single symbol + p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] + case '"', '\'': + // Quoted string + i := 1 + for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { + if p.s[i] == '\\' && i+1 < len(p.s) { + // skip escaped char + i++ + } + i++ + } + if i >= len(p.s) || p.s[i] != p.s[0] { + p.errorf("unmatched quote") + return + } + unq, err := unquoteC(p.s[1:i], rune(p.s[0])) + if err != nil { + p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) + return + } + p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] + p.cur.unquoted = unq + default: + i := 0 + for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { + i++ + } + if i == 0 { + p.errorf("unexpected byte %#x", p.s[0]) + return + } + p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] + } + p.offset += len(p.cur.value) +} + +var ( + errBadUTF8 = errors.New("proto: bad UTF-8") + errBadHex = errors.New("proto: bad hexadecimal") +) + +func unquoteC(s string, quote rune) (string, error) { + // This is based on C++'s tokenizer.cc. + // Despite its name, this is *not* parsing C syntax. + // For instance, "\0" is an invalid quoted string. + + // Avoid allocation in trivial cases. + simple := true + for _, r := range s { + if r == '\\' || r == quote { + simple = false + break + } + } + if simple { + return s, nil + } + + buf := make([]byte, 0, 3*len(s)/2) + for len(s) > 0 { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", errBadUTF8 + } + s = s[n:] + if r != '\\' { + if r < utf8.RuneSelf { + buf = append(buf, byte(r)) + } else { + buf = append(buf, string(r)...) + } + continue + } + + ch, tail, err := unescape(s) + if err != nil { + return "", err + } + buf = append(buf, ch...) 
+ s = tail + } + return string(buf), nil +} + +func unescape(s string) (ch string, tail string, err error) { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", "", errBadUTF8 + } + s = s[n:] + switch r { + case 'a': + return "\a", s, nil + case 'b': + return "\b", s, nil + case 'f': + return "\f", s, nil + case 'n': + return "\n", s, nil + case 'r': + return "\r", s, nil + case 't': + return "\t", s, nil + case 'v': + return "\v", s, nil + case '?': + return "?", s, nil // trigraph workaround + case '\'', '"', '\\': + return string(r), s, nil + case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X': + if len(s) < 2 { + return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) + } + base := 8 + ss := s[:2] + s = s[2:] + if r == 'x' || r == 'X' { + base = 16 + } else { + ss = string(r) + ss + } + i, err := strconv.ParseUint(ss, base, 8) + if err != nil { + return "", "", err + } + return string([]byte{byte(i)}), s, nil + case 'u', 'U': + n := 4 + if r == 'U' { + n = 8 + } + if len(s) < n { + return "", "", fmt.Errorf(`\%c requires %d digits`, r, n) + } + + bs := make([]byte, n/2) + for i := 0; i < n; i += 2 { + a, ok1 := unhex(s[i]) + b, ok2 := unhex(s[i+1]) + if !ok1 || !ok2 { + return "", "", errBadHex + } + bs[i/2] = a<<4 | b + } + s = s[n:] + return string(bs), s, nil + } + return "", "", fmt.Errorf(`unknown escape \%c`, r) +} + +// Adapted from src/pkg/strconv/quote.go. +func unhex(b byte) (v byte, ok bool) { + switch { + case '0' <= b && b <= '9': + return b - '0', true + case 'a' <= b && b <= 'f': + return b - 'a' + 10, true + case 'A' <= b && b <= 'F': + return b - 'A' + 10, true + } + return 0, false +} + +// Back off the parser by one token. Can only be done between calls to next(). +// It makes the next advance() a no-op. +func (p *textParser) back() { p.backed = true } + +// Advances the parser and returns the new current token. +func (p *textParser) next() *token { + if p.backed || p.done { + p.backed = false + return &p.cur + } + p.advance() + if p.done { + p.cur.value = "" + } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { + // Look for multiple quoted strings separated by whitespace, + // and concatenate them. + cat := p.cur + for { + p.skipWhitespace() + if p.done || !isQuote(p.s[0]) { + break + } + p.advance() + if p.cur.err != nil { + return &p.cur + } + cat.value += " " + p.cur.value + cat.unquoted += p.cur.unquoted + } + p.done = false // parser may have seen EOF, but we want to return cat + p.cur = cat + } + return &p.cur +} + +func (p *textParser) consumeToken(s string) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != s { + p.back() + return p.errorf("expected %q, found %q", s, tok.value) + } + return nil +} + +// Return a RequiredNotSetError indicating which required field was not set. +func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { + st := sv.Type() + sprops := GetProperties(st) + for i := 0; i < st.NumField(); i++ { + if !isNil(sv.Field(i)) { + continue + } + + props := sprops.Prop[i] + if props.Required { + return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} + } + } + return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen +} + +// Returns the index in the struct for the named field, as well as the parsed tag properties. 
+func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) { + i, ok := sprops.decoderOrigNames[name] + if ok { + return i, sprops.Prop[i], true + } + return -1, nil, false +} + +// Consume a ':' from the input stream (if the next token is a colon), +// returning an error if a colon is needed but not present. +func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ":" { + // Colon is optional when the field is a group or message. + needColon := true + switch props.Wire { + case "group": + needColon = false + case "bytes": + // A "bytes" field is either a message, a string, or a repeated field; + // those three become *T, *string and []T respectively, so we can check for + // this field being a pointer to a non-string. + if typ.Kind() == reflect.Ptr { + // *T or *string + if typ.Elem().Kind() == reflect.String { + break + } + } else if typ.Kind() == reflect.Slice { + // []T or []*T + if typ.Elem().Kind() != reflect.Ptr { + break + } + } else if typ.Kind() == reflect.String { + // The proto3 exception is for a string field, + // which requires a colon. + break + } + needColon = false + } + if needColon { + return p.errorf("expected ':', found %q", tok.value) + } + p.back() + } + return nil +} + +func (p *textParser) readStruct(sv reflect.Value, terminator string) error { + st := sv.Type() + sprops := GetProperties(st) + reqCount := sprops.reqCount + var reqFieldErr error + fieldSet := make(map[string]bool) + // A struct is a sequence of "name: value", terminated by one of + // '>' or '}', or the end of the input. A name may also be + // "[extension]" or "[type/url]". + // + // The whole struct can also be an expanded Any message, like: + // [type/url] < ... struct contents ... > + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + if tok.value == "[" { + // Looks like an extension or an Any. + // + // TODO: Check whether we need to handle + // namespace rooted names (e.g. ".something.Foo"). + extName, err := p.consumeExtName() + if err != nil { + return err + } + + if s := strings.LastIndex(extName, "/"); s >= 0 { + // If it contains a slash, it's an Any type URL. + messageName := extName[s+1:] + mt := MessageType(messageName) + if mt == nil { + return p.errorf("unrecognized message %q in google.protobuf.Any", messageName) + } + tok = p.next() + if tok.err != nil { + return tok.err + } + // consume an optional colon + if tok.value == ":" { + tok = p.next() + if tok.err != nil { + return tok.err + } + } + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + v := reflect.New(mt.Elem()) + if pe := p.readStruct(v.Elem(), terminator); pe != nil { + return pe + } + b, err := Marshal(v.Interface().(Message)) + if err != nil { + return p.errorf("failed to marshal message of type %q: %v", messageName, err) + } + if fieldSet["type_url"] { + return p.errorf(anyRepeatedlyUnpacked, "type_url") + } + if fieldSet["value"] { + return p.errorf(anyRepeatedlyUnpacked, "value") + } + sv.FieldByName("TypeUrl").SetString(extName) + sv.FieldByName("Value").SetBytes(b) + fieldSet["type_url"] = true + fieldSet["value"] = true + continue + } + + var desc *ExtensionDesc + // This could be faster, but it's functional. + // TODO: Do something smarter than a linear scan. 
+ for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { + if d.Name == extName { + desc = d + break + } + } + if desc == nil { + return p.errorf("unrecognized extension %q", extName) + } + + props := &Properties{} + props.Parse(desc.Tag) + + typ := reflect.TypeOf(desc.ExtensionType) + if err := p.checkForColon(props, typ); err != nil { + return err + } + + rep := desc.repeated() + + // Read the extension structure, and set it in + // the value we're constructing. + var ext reflect.Value + if !rep { + ext = reflect.New(typ).Elem() + } else { + ext = reflect.New(typ.Elem()).Elem() + } + if err := p.readAny(ext, props); err != nil { + if _, ok := err.(*RequiredNotSetError); !ok { + return err + } + reqFieldErr = err + } + ep := sv.Addr().Interface().(Message) + if !rep { + SetExtension(ep, desc, ext.Interface()) + } else { + old, err := GetExtension(ep, desc) + var sl reflect.Value + if err == nil { + sl = reflect.ValueOf(old) // existing slice + } else { + sl = reflect.MakeSlice(typ, 0, 1) + } + sl = reflect.Append(sl, ext) + SetExtension(ep, desc, sl.Interface()) + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + continue + } + + // This is a normal, non-extension field. + name := tok.value + var dst reflect.Value + fi, props, ok := structFieldByName(sprops, name) + if ok { + dst = sv.Field(fi) + } else if oop, ok := sprops.OneofTypes[name]; ok { + // It is a oneof. + props = oop.Prop + nv := reflect.New(oop.Type.Elem()) + dst = nv.Elem().Field(0) + field := sv.Field(oop.Field) + if !field.IsNil() { + return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name) + } + field.Set(nv) + } + if !dst.IsValid() { + return p.errorf("unknown field name %q in %v", name, st) + } + + if dst.Kind() == reflect.Map { + // Consume any colon. + if err := p.checkForColon(props, dst.Type()); err != nil { + return err + } + + // Construct the map if it doesn't already exist. + if dst.IsNil() { + dst.Set(reflect.MakeMap(dst.Type())) + } + key := reflect.New(dst.Type().Key()).Elem() + val := reflect.New(dst.Type().Elem()).Elem() + + // The map entry should be this sequence of tokens: + // < key : KEY value : VALUE > + // However, implementations may omit key or value, and technically + // we should support them in any order. See b/28924776 for a time + // this went wrong. + + tok := p.next() + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + switch tok.value { + case "key": + if err := p.consumeToken(":"); err != nil { + return err + } + if err := p.readAny(key, props.mkeyprop); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + case "value": + if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil { + return err + } + if err := p.readAny(val, props.mvalprop); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + default: + p.back() + return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) + } + } + + dst.SetMapIndex(key, val) + continue + } + + // Check that it's not already set if it's not a repeated field. 
+ if !props.Repeated && fieldSet[name] { + return p.errorf("non-repeated field %q was repeated", name) + } + + if err := p.checkForColon(props, dst.Type()); err != nil { + return err + } + + // Parse into the field. + fieldSet[name] = true + if err := p.readAny(dst, props); err != nil { + if _, ok := err.(*RequiredNotSetError); !ok { + return err + } + reqFieldErr = err + } + if props.Required { + reqCount-- + } + + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + + } + + if reqCount > 0 { + return p.missingRequiredFieldError(sv) + } + return reqFieldErr +} + +// consumeExtName consumes extension name or expanded Any type URL and the +// following ']'. It returns the name or URL consumed. +func (p *textParser) consumeExtName() (string, error) { + tok := p.next() + if tok.err != nil { + return "", tok.err + } + + // If extension name or type url is quoted, it's a single token. + if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { + name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) + if err != nil { + return "", err + } + return name, p.consumeToken("]") + } + + // Consume everything up to "]" + var parts []string + for tok.value != "]" { + parts = append(parts, tok.value) + tok = p.next() + if tok.err != nil { + return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) + } + } + return strings.Join(parts, ""), nil +} + +// consumeOptionalSeparator consumes an optional semicolon or comma. +// It is used in readStruct to provide backward compatibility. +func (p *textParser) consumeOptionalSeparator() error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ";" && tok.value != "," { + p.back() + } + return nil +} + +func (p *textParser) readAny(v reflect.Value, props *Properties) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == "" { + return p.errorf("unexpected EOF") + } + if len(props.CustomType) > 0 { + if props.Repeated { + t := reflect.TypeOf(v.Interface()) + if t.Kind() == reflect.Slice { + tc := reflect.TypeOf(new(Marshaler)) + ok := t.Elem().Implements(tc.Elem()) + if ok { + fv := v + flen := fv.Len() + if flen == fv.Cap() { + nav := reflect.MakeSlice(v.Type(), flen, 2*flen+1) + reflect.Copy(nav, fv) + fv.Set(nav) + } + fv.SetLen(flen + 1) + + // Read one. 
+ p.back() + return p.readAny(fv.Index(flen), props) + } + } + } + if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr { + custom := reflect.New(props.ctype.Elem()).Interface().(Unmarshaler) + err := custom.Unmarshal([]byte(tok.unquoted)) + if err != nil { + return p.errorf("%v %v: %v", err, v.Type(), tok.value) + } + v.Set(reflect.ValueOf(custom)) + } else { + custom := reflect.New(reflect.TypeOf(v.Interface())).Interface().(Unmarshaler) + err := custom.Unmarshal([]byte(tok.unquoted)) + if err != nil { + return p.errorf("%v %v: %v", err, v.Type(), tok.value) + } + v.Set(reflect.Indirect(reflect.ValueOf(custom))) + } + return nil + } + if props.StdTime { + fv := v + p.back() + props.StdTime = false + tproto := ×tamp{} + err := p.readAny(reflect.ValueOf(tproto).Elem(), props) + props.StdTime = true + if err != nil { + return err + } + tim, err := timestampFromProto(tproto) + if err != nil { + return err + } + if props.Repeated { + t := reflect.TypeOf(v.Interface()) + if t.Kind() == reflect.Slice { + if t.Elem().Kind() == reflect.Ptr { + ts := fv.Interface().([]*time.Time) + ts = append(ts, &tim) + fv.Set(reflect.ValueOf(ts)) + return nil + } else { + ts := fv.Interface().([]time.Time) + ts = append(ts, tim) + fv.Set(reflect.ValueOf(ts)) + return nil + } + } + } + if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr { + v.Set(reflect.ValueOf(&tim)) + } else { + v.Set(reflect.Indirect(reflect.ValueOf(&tim))) + } + return nil + } + if props.StdDuration { + fv := v + p.back() + props.StdDuration = false + dproto := &duration{} + err := p.readAny(reflect.ValueOf(dproto).Elem(), props) + props.StdDuration = true + if err != nil { + return err + } + dur, err := durationFromProto(dproto) + if err != nil { + return err + } + if props.Repeated { + t := reflect.TypeOf(v.Interface()) + if t.Kind() == reflect.Slice { + if t.Elem().Kind() == reflect.Ptr { + ds := fv.Interface().([]*time.Duration) + ds = append(ds, &dur) + fv.Set(reflect.ValueOf(ds)) + return nil + } else { + ds := fv.Interface().([]time.Duration) + ds = append(ds, dur) + fv.Set(reflect.ValueOf(ds)) + return nil + } + } + } + if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr { + v.Set(reflect.ValueOf(&dur)) + } else { + v.Set(reflect.Indirect(reflect.ValueOf(&dur))) + } + return nil + } + switch fv := v; fv.Kind() { + case reflect.Slice: + at := v.Type() + if at.Elem().Kind() == reflect.Uint8 { + // Special case for []byte + if tok.value[0] != '"' && tok.value[0] != '\'' { + // Deliberately written out here, as the error after + // this switch statement would write "invalid []byte: ...", + // which is not as user-friendly. + return p.errorf("invalid string: %v", tok.value) + } + bytes := []byte(tok.unquoted) + fv.Set(reflect.ValueOf(bytes)) + return nil + } + // Repeated field. + if tok.value == "[" { + // Repeated field with list notation, like [1,2,3]. + for { + fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) + err := p.readAny(fv.Index(fv.Len()-1), props) + if err != nil { + return err + } + ntok := p.next() + if ntok.err != nil { + return ntok.err + } + if ntok.value == "]" { + break + } + if ntok.value != "," { + return p.errorf("Expected ']' or ',' found %q", ntok.value) + } + } + return nil + } + // One value of the repeated field. + p.back() + fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) + return p.readAny(fv.Index(fv.Len()-1), props) + case reflect.Bool: + // true/1/t/True or false/f/0/False. 
+ switch tok.value { + case "true", "1", "t", "True": + fv.SetBool(true) + return nil + case "false", "0", "f", "False": + fv.SetBool(false) + return nil + } + case reflect.Float32, reflect.Float64: + v := tok.value + // Ignore 'f' for compatibility with output generated by C++, but don't + // remove 'f' when the value is "-inf" or "inf". + if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { + v = v[:len(v)-1] + } + if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { + fv.SetFloat(f) + return nil + } + case reflect.Int32: + if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { + fv.SetInt(x) + return nil + } + + if len(props.Enum) == 0 { + break + } + m, ok := enumValueMaps[props.Enum] + if !ok { + break + } + x, ok := m[tok.value] + if !ok { + break + } + fv.SetInt(int64(x)) + return nil + case reflect.Int64: + if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { + fv.SetInt(x) + return nil + } + + case reflect.Ptr: + // A basic field (indirected through pointer), or a repeated message/group + p.back() + fv.Set(reflect.New(fv.Type().Elem())) + return p.readAny(fv.Elem(), props) + case reflect.String: + if tok.value[0] == '"' || tok.value[0] == '\'' { + fv.SetString(tok.unquoted) + return nil + } + case reflect.Struct: + var terminator string + switch tok.value { + case "{": + terminator = "}" + case "<": + terminator = ">" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + // TODO: Handle nested messages which implement encoding.TextUnmarshaler. + return p.readStruct(fv, terminator) + case reflect.Uint32: + if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { + fv.SetUint(uint64(x)) + return nil + } + case reflect.Uint64: + if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { + fv.SetUint(x) + return nil + } + } + return p.errorf("invalid %v: %v", v.Type(), tok.value) +} + +// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb +// before starting to unmarshal, so any existing data in pb is always removed. +// If a required field is not set and no other error occurs, +// UnmarshalText returns *RequiredNotSetError. +func UnmarshalText(s string, pb Message) error { + if um, ok := pb.(encoding.TextUnmarshaler); ok { + err := um.UnmarshalText([]byte(s)) + return err + } + pb.Reset() + v := reflect.ValueOf(pb) + if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil { + return pe + } + return nil +} diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp.go b/vendor/github.com/gogo/protobuf/proto/timestamp.go new file mode 100644 index 000000000..9324f6542 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/timestamp.go @@ -0,0 +1,113 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// This file implements operations on google.protobuf.Timestamp. + +import ( + "errors" + "fmt" + "time" +) + +const ( + // Seconds field of the earliest valid Timestamp. + // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + minValidSeconds = -62135596800 + // Seconds field just after the latest valid Timestamp. + // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + maxValidSeconds = 253402300800 +) + +// validateTimestamp determines whether a Timestamp is valid. +// A valid timestamp represents a time in the range +// [0001-01-01, 10000-01-01) and has a Nanos field +// in the range [0, 1e9). +// +// If the Timestamp is valid, validateTimestamp returns nil. +// Otherwise, it returns an error that describes +// the problem. +// +// Every valid Timestamp can be represented by a time.Time, but the converse is not true. +func validateTimestamp(ts *timestamp) error { + if ts == nil { + return errors.New("timestamp: nil Timestamp") + } + if ts.Seconds < minValidSeconds { + return fmt.Errorf("timestamp: %#v before 0001-01-01", ts) + } + if ts.Seconds >= maxValidSeconds { + return fmt.Errorf("timestamp: %#v after 10000-01-01", ts) + } + if ts.Nanos < 0 || ts.Nanos >= 1e9 { + return fmt.Errorf("timestamp: %#v: nanos not in range [0, 1e9)", ts) + } + return nil +} + +// TimestampFromProto converts a google.protobuf.Timestamp proto to a time.Time. +// It returns an error if the argument is invalid. +// +// Unlike most Go functions, if Timestamp returns an error, the first return value +// is not the zero time.Time. Instead, it is the value obtained from the +// time.Unix function when passed the contents of the Timestamp, in the UTC +// locale. This may or may not be a meaningful time; many invalid Timestamps +// do map to valid time.Times. +// +// A nil Timestamp returns an error. The first return value in that case is +// undefined. +func timestampFromProto(ts *timestamp) (time.Time, error) { + // Don't return the zero value on error, because corresponds to a valid + // timestamp. Instead return whatever time.Unix gives us. + var t time.Time + if ts == nil { + t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp + } else { + t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC() + } + return t, validateTimestamp(ts) +} + +// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. +// It returns an error if the resulting Timestamp is invalid. 
+func timestampProto(t time.Time) (*timestamp, error) {
+	seconds := t.Unix()
+	nanos := int32(t.Sub(time.Unix(seconds, 0)))
+	ts := &timestamp{
+		Seconds: seconds,
+		Nanos:   nanos,
+	}
+	if err := validateTimestamp(ts); err != nil {
+		return nil, err
+	}
+	return ts, nil
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go b/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go
new file mode 100644
index 000000000..d42764743
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go
@@ -0,0 +1,229 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2016, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"reflect"
+	"time"
+)
+
+var timeType = reflect.TypeOf((*time.Time)(nil)).Elem()
+
+type timestamp struct {
+	Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
+	Nanos   int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
+}
+
+func (m *timestamp) Reset()       { *m = timestamp{} }
+func (*timestamp) ProtoMessage()  {}
+func (*timestamp) String() string { return "timestamp" }
+
+func init() {
+	RegisterType((*timestamp)(nil), "gogo.protobuf.proto.timestamp")
+}
+
+func (o *Buffer) decTimestamp() (time.Time, error) {
+	b, err := o.DecodeRawBytes(true)
+	if err != nil {
+		return time.Time{}, err
+	}
+	tproto := &timestamp{}
+	if err := Unmarshal(b, tproto); err != nil {
+		return time.Time{}, err
+	}
+	return timestampFromProto(tproto)
+}
+
+func (o *Buffer) dec_time(p *Properties, base structPointer) error {
+	t, err := o.decTimestamp()
+	if err != nil {
+		return err
+	}
+	setPtrCustomType(base, p.field, &t)
+	return nil
+}
+
+func (o *Buffer) dec_ref_time(p *Properties, base structPointer) error {
+	t, err := o.decTimestamp()
+	if err != nil {
+		return err
+	}
+	setCustomType(base, p.field, &t)
+	return nil
+}
+
+func (o *Buffer) dec_slice_time(p *Properties, base structPointer) error {
+	t, err := o.decTimestamp()
+	if err != nil {
+		return err
+	}
+	newBas := appendStructPointer(base, p.field, reflect.SliceOf(reflect.PtrTo(timeType)))
+	var zero field
+	setPtrCustomType(newBas, zero, &t)
+	return nil
+}
+
+func (o *Buffer) dec_slice_ref_time(p *Properties, base structPointer) error {
+	t, err := o.decTimestamp()
+	if err != nil {
+		return err
+	}
+	newBas := appendStructPointer(base, p.field, reflect.SliceOf(timeType))
+	var zero field
+	setCustomType(newBas, zero, &t)
+	return nil
+}
+
+func size_time(p *Properties, base structPointer) (n int) {
+	structp := structPointer_GetStructPointer(base, p.field)
+	if structPointer_IsNil(structp) {
+		return 0
+	}
+	tim := structPointer_Interface(structp, timeType).(*time.Time)
+	t, err := timestampProto(*tim)
+	if err != nil {
+		return 0
+	}
+	size := Size(t)
+	return size + sizeVarint(uint64(size)) + len(p.tagcode)
+}
+
+func (o *Buffer) enc_time(p *Properties, base structPointer) error {
+	structp := structPointer_GetStructPointer(base, p.field)
+	if structPointer_IsNil(structp) {
+		return ErrNil
+	}
+	tim := structPointer_Interface(structp, timeType).(*time.Time)
+	t, err := timestampProto(*tim)
+	if err != nil {
+		return err
+	}
+	data, err := Marshal(t)
+	if err != nil {
+		return err
+	}
+	o.buf = append(o.buf, p.tagcode...)
+	o.EncodeRawBytes(data)
+	return nil
+}
+
+func size_ref_time(p *Properties, base structPointer) (n int) {
+	tim := structPointer_InterfaceAt(base, p.field, timeType).(*time.Time)
+	t, err := timestampProto(*tim)
+	if err != nil {
+		return 0
+	}
+	size := Size(t)
+	return size + sizeVarint(uint64(size)) + len(p.tagcode)
+}
+
+func (o *Buffer) enc_ref_time(p *Properties, base structPointer) error {
+	tim := structPointer_InterfaceAt(base, p.field, timeType).(*time.Time)
+	t, err := timestampProto(*tim)
+	if err != nil {
+		return err
+	}
+	data, err := Marshal(t)
+	if err != nil {
+		return err
+	}
+	o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(data) + return nil +} + +func size_slice_time(p *Properties, base structPointer) (n int) { + ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(timeType))).(*[]*time.Time) + tims := *ptims + for i := 0; i < len(tims); i++ { + if tims[i] == nil { + return 0 + } + tproto, err := timestampProto(*tims[i]) + if err != nil { + return 0 + } + size := Size(tproto) + n += len(p.tagcode) + size + sizeVarint(uint64(size)) + } + return n +} + +func (o *Buffer) enc_slice_time(p *Properties, base structPointer) error { + ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(timeType))).(*[]*time.Time) + tims := *ptims + for i := 0; i < len(tims); i++ { + if tims[i] == nil { + return errRepeatedHasNil + } + tproto, err := timestampProto(*tims[i]) + if err != nil { + return err + } + data, err := Marshal(tproto) + if err != nil { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + } + return nil +} + +func size_slice_ref_time(p *Properties, base structPointer) (n int) { + ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(timeType)).(*[]time.Time) + tims := *ptims + for i := 0; i < len(tims); i++ { + tproto, err := timestampProto(tims[i]) + if err != nil { + return 0 + } + size := Size(tproto) + n += len(p.tagcode) + size + sizeVarint(uint64(size)) + } + return n +} + +func (o *Buffer) enc_slice_ref_time(p *Properties, base structPointer) error { + ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(timeType)).(*[]time.Time) + tims := *ptims + for i := 0; i < len(tims); i++ { + tproto, err := timestampProto(tims[i]) + if err != nil { + return err + } + data, err := Marshal(tproto) + if err != nil { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + } + return nil +} diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/any.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/any.proto new file mode 100644 index 000000000..8152a34e4 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/any.proto @@ -0,0 +1,140 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option go_package = "types";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "AnyProto";
+option java_multiple_files = true;
+option java_generate_equals_and_hash = true;
+option objc_class_prefix = "GPB";
+
+// `Any` contains an arbitrary serialized protocol buffer message along with a
+// URL that describes the type of the serialized message.
+//
+// Protobuf library provides support to pack/unpack Any values in the form
+// of utility functions or additional generated methods of the Any type.
+//
+// Example 1: Pack and unpack a message in C++.
+//
+//     Foo foo = ...;
+//     Any any;
+//     any.PackFrom(foo);
+//     ...
+//     if (any.UnpackTo(&foo)) {
+//       ...
+//     }
+//
+// Example 2: Pack and unpack a message in Java.
+//
+//     Foo foo = ...;
+//     Any any = Any.pack(foo);
+//     ...
+//     if (any.is(Foo.class)) {
+//       foo = any.unpack(Foo.class);
+//     }
+//
+// Example 3: Pack and unpack a message in Python.
+//
+//     foo = Foo(...)
+//     any = Any()
+//     any.Pack(foo)
+//     ...
+//     if any.Is(Foo.DESCRIPTOR):
+//       any.Unpack(foo)
+//       ...
+//
+// The pack methods provided by protobuf library will by default use
+// 'type.googleapis.com/full.type.name' as the type URL and the unpack
+// methods only use the fully qualified type name after the last '/'
+// in the type URL, for example "foo.bar.com/x/y.z" will yield type
+// name "y.z".
+//
+//
+// JSON
+// ====
+// The JSON representation of an `Any` value uses the regular
+// representation of the deserialized, embedded message, with an
+// additional field `@type` which contains the type URL. Example:
+//
+//     package google.profile;
+//     message Person {
+//       string first_name = 1;
+//       string last_name = 2;
+//     }
+//
+//     {
+//       "@type": "type.googleapis.com/google.profile.Person",
+//       "firstName": <string>,
+//       "lastName": <string>
+//     }
+//
+// If the embedded message type is well-known and has a custom JSON
+// representation, that representation will be embedded adding a field
+// `value` which holds the custom JSON in addition to the `@type`
+// field. Example (for message [google.protobuf.Duration][]):
+//
+//     {
+//       "@type": "type.googleapis.com/google.protobuf.Duration",
+//       "value": "1.212s"
+//     }
+//
+message Any {
+  // A URL/resource name whose content describes the type of the
+  // serialized protocol buffer message.
+  //
+  // For URLs which use the scheme `http`, `https`, or no scheme, the
+  // following restrictions and interpretations apply:
+  //
+  // * If no scheme is provided, `https` is assumed.
+  // * The last segment of the URL's path must represent the fully
+  //   qualified name of the type (as in `path/google.protobuf.Duration`).
+  //   The name should be in a canonical form (e.g., leading "." is
+  //   not accepted).
+  // * An HTTP GET on the URL must yield a [google.protobuf.Type][]
+  //   value in binary format, or produce an error.
+ // * Applications are allowed to cache lookup results based on the + // URL, or have them precompiled into a binary to avoid any + // lookup. Therefore, binary compatibility needs to be preserved + // on changes to types. (Use versioned type names to manage + // breaking changes.) + // + // Schemes other than `http`, `https` (or the empty scheme) might be + // used with implementation specific semantics. + // + string type_url = 1; + + // Must be a valid serialized protocol buffer of the above specified type. + bytes value = 2; +} diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/compiler/plugin.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/compiler/plugin.proto new file mode 100644 index 000000000..acaee1f49 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/compiler/plugin.proto @@ -0,0 +1,150 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Author: kenton@google.com (Kenton Varda) +// +// WARNING: The plugin interface is currently EXPERIMENTAL and is subject to +// change. +// +// protoc (aka the Protocol Compiler) can be extended via plugins. A plugin is +// just a program that reads a CodeGeneratorRequest from stdin and writes a +// CodeGeneratorResponse to stdout. +// +// Plugins written using C++ can use google/protobuf/compiler/plugin.h instead +// of dealing with the raw protocol defined here. +// +// A plugin executable needs only to be placed somewhere in the path. The +// plugin should be named "protoc-gen-$NAME", and will then be used when the +// flag "--${NAME}_out" is passed to protoc. + +syntax = "proto2"; +package google.protobuf.compiler; +option java_package = "com.google.protobuf.compiler"; +option java_outer_classname = "PluginProtos"; + +option go_package = "plugin_go"; + +import "google/protobuf/descriptor.proto"; + +// An encoded CodeGeneratorRequest is written to the plugin's stdin. 
+message CodeGeneratorRequest { + // The .proto files that were explicitly listed on the command-line. The + // code generator should generate code only for these files. Each file's + // descriptor will be included in proto_file, below. + repeated string file_to_generate = 1; + + // The generator parameter passed on the command-line. + optional string parameter = 2; + + // FileDescriptorProtos for all files in files_to_generate and everything + // they import. The files will appear in topological order, so each file + // appears before any file that imports it. + // + // protoc guarantees that all proto_files will be written after + // the fields above, even though this is not technically guaranteed by the + // protobuf wire format. This theoretically could allow a plugin to stream + // in the FileDescriptorProtos and handle them one by one rather than read + // the entire set into memory at once. However, as of this writing, this + // is not similarly optimized on protoc's end -- it will store all fields in + // memory at once before sending them to the plugin. + repeated FileDescriptorProto proto_file = 15; +} + +// The plugin writes an encoded CodeGeneratorResponse to stdout. +message CodeGeneratorResponse { + // Error message. If non-empty, code generation failed. The plugin process + // should exit with status code zero even if it reports an error in this way. + // + // This should be used to indicate errors in .proto files which prevent the + // code generator from generating correct code. Errors which indicate a + // problem in protoc itself -- such as the input CodeGeneratorRequest being + // unparseable -- should be reported by writing a message to stderr and + // exiting with a non-zero status code. + optional string error = 1; + + // Represents a single generated file. + message File { + // The file name, relative to the output directory. The name must not + // contain "." or ".." components and must be relative, not be absolute (so, + // the file cannot lie outside the output directory). "/" must be used as + // the path separator, not "\". + // + // If the name is omitted, the content will be appended to the previous + // file. This allows the generator to break large files into small chunks, + // and allows the generated text to be streamed back to protoc so that large + // files need not reside completely in memory at one time. Note that as of + // this writing protoc does not optimize for this -- it will read the entire + // CodeGeneratorResponse before writing files to disk. + optional string name = 1; + + // If non-empty, indicates that the named file should already exist, and the + // content here is to be inserted into that file at a defined insertion + // point. This feature allows a code generator to extend the output + // produced by another code generator. The original generator may provide + // insertion points by placing special annotations in the file that look + // like: + // @@protoc_insertion_point(NAME) + // The annotation can have arbitrary text before and after it on the line, + // which allows it to be placed in a comment. NAME should be replaced with + // an identifier naming the point -- this is what other generators will use + // as the insertion_point. Code inserted at this point will be placed + // immediately above the line containing the insertion point (thus multiple + // insertions to the same point will come out in the order they were added). 
+ // The double-@ is intended to make it unlikely that the generated code + // could contain things that look like insertion points by accident. + // + // For example, the C++ code generator places the following line in the + // .pb.h files that it generates: + // // @@protoc_insertion_point(namespace_scope) + // This line appears within the scope of the file's package namespace, but + // outside of any particular class. Another plugin can then specify the + // insertion_point "namespace_scope" to generate additional classes or + // other declarations that should be placed in this scope. + // + // Note that if the line containing the insertion point begins with + // whitespace, the same whitespace will be added to every line of the + // inserted text. This is useful for languages like Python, where + // indentation matters. In these languages, the insertion point comment + // should be indented the same amount as any inserted code will need to be + // in order to work correctly in that context. + // + // The code generator that generates the initial file and the one which + // inserts into it must both run as part of a single invocation of protoc. + // Code generators are executed in the order in which they appear on the + // command line. + // + // If |insertion_point| is present, |name| must also be present. + optional string insertion_point = 2; + + // The file contents. + optional string content = 15; + } + repeated File file = 15; +} diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/descriptor.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/descriptor.proto new file mode 100644 index 000000000..9aefdef2f --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/descriptor.proto @@ -0,0 +1,813 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +// Author: kenton@google.com (Kenton Varda) +// Based on original Protocol Buffers design by +// Sanjay Ghemawat, Jeff Dean, and others. +// +// The messages in this file describe the definitions found in .proto files. +// A valid .proto file can be translated directly to a FileDescriptorProto +// without any other information (e.g. without reading its imports). + + +syntax = "proto2"; + +package google.protobuf; +option go_package = "descriptor"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "DescriptorProtos"; +option csharp_namespace = "Google.Protobuf.Reflection"; +option objc_class_prefix = "GPB"; +option java_generate_equals_and_hash = true; + +// descriptor.proto must be optimized for speed because reflection-based +// algorithms don't work during bootstrapping. +option optimize_for = SPEED; + +// The protocol compiler can output a FileDescriptorSet containing the .proto +// files it parses. +message FileDescriptorSet { + repeated FileDescriptorProto file = 1; +} + +// Describes a complete .proto file. +message FileDescriptorProto { + optional string name = 1; // file name, relative to root of source tree + optional string package = 2; // e.g. "foo", "foo.bar", etc. + + // Names of files imported by this file. + repeated string dependency = 3; + // Indexes of the public imported files in the dependency list above. + repeated int32 public_dependency = 10; + // Indexes of the weak imported files in the dependency list. + // For Google-internal migration only. Do not use. + repeated int32 weak_dependency = 11; + + // All top-level definitions in this file. + repeated DescriptorProto message_type = 4; + repeated EnumDescriptorProto enum_type = 5; + repeated ServiceDescriptorProto service = 6; + repeated FieldDescriptorProto extension = 7; + + optional FileOptions options = 8; + + // This field contains optional information about the original source code. + // You may safely remove this entire field without harming runtime + // functionality of the descriptors -- the information is needed only by + // development tools. + optional SourceCodeInfo source_code_info = 9; + + // The syntax of the proto file. + // The supported values are "proto2" and "proto3". + optional string syntax = 12; +} + +// Describes a message type. +message DescriptorProto { + optional string name = 1; + + repeated FieldDescriptorProto field = 2; + repeated FieldDescriptorProto extension = 6; + + repeated DescriptorProto nested_type = 3; + repeated EnumDescriptorProto enum_type = 4; + + message ExtensionRange { + optional int32 start = 1; + optional int32 end = 2; + } + repeated ExtensionRange extension_range = 5; + + repeated OneofDescriptorProto oneof_decl = 8; + + optional MessageOptions options = 7; + + // Range of reserved tag numbers. Reserved tag numbers may not be used by + // fields or extension ranges in the same message. Reserved ranges may + // not overlap. + message ReservedRange { + optional int32 start = 1; // Inclusive. + optional int32 end = 2; // Exclusive. + } + repeated ReservedRange reserved_range = 9; + // Reserved field names, which may not be used by fields in the same message. + // A given name may only be reserved once. + repeated string reserved_name = 10; +} + +// Describes a field within a message. +message FieldDescriptorProto { + enum Type { + // 0 is reserved for errors. + // Order is weird for historical reasons. + TYPE_DOUBLE = 1; + TYPE_FLOAT = 2; + // Not ZigZag encoded. Negative numbers take 10 bytes. 
Use TYPE_SINT64 if + // negative values are likely. + TYPE_INT64 = 3; + TYPE_UINT64 = 4; + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if + // negative values are likely. + TYPE_INT32 = 5; + TYPE_FIXED64 = 6; + TYPE_FIXED32 = 7; + TYPE_BOOL = 8; + TYPE_STRING = 9; + TYPE_GROUP = 10; // Tag-delimited aggregate. + TYPE_MESSAGE = 11; // Length-delimited aggregate. + + // New in version 2. + TYPE_BYTES = 12; + TYPE_UINT32 = 13; + TYPE_ENUM = 14; + TYPE_SFIXED32 = 15; + TYPE_SFIXED64 = 16; + TYPE_SINT32 = 17; // Uses ZigZag encoding. + TYPE_SINT64 = 18; // Uses ZigZag encoding. + }; + + enum Label { + // 0 is reserved for errors + LABEL_OPTIONAL = 1; + LABEL_REQUIRED = 2; + LABEL_REPEATED = 3; + // TODO(sanjay): Should we add LABEL_MAP? + }; + + optional string name = 1; + optional int32 number = 3; + optional Label label = 4; + + // If type_name is set, this need not be set. If both this and type_name + // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. + optional Type type = 5; + + // For message and enum types, this is the name of the type. If the name + // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping + // rules are used to find the type (i.e. first the nested types within this + // message are searched, then within the parent, on up to the root + // namespace). + optional string type_name = 6; + + // For extensions, this is the name of the type being extended. It is + // resolved in the same manner as type_name. + optional string extendee = 2; + + // For numeric types, contains the original text representation of the value. + // For booleans, "true" or "false". + // For strings, contains the default text contents (not escaped in any way). + // For bytes, contains the C escaped value. All bytes >= 128 are escaped. + // TODO(kenton): Base-64 encode? + optional string default_value = 7; + + // If set, gives the index of a oneof in the containing type's oneof_decl + // list. This field is a member of that oneof. + optional int32 oneof_index = 9; + + // JSON name of this field. The value is set by protocol compiler. If the + // user has set a "json_name" option on this field, that option's value + // will be used. Otherwise, it's deduced from the field's name by converting + // it to camelCase. + optional string json_name = 10; + + optional FieldOptions options = 8; +} + +// Describes a oneof. +message OneofDescriptorProto { + optional string name = 1; + optional OneofOptions options = 2; +} + +// Describes an enum type. +message EnumDescriptorProto { + optional string name = 1; + + repeated EnumValueDescriptorProto value = 2; + + optional EnumOptions options = 3; +} + +// Describes a value within an enum. +message EnumValueDescriptorProto { + optional string name = 1; + optional int32 number = 2; + + optional EnumValueOptions options = 3; +} + +// Describes a service. +message ServiceDescriptorProto { + optional string name = 1; + repeated MethodDescriptorProto method = 2; + + optional ServiceOptions options = 3; +} + +// Describes a method of a service. +message MethodDescriptorProto { + optional string name = 1; + + // Input and output type names. These are resolved in the same way as + // FieldDescriptorProto.type_name, but must refer to a message type. 
+ optional string input_type = 2; + optional string output_type = 3; + + optional MethodOptions options = 4; + + // Identifies if client streams multiple client messages + optional bool client_streaming = 5 [default=false]; + // Identifies if server streams multiple server messages + optional bool server_streaming = 6 [default=false]; +} + + +// =================================================================== +// Options + +// Each of the definitions above may have "options" attached. These are +// just annotations which may cause code to be generated slightly differently +// or may contain hints for code that manipulates protocol messages. +// +// Clients may define custom options as extensions of the *Options messages. +// These extensions may not yet be known at parsing time, so the parser cannot +// store the values in them. Instead it stores them in a field in the *Options +// message called uninterpreted_option. This field must have the same name +// across all *Options messages. We then use this field to populate the +// extensions when we build a descriptor, at which point all protos have been +// parsed and so all extensions are known. +// +// Extension numbers for custom options may be chosen as follows: +// * For options which will only be used within a single application or +// organization, or for experimental options, use field numbers 50000 +// through 99999. It is up to you to ensure that you do not use the +// same number for multiple options. +// * For options which will be published and used publicly by multiple +// independent entities, e-mail protobuf-global-extension-registry@google.com +// to reserve extension numbers. Simply provide your project name (e.g. +// Objective-C plugin) and your project website (if available) -- there's no +// need to explain how you intend to use them. Usually you only need one +// extension number. You can declare multiple options with only one extension +// number by putting them in a sub-message. See the Custom Options section of +// the docs for examples: +// https://developers.google.com/protocol-buffers/docs/proto#options +// If this turns out to be popular, a web service will be set up +// to automatically assign option numbers. + + +message FileOptions { + + // Sets the Java package where classes generated from this .proto will be + // placed. By default, the proto package is used, but this is often + // inappropriate because proto packages do not normally start with backwards + // domain names. + optional string java_package = 1; + + + // If set, all the classes from the .proto file are wrapped in a single + // outer class with the given name. This applies to both Proto1 + // (equivalent to the old "--one_java_file" option) and Proto2 (where + // a .proto always translates to a single class, but you may want to + // explicitly choose the class name). + optional string java_outer_classname = 8; + + // If set true, then the Java code generator will generate a separate .java + // file for each top-level message, enum, and service defined in the .proto + // file. Thus, these types will *not* be nested inside the outer class + // named by java_outer_classname. However, the outer class will still be + // generated to contain the file's getDescriptor() method as well as any + // top-level extensions defined in the file. + optional bool java_multiple_files = 10 [default=false]; + + // If set true, then the Java code generator will generate equals() and + // hashCode() methods for all messages defined in the .proto file. 
+ // This increases generated code size, potentially substantially for large + // protos, which may harm a memory-constrained application. + // - In the full runtime this is a speed optimization, as the + // AbstractMessage base class includes reflection-based implementations of + // these methods. + // - In the lite runtime, setting this option changes the semantics of + // equals() and hashCode() to more closely match those of the full runtime; + // the generated methods compute their results based on field values rather + // than object identity. (Implementations should not assume that hashcodes + // will be consistent across runtimes or versions of the protocol compiler.) + optional bool java_generate_equals_and_hash = 20 [default=false]; + + // If set true, then the Java2 code generator will generate code that + // throws an exception whenever an attempt is made to assign a non-UTF-8 + // byte sequence to a string field. + // Message reflection will do the same. + // However, an extension field still accepts non-UTF-8 byte sequences. + // This option has no effect on when used with the lite runtime. + optional bool java_string_check_utf8 = 27 [default=false]; + + + // Generated classes can be optimized for speed or code size. + enum OptimizeMode { + SPEED = 1; // Generate complete code for parsing, serialization, + // etc. + CODE_SIZE = 2; // Use ReflectionOps to implement these methods. + LITE_RUNTIME = 3; // Generate code using MessageLite and the lite runtime. + } + optional OptimizeMode optimize_for = 9 [default=SPEED]; + + // Sets the Go package where structs generated from this .proto will be + // placed. If omitted, the Go package will be derived from the following: + // - The basename of the package import path, if provided. + // - Otherwise, the package statement in the .proto file, if present. + // - Otherwise, the basename of the .proto file, without extension. + optional string go_package = 11; + + + + // Should generic services be generated in each language? "Generic" services + // are not specific to any particular RPC system. They are generated by the + // main code generators in each language (without additional plugins). + // Generic services were the only kind of service generation supported by + // early versions of google.protobuf. + // + // Generic services are now considered deprecated in favor of using plugins + // that generate code specific to your particular RPC system. Therefore, + // these default to false. Old code which depends on generic services should + // explicitly set them to true. + optional bool cc_generic_services = 16 [default=false]; + optional bool java_generic_services = 17 [default=false]; + optional bool py_generic_services = 18 [default=false]; + + // Is this file deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for everything in the file, or it will be completely ignored; in the very + // least, this is a formalization for deprecating files. + optional bool deprecated = 23 [default=false]; + + // Enables the use of arenas for the proto messages in this file. This applies + // only to generated classes for C++. + optional bool cc_enable_arenas = 31 [default=false]; + + + // Sets the objective c class prefix which is prepended to all objective c + // generated classes from this .proto. There is no default. + optional string objc_class_prefix = 36; + + // Namespace for generated classes; defaults to the package. 
+  optional string csharp_namespace = 37;
+
+  // The parser stores options it doesn't recognize here. See above.
+  repeated UninterpretedOption uninterpreted_option = 999;
+
+  // Clients can define custom options in extensions of this message. See above.
+  extensions 1000 to max;
+
+  //reserved 38;
+}
+
+message MessageOptions {
+  // Set true to use the old proto1 MessageSet wire format for extensions.
+  // This is provided for backwards-compatibility with the MessageSet wire
+  // format. You should not use this for any other reason: It's less
+  // efficient, has fewer features, and is more complicated.
+  //
+  // The message must be defined exactly as follows:
+  //   message Foo {
+  //     option message_set_wire_format = true;
+  //     extensions 4 to max;
+  //   }
+  // Note that the message cannot have any defined fields; MessageSets only
+  // have extensions.
+  //
+  // All extensions of your type must be singular messages; e.g. they cannot
+  // be int32s, enums, or repeated messages.
+  //
+  // Because this is an option, the above two restrictions are not enforced by
+  // the protocol compiler.
+  optional bool message_set_wire_format = 1 [default=false];
+
+  // Disables the generation of the standard "descriptor()" accessor, which can
+  // conflict with a field of the same name. This is meant to make migration
+  // from proto1 easier; new code should avoid fields named "descriptor".
+  optional bool no_standard_descriptor_accessor = 2 [default=false];
+
+  // Is this message deprecated?
+  // Depending on the target platform, this can emit Deprecated annotations
+  // for the message, or it will be completely ignored; in the very least,
+  // this is a formalization for deprecating messages.
+  optional bool deprecated = 3 [default=false];
+
+  // Whether the message is an automatically generated map entry type for the
+  // maps field.
+  //
+  // For maps fields:
+  //     map<KeyType, ValueType> map_field = 1;
+  // The parsed descriptor looks like:
+  //     message MapFieldEntry {
+  //         option map_entry = true;
+  //         optional KeyType key = 1;
+  //         optional ValueType value = 2;
+  //     }
+  //     repeated MapFieldEntry map_field = 1;
+  //
+  // Implementations may choose not to generate the map_entry=true message, but
+  // use a native map in the target language to hold the keys and values.
+  // The reflection APIs in such implementions still need to work as
+  // if the field is a repeated message field.
+  //
+  // NOTE: Do not set the option in .proto files. Always use the maps syntax
+  // instead. The option should only be implicitly set by the proto compiler
+  // parser.
+  optional bool map_entry = 7;
+
+  // The parser stores options it doesn't recognize here. See above.
+  repeated UninterpretedOption uninterpreted_option = 999;
+
+  // Clients can define custom options in extensions of this message. See above.
+  extensions 1000 to max;
+}
+
+message FieldOptions {
+  // The ctype option instructs the C++ code generator to use a different
+  // representation of the field than it normally would. See the specific
+  // options below. This option is not yet implemented in the open source
+  // release -- sorry, we'll try to include it in a future version!
+  optional CType ctype = 1 [default = STRING];
+  enum CType {
+    // Default mode.
+    STRING = 0;
+
+    CORD = 1;
+
+    STRING_PIECE = 2;
+  }
+  // The packed option can be enabled for repeated primitive fields to enable
+  // a more efficient representation on the wire. Rather than repeatedly
+  // writing the tag and type for each element, the entire array is encoded as
+  // a single length-delimited blob.
In proto3, only explicit setting it to + // false will avoid using packed encoding. + optional bool packed = 2; + + + // The jstype option determines the JavaScript type used for values of the + // field. The option is permitted only for 64 bit integral and fixed types + // (int64, uint64, sint64, fixed64, sfixed64). By default these types are + // represented as JavaScript strings. This avoids loss of precision that can + // happen when a large value is converted to a floating point JavaScript + // numbers. Specifying JS_NUMBER for the jstype causes the generated + // JavaScript code to use the JavaScript "number" type instead of strings. + // This option is an enum to permit additional types to be added, + // e.g. goog.math.Integer. + optional JSType jstype = 6 [default = JS_NORMAL]; + enum JSType { + // Use the default type. + JS_NORMAL = 0; + + // Use JavaScript strings. + JS_STRING = 1; + + // Use JavaScript numbers. + JS_NUMBER = 2; + } + + // Should this field be parsed lazily? Lazy applies only to message-type + // fields. It means that when the outer message is initially parsed, the + // inner message's contents will not be parsed but instead stored in encoded + // form. The inner message will actually be parsed when it is first accessed. + // + // This is only a hint. Implementations are free to choose whether to use + // eager or lazy parsing regardless of the value of this option. However, + // setting this option true suggests that the protocol author believes that + // using lazy parsing on this field is worth the additional bookkeeping + // overhead typically needed to implement it. + // + // This option does not affect the public interface of any generated code; + // all method signatures remain the same. Furthermore, thread-safety of the + // interface is not affected by this option; const methods remain safe to + // call from multiple threads concurrently, while non-const methods continue + // to require exclusive access. + // + // + // Note that implementations may choose not to check required fields within + // a lazy sub-message. That is, calling IsInitialized() on the outher message + // may return true even if the inner message has missing required fields. + // This is necessary because otherwise the inner message would have to be + // parsed in order to perform the check, defeating the purpose of lazy + // parsing. An implementation which chooses not to check required fields + // must be consistent about it. That is, for any particular sub-message, the + // implementation must either *always* check its required fields, or *never* + // check its required fields, regardless of whether or not the message has + // been parsed. + optional bool lazy = 5 [default=false]; + + // Is this field deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for accessors, or it will be completely ignored; in the very least, this + // is a formalization for deprecating fields. + optional bool deprecated = 3 [default=false]; + + // For Google-internal migration only. Do not use. + optional bool weak = 10 [default=false]; + + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message OneofOptions { + // The parser stores options it doesn't recognize here. See above. 
+ repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message EnumOptions { + + // Set this option to true to allow mapping different tag names to the same + // value. + optional bool allow_alias = 2; + + // Is this enum deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum, or it will be completely ignored; in the very least, this + // is a formalization for deprecating enums. + optional bool deprecated = 3 [default=false]; + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message EnumValueOptions { + // Is this enum value deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum value, or it will be completely ignored; in the very least, + // this is a formalization for deprecating enum values. + optional bool deprecated = 1 [default=false]; + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message ServiceOptions { + + // Note: Field numbers 1 through 32 are reserved for Google's internal RPC + // framework. We apologize for hoarding these numbers to ourselves, but + // we were already using them long before we decided to release Protocol + // Buffers. + + // Is this service deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the service, or it will be completely ignored; in the very least, + // this is a formalization for deprecating services. + optional bool deprecated = 33 [default=false]; + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message MethodOptions { + + // Note: Field numbers 1 through 32 are reserved for Google's internal RPC + // framework. We apologize for hoarding these numbers to ourselves, but + // we were already using them long before we decided to release Protocol + // Buffers. + + // Is this method deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the method, or it will be completely ignored; in the very least, + // this is a formalization for deprecating methods. + optional bool deprecated = 33 [default=false]; + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + + +// A message representing a option the parser does not recognize. This only +// appears in options protos created by the compiler::Parser class. +// DescriptorPool resolves these when building Descriptor objects. Therefore, +// options protos in descriptor objects (e.g. returned by Descriptor::options(), +// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions +// in them. +message UninterpretedOption { + // The name of the uninterpreted option. Each string represents a segment in + // a dot-separated name. 
is_extension is true iff a segment represents an + // extension (denoted with parentheses in options specs in .proto files). + // E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents + // "foo.(bar.baz).qux". + message NamePart { + required string name_part = 1; + required bool is_extension = 2; + } + repeated NamePart name = 2; + + // The value of the uninterpreted option, in whatever type the tokenizer + // identified it as during parsing. Exactly one of these should be set. + optional string identifier_value = 3; + optional uint64 positive_int_value = 4; + optional int64 negative_int_value = 5; + optional double double_value = 6; + optional bytes string_value = 7; + optional string aggregate_value = 8; +} + +// =================================================================== +// Optional source code info + +// Encapsulates information about the original source file from which a +// FileDescriptorProto was generated. +message SourceCodeInfo { + // A Location identifies a piece of source code in a .proto file which + // corresponds to a particular definition. This information is intended + // to be useful to IDEs, code indexers, documentation generators, and similar + // tools. + // + // For example, say we have a file like: + // message Foo { + // optional string foo = 1; + // } + // Let's look at just the field definition: + // optional string foo = 1; + // ^ ^^ ^^ ^ ^^^ + // a bc de f ghi + // We have the following locations: + // span path represents + // [a,i) [ 4, 0, 2, 0 ] The whole field definition. + // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). + // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). + // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). + // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). + // + // Notes: + // - A location may refer to a repeated field itself (i.e. not to any + // particular index within it). This is used whenever a set of elements are + // logically enclosed in a single code segment. For example, an entire + // extend block (possibly containing multiple extension definitions) will + // have an outer location whose path refers to the "extensions" repeated + // field without an index. + // - Multiple locations may have the same path. This happens when a single + // logical declaration is spread out across multiple places. The most + // obvious example is the "extend" block again -- there may be multiple + // extend blocks in the same scope, each of which will have the same path. + // - A location's span is not always a subset of its parent's span. For + // example, the "extendee" of an extension declaration appears at the + // beginning of the "extend" block and is shared by all extensions within + // the block. + // - Just because a location's span is a subset of some other location's span + // does not mean that it is a descendent. For example, a "group" defines + // both a type and a field in a single declaration. Thus, the locations + // corresponding to the type and field and their components will overlap. + // - Code which tries to interpret locations should probably be designed to + // ignore those that it doesn't understand, as more types of locations could + // be recorded in the future. + repeated Location location = 1; + message Location { + // Identifies which part of the FileDescriptorProto was defined at this + // location. + // + // Each element is a field number or an index. They form a path from + // the root FileDescriptorProto to the place where the definition. 
For + // example, this path: + // [ 4, 3, 2, 7, 1 ] + // refers to: + // file.message_type(3) // 4, 3 + // .field(7) // 2, 7 + // .name() // 1 + // This is because FileDescriptorProto.message_type has field number 4: + // repeated DescriptorProto message_type = 4; + // and DescriptorProto.field has field number 2: + // repeated FieldDescriptorProto field = 2; + // and FieldDescriptorProto.name has field number 1: + // optional string name = 1; + // + // Thus, the above path gives the location of a field name. If we removed + // the last element: + // [ 4, 3, 2, 7 ] + // this path refers to the whole field declaration (from the beginning + // of the label to the terminating semicolon). + repeated int32 path = 1 [packed=true]; + + // Always has exactly three or four elements: start line, start column, + // end line (optional, otherwise assumed same as start line), end column. + // These are packed into a single field for efficiency. Note that line + // and column numbers are zero-based -- typically you will want to add + // 1 to each before displaying to a user. + repeated int32 span = 2 [packed=true]; + + // If this SourceCodeInfo represents a complete declaration, these are any + // comments appearing before and after the declaration which appear to be + // attached to the declaration. + // + // A series of line comments appearing on consecutive lines, with no other + // tokens appearing on those lines, will be treated as a single comment. + // + // leading_detached_comments will keep paragraphs of comments that appear + // before (but not connected to) the current element. Each paragraph, + // separated by empty lines, will be one comment element in the repeated + // field. + // + // Only the comment content is provided; comment markers (e.g. //) are + // stripped out. For block comments, leading whitespace and an asterisk + // will be stripped from the beginning of each line other than the first. + // Newlines are included in the output. + // + // Examples: + // + // optional int32 foo = 1; // Comment attached to foo. + // // Comment attached to bar. + // optional int32 bar = 2; + // + // optional string baz = 3; + // // Comment attached to baz. + // // Another line attached to baz. + // + // // Comment attached to qux. + // // + // // Another line attached to qux. + // optional double qux = 4; + // + // // Detached comment for corge. This is not leading or trailing comments + // // to qux or corge because there are blank lines separating it from + // // both. + // + // // Detached comment for corge paragraph 2. + // + // optional string corge = 5; + // /* Block comment attached + // * to corge. Leading asterisks + // * will be removed. */ + // /* Block comment attached to + // * grault. */ + // optional int32 grault = 6; + // + // // ignored detached comments. + optional string leading_comments = 3; + optional string trailing_comments = 4; + repeated string leading_detached_comments = 6; + } +} + +// Describes the relationship between generated code and its original source +// file. A GeneratedCodeInfo message is associated with only one generated +// source file, but may contain references to different source .proto files. +message GeneratedCodeInfo { + // An Annotation connects some span of text in generated code to an element + // of its generating .proto file. + repeated Annotation annotation = 1; + message Annotation { + // Identifies the element in the original source .proto file. This field + // is formatted the same as SourceCodeInfo.Location.path. 
+ repeated int32 path = 1 [packed=true]; + + // Identifies the filesystem path to the original source .proto. + optional string source_file = 2; + + // Identifies the starting offset in bytes in the generated code + // that relates to the identified object. + optional int32 begin = 3; + + // Identifies the ending offset in bytes in the generated code that + // relates to the identified offset. The end offset should be one past + // the last relevant byte (so the length of the text = end - begin). + optional int32 end = 4; + } +} diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/duration.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/duration.proto new file mode 100644 index 000000000..c4359dbd2 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/duration.proto @@ -0,0 +1,98 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option go_package = "types"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "DurationProto"; +option java_multiple_files = true; +option java_generate_equals_and_hash = true; +option objc_class_prefix = "GPB"; + +// A Duration represents a signed, fixed-length span of time represented +// as a count of seconds and fractions of seconds at nanosecond +// resolution. It is independent of any calendar and concepts like "day" +// or "month". It is related to Timestamp in that the difference between +// two Timestamp values is a Duration and it can be added or subtracted +// from a Timestamp. Range is approximately +-10,000 years. +// +// Example 1: Compute Duration from two Timestamps in pseudo code. 
+// +// Timestamp start = ...; +// Timestamp end = ...; +// Duration duration = ...; +// +// duration.seconds = end.seconds - start.seconds; +// duration.nanos = end.nanos - start.nanos; +// +// if (duration.seconds < 0 && duration.nanos > 0) { +// duration.seconds += 1; +// duration.nanos -= 1000000000; +// } else if (durations.seconds > 0 && duration.nanos < 0) { +// duration.seconds -= 1; +// duration.nanos += 1000000000; +// } +// +// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. +// +// Timestamp start = ...; +// Duration duration = ...; +// Timestamp end = ...; +// +// end.seconds = start.seconds + duration.seconds; +// end.nanos = start.nanos + duration.nanos; +// +// if (end.nanos < 0) { +// end.seconds -= 1; +// end.nanos += 1000000000; +// } else if (end.nanos >= 1000000000) { +// end.seconds += 1; +// end.nanos -= 1000000000; +// } +// +// +message Duration { + + // Signed seconds of the span of time. Must be from -315,576,000,000 + // to +315,576,000,000 inclusive. + int64 seconds = 1; + + // Signed fractions of a second at nanosecond resolution of the span + // of time. Durations less than one second are represented with a 0 + // `seconds` field and a positive or negative `nanos` field. For durations + // of one second or more, a non-zero value for the `nanos` field must be + // of the same sign as the `seconds` field. Must be from -999,999,999 + // to +999,999,999 inclusive. + int32 nanos = 2; +} diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/empty.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/empty.proto new file mode 100644 index 000000000..61a574b6a --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/empty.proto @@ -0,0 +1,53 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
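A brief aside on the Duration arithmetic shown above (the pseudo-code's `durations.seconds` is evidently meant to read `duration.seconds`): the following is a minimal, illustrative Go sketch of the same normalization, using plain integers rather than any generated protobuf type, so that `seconds` and `nanos` always carry the same sign as the comment requires.

```go
package durationmath

// durationBetween computes end - start as (seconds, nanos) and normalizes the
// result so both components share the same sign, mirroring the pseudo-code
// in the Duration documentation above.
func durationBetween(startSec, endSec int64, startNanos, endNanos int32) (sec int64, nanos int32) {
	sec = endSec - startSec
	nanos = endNanos - startNanos

	if sec < 0 && nanos > 0 {
		sec++
		nanos -= 1000000000
	} else if sec > 0 && nanos < 0 {
		sec--
		nanos += 1000000000
	}
	return sec, nanos
}
```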
+ +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option go_package = "types"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "EmptyProto"; +option java_multiple_files = true; +option java_generate_equals_and_hash = true; +option objc_class_prefix = "GPB"; +option cc_enable_arenas = true; + +// A generic empty message that you can re-use to avoid defining duplicated +// empty messages in your APIs. A typical example is to use it as the request +// or the response type of an API method. For instance: +// +// service Foo { +// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); +// } +// +// The JSON representation for `Empty` is empty JSON object `{}`. +message Empty {} diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/field_mask.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/field_mask.proto new file mode 100644 index 000000000..fd252889a --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/field_mask.proto @@ -0,0 +1,246 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; option go_package = "types"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "FieldMaskProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; +option java_generate_equals_and_hash = true; + +// `FieldMask` represents a set of symbolic field paths, for example: +// +// paths: "f.a" +// paths: "f.b.d" +// +// Here `f` represents a field in some root message, `a` and `b` +// fields in the message found in `f`, and `d` a field found in the +// message in `f.b`. +// +// Field masks are used to specify a subset of fields that should be +// returned by a get operation or modified by an update operation. 
+// Field masks also have a custom JSON encoding (see below). +// +// # Field Masks in Projections +// +// When used in the context of a projection, a response message or +// sub-message is filtered by the API to only contain those fields as +// specified in the mask. For example, if the mask in the previous +// example is applied to a response message as follows: +// +// f { +// a : 22 +// b { +// d : 1 +// x : 2 +// } +// y : 13 +// } +// z: 8 +// +// The result will not contain specific values for fields x,y and z +// (their value will be set to the default, and omitted in proto text +// output): +// +// +// f { +// a : 22 +// b { +// d : 1 +// } +// } +// +// A repeated field is not allowed except at the last position of a +// field mask. +// +// If a FieldMask object is not present in a get operation, the +// operation applies to all fields (as if a FieldMask of all fields +// had been specified). +// +// Note that a field mask does not necessarily apply to the +// top-level response message. In case of a REST get operation, the +// field mask applies directly to the response, but in case of a REST +// list operation, the mask instead applies to each individual message +// in the returned resource list. In case of a REST custom method, +// other definitions may be used. Where the mask applies will be +// clearly documented together with its declaration in the API. In +// any case, the effect on the returned resource/resources is required +// behavior for APIs. +// +// # Field Masks in Update Operations +// +// A field mask in update operations specifies which fields of the +// targeted resource are going to be updated. The API is required +// to only change the values of the fields as specified in the mask +// and leave the others untouched. If a resource is passed in to +// describe the updated values, the API ignores the values of all +// fields not covered by the mask. +// +// If a repeated field is specified for an update operation, the existing +// repeated values in the target resource will be overwritten by the new values. +// Note that a repeated field is only allowed in the last position of a field +// mask. +// +// If a sub-message is specified in the last position of the field mask for an +// update operation, then the existing sub-message in the target resource is +// overwritten. Given the target message: +// +// f { +// b { +// d : 1 +// x : 2 +// } +// c : 1 +// } +// +// And an update message: +// +// f { +// b { +// d : 10 +// } +// } +// +// then if the field mask is: +// +// paths: "f.b" +// +// then the result will be: +// +// f { +// b { +// d : 10 +// } +// c : 1 +// } +// +// However, if the update mask was: +// +// paths: "f.b.d" +// +// then the result would be: +// +// f { +// b { +// d : 10 +// x : 2 +// } +// c : 1 +// } +// +// In order to reset a field's value to the default, the field must +// be in the mask and set to the default value in the provided resource. +// Hence, in order to reset all fields of a resource, provide a default +// instance of the resource and set all fields in the mask, or do +// not provide a mask as described below. +// +// If a field mask is not present on update, the operation applies to +// all fields (as if a field mask of all fields has been specified). +// Note that in the presence of schema evolution, this may mean that +// fields the client does not know and has therefore not filled into +// the request will be reset to their default. 
If this is unwanted +// behavior, a specific service may require a client to always specify +// a field mask, producing an error if not. +// +// As with get operations, the location of the resource which +// describes the updated values in the request message depends on the +// operation kind. In any case, the effect of the field mask is +// required to be honored by the API. +// +// ## Considerations for HTTP REST +// +// The HTTP kind of an update operation which uses a field mask must +// be set to PATCH instead of PUT in order to satisfy HTTP semantics +// (PUT must only be used for full updates). +// +// # JSON Encoding of Field Masks +// +// In JSON, a field mask is encoded as a single string where paths are +// separated by a comma. Fields name in each path are converted +// to/from lower-camel naming conventions. +// +// As an example, consider the following message declarations: +// +// message Profile { +// User user = 1; +// Photo photo = 2; +// } +// message User { +// string display_name = 1; +// string address = 2; +// } +// +// In proto a field mask for `Profile` may look as such: +// +// mask { +// paths: "user.display_name" +// paths: "photo" +// } +// +// In JSON, the same mask is represented as below: +// +// { +// mask: "user.displayName,photo" +// } +// +// # Field Masks and Oneof Fields +// +// Field masks treat fields in oneofs just as regular fields. Consider the +// following message: +// +// message SampleMessage { +// oneof test_oneof { +// string name = 4; +// SubMessage sub_message = 9; +// } +// } +// +// The field mask can be: +// +// mask { +// paths: "name" +// } +// +// Or: +// +// mask { +// paths: "sub_message" +// } +// +// Note that oneof type names ("test_oneof" in this case) cannot be used in +// paths. +message FieldMask { + // The set of field mask paths. + repeated string paths = 1; +} diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/struct.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/struct.proto new file mode 100644 index 000000000..27fdb9188 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/struct.proto @@ -0,0 +1,96 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
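As an illustrative aside on the JSON encoding of field masks described above: a minimal Go sketch of the comma-join and lower-camel conversion, using only the standard library. It is not the canonical protobuf implementation, just a demonstration of the rule that `{"user.display_name", "photo"}` encodes as `"user.displayName,photo"`.

```go
package fieldmaskjson

import "strings"

// toLowerCamel converts one snake_case path segment,
// e.g. "display_name" -> "displayName".
func toLowerCamel(seg string) string {
	parts := strings.Split(seg, "_")
	out := parts[0]
	for _, p := range parts[1:] {
		if p == "" {
			continue
		}
		out += strings.ToUpper(p[:1]) + p[1:]
	}
	return out
}

// encodeFieldMask camel-cases each dotted segment of every path and joins
// the paths with commas, as the FieldMask JSON encoding describes.
func encodeFieldMask(paths []string) string {
	enc := make([]string, len(paths))
	for i, path := range paths {
		segs := strings.Split(path, ".")
		for j, s := range segs {
			segs[j] = toLowerCamel(s)
		}
		enc[i] = strings.Join(segs, ".")
	}
	return strings.Join(enc, ",")
}
```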
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option go_package = "types"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "StructProto"; +option java_multiple_files = true; +option java_generate_equals_and_hash = true; +option objc_class_prefix = "GPB"; + + +// `Struct` represents a structured data value, consisting of fields +// which map to dynamically typed values. In some languages, `Struct` +// might be supported by a native representation. For example, in +// scripting languages like JS a struct is represented as an +// object. The details of that representation are described together +// with the proto support for the language. +// +// The JSON representation for `Struct` is JSON object. +message Struct { + // Unordered map of dynamically typed values. + map fields = 1; +} + +// `Value` represents a dynamically typed value which can be either +// null, a number, a string, a boolean, a recursive struct value, or a +// list of values. A producer of value is expected to set one of that +// variants, absence of any variant indicates an error. +// +// The JSON representation for `Value` is JSON value. +message Value { + // The kind of value. + oneof kind { + // Represents a null value. + NullValue null_value = 1; + // Represents a double value. + double number_value = 2; + // Represents a string value. + string string_value = 3; + // Represents a boolean value. + bool bool_value = 4; + // Represents a structured value. + Struct struct_value = 5; + // Represents a repeated `Value`. + ListValue list_value = 6; + } +} + +// `NullValue` is a singleton enumeration to represent the null value for the +// `Value` type union. +// +// The JSON representation for `NullValue` is JSON `null`. +enum NullValue { + // Null value. + NULL_VALUE = 0; +} + +// `ListValue` is a wrapper around a repeated field of values. +// +// The JSON representation for `ListValue` is JSON array. +message ListValue { + // Repeated field of dynamically typed values. + repeated Value values = 1; +} diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/timestamp.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/timestamp.proto new file mode 100644 index 000000000..c85024a42 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/timestamp.proto @@ -0,0 +1,111 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option go_package = "types"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "TimestampProto"; +option java_multiple_files = true; +option java_generate_equals_and_hash = true; +option objc_class_prefix = "GPB"; + +// A Timestamp represents a point in time independent of any time zone +// or calendar, represented as seconds and fractions of seconds at +// nanosecond resolution in UTC Epoch time. It is encoded using the +// Proleptic Gregorian Calendar which extends the Gregorian calendar +// backwards to year one. It is encoded assuming all minutes are 60 +// seconds long, i.e. leap seconds are "smeared" so that no leap second +// table is needed for interpretation. Range is from +// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. +// By restricting to that range, we ensure that we can convert to +// and from RFC 3339 date strings. +// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). +// +// Example 1: Compute Timestamp from POSIX `time()`. +// +// Timestamp timestamp; +// timestamp.set_seconds(time(NULL)); +// timestamp.set_nanos(0); +// +// Example 2: Compute Timestamp from POSIX `gettimeofday()`. +// +// struct timeval tv; +// gettimeofday(&tv, NULL); +// +// Timestamp timestamp; +// timestamp.set_seconds(tv.tv_sec); +// timestamp.set_nanos(tv.tv_usec * 1000); +// +// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. +// +// FILETIME ft; +// GetSystemTimeAsFileTime(&ft); +// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; +// +// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z +// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. +// Timestamp timestamp; +// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); +// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); +// +// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. 
+// +// long millis = System.currentTimeMillis(); +// +// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) +// .setNanos((int) ((millis % 1000) * 1000000)).build(); +// +// +// Example 5: Compute Timestamp from current time in Python. +// +// now = time.time() +// seconds = int(now) +// nanos = int((now - seconds) * 10**9) +// timestamp = Timestamp(seconds=seconds, nanos=nanos) +// +// +message Timestamp { + + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + int64 seconds = 1; + + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. + int32 nanos = 2; +} diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/wrappers.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/wrappers.proto new file mode 100644 index 000000000..f685dc082 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/wrappers.proto @@ -0,0 +1,119 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Wrappers for primitive (non-message) types. These types are useful +// for embedding primitives in the `google.protobuf.Any` type and for places +// where we need to distinguish between the absence of a primitive +// typed field and its default value. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option go_package = "types"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "WrappersProto"; +option java_multiple_files = true; +option java_generate_equals_and_hash = true; +option objc_class_prefix = "GPB"; + +// Wrapper message for `double`. 
+// +// The JSON representation for `DoubleValue` is JSON number. +message DoubleValue { + // The double value. + double value = 1; +} + +// Wrapper message for `float`. +// +// The JSON representation for `FloatValue` is JSON number. +message FloatValue { + // The float value. + float value = 1; +} + +// Wrapper message for `int64`. +// +// The JSON representation for `Int64Value` is JSON string. +message Int64Value { + // The int64 value. + int64 value = 1; +} + +// Wrapper message for `uint64`. +// +// The JSON representation for `UInt64Value` is JSON string. +message UInt64Value { + // The uint64 value. + uint64 value = 1; +} + +// Wrapper message for `int32`. +// +// The JSON representation for `Int32Value` is JSON number. +message Int32Value { + // The int32 value. + int32 value = 1; +} + +// Wrapper message for `uint32`. +// +// The JSON representation for `UInt32Value` is JSON number. +message UInt32Value { + // The uint32 value. + uint32 value = 1; +} + +// Wrapper message for `bool`. +// +// The JSON representation for `BoolValue` is JSON `true` and `false`. +message BoolValue { + // The bool value. + bool value = 1; +} + +// Wrapper message for `string`. +// +// The JSON representation for `StringValue` is JSON string. +message StringValue { + // The string value. + string value = 1; +} + +// Wrapper message for `bytes`. +// +// The JSON representation for `BytesValue` is JSON string. +message BytesValue { + // The bytes value. + bytes value = 1; +} diff --git a/vendor/github.com/opencontainers/image-spec/LICENSE b/vendor/github.com/opencontainers/image-spec/LICENSE new file mode 100644 index 000000000..9fdc20fdb --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2016 The Linux Foundation. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/opencontainers/image-spec/README.md b/vendor/github.com/opencontainers/image-spec/README.md new file mode 100644 index 000000000..5ab5554e4 --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/README.md @@ -0,0 +1,167 @@ +# OCI Image Format Specification +
+ +The OCI Image Format project creates and maintains the software shipping container image format spec (OCI Image Format). + +**[The specification can be found here](spec.md).** + +This repository also provides [Go types](specs-go), [intra-blob validation tooling, and JSON Schema](schema). +The Go types and validation should be compatible with the current Go release; earlier Go releases are not supported. + +Additional documentation about how this group operates: + +- [Code of Conduct](https://github.com/opencontainers/tob/blob/d2f9d68c1332870e40693fe077d311e0742bc73d/code-of-conduct.md) +- [Roadmap](#roadmap) +- [Releases](RELEASES.md) +- [Project Documentation](project.md) + +The _optional_ and _base_ layers of all OCI projects are tracked in the [OCI Scope Table](https://www.opencontainers.org/about/oci-scope-table). + +## Running an OCI Image + +The OCI Image Format partner project is the [OCI Runtime Spec project](https://github.com/opencontainers/runtime-spec). +The Runtime Specification outlines how to run a "[filesystem bundle](https://github.com/opencontainers/runtime-spec/blob/master/bundle.md)" that is unpacked on disk. +At a high-level an OCI implementation would download an OCI Image then unpack that image into an OCI Runtime filesystem bundle. +At this point the OCI Runtime Bundle would be run by an OCI Runtime. + +This entire workflow supports the UX that users have come to expect from container engines like Docker and rkt: primarily, the ability to run an image with no additional arguments: + +* docker run example.com/org/app:v1.0.0 +* rkt run example.com/org/app,version=v1.0.0 + +To support this UX the OCI Image Format contains sufficient information to launch the application on the target platform (e.g. command, arguments, environment variables, etc). + +## FAQ + +**Q: Why doesn't this project mention distribution?** + +A: Distribution, for example using HTTP as both Docker v2.2 and AppC do today, is currently out of scope on the [OCI Scope Table](https://www.opencontainers.org/about/oci-scope-table). +There has been [some discussion on the TOB mailing list](https://groups.google.com/a/opencontainers.org/d/msg/tob/A3JnmI-D-6Y/tLuptPDHAgAJ) to make distribution an optional layer, but this topic is a work in progress. + +**Q: What happens to AppC or Docker Image Formats?** + +A: Existing formats can continue to be a proving ground for technologies, as needed. +The OCI Image Format project strives to provide a dependable open specification that can be shared between different tools and be evolved for years or decades of compatibility; as the deb and rpm format have. + +Find more [FAQ on the OCI site](https://www.opencontainers.org/faq). + +## Roadmap + +The [GitHub milestones](https://github.com/opencontainers/image-spec/milestones) lay out the path to the OCI v1.0.0 release in late 2016. + +# Contributing + +Development happens on GitHub for the spec. +Issues are used for bugs and actionable items and longer discussions can happen on the [mailing list](#mailing-list). + +The specification and code is licensed under the Apache 2.0 license found in the `LICENSE` file of this repository. + +## Discuss your design + +The project welcomes submissions, but please let everyone know what you are working on. + +Before undertaking a nontrivial change to this specification, send mail to the [mailing list](#mailing-list) to discuss what you plan to do. +This gives everyone a chance to validate the design, helps prevent duplication of effort, and ensures that the idea fits. 
+It also guarantees that the design is sound before code is written; a GitHub pull-request is not the place for high-level discussions. + +Typos and grammatical errors can go straight to a pull-request. +When in doubt, start on the [mailing-list](#mailing-list). + +## Weekly Call + +The contributors and maintainers of all OCI projects have a weekly meeting Wednesdays at 2:00 PM (USA Pacific). +Everyone is welcome to participate via [UberConference web][UberConference] or audio-only: +1-415-968-0849 (no PIN needed). +An initial agenda will be posted to the [mailing list](#mailing-list) earlier in the week, and everyone is welcome to propose additional topics or suggest other agenda alterations there. +Minutes are posted to the [mailing list](#mailing-list) and minutes from past calls are archived [here][minutes]. + +## Mailing List + +You can subscribe and join the mailing list on [Google Groups](https://groups.google.com/a/opencontainers.org/forum/#!forum/dev). + +## IRC + +OCI discussion happens on #opencontainers on Freenode ([logs][irc-logs]). + +## Markdown style + +To keep consistency throughout the Markdown files in the Open Container spec all files should be formatted one sentence per line. +This fixes two things: it makes diffing easier with git and it resolves fights about line wrapping length. +For example, this paragraph will span three lines in the Markdown source. + +## Git commit + +### Sign your work + +The sign-off is a simple line at the end of the explanation for the patch, which certifies that you wrote it or otherwise have the right to pass it on as an open-source patch. +The rules are pretty simple: if you can certify the below (from [developercertificate.org](http://developercertificate.org/)): + +``` +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +660 York Street, Suite 102, +San Francisco, CA 94110 USA + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. +``` + +then you just add a line to every git commit message: + + Signed-off-by: Joe Smith + +using your real name (sorry, no pseudonyms or anonymous contributions.) + +You can add the sign off when creating the git commit via `git commit -s`. + +### Commit Style + +Simple house-keeping for clean git history. 
+Read more on [How to Write a Git Commit Message](http://chris.beams.io/posts/git-commit/) or the Discussion section of [`git-commit(1)`](http://git-scm.com/docs/git-commit). + +1. Separate the subject from body with a blank line +2. Limit the subject line to 50 characters +3. Capitalize the subject line +4. Do not end the subject line with a period +5. Use the imperative mood in the subject line +6. Wrap the body at 72 characters +7. Use the body to explain what and why vs. how + * If there was important/useful/essential conversation or information, copy or include a reference +8. When possible, one keyword to scope the change in the subject (i.e. "README: ...", "runtime: ...") + + +[UberConference]: https://www.uberconference.com/opencontainers +[irc-logs]: http://ircbot.wl.linuxfoundation.org/eavesdrop/%23opencontainers/ +[minutes]: http://ircbot.wl.linuxfoundation.org/meetings/opencontainers/ diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go new file mode 100644 index 000000000..8475ff741 --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go @@ -0,0 +1,103 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import ( + "time" + + digest "github.com/opencontainers/go-digest" +) + +// ImageConfig defines the execution parameters which should be used as a base when running a container using an image. +type ImageConfig struct { + // User defines the username or UID which the process in the container should run as. + User string `json:"User,omitempty"` + + // ExposedPorts a set of ports to expose from a container running this image. + ExposedPorts map[string]struct{} `json:"ExposedPorts,omitempty"` + + // Env is a list of environment variables to be used in a container. + Env []string `json:"Env,omitempty"` + + // Entrypoint defines a list of arguments to use as the command to execute when the container starts. + Entrypoint []string `json:"Entrypoint,omitempty"` + + // Cmd defines the default arguments to the entrypoint of the container. + Cmd []string `json:"Cmd,omitempty"` + + // Volumes is a set of directories which should be created as data volumes in a container running this image. + Volumes map[string]struct{} `json:"Volumes,omitempty"` + + // WorkingDir sets the current working directory of the entrypoint process in the container. + WorkingDir string `json:"WorkingDir,omitempty"` + + // Labels contains arbitrary metadata for the container. + Labels map[string]string `json:"Labels,omitempty"` + + // StopSignal contains the system call signal that will be sent to the container to exit. + StopSignal string `json:"StopSignal,omitempty"` +} + +// RootFS describes a layer content addresses +type RootFS struct { + // Type is the type of the rootfs. + Type string `json:"type"` + + // DiffIDs is an array of layer content hashes (DiffIDs), in order from bottom-most to top-most. 
+ DiffIDs []digest.Digest `json:"diff_ids"` +} + +// History describes the history of a layer. +type History struct { + // Created is the combined date and time at which the layer was created, formatted as defined by RFC 3339, section 5.6. + Created *time.Time `json:"created,omitempty"` + + // CreatedBy is the command which created the layer. + CreatedBy string `json:"created_by,omitempty"` + + // Author is the author of the build point. + Author string `json:"author,omitempty"` + + // Comment is a custom message set when creating the layer. + Comment string `json:"comment,omitempty"` + + // EmptyLayer is used to mark if the history item created a filesystem diff. + EmptyLayer bool `json:"empty_layer,omitempty"` +} + +// Image is the JSON structure which describes some basic information about the image. +// This provides the `application/vnd.oci.image.config.v1+json` mediatype when marshalled to JSON. +type Image struct { + // Created is the combined date and time at which the image was created, formatted as defined by RFC 3339, section 5.6. + Created *time.Time `json:"created,omitempty"` + + // Author defines the name and/or email address of the person or entity which created and is responsible for maintaining the image. + Author string `json:"author,omitempty"` + + // Architecture is the CPU architecture which the binaries in this image are built to run on. + Architecture string `json:"architecture"` + + // OS is the name of the operating system which the image is built to run on. + OS string `json:"os"` + + // Config defines the execution parameters which should be used as a base when running a container using the image. + Config ImageConfig `json:"config,omitempty"` + + // RootFS references the layer content addresses used by the image. + RootFS RootFS `json:"rootfs"` + + // History describes the history of each layer. + History []History `json:"history,omitempty"` +} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go new file mode 100644 index 000000000..c461647ef --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go @@ -0,0 +1,68 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import digest "github.com/opencontainers/go-digest" + +// Descriptor describes the disposition of targeted content. +// This structure provides `application/vnd.oci.descriptor.v1+json` mediatype +// when marshalled to JSON. +type Descriptor struct { + // MediaType is the media type of the object this schema refers to. + MediaType string `json:"mediaType,omitempty"` + + // Digest is the digest of the targeted content. + Digest digest.Digest `json:"digest"` + + // Size specifies the size in bytes of the blob. 
+ Size int64 `json:"size"` + + // URLs specifies a list of URLs from which this object MAY be downloaded + URLs []string `json:"urls,omitempty"` + + // Annotations contains arbitrary metadata relating to the targeted content. + Annotations map[string]string `json:"annotations,omitempty"` + + // Platform describes the platform which the image in the manifest runs on. + // + // This should only be used when referring to a manifest. + Platform *Platform `json:"platform,omitempty"` +} + +// Platform describes the platform which the image in the manifest runs on. +type Platform struct { + // Architecture field specifies the CPU architecture, for example + // `amd64` or `ppc64`. + Architecture string `json:"architecture"` + + // OS specifies the operating system, for example `linux` or `windows`. + OS string `json:"os"` + + // OSVersion is an optional field specifying the operating system + // version, for example `10.0.10586`. + OSVersion string `json:"os.version,omitempty"` + + // OSFeatures is an optional field specifying an array of strings, + // each listing a required OS feature (for example on Windows `win32k`). + OSFeatures []string `json:"os.features,omitempty"` + + // Variant is an optional field specifying a variant of the CPU, for + // example `ppc64le` to specify a little-endian version of a PowerPC CPU. + Variant string `json:"variant,omitempty"` + + // Features is an optional field specifying an array of strings, each + // listing a required CPU feature (for example `sse4` or `aes`). + Features []string `json:"features,omitempty"` +} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go new file mode 100644 index 000000000..4e6c4b236 --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go @@ -0,0 +1,29 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import "github.com/opencontainers/image-spec/specs-go" + +// Index references manifests for various platforms. +// This structure provides `application/vnd.oci.image.index.v1+json` mediatype when marshalled to JSON. +type Index struct { + specs.Versioned + + // Manifests references platform specific manifests. + Manifests []Descriptor `json:"manifests"` + + // Annotations contains arbitrary metadata for the image index. + Annotations map[string]string `json:"annotations,omitempty"` +} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go new file mode 100644 index 000000000..fc79e9e0d --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go @@ -0,0 +1,28 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +const ( + // ImageLayoutFile is the file name of oci image layout file + ImageLayoutFile = "oci-layout" + // ImageLayoutVersion is the version of ImageLayout + ImageLayoutVersion = "1.0.0" +) + +// ImageLayout is the structure in the "oci-layout" file, found in the root +// of an OCI Image-layout directory. +type ImageLayout struct { + Version string `json:"imageLayoutVersion"` +} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go new file mode 100644 index 000000000..7ff32c40b --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go @@ -0,0 +1,32 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import "github.com/opencontainers/image-spec/specs-go" + +// Manifest provides `application/vnd.oci.image.manifest.v1+json` mediatype structure when marshalled to JSON. +type Manifest struct { + specs.Versioned + + // Config references a configuration object for a container, by digest. + // The referenced configuration object is a JSON blob that the runtime uses to set up the container. + Config Descriptor `json:"config"` + + // Layers is an indexed list of layers referenced by the manifest. + Layers []Descriptor `json:"layers"` + + // Annotations contains arbitrary metadata for the image manifest. + Annotations map[string]string `json:"annotations,omitempty"` +} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go new file mode 100644 index 000000000..bad7bb97f --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go @@ -0,0 +1,48 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +const ( + // MediaTypeDescriptor specifies the media type for a content descriptor. 
+ MediaTypeDescriptor = "application/vnd.oci.descriptor.v1+json" + + // MediaTypeLayoutHeader specifies the media type for the oci-layout. + MediaTypeLayoutHeader = "application/vnd.oci.layout.header.v1+json" + + // MediaTypeImageManifest specifies the media type for an image manifest. + MediaTypeImageManifest = "application/vnd.oci.image.manifest.v1+json" + + // MediaTypeImageIndex specifies the media type for an image index. + MediaTypeImageIndex = "application/vnd.oci.image.index.v1+json" + + // MediaTypeImageLayer is the media type used for layers referenced by the manifest. + MediaTypeImageLayer = "application/vnd.oci.image.layer.v1.tar" + + // MediaTypeImageLayerGzip is the media type used for gzipped layers + // referenced by the manifest. + MediaTypeImageLayerGzip = "application/vnd.oci.image.layer.v1.tar+gzip" + + // MediaTypeImageLayerNonDistributable is the media type for layers referenced by + // the manifest but with distribution restrictions. + MediaTypeImageLayerNonDistributable = "application/vnd.oci.image.layer.nondistributable.v1.tar" + + // MediaTypeImageLayerNonDistributableGzip is the media type for + // gzipped layers referenced by the manifest but with distribution + // restrictions. + MediaTypeImageLayerNonDistributableGzip = "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip" + + // MediaTypeImageConfig specifies the media type for the image configuration. + MediaTypeImageConfig = "application/vnd.oci.image.config.v1+json" +) diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/version.go b/vendor/github.com/opencontainers/image-spec/specs-go/version.go new file mode 100644 index 000000000..0e7d3be29 --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/specs-go/version.go @@ -0,0 +1,32 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package specs + +import "fmt" + +const ( + // VersionMajor is for an API incompatible changes + VersionMajor = 1 + // VersionMinor is for functionality in a backwards-compatible manner + VersionMinor = 0 + // VersionPatch is for backwards-compatible bug fixes + VersionPatch = 0 + + // VersionDev indicates development branch. Releases will be empty string. + VersionDev = "-rc5-dev" +) + +// Version is the specification version that the package types support. +var Version = fmt.Sprintf("%d.%d.%d%s", VersionMajor, VersionMinor, VersionPatch, VersionDev) diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/versioned.go b/vendor/github.com/opencontainers/image-spec/specs-go/versioned.go new file mode 100644 index 000000000..58a1510f3 --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/specs-go/versioned.go @@ -0,0 +1,23 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package specs + +// Versioned provides a struct with the manifest schemaVersion and mediaType. +// Incoming content with unknown schema version can be decoded against this +// struct to check the version. +type Versioned struct { + // SchemaVersion is the image manifest schema that this image follows + SchemaVersion int `json:"schemaVersion"` +}
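
For reference, a minimal sketch (separate from the vendored diff above) of how these image-spec types fit together: an image index whose single manifest entry targets linux/amd64, marshalled to its OCI JSON form. It assumes only the Go standard library plus the two vendored packages shown above; the size and annotation values are illustrative placeholders, and the descriptor's digest (declared in the earlier part of the vendored descriptor.go) is simply left at its zero value here.

    package main

    import (
    	"encoding/json"
    	"fmt"

    	specs "github.com/opencontainers/image-spec/specs-go"
    	v1 "github.com/opencontainers/image-spec/specs-go/v1"
    )

    func main() {
    	// Build an index with one platform-specific manifest entry.
    	idx := v1.Index{
    		Versioned: specs.Versioned{SchemaVersion: 2},
    		Manifests: []v1.Descriptor{
    			{
    				MediaType: v1.MediaTypeImageManifest,
    				Size:      1234, // placeholder size in bytes
    				Platform: &v1.Platform{
    					Architecture: "amd64",
    					OS:           "linux",
    				},
    			},
    		},
    		Annotations: map[string]string{
    			"org.example.note": "illustrative placeholder annotation",
    		},
    	}

    	// The marshalled document itself would be served with
    	// v1.MediaTypeImageIndex ("application/vnd.oci.image.index.v1+json").
    	out, err := json.MarshalIndent(idx, "", "  ")
    	if err != nil {
    		panic(err)
    	}
    	fmt.Printf("media type: %s\n%s\n", v1.MediaTypeImageIndex, out)
    }

A v1.Manifest is assembled the same way, with a Config descriptor (v1.MediaTypeImageConfig) and a Layers slice of descriptors (typically v1.MediaTypeImageLayerGzip) in place of the Manifests slice.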