From 0e4f0fe292a3abbd533db9eafe745f5b94d038b0 Mon Sep 17 00:00:00 2001 From: deniseschannon Date: Wed, 29 Apr 2015 20:47:47 -0700 Subject: [PATCH 1/3] Changes required due to labels type change in ServiceConfig --- config/config.go | 4 +- config/default.go | 116 +++++++++++++++++++++++----------------------- docker/factory.go | 2 +- init/bootstrap.go | 8 ++-- 4 files changed, 65 insertions(+), 65 deletions(-) diff --git a/config/config.go b/config/config.go index 2694f1b0..1af7c8fe 100644 --- a/config/config.go +++ b/config/config.go @@ -194,9 +194,9 @@ func Dump(private, full bool) (string, error) { func (c *Config) configureConsole() error { if console, ok := c.SystemContainers[CONSOLE_CONTAINER]; ok { if c.Console.Persistent { - console.Labels = append(console.Labels, REMOVE+"=false") + console.Labels.MapParts()[REMOVE] = "false" } else { - console.Labels = append(console.Labels, REMOVE+"=true") + console.Labels.MapParts()[REMOVE] = "true" } } diff --git a/config/default.go b/config/default.go index 11813391..f35340bf 100644 --- a/config/default.go +++ b/config/default.go @@ -79,10 +79,10 @@ func NewConfig() *Config { "udev": { Net: "host", Privileged: true, - Labels: []string{ - DETACH + "=false", - SCOPE + "=" + SYSTEM, - }, + Labels: project.NewSliceorMap(map[string]string{ + DETACH: "false", + SCOPE: SYSTEM, + }), Volumes: []string{ "/dev:/host/dev", "/lib/modules:/lib/modules", @@ -98,10 +98,10 @@ func NewConfig() *Config { Restart: "always", Net: "host", Privileged: true, - Labels: []string{ - DETACH + "=true", - SCOPE + "=" + SYSTEM, - }, + Labels: project.NewSliceorMap(map[string]string{ + DETACH: "true", + SCOPE: SYSTEM, + }), Environment: []string{ "DAEMON=true", }, @@ -114,10 +114,10 @@ func NewConfig() *Config { Net: "none", ReadOnly: true, Privileged: true, - Labels: []string{ - CREATE_ONLY + "=true", - SCOPE + "=" + SYSTEM, - }, + Labels: project.NewSliceorMap(map[string]string{ + CREATE_ONLY: "true", + SCOPE: SYSTEM, + }), Volumes: []string{ "/dev:/host/dev", "/var/lib/rancher/conf:/var/lib/rancher/conf", @@ -134,10 +134,10 @@ func NewConfig() *Config { Net: "none", ReadOnly: true, Privileged: true, - Labels: []string{ - CREATE_ONLY + "=true", - SCOPE + "=" + SYSTEM, - }, + Labels: project.NewSliceorMap(map[string]string{ + CREATE_ONLY: "true", + SCOPE: SYSTEM, + }), Volumes: []string{ "/init:/sbin/halt:ro", "/init:/sbin/poweroff:ro", @@ -159,10 +159,10 @@ func NewConfig() *Config { Net: "none", ReadOnly: true, Privileged: true, - Labels: []string{ - CREATE_ONLY + "=true", - SCOPE + "=" + SYSTEM, - }, + Labels: project.NewSliceorMap(map[string]string{ + CREATE_ONLY: "true", + SCOPE: SYSTEM, + }), Volumes: []string{ "/home:/home", "/opt:/opt", @@ -174,10 +174,10 @@ func NewConfig() *Config { Net: "none", ReadOnly: true, Privileged: true, - Labels: []string{ - CREATE_ONLY + "=true", - SCOPE + "=" + SYSTEM, - }, + Labels: project.NewSliceorMap(map[string]string{ + CREATE_ONLY: "true", + SCOPE: SYSTEM, + }), Volumes: []string{ "/var/lib/rancher:/var/lib/rancher", "/var/lib/docker:/var/lib/docker", @@ -190,10 +190,10 @@ func NewConfig() *Config { Net: "none", ReadOnly: true, Privileged: true, - Labels: []string{ - CREATE_ONLY + "=true", - SCOPE + "=" + SYSTEM, - }, + Labels: project.NewSliceorMap(map[string]string{ + CREATE_ONLY: "true", + SCOPE: SYSTEM, + }), VolumesFrom: []string{ "docker-volumes", "command-volumes", @@ -206,11 +206,11 @@ func NewConfig() *Config { Image: "cloudinit", Privileged: true, Net: "host", - Labels: []string{ - RELOAD_CONFIG + "=true", - DETACH + 
"=false", - SCOPE + "=" + SYSTEM, - }, + Labels: project.NewSliceorMap(map[string]string{ + RELOAD_CONFIG: "true", + DETACH: "false", + SCOPE: SYSTEM, + }), Environment: []string{ "CLOUD_INIT_NETWORK=false", }, @@ -223,10 +223,10 @@ func NewConfig() *Config { Image: "network", Privileged: true, Net: "host", - Labels: []string{ - DETACH + "=false", - SCOPE + "=" + SYSTEM, - }, + Labels: project.NewSliceorMap(map[string]string{ + DETACH: "false", + SCOPE: SYSTEM, + }), Links: []string{ "cloud-init-pre", }, @@ -238,11 +238,11 @@ func NewConfig() *Config { "cloud-init": { Image: "cloudinit", Privileged: true, - Labels: []string{ - RELOAD_CONFIG + "=true", - DETACH + "=false", - SCOPE + "=" + SYSTEM, - }, + Labels: project.NewSliceorMap(map[string]string{ + RELOAD_CONFIG: "true", + DETACH: "false", + SCOPE: SYSTEM, + }), Net: "host", Links: []string{ "cloud-init-pre", @@ -258,9 +258,9 @@ func NewConfig() *Config { Restart: "always", Privileged: true, Net: "host", - Labels: []string{ - SCOPE + "=" + SYSTEM, - }, + Labels: project.NewSliceorMap(map[string]string{ + SCOPE: SYSTEM, + }), Links: []string{ "cloud-init", "network", @@ -271,9 +271,9 @@ func NewConfig() *Config { Restart: "always", Privileged: true, Net: "host", - Labels: []string{ - SCOPE + "=" + SYSTEM, - }, + Labels: project.NewSliceorMap(map[string]string{ + SCOPE: SYSTEM, + }), VolumesFrom: []string{ "system-volumes", }, @@ -286,9 +286,9 @@ func NewConfig() *Config { Pid: "host", Ipc: "host", Net: "host", - Labels: []string{ - SCOPE + "=" + SYSTEM, - }, + Labels: project.NewSliceorMap(map[string]string{ + SCOPE: SYSTEM, + }), Links: []string{ "network", }, @@ -299,10 +299,10 @@ func NewConfig() *Config { "userdockerwait": { Image: "userdockerwait", Net: "host", - Labels: []string{ - DETACH + "=false", - SCOPE + "=" + SYSTEM, - }, + Labels: project.NewSliceorMap(map[string]string{ + DETACH: "false", + SCOPE: SYSTEM, + }), Links: []string{ "userdocker", }, @@ -316,9 +316,9 @@ func NewConfig() *Config { Links: []string{ "cloud-init", }, - Labels: []string{ - SCOPE + "=" + SYSTEM, - }, + Labels: project.NewSliceorMap(map[string]string{ + SCOPE: SYSTEM, + }), VolumesFrom: []string{ "all-volumes", }, diff --git a/docker/factory.go b/docker/factory.go index a925a4d3..d0842b61 100644 --- a/docker/factory.go +++ b/docker/factory.go @@ -77,7 +77,7 @@ func (c *containerBasedService) Name() string { } func isSystemService(serviceConfig *project.ServiceConfig) bool { - return util.GetValue(serviceConfig.Labels, config.SCOPE) == config.SYSTEM + return serviceConfig.Labels.MapParts()[config.SCOPE] == config.SYSTEM } func (c *ContainerFactory) Create(project *project.Project, name string, serviceConfig *project.ServiceConfig) (project.Service, error) { diff --git a/init/bootstrap.go b/init/bootstrap.go index ae826128..152f9714 100644 --- a/init/bootstrap.go +++ b/init/bootstrap.go @@ -79,10 +79,10 @@ outer: Privileged: true, Image: "autoformat", Command: format, - Labels: []string{ - config.DETACH + "=false", - config.SCOPE + "=" + config.SYSTEM, - }, + Labels: project.NewSliceorMap(map[string]string{ + config.DETACH: "false", + config.SCOPE: config.SYSTEM, + }), LogDriver: "json-file", Environment: []string{ "MAGIC=" + boot2dockerMagic, From d845d1667423d0ded693a1082810bee0eecc3629 Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Thu, 30 Apr 2015 22:16:02 -0700 Subject: [PATCH 2/3] Godep changes --- Godeps/Godeps.json | 125 +- .../github.com/docker/docker/api/README.md | 5 - .../docker/docker/api/api_unit_test.go | 19 - 
.../docker/docker/api/client/cli.go | 185 -- .../docker/docker/api/client/commands.go | 2937 ----------------- .../docker/docker/api/client/hijack.go | 250 -- .../docker/docker/api/client/utils.go | 296 -- .../github.com/docker/docker/api/common.go | 132 - .../docker/docker/api/server/server.go | 1624 --------- .../docker/docker/api/server/server_linux.go | 103 - .../docker/api/server/server_unit_test.go | 553 ---- .../docker/api/server/server_windows.go | 31 - .../docker/docker/api/types/stats.go | 87 - .../docker/docker/api/types/types.go | 20 - .../autogen/dockerversion/dockerversion.go | 11 - .../github.com/docker/docker/engine/engine.go | 255 -- .../docker/docker/engine/engine_test.go | 236 -- .../github.com/docker/docker/engine/env.go | 310 -- .../docker/docker/engine/env_test.go | 358 -- .../github.com/docker/docker/engine/hack.go | 21 - .../docker/docker/engine/helpers_test.go | 11 - .../github.com/docker/docker/engine/http.go | 42 - .../github.com/docker/docker/engine/job.go | 269 -- .../docker/docker/engine/job_test.go | 75 - .../docker/docker/engine/shutdown_test.go | 80 - .../docker/docker/engine/streams.go | 225 -- .../docker/docker/engine/streams_test.go | 215 -- .../github.com/docker/docker/engine/table.go | 140 - .../docker/docker/engine/table_test.go | 112 - .../src/github.com/docker/docker/nat/nat.go | 3 + .../src/github.com/docker/docker/opts/opts.go | 34 +- .../docker/docker/pkg/archive/archive.go | 69 +- .../docker/docker/pkg/archive/archive_test.go | 580 +++- .../docker/docker/pkg/archive/archive_unix.go | 4 +- .../pkg/archive/archive_windows_test.go | 2 +- .../docker/docker/pkg/archive/changes.go | 12 +- .../docker/docker/pkg/archive/changes_test.go | 152 + .../docker/docker/pkg/archive/wrap_test.go | 98 + .../docker/docker/pkg/common/randomid.go | 47 - .../docker/docker/pkg/common/randomid_test.go | 59 - .../docker/docker/pkg/fileutils/fileutils.go | 170 +- .../docker/pkg/fileutils/fileutils_test.go | 357 ++ .../docker/docker/pkg/ioutils/readers.go | 10 + .../docker/docker/pkg/ioutils/writers.go | 21 + .../docker/docker/pkg/ioutils/writers_test.go | 41 + .../docker/docker/pkg/mflag/flag.go | 51 +- .../docker/docker/pkg/mount/flags_freebsd.go | 23 +- .../docker/docker/pkg/mount/flags_linux.go | 93 +- .../docker/pkg/mount/flags_unsupported.go | 1 + .../docker/docker/pkg/mount/mount.go | 22 +- .../docker/docker/pkg/mount/mountinfo.go | 39 +- .../docker/pkg/mount/mountinfo_freebsd.go | 3 +- .../docker/pkg/mount/mountinfo_linux.go | 7 +- .../docker/pkg/mount/sharedsubtree_linux.go | 16 + .../docker/pkg/parsers/filters/parse.go | 3 +- .../docker/docker/pkg/pools/pools.go | 2 - .../docker/docker/pkg/pools/pools_nopool.go | 73 - .../pkg/progressreader/progressreader.go | 69 - .../docker/docker/pkg/system/lstat.go | 7 +- .../docker/docker/pkg/system/lstat_test.go | 1 + .../docker/docker/pkg/system/meminfo_linux.go | 8 +- .../docker/pkg/system/meminfo_linux_test.go | 1 + .../docker/docker/pkg/system/mknod.go | 2 + .../docker/docker/pkg/system/stat.go | 2 + .../docker/docker/pkg/system/stat_linux.go | 8 +- .../docker/docker/pkg/system/stat_test.go | 1 + .../docker/pkg/system/stat_unsupported.go | 1 + .../docker/docker/pkg/system/utimes_test.go | 1 + .../docker/docker/pkg/term/tc_linux_cgo.go | 47 - .../docker/docker/pkg/term/tc_other.go | 19 - .../github.com/docker/docker/pkg/term/term.go | 118 - .../docker/docker/pkg/term/term_windows.go | 137 - .../docker/docker/pkg/term/termios_darwin.go | 65 - .../docker/docker/pkg/term/termios_freebsd.go | 65 - 
.../docker/docker/pkg/term/termios_linux.go | 46 - .../pkg/term/winconsole/console_windows.go | 1042 ------ .../term/winconsole/console_windows_test.go | 232 -- .../pkg/term/winconsole/term_emulator.go | 218 -- .../pkg/term/winconsole/term_emulator_test.go | 388 --- .../docker/docker/pkg/timeutils/json.go | 26 - .../docker/docker/pkg/ulimit/ulimit.go | 2 +- .../docker/docker/pkg/ulimit/ulimit_test.go | 14 + .../docker/docker/pkg/units/size.go | 24 +- .../docker/docker/pkg/version/version.go | 63 - .../docker/docker/pkg/version/version_test.go | 27 - .../docker/docker/runconfig/compare.go | 21 +- .../docker/docker/runconfig/config.go | 164 +- .../docker/docker/runconfig/config_test.go | 40 +- .../docker/docker/runconfig/exec.go | 45 +- .../fixtures/container_config_1_14.json | 30 + .../fixtures/container_config_1_17.json | 49 + .../fixtures/container_config_1_19.json | 57 + .../docker/docker/runconfig/hostconfig.go | 197 +- .../docker/docker/runconfig/merge.go | 17 +- .../docker/docker/runconfig/parse.go | 47 +- .../docker/docker/runconfig/parse_test.go | 6 + .../github.com/docker/docker/utils/daemon.go | 36 - .../github.com/docker/docker/utils/flags.go | 45 - .../github.com/docker/docker/utils/http.go | 168 - .../docker/docker/utils/jsonmessage.go | 172 - .../docker/docker/utils/jsonmessage_test.go | 38 - .../docker/docker/utils/streamformatter.go | 121 - .../docker/utils/streamformatter_test.go | 67 - .../github.com/docker/docker/utils/tmpdir.go | 16 - .../github.com/docker/docker/utils/utils.go | 554 ---- .../docker/docker/utils/utils_daemon.go | 18 - .../docker/docker/utils/utils_daemon_test.go | 26 - .../docker/docker/utils/utils_test.go | 154 - .../docker/libtrust/CONTRIBUTING.md | 13 - .../src/github.com/docker/libtrust/LICENSE | 191 -- .../github.com/docker/libtrust/MAINTAINERS | 3 - .../src/github.com/docker/libtrust/README.md | 18 - .../docker/libtrust/certificates.go | 175 - .../docker/libtrust/certificates_test.go | 111 - .../src/github.com/docker/libtrust/doc.go | 9 - .../src/github.com/docker/libtrust/ec_key.go | 428 --- .../github.com/docker/libtrust/ec_key_test.go | 157 - .../src/github.com/docker/libtrust/filter.go | 50 - .../github.com/docker/libtrust/filter_test.go | 81 - .../src/github.com/docker/libtrust/hash.go | 56 - .../github.com/docker/libtrust/jsonsign.go | 657 ---- .../docker/libtrust/jsonsign_test.go | 380 --- .../src/github.com/docker/libtrust/key.go | 253 -- .../github.com/docker/libtrust/key_files.go | 255 -- .../docker/libtrust/key_files_test.go | 220 -- .../github.com/docker/libtrust/key_manager.go | 175 - .../github.com/docker/libtrust/key_test.go | 80 - .../src/github.com/docker/libtrust/rsa_key.go | 427 --- .../docker/libtrust/rsa_key_test.go | 157 - .../docker/libtrust/testutil/certificates.go | 94 - .../docker/libtrust/tlsdemo/README.md | 50 - .../docker/libtrust/tlsdemo/client.go | 89 - .../docker/libtrust/tlsdemo/gencert.go | 62 - .../docker/libtrust/tlsdemo/genkeys.go | 61 - .../docker/libtrust/tlsdemo/server.go | 80 - .../docker/libtrust/trustgraph/graph.go | 50 - .../libtrust/trustgraph/memory_graph.go | 133 - .../libtrust/trustgraph/memory_graph_test.go | 174 - .../docker/libtrust/trustgraph/statement.go | 227 -- .../libtrust/trustgraph/statement_test.go | 417 --- .../src/github.com/docker/libtrust/util.go | 361 -- .../github.com/docker/libtrust/util_test.go | 23 - .../rancher-compose/docker/factory.go | 19 +- .../rancher-compose/project/types.go | 108 +- 144 files changed, 2360 insertions(+), 18925 deletions(-) delete mode 100644 
Godeps/_workspace/src/github.com/docker/docker/api/README.md delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/api_unit_test.go delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/client/cli.go delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/client/commands.go delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/client/hijack.go delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/client/utils.go delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/common.go delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/server/server.go delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/server/server_linux.go delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/server/server_unit_test.go delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/server/server_windows.go delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/types/stats.go delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/api/types/types.go delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/autogen/dockerversion/dockerversion.go delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/engine/engine.go delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/engine/engine_test.go delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/engine/env.go delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/engine/env_test.go delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/engine/hack.go delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/engine/helpers_test.go delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/engine/http.go delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/engine/job.go delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/engine/job_test.go delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/engine/shutdown_test.go delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/engine/streams.go delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/engine/streams_test.go delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/engine/table.go delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/engine/table_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/archive/wrap_test.go delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/common/randomid.go delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/common/randomid_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/fileutils/fileutils_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/writers_test.go delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/pools/pools_nopool.go delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/progressreader/progressreader.go delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/term/tc_linux_cgo.go delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/term/tc_other.go delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/term/term.go delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/term/term_windows.go delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/term/termios_darwin.go delete mode 100644 
Godeps/_workspace/src/github.com/docker/docker/pkg/term/termios_freebsd.go delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/term/termios_linux.go delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/term/winconsole/console_windows.go delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/term/winconsole/console_windows_test.go delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/term/winconsole/term_emulator.go delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/term/winconsole/term_emulator_test.go delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/timeutils/json.go delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/version/version.go delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/pkg/version/version_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/docker/runconfig/fixtures/container_config_1_14.json create mode 100644 Godeps/_workspace/src/github.com/docker/docker/runconfig/fixtures/container_config_1_17.json create mode 100644 Godeps/_workspace/src/github.com/docker/docker/runconfig/fixtures/container_config_1_19.json delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/utils/daemon.go delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/utils/flags.go delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/utils/http.go delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/utils/jsonmessage.go delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/utils/jsonmessage_test.go delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/utils/streamformatter.go delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/utils/streamformatter_test.go delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/utils/tmpdir.go delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/utils/utils.go delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/utils/utils_daemon.go delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/utils/utils_daemon_test.go delete mode 100644 Godeps/_workspace/src/github.com/docker/docker/utils/utils_test.go delete mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/CONTRIBUTING.md delete mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/LICENSE delete mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/MAINTAINERS delete mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/README.md delete mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/certificates.go delete mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/certificates_test.go delete mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/doc.go delete mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/ec_key.go delete mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/ec_key_test.go delete mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/filter.go delete mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/filter_test.go delete mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/hash.go delete mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/jsonsign.go delete mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/jsonsign_test.go delete mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/key.go delete mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/key_files.go delete mode 100644 
Godeps/_workspace/src/github.com/docker/libtrust/key_files_test.go delete mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/key_manager.go delete mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/key_test.go delete mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/rsa_key.go delete mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/rsa_key_test.go delete mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/testutil/certificates.go delete mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/README.md delete mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/client.go delete mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/gencert.go delete mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/genkeys.go delete mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/server.go delete mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/graph.go delete mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/memory_graph.go delete mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/memory_graph_test.go delete mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/statement.go delete mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/statement_test.go delete mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/util.go delete mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/util_test.go diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 06b596e3..96591a69 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -45,135 +45,90 @@ "ImportPath": "github.com/coreos/yaml", "Rev": "6b16a5714269b2f70720a45406b1babd947a17ef" }, - { - "ImportPath": "github.com/docker/docker/api", - "Comment": "v1.4.1-1937-g8c315ba", - "Rev": "8c315ba9b34caef71298624028907bc26c2c8998" - }, - { - "ImportPath": "github.com/docker/docker/autogen/dockerversion", - "Comment": "v1.4.1-1937-g8c315ba", - "Rev": "8c315ba9b34caef71298624028907bc26c2c8998" - }, - { - "ImportPath": "github.com/docker/docker/engine", - "Comment": "v1.4.1-1937-g8c315ba", - "Rev": "8c315ba9b34caef71298624028907bc26c2c8998" - }, { "ImportPath": "github.com/docker/docker/nat", - "Comment": "v1.4.1-1937-g8c315ba", - "Rev": "8c315ba9b34caef71298624028907bc26c2c8998" + "Comment": "v1.4.1-3121-g79d086c", + "Rev": "79d086c47d61f6da522a0e586005a02dae9b5ede" }, { "ImportPath": "github.com/docker/docker/opts", - "Comment": "v1.4.1-1937-g8c315ba", - "Rev": "8c315ba9b34caef71298624028907bc26c2c8998" + "Comment": "v1.4.1-3121-g79d086c", + "Rev": "79d086c47d61f6da522a0e586005a02dae9b5ede" }, { "ImportPath": "github.com/docker/docker/pkg/archive", - "Comment": "v1.4.1-1937-g8c315ba", - "Rev": "8c315ba9b34caef71298624028907bc26c2c8998" - }, - { - "ImportPath": "github.com/docker/docker/pkg/common", - "Comment": "v1.4.1-1937-g8c315ba", - "Rev": "8c315ba9b34caef71298624028907bc26c2c8998" + "Comment": "v1.4.1-3121-g79d086c", + "Rev": "79d086c47d61f6da522a0e586005a02dae9b5ede" }, { "ImportPath": "github.com/docker/docker/pkg/fileutils", - "Comment": "v1.4.1-1937-g8c315ba", - "Rev": "8c315ba9b34caef71298624028907bc26c2c8998" + "Comment": "v1.4.1-3121-g79d086c", + "Rev": "79d086c47d61f6da522a0e586005a02dae9b5ede" }, { "ImportPath": "github.com/docker/docker/pkg/homedir", - "Comment": "v1.4.1-1937-g8c315ba", - "Rev": "8c315ba9b34caef71298624028907bc26c2c8998" + "Comment": "v1.4.1-3121-g79d086c", + "Rev": 
"79d086c47d61f6da522a0e586005a02dae9b5ede" }, { "ImportPath": "github.com/docker/docker/pkg/ioutils", - "Comment": "v1.4.1-1937-g8c315ba", - "Rev": "8c315ba9b34caef71298624028907bc26c2c8998" + "Comment": "v1.4.1-3121-g79d086c", + "Rev": "79d086c47d61f6da522a0e586005a02dae9b5ede" }, { "ImportPath": "github.com/docker/docker/pkg/mflag", - "Comment": "v1.4.1-1937-g8c315ba", - "Rev": "8c315ba9b34caef71298624028907bc26c2c8998" + "Comment": "v1.4.1-3121-g79d086c", + "Rev": "79d086c47d61f6da522a0e586005a02dae9b5ede" }, { "ImportPath": "github.com/docker/docker/pkg/mount", - "Comment": "v1.4.1-1937-g8c315ba", - "Rev": "8c315ba9b34caef71298624028907bc26c2c8998" + "Comment": "v1.4.1-3121-g79d086c", + "Rev": "79d086c47d61f6da522a0e586005a02dae9b5ede" }, { "ImportPath": "github.com/docker/docker/pkg/parsers", - "Comment": "v1.4.1-1937-g8c315ba", - "Rev": "8c315ba9b34caef71298624028907bc26c2c8998" + "Comment": "v1.4.1-3121-g79d086c", + "Rev": "79d086c47d61f6da522a0e586005a02dae9b5ede" }, { "ImportPath": "github.com/docker/docker/pkg/pools", - "Comment": "v1.4.1-1937-g8c315ba", - "Rev": "8c315ba9b34caef71298624028907bc26c2c8998" - }, - { - "ImportPath": "github.com/docker/docker/pkg/progressreader", - "Comment": "v1.4.1-1937-g8c315ba", - "Rev": "8c315ba9b34caef71298624028907bc26c2c8998" + "Comment": "v1.4.1-3121-g79d086c", + "Rev": "79d086c47d61f6da522a0e586005a02dae9b5ede" }, { "ImportPath": "github.com/docker/docker/pkg/promise", - "Comment": "v1.4.1-1937-g8c315ba", - "Rev": "8c315ba9b34caef71298624028907bc26c2c8998" + "Comment": "v1.4.1-3121-g79d086c", + "Rev": "79d086c47d61f6da522a0e586005a02dae9b5ede" }, { "ImportPath": "github.com/docker/docker/pkg/reexec", - "Comment": "v1.4.1-1937-g8c315ba", - "Rev": "8c315ba9b34caef71298624028907bc26c2c8998" + "Comment": "v1.4.1-3121-g79d086c", + "Rev": "79d086c47d61f6da522a0e586005a02dae9b5ede" }, { "ImportPath": "github.com/docker/docker/pkg/system", - "Comment": "v1.4.1-1937-g8c315ba", - "Rev": "8c315ba9b34caef71298624028907bc26c2c8998" - }, - { - "ImportPath": "github.com/docker/docker/pkg/term", - "Comment": "v1.4.1-1937-g8c315ba", - "Rev": "8c315ba9b34caef71298624028907bc26c2c8998" - }, - { - "ImportPath": "github.com/docker/docker/pkg/timeutils", - "Comment": "v1.4.1-1937-g8c315ba", - "Rev": "8c315ba9b34caef71298624028907bc26c2c8998" + "Comment": "v1.4.1-3121-g79d086c", + "Rev": "79d086c47d61f6da522a0e586005a02dae9b5ede" }, { "ImportPath": "github.com/docker/docker/pkg/ulimit", - "Comment": "v1.4.1-1937-g8c315ba", - "Rev": "8c315ba9b34caef71298624028907bc26c2c8998" + "Comment": "v1.4.1-3121-g79d086c", + "Rev": "79d086c47d61f6da522a0e586005a02dae9b5ede" }, { "ImportPath": "github.com/docker/docker/pkg/units", - "Comment": "v1.4.1-1937-g8c315ba", - "Rev": "8c315ba9b34caef71298624028907bc26c2c8998" - }, - { - "ImportPath": "github.com/docker/docker/pkg/version", - "Comment": "v1.4.1-1937-g8c315ba", - "Rev": "8c315ba9b34caef71298624028907bc26c2c8998" + "Comment": "v1.4.1-3121-g79d086c", + "Rev": "79d086c47d61f6da522a0e586005a02dae9b5ede" }, { "ImportPath": "github.com/docker/docker/runconfig", - "Comment": "v1.4.1-1937-g8c315ba", - "Rev": "8c315ba9b34caef71298624028907bc26c2c8998" - }, - { - "ImportPath": "github.com/docker/docker/utils", - "Comment": "v1.4.1-1937-g8c315ba", - "Rev": "8c315ba9b34caef71298624028907bc26c2c8998" + "Comment": "v1.4.1-3121-g79d086c", + "Rev": "79d086c47d61f6da522a0e586005a02dae9b5ede" }, { "ImportPath": "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar", - "Comment": "v1.4.1-1937-g8c315ba", - "Rev": 
"8c315ba9b34caef71298624028907bc26c2c8998" + "Comment": "v1.4.1-3121-g79d086c", + "Rev": "79d086c47d61f6da522a0e586005a02dae9b5ede" }, { "ImportPath": "github.com/docker/libcontainer/netlink", @@ -185,10 +140,6 @@ "Comment": "v1.4.0-127-gf4a4391", "Rev": "f4a4391e4ef7e886e56816ae59cbe99d8cff91d9" }, - { - "ImportPath": "github.com/docker/libtrust", - "Rev": "c54fbb67c1f1e68d7d6f8d2ad7c9360404616a41" - }, { "ImportPath": "github.com/docker/machine/utils", "Comment": "v0.1.0-rc3-18-gd674e87", @@ -221,13 +172,13 @@ }, { "ImportPath": "github.com/rancherio/rancher-compose/docker", - "Comment": "0.1.0-13-g0694d95", - "Rev": "0694d95502831b4d22422ff623889cf70bc017e2" + "Comment": "0.1.0-18-gac8e453", + "Rev": "ac8e4533c25f001e633ad85022235182601536dc" }, { "ImportPath": "github.com/rancherio/rancher-compose/project", - "Comment": "0.1.0-13-g0694d95", - "Rev": "0694d95502831b4d22422ff623889cf70bc017e2" + "Comment": "0.1.0-18-gac8e453", + "Rev": "ac8e4533c25f001e633ad85022235182601536dc" }, { "ImportPath": "github.com/ryanuber/go-glob", diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/README.md b/Godeps/_workspace/src/github.com/docker/docker/api/README.md deleted file mode 100644 index 453f61a1..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/README.md +++ /dev/null @@ -1,5 +0,0 @@ -This directory contains code pertaining to the Docker API: - - - Used by the docker client when communicating with the docker daemon - - - Used by third party tools wishing to interface with the docker daemon diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/api_unit_test.go b/Godeps/_workspace/src/github.com/docker/docker/api/api_unit_test.go deleted file mode 100644 index 678331d3..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/api_unit_test.go +++ /dev/null @@ -1,19 +0,0 @@ -package api - -import ( - "testing" -) - -func TestJsonContentType(t *testing.T) { - if !MatchesContentType("application/json", "application/json") { - t.Fail() - } - - if !MatchesContentType("application/json; charset=utf-8", "application/json") { - t.Fail() - } - - if MatchesContentType("dockerapplication/json", "application/json") { - t.Fail() - } -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/cli.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/cli.go deleted file mode 100644 index e0ab4191..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/client/cli.go +++ /dev/null @@ -1,185 +0,0 @@ -package client - -import ( - "crypto/tls" - "encoding/json" - "errors" - "fmt" - "io" - "net" - "net/http" - "os" - "reflect" - "strings" - "text/template" - "time" - - "github.com/docker/docker/pkg/homedir" - flag "github.com/docker/docker/pkg/mflag" - "github.com/docker/docker/pkg/term" - "github.com/docker/docker/registry" -) - -type DockerCli struct { - proto string - addr string - configFile *registry.ConfigFile - in io.ReadCloser - out io.Writer - err io.Writer - keyFile string - tlsConfig *tls.Config - scheme string - // inFd holds file descriptor of the client's STDIN, if it's a valid file - inFd uintptr - // outFd holds file descriptor of the client's STDOUT, if it's a valid file - outFd uintptr - // isTerminalIn describes if client's STDIN is a TTY - isTerminalIn bool - // isTerminalOut describes if client's STDOUT is a TTY - isTerminalOut bool - transport *http.Transport -} - -var funcMap = template.FuncMap{ - "json": func(v interface{}) string { - a, _ := json.Marshal(v) - return string(a) - }, -} - -func (cli *DockerCli) 
getMethod(args ...string) (func(...string) error, bool) { - camelArgs := make([]string, len(args)) - for i, s := range args { - if len(s) == 0 { - return nil, false - } - camelArgs[i] = strings.ToUpper(s[:1]) + strings.ToLower(s[1:]) - } - methodName := "Cmd" + strings.Join(camelArgs, "") - method := reflect.ValueOf(cli).MethodByName(methodName) - if !method.IsValid() { - return nil, false - } - return method.Interface().(func(...string) error), true -} - -// Cmd executes the specified command -func (cli *DockerCli) Cmd(args ...string) error { - if len(args) > 1 { - method, exists := cli.getMethod(args[:2]...) - if exists { - return method(args[2:]...) - } - } - if len(args) > 0 { - method, exists := cli.getMethod(args[0]) - if !exists { - fmt.Fprintf(cli.err, "docker: '%s' is not a docker command. See 'docker --help'.\n", args[0]) - os.Exit(1) - } - return method(args[1:]...) - } - return cli.CmdHelp() -} - -func (cli *DockerCli) Subcmd(name, signature, description string, exitOnError bool) *flag.FlagSet { - var errorHandling flag.ErrorHandling - if exitOnError { - errorHandling = flag.ExitOnError - } else { - errorHandling = flag.ContinueOnError - } - flags := flag.NewFlagSet(name, errorHandling) - flags.Usage = func() { - options := "" - if signature != "" { - signature = " " + signature - } - if flags.FlagCountUndeprecated() > 0 { - options = " [OPTIONS]" - } - fmt.Fprintf(cli.out, "\nUsage: docker %s%s%s\n\n%s\n\n", name, options, signature, description) - flags.SetOutput(cli.out) - flags.PrintDefaults() - os.Exit(0) - } - return flags -} - -func (cli *DockerCli) LoadConfigFile() (err error) { - cli.configFile, err = registry.LoadConfig(homedir.Get()) - if err != nil { - fmt.Fprintf(cli.err, "WARNING: %s\n", err) - } - return err -} - -func (cli *DockerCli) CheckTtyInput(attachStdin, ttyMode bool) error { - // In order to attach to a container tty, input stream for the client must - // be a tty itself: redirecting or piping the client standard input is - // incompatible with `docker run -t`, `docker exec -t` or `docker attach`. - if ttyMode && attachStdin && !cli.isTerminalIn { - return errors.New("cannot enable tty mode on non tty input") - } - return nil -} - -func NewDockerCli(in io.ReadCloser, out, err io.Writer, keyFile string, proto, addr string, tlsConfig *tls.Config) *DockerCli { - var ( - inFd uintptr - outFd uintptr - isTerminalIn = false - isTerminalOut = false - scheme = "http" - ) - - if tlsConfig != nil { - scheme = "https" - } - if in != nil { - inFd, isTerminalIn = term.GetFdInfo(in) - } - - if out != nil { - outFd, isTerminalOut = term.GetFdInfo(out) - } - - if err == nil { - err = out - } - - // The transport is created here for reuse during the client session - tr := &http.Transport{ - TLSClientConfig: tlsConfig, - } - - // Why 32? 
See issue 8035 - timeout := 32 * time.Second - if proto == "unix" { - // no need in compressing for local communications - tr.DisableCompression = true - tr.Dial = func(_, _ string) (net.Conn, error) { - return net.DialTimeout(proto, addr, timeout) - } - } else { - tr.Proxy = http.ProxyFromEnvironment - tr.Dial = (&net.Dialer{Timeout: timeout}).Dial - } - - return &DockerCli{ - proto: proto, - addr: addr, - in: in, - out: out, - err: err, - keyFile: keyFile, - inFd: inFd, - outFd: outFd, - isTerminalIn: isTerminalIn, - isTerminalOut: isTerminalOut, - tlsConfig: tlsConfig, - scheme: scheme, - transport: tr, - } -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/commands.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/commands.go deleted file mode 100644 index 5689dfd2..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/client/commands.go +++ /dev/null @@ -1,2937 +0,0 @@ -package client - -import ( - "bufio" - "bytes" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "os" - "os/exec" - "path" - "path/filepath" - "runtime" - "sort" - "strconv" - "strings" - "sync" - "text/tabwriter" - "text/template" - "time" - - log "github.com/Sirupsen/logrus" - "github.com/docker/docker/api" - "github.com/docker/docker/api/types" - "github.com/docker/docker/autogen/dockerversion" - "github.com/docker/docker/engine" - "github.com/docker/docker/graph" - "github.com/docker/docker/nat" - "github.com/docker/docker/opts" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/common" - "github.com/docker/docker/pkg/fileutils" - "github.com/docker/docker/pkg/homedir" - flag "github.com/docker/docker/pkg/mflag" - "github.com/docker/docker/pkg/parsers" - "github.com/docker/docker/pkg/parsers/filters" - "github.com/docker/docker/pkg/progressreader" - "github.com/docker/docker/pkg/promise" - "github.com/docker/docker/pkg/resolvconf" - "github.com/docker/docker/pkg/signal" - "github.com/docker/docker/pkg/symlink" - "github.com/docker/docker/pkg/term" - "github.com/docker/docker/pkg/timeutils" - "github.com/docker/docker/pkg/units" - "github.com/docker/docker/pkg/urlutil" - "github.com/docker/docker/registry" - "github.com/docker/docker/runconfig" - "github.com/docker/docker/utils" -) - -const ( - tarHeaderSize = 512 -) - -func (cli *DockerCli) CmdHelp(args ...string) error { - if len(args) > 1 { - method, exists := cli.getMethod(args[:2]...) - if exists { - method("--help") - return nil - } - } - if len(args) > 0 { - method, exists := cli.getMethod(args[0]) - if !exists { - fmt.Fprintf(cli.err, "docker: '%s' is not a docker command. 
See 'docker --help'.\n", args[0]) - os.Exit(1) - } else { - method("--help") - return nil - } - } - - flag.Usage() - - return nil -} - -func (cli *DockerCli) CmdBuild(args ...string) error { - cmd := cli.Subcmd("build", "PATH | URL | -", "Build a new image from the source code at PATH", true) - tag := cmd.String([]string{"t", "-tag"}, "", "Repository name (and optionally a tag) for the image") - suppressOutput := cmd.Bool([]string{"q", "-quiet"}, false, "Suppress the verbose output generated by the containers") - noCache := cmd.Bool([]string{"#no-cache", "-no-cache"}, false, "Do not use cache when building the image") - rm := cmd.Bool([]string{"#rm", "-rm"}, true, "Remove intermediate containers after a successful build") - forceRm := cmd.Bool([]string{"-force-rm"}, false, "Always remove intermediate containers") - pull := cmd.Bool([]string{"-pull"}, false, "Always attempt to pull a newer version of the image") - dockerfileName := cmd.String([]string{"f", "-file"}, "", "Name of the Dockerfile (Default is 'PATH/Dockerfile')") - flMemoryString := cmd.String([]string{"m", "-memory"}, "", "Memory limit") - flMemorySwap := cmd.String([]string{"-memory-swap"}, "", "Total memory (memory + swap), '-1' to disable swap") - flCpuShares := cmd.Int64([]string{"c", "-cpu-shares"}, 0, "CPU shares (relative weight)") - flCpuSetCpus := cmd.String([]string{"-cpuset-cpus"}, "", "CPUs in which to allow execution (0-3, 0,1)") - - cmd.Require(flag.Exact, 1) - - utils.ParseFlags(cmd, args, true) - - var ( - context archive.Archive - isRemote bool - err error - ) - - _, err = exec.LookPath("git") - hasGit := err == nil - if cmd.Arg(0) == "-" { - // As a special case, 'docker build -' will build from either an empty context with the - // contents of stdin as a Dockerfile, or a tar-ed context from stdin. 
- buf := bufio.NewReader(cli.in) - magic, err := buf.Peek(tarHeaderSize) - if err != nil && err != io.EOF { - return fmt.Errorf("failed to peek context header from STDIN: %v", err) - } - if !archive.IsArchive(magic) { - dockerfile, err := ioutil.ReadAll(buf) - if err != nil { - return fmt.Errorf("failed to read Dockerfile from STDIN: %v", err) - } - - // -f option has no meaning when we're reading it from stdin, - // so just use our default Dockerfile name - *dockerfileName = api.DefaultDockerfileName - context, err = archive.Generate(*dockerfileName, string(dockerfile)) - } else { - context = ioutil.NopCloser(buf) - } - } else if urlutil.IsURL(cmd.Arg(0)) && (!urlutil.IsGitURL(cmd.Arg(0)) || !hasGit) { - isRemote = true - } else { - root := cmd.Arg(0) - if urlutil.IsGitURL(root) { - remoteURL := cmd.Arg(0) - if !urlutil.IsGitTransport(remoteURL) { - remoteURL = "https://" + remoteURL - } - - root, err = ioutil.TempDir("", "docker-build-git") - if err != nil { - return err - } - defer os.RemoveAll(root) - - if output, err := exec.Command("git", "clone", "--recursive", remoteURL, root).CombinedOutput(); err != nil { - return fmt.Errorf("Error trying to use git: %s (%s)", err, output) - } - } - if _, err := os.Stat(root); err != nil { - return err - } - - absRoot, err := filepath.Abs(root) - if err != nil { - return err - } - - filename := *dockerfileName // path to Dockerfile - - if *dockerfileName == "" { - // No -f/--file was specified so use the default - *dockerfileName = api.DefaultDockerfileName - filename = filepath.Join(absRoot, *dockerfileName) - - // Just to be nice ;-) look for 'dockerfile' too but only - // use it if we found it, otherwise ignore this check - if _, err = os.Lstat(filename); os.IsNotExist(err) { - tmpFN := path.Join(absRoot, strings.ToLower(*dockerfileName)) - if _, err = os.Lstat(tmpFN); err == nil { - *dockerfileName = strings.ToLower(*dockerfileName) - filename = tmpFN - } - } - } - - origDockerfile := *dockerfileName // used for error msg - if filename, err = filepath.Abs(filename); err != nil { - return err - } - - // Verify that 'filename' is within the build context - filename, err = symlink.FollowSymlinkInScope(filename, absRoot) - if err != nil { - return fmt.Errorf("The Dockerfile (%s) must be within the build context (%s)", origDockerfile, root) - } - - // Now reset the dockerfileName to be relative to the build context - *dockerfileName, err = filepath.Rel(absRoot, filename) - if err != nil { - return err - } - // And canonicalize dockerfile name to a platform-independent one - *dockerfileName, err = archive.CanonicalTarNameForPath(*dockerfileName) - if err != nil { - return fmt.Errorf("Cannot canonicalize dockerfile path %s: %v", dockerfileName, err) - } - - if _, err = os.Lstat(filename); os.IsNotExist(err) { - return fmt.Errorf("Cannot locate Dockerfile: %s", origDockerfile) - } - var includes = []string{"."} - - excludes, err := utils.ReadDockerIgnore(path.Join(root, ".dockerignore")) - if err != nil { - return err - } - - // If .dockerignore mentions .dockerignore or the Dockerfile - // then make sure we send both files over to the daemon - // because Dockerfile is, obviously, needed no matter what, and - // .dockerignore is needed to know if either one needs to be - // removed. The deamon will remove them for us, if needed, after it - // parses the Dockerfile. 
- keepThem1, _ := fileutils.Matches(".dockerignore", excludes) - keepThem2, _ := fileutils.Matches(*dockerfileName, excludes) - if keepThem1 || keepThem2 { - includes = append(includes, ".dockerignore", *dockerfileName) - } - - if err = utils.ValidateContextDirectory(root, excludes); err != nil { - return fmt.Errorf("Error checking context is accessible: '%s'. Please check permissions and try again.", err) - } - options := &archive.TarOptions{ - Compression: archive.Uncompressed, - ExcludePatterns: excludes, - IncludeFiles: includes, - } - context, err = archive.TarWithOptions(root, options) - if err != nil { - return err - } - } - - // windows: show error message about modified file permissions - // FIXME: this is not a valid warning when the daemon is running windows. should be removed once docker engine for windows can build. - if runtime.GOOS == "windows" { - log.Warn(`SECURITY WARNING: You are building a Docker image from Windows against a Linux Docker host. All files and directories added to build context will have '-rwxr-xr-x' permissions. It is recommended to double check and reset permissions for sensitive files and directories.`) - } - - var body io.Reader - // Setup an upload progress bar - // FIXME: ProgressReader shouldn't be this annoying to use - if context != nil { - sf := utils.NewStreamFormatter(false) - body = progressreader.New(progressreader.Config{ - In: context, - Out: cli.out, - Formatter: sf, - NewLines: true, - ID: "", - Action: "Sending build context to Docker daemon", - }) - } - - var memory int64 - if *flMemoryString != "" { - parsedMemory, err := units.RAMInBytes(*flMemoryString) - if err != nil { - return err - } - memory = parsedMemory - } - - var memorySwap int64 - if *flMemorySwap != "" { - if *flMemorySwap == "-1" { - memorySwap = -1 - } else { - parsedMemorySwap, err := units.RAMInBytes(*flMemorySwap) - if err != nil { - return err - } - memorySwap = parsedMemorySwap - } - } - // Send the build context - v := &url.Values{} - - //Check if the given image name can be resolved - if *tag != "" { - repository, tag := parsers.ParseRepositoryTag(*tag) - if err := registry.ValidateRepositoryName(repository); err != nil { - return err - } - if len(tag) > 0 { - if err := graph.ValidateTagName(tag); err != nil { - return err - } - } - } - - v.Set("t", *tag) - - if *suppressOutput { - v.Set("q", "1") - } - if isRemote { - v.Set("remote", cmd.Arg(0)) - } - if *noCache { - v.Set("nocache", "1") - } - if *rm { - v.Set("rm", "1") - } else { - v.Set("rm", "0") - } - - if *forceRm { - v.Set("forcerm", "1") - } - - if *pull { - v.Set("pull", "1") - } - - v.Set("cpusetcpus", *flCpuSetCpus) - v.Set("cpushares", strconv.FormatInt(*flCpuShares, 10)) - v.Set("memory", strconv.FormatInt(memory, 10)) - v.Set("memswap", strconv.FormatInt(memorySwap, 10)) - - v.Set("dockerfile", *dockerfileName) - - cli.LoadConfigFile() - - headers := http.Header(make(map[string][]string)) - buf, err := json.Marshal(cli.configFile) - if err != nil { - return err - } - headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf)) - - if context != nil { - headers.Set("Content-Type", "application/tar") - } - err = cli.stream("POST", fmt.Sprintf("/build?%s", v.Encode()), body, cli.out, headers) - if jerr, ok := err.(*utils.JSONError); ok { - // If no error code is set, default to 1 - if jerr.Code == 0 { - jerr.Code = 1 - } - return &utils.StatusError{Status: jerr.Message, StatusCode: jerr.Code} - } - return err -} - -// 'docker login': login / register a user to registry service. 
-func (cli *DockerCli) CmdLogin(args ...string) error { - cmd := cli.Subcmd("login", "[SERVER]", "Register or log in to a Docker registry server, if no server is\nspecified \""+registry.IndexServerAddress()+"\" is the default.", true) - cmd.Require(flag.Max, 1) - - var username, password, email string - - cmd.StringVar(&username, []string{"u", "-username"}, "", "Username") - cmd.StringVar(&password, []string{"p", "-password"}, "", "Password") - cmd.StringVar(&email, []string{"e", "-email"}, "", "Email") - - utils.ParseFlags(cmd, args, true) - - serverAddress := registry.IndexServerAddress() - if len(cmd.Args()) > 0 { - serverAddress = cmd.Arg(0) - } - - promptDefault := func(prompt string, configDefault string) { - if configDefault == "" { - fmt.Fprintf(cli.out, "%s: ", prompt) - } else { - fmt.Fprintf(cli.out, "%s (%s): ", prompt, configDefault) - } - } - - readInput := func(in io.Reader, out io.Writer) string { - reader := bufio.NewReader(in) - line, _, err := reader.ReadLine() - if err != nil { - fmt.Fprintln(out, err.Error()) - os.Exit(1) - } - return string(line) - } - - cli.LoadConfigFile() - authconfig, ok := cli.configFile.Configs[serverAddress] - if !ok { - authconfig = registry.AuthConfig{} - } - - if username == "" { - promptDefault("Username", authconfig.Username) - username = readInput(cli.in, cli.out) - username = strings.Trim(username, " ") - if username == "" { - username = authconfig.Username - } - } - // Assume that a different username means they may not want to use - // the password or email from the config file, so prompt them - if username != authconfig.Username { - if password == "" { - oldState, err := term.SaveState(cli.inFd) - if err != nil { - return err - } - fmt.Fprintf(cli.out, "Password: ") - term.DisableEcho(cli.inFd, oldState) - - password = readInput(cli.in, cli.out) - fmt.Fprint(cli.out, "\n") - - term.RestoreTerminal(cli.inFd, oldState) - if password == "" { - return fmt.Errorf("Error : Password Required") - } - } - - if email == "" { - promptDefault("Email", authconfig.Email) - email = readInput(cli.in, cli.out) - if email == "" { - email = authconfig.Email - } - } - } else { - // However, if they don't override the username use the - // password or email from the cmd line if specified. IOW, allow - // then to change/override them. 
And if not specified, just - // use what's in the config file - if password == "" { - password = authconfig.Password - } - if email == "" { - email = authconfig.Email - } - } - authconfig.Username = username - authconfig.Password = password - authconfig.Email = email - authconfig.ServerAddress = serverAddress - cli.configFile.Configs[serverAddress] = authconfig - - stream, statusCode, err := cli.call("POST", "/auth", cli.configFile.Configs[serverAddress], false) - if statusCode == 401 { - delete(cli.configFile.Configs, serverAddress) - registry.SaveConfig(cli.configFile) - return err - } - if err != nil { - return err - } - var out2 engine.Env - err = out2.Decode(stream) - if err != nil { - cli.configFile, _ = registry.LoadConfig(homedir.Get()) - return err - } - registry.SaveConfig(cli.configFile) - fmt.Fprintf(cli.out, "WARNING: login credentials saved in %s.\n", path.Join(homedir.Get(), registry.CONFIGFILE)) - - if out2.Get("Status") != "" { - fmt.Fprintf(cli.out, "%s\n", out2.Get("Status")) - } - return nil -} - -// log out from a Docker registry -func (cli *DockerCli) CmdLogout(args ...string) error { - cmd := cli.Subcmd("logout", "[SERVER]", "Log out from a Docker registry, if no server is\nspecified \""+registry.IndexServerAddress()+"\" is the default.", true) - cmd.Require(flag.Max, 1) - - utils.ParseFlags(cmd, args, false) - serverAddress := registry.IndexServerAddress() - if len(cmd.Args()) > 0 { - serverAddress = cmd.Arg(0) - } - - cli.LoadConfigFile() - if _, ok := cli.configFile.Configs[serverAddress]; !ok { - fmt.Fprintf(cli.out, "Not logged in to %s\n", serverAddress) - } else { - fmt.Fprintf(cli.out, "Remove login credentials for %s\n", serverAddress) - delete(cli.configFile.Configs, serverAddress) - - if err := registry.SaveConfig(cli.configFile); err != nil { - return fmt.Errorf("Failed to save docker config: %v", err) - } - } - return nil -} - -// 'docker wait': block until a container stops -func (cli *DockerCli) CmdWait(args ...string) error { - cmd := cli.Subcmd("wait", "CONTAINER [CONTAINER...]", "Block until a container stops, then print its exit code.", true) - cmd.Require(flag.Min, 1) - - utils.ParseFlags(cmd, args, true) - - var encounteredError error - for _, name := range cmd.Args() { - status, err := waitForExit(cli, name) - if err != nil { - fmt.Fprintf(cli.err, "%s\n", err) - encounteredError = fmt.Errorf("Error: failed to wait one or more containers") - } else { - fmt.Fprintf(cli.out, "%d\n", status) - } - } - return encounteredError -} - -// 'docker version': show version information -func (cli *DockerCli) CmdVersion(args ...string) error { - cmd := cli.Subcmd("version", "", "Show the Docker version information.", true) - cmd.Require(flag.Exact, 0) - - utils.ParseFlags(cmd, args, false) - - if dockerversion.VERSION != "" { - fmt.Fprintf(cli.out, "Client version: %s\n", dockerversion.VERSION) - } - fmt.Fprintf(cli.out, "Client API version: %s\n", api.APIVERSION) - fmt.Fprintf(cli.out, "Go version (client): %s\n", runtime.Version()) - if dockerversion.GITCOMMIT != "" { - fmt.Fprintf(cli.out, "Git commit (client): %s\n", dockerversion.GITCOMMIT) - } - fmt.Fprintf(cli.out, "OS/Arch (client): %s/%s\n", runtime.GOOS, runtime.GOARCH) - - body, _, err := readBody(cli.call("GET", "/version", nil, false)) - if err != nil { - return err - } - - out := engine.NewOutput() - remoteVersion, err := out.AddEnv() - if err != nil { - log.Errorf("Error reading remote version: %s", err) - return err - } - if _, err := out.Write(body); err != nil { - log.Errorf("Error reading 
remote version: %s", err) - return err - } - out.Close() - fmt.Fprintf(cli.out, "Server version: %s\n", remoteVersion.Get("Version")) - if apiVersion := remoteVersion.Get("ApiVersion"); apiVersion != "" { - fmt.Fprintf(cli.out, "Server API version: %s\n", apiVersion) - } - fmt.Fprintf(cli.out, "Go version (server): %s\n", remoteVersion.Get("GoVersion")) - fmt.Fprintf(cli.out, "Git commit (server): %s\n", remoteVersion.Get("GitCommit")) - fmt.Fprintf(cli.out, "OS/Arch (server): %s/%s\n", remoteVersion.Get("Os"), remoteVersion.Get("Arch")) - return nil -} - -// 'docker info': display system-wide information. -func (cli *DockerCli) CmdInfo(args ...string) error { - cmd := cli.Subcmd("info", "", "Display system-wide information", true) - cmd.Require(flag.Exact, 0) - utils.ParseFlags(cmd, args, false) - - body, _, err := readBody(cli.call("GET", "/info", nil, false)) - if err != nil { - return err - } - - out := engine.NewOutput() - remoteInfo, err := out.AddEnv() - if err != nil { - return err - } - - if _, err := out.Write(body); err != nil { - log.Errorf("Error reading remote info: %s", err) - return err - } - out.Close() - - if remoteInfo.Exists("Containers") { - fmt.Fprintf(cli.out, "Containers: %d\n", remoteInfo.GetInt("Containers")) - } - if remoteInfo.Exists("Images") { - fmt.Fprintf(cli.out, "Images: %d\n", remoteInfo.GetInt("Images")) - } - if remoteInfo.Exists("Driver") { - fmt.Fprintf(cli.out, "Storage Driver: %s\n", remoteInfo.Get("Driver")) - } - if remoteInfo.Exists("DriverStatus") { - var driverStatus [][2]string - if err := remoteInfo.GetJson("DriverStatus", &driverStatus); err != nil { - return err - } - for _, pair := range driverStatus { - fmt.Fprintf(cli.out, " %s: %s\n", pair[0], pair[1]) - } - } - if remoteInfo.Exists("ExecutionDriver") { - fmt.Fprintf(cli.out, "Execution Driver: %s\n", remoteInfo.Get("ExecutionDriver")) - } - if remoteInfo.Exists("KernelVersion") { - fmt.Fprintf(cli.out, "Kernel Version: %s\n", remoteInfo.Get("KernelVersion")) - } - if remoteInfo.Exists("OperatingSystem") { - fmt.Fprintf(cli.out, "Operating System: %s\n", remoteInfo.Get("OperatingSystem")) - } - if remoteInfo.Exists("NCPU") { - fmt.Fprintf(cli.out, "CPUs: %d\n", remoteInfo.GetInt("NCPU")) - } - if remoteInfo.Exists("MemTotal") { - fmt.Fprintf(cli.out, "Total Memory: %s\n", units.BytesSize(float64(remoteInfo.GetInt64("MemTotal")))) - } - if remoteInfo.Exists("Name") { - fmt.Fprintf(cli.out, "Name: %s\n", remoteInfo.Get("Name")) - } - if remoteInfo.Exists("ID") { - fmt.Fprintf(cli.out, "ID: %s\n", remoteInfo.Get("ID")) - } - - if remoteInfo.GetBool("Debug") || os.Getenv("DEBUG") != "" { - if remoteInfo.Exists("Debug") { - fmt.Fprintf(cli.out, "Debug mode (server): %v\n", remoteInfo.GetBool("Debug")) - } - fmt.Fprintf(cli.out, "Debug mode (client): %v\n", os.Getenv("DEBUG") != "") - if remoteInfo.Exists("NFd") { - fmt.Fprintf(cli.out, "Fds: %d\n", remoteInfo.GetInt("NFd")) - } - if remoteInfo.Exists("NGoroutines") { - fmt.Fprintf(cli.out, "Goroutines: %d\n", remoteInfo.GetInt("NGoroutines")) - } - if remoteInfo.Exists("SystemTime") { - t, err := remoteInfo.GetTime("SystemTime") - if err != nil { - log.Errorf("Error reading system time: %v", err) - } else { - fmt.Fprintf(cli.out, "System Time: %s\n", t.Format(time.UnixDate)) - } - } - if remoteInfo.Exists("NEventsListener") { - fmt.Fprintf(cli.out, "EventsListeners: %d\n", remoteInfo.GetInt("NEventsListener")) - } - if initSha1 := remoteInfo.Get("InitSha1"); initSha1 != "" { - fmt.Fprintf(cli.out, "Init SHA1: %s\n", initSha1) - } - if 
initPath := remoteInfo.Get("InitPath"); initPath != "" { - fmt.Fprintf(cli.out, "Init Path: %s\n", initPath) - } - if root := remoteInfo.Get("DockerRootDir"); root != "" { - fmt.Fprintf(cli.out, "Docker Root Dir: %s\n", root) - } - } - if remoteInfo.Exists("HttpProxy") { - fmt.Fprintf(cli.out, "Http Proxy: %s\n", remoteInfo.Get("HttpProxy")) - } - if remoteInfo.Exists("HttpsProxy") { - fmt.Fprintf(cli.out, "Https Proxy: %s\n", remoteInfo.Get("HttpsProxy")) - } - if remoteInfo.Exists("NoProxy") { - fmt.Fprintf(cli.out, "No Proxy: %s\n", remoteInfo.Get("NoProxy")) - } - if len(remoteInfo.GetList("IndexServerAddress")) != 0 { - cli.LoadConfigFile() - u := cli.configFile.Configs[remoteInfo.Get("IndexServerAddress")].Username - if len(u) > 0 { - fmt.Fprintf(cli.out, "Username: %v\n", u) - fmt.Fprintf(cli.out, "Registry: %v\n", remoteInfo.GetList("IndexServerAddress")) - } - } - if remoteInfo.Exists("MemoryLimit") && !remoteInfo.GetBool("MemoryLimit") { - fmt.Fprintf(cli.err, "WARNING: No memory limit support\n") - } - if remoteInfo.Exists("SwapLimit") && !remoteInfo.GetBool("SwapLimit") { - fmt.Fprintf(cli.err, "WARNING: No swap limit support\n") - } - if remoteInfo.Exists("IPv4Forwarding") && !remoteInfo.GetBool("IPv4Forwarding") { - fmt.Fprintf(cli.err, "WARNING: IPv4 forwarding is disabled.\n") - } - if remoteInfo.Exists("Labels") { - fmt.Fprintln(cli.out, "Labels:") - for _, attribute := range remoteInfo.GetList("Labels") { - fmt.Fprintf(cli.out, " %s\n", attribute) - } - } - - return nil -} - -func (cli *DockerCli) CmdStop(args ...string) error { - cmd := cli.Subcmd("stop", "CONTAINER [CONTAINER...]", "Stop a running container by sending SIGTERM and then SIGKILL after a\ngrace period", true) - nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Seconds to wait for stop before killing it") - cmd.Require(flag.Min, 1) - - utils.ParseFlags(cmd, args, true) - - v := url.Values{} - v.Set("t", strconv.Itoa(*nSeconds)) - - var encounteredError error - for _, name := range cmd.Args() { - _, _, err := readBody(cli.call("POST", "/containers/"+name+"/stop?"+v.Encode(), nil, false)) - if err != nil { - fmt.Fprintf(cli.err, "%s\n", err) - encounteredError = fmt.Errorf("Error: failed to stop one or more containers") - } else { - fmt.Fprintf(cli.out, "%s\n", name) - } - } - return encounteredError -} - -func (cli *DockerCli) CmdRestart(args ...string) error { - cmd := cli.Subcmd("restart", "CONTAINER [CONTAINER...]", "Restart a running container", true) - nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Seconds to wait for stop before killing the container") - cmd.Require(flag.Min, 1) - - utils.ParseFlags(cmd, args, true) - - v := url.Values{} - v.Set("t", strconv.Itoa(*nSeconds)) - - var encounteredError error - for _, name := range cmd.Args() { - _, _, err := readBody(cli.call("POST", "/containers/"+name+"/restart?"+v.Encode(), nil, false)) - if err != nil { - fmt.Fprintf(cli.err, "%s\n", err) - encounteredError = fmt.Errorf("Error: failed to restart one or more containers") - } else { - fmt.Fprintf(cli.out, "%s\n", name) - } - } - return encounteredError -} - -func (cli *DockerCli) forwardAllSignals(cid string) chan os.Signal { - sigc := make(chan os.Signal, 128) - signal.CatchAll(sigc) - go func() { - for s := range sigc { - if s == signal.SIGCHLD { - continue - } - var sig string - for sigStr, sigN := range signal.SignalMap { - if sigN == s { - sig = sigStr - break - } - } - if sig == "" { - log.Errorf("Unsupported signal: %v. 
Discarding.", s) - } - if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/kill?signal=%s", cid, sig), nil, false)); err != nil { - log.Debugf("Error sending signal: %s", err) - } - } - }() - return sigc -} - -func (cli *DockerCli) CmdStart(args ...string) error { - var ( - cErr chan error - tty bool - - cmd = cli.Subcmd("start", "CONTAINER [CONTAINER...]", "Start one or more stopped containers", true) - attach = cmd.Bool([]string{"a", "-attach"}, false, "Attach STDOUT/STDERR and forward signals") - openStdin = cmd.Bool([]string{"i", "-interactive"}, false, "Attach container's STDIN") - ) - - cmd.Require(flag.Min, 1) - utils.ParseFlags(cmd, args, true) - - if *attach || *openStdin { - if cmd.NArg() > 1 { - return fmt.Errorf("You cannot start and attach multiple containers at once.") - } - - stream, _, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil, false) - if err != nil { - return err - } - - env := engine.Env{} - if err := env.Decode(stream); err != nil { - return err - } - config := env.GetSubEnv("Config") - tty = config.GetBool("Tty") - - if !tty { - sigc := cli.forwardAllSignals(cmd.Arg(0)) - defer signal.StopCatch(sigc) - } - - var in io.ReadCloser - - v := url.Values{} - v.Set("stream", "1") - - if *openStdin && config.GetBool("OpenStdin") { - v.Set("stdin", "1") - in = cli.in - } - - v.Set("stdout", "1") - v.Set("stderr", "1") - - hijacked := make(chan io.Closer) - // Block the return until the chan gets closed - defer func() { - log.Debugf("CmdStart() returned, defer waiting for hijack to finish.") - if _, ok := <-hijacked; ok { - log.Errorf("Hijack did not finish (chan still open)") - } - cli.in.Close() - }() - cErr = promise.Go(func() error { - return cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), tty, in, cli.out, cli.err, hijacked, nil) - }) - - // Acknowledge the hijack before starting - select { - case closer := <-hijacked: - // Make sure that the hijack gets closed when returning (results - // in closing the hijack chan and freeing server's goroutines) - if closer != nil { - defer closer.Close() - } - case err := <-cErr: - if err != nil { - return err - } - } - } - - var encounteredError error - for _, name := range cmd.Args() { - _, _, err := readBody(cli.call("POST", "/containers/"+name+"/start", nil, false)) - if err != nil { - if !*attach && !*openStdin { - // attach and openStdin is false means it could be starting multiple containers - // when a container start failed, show the error message and start next - fmt.Fprintf(cli.err, "%s\n", err) - encounteredError = fmt.Errorf("Error: failed to start one or more containers") - } else { - encounteredError = err - } - } else { - if !*attach && !*openStdin { - fmt.Fprintf(cli.out, "%s\n", name) - } - } - } - - if encounteredError != nil { - return encounteredError - } - - if *openStdin || *attach { - if tty && cli.isTerminalOut { - if err := cli.monitorTtySize(cmd.Arg(0), false); err != nil { - log.Errorf("Error monitoring TTY size: %s", err) - } - } - if attchErr := <-cErr; attchErr != nil { - return attchErr - } - _, status, err := getExitCode(cli, cmd.Arg(0)) - if err != nil { - return err - } - if status != 0 { - return &utils.StatusError{StatusCode: status} - } - } - return nil -} - -func (cli *DockerCli) CmdUnpause(args ...string) error { - cmd := cli.Subcmd("unpause", "CONTAINER [CONTAINER...]", "Unpause all processes within a container", true) - cmd.Require(flag.Min, 1) - utils.ParseFlags(cmd, args, false) - - var encounteredError error - for _, name := range 
cmd.Args() { - if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/unpause", name), nil, false)); err != nil { - fmt.Fprintf(cli.err, "%s\n", err) - encounteredError = fmt.Errorf("Error: failed to unpause container named %s", name) - } else { - fmt.Fprintf(cli.out, "%s\n", name) - } - } - return encounteredError -} - -func (cli *DockerCli) CmdPause(args ...string) error { - cmd := cli.Subcmd("pause", "CONTAINER [CONTAINER...]", "Pause all processes within a container", true) - cmd.Require(flag.Min, 1) - utils.ParseFlags(cmd, args, false) - - var encounteredError error - for _, name := range cmd.Args() { - if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/pause", name), nil, false)); err != nil { - fmt.Fprintf(cli.err, "%s\n", err) - encounteredError = fmt.Errorf("Error: failed to pause container named %s", name) - } else { - fmt.Fprintf(cli.out, "%s\n", name) - } - } - return encounteredError -} - -func (cli *DockerCli) CmdRename(args ...string) error { - cmd := cli.Subcmd("rename", "OLD_NAME NEW_NAME", "Rename a container", true) - if err := cmd.Parse(args); err != nil { - return nil - } - - if cmd.NArg() != 2 { - cmd.Usage() - return nil - } - old_name := cmd.Arg(0) - new_name := cmd.Arg(1) - - if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/rename?name=%s", old_name, new_name), nil, false)); err != nil { - fmt.Fprintf(cli.err, "%s\n", err) - return fmt.Errorf("Error: failed to rename container named %s", old_name) - } - return nil -} - -func (cli *DockerCli) CmdInspect(args ...string) error { - cmd := cli.Subcmd("inspect", "CONTAINER|IMAGE [CONTAINER|IMAGE...]", "Return low-level information on a container or image", true) - tmplStr := cmd.String([]string{"f", "#format", "-format"}, "", "Format the output using the given go template") - cmd.Require(flag.Min, 1) - - utils.ParseFlags(cmd, args, true) - - var tmpl *template.Template - if *tmplStr != "" { - var err error - if tmpl, err = template.New("").Funcs(funcMap).Parse(*tmplStr); err != nil { - fmt.Fprintf(cli.err, "Template parsing error: %v\n", err) - return &utils.StatusError{StatusCode: 64, - Status: "Template parsing error: " + err.Error()} - } - } - - indented := new(bytes.Buffer) - indented.WriteByte('[') - status := 0 - - for _, name := range cmd.Args() { - obj, _, err := readBody(cli.call("GET", "/containers/"+name+"/json", nil, false)) - if err != nil { - if strings.Contains(err.Error(), "Too many") { - fmt.Fprintf(cli.err, "Error: %v", err) - status = 1 - continue - } - - obj, _, err = readBody(cli.call("GET", "/images/"+name+"/json", nil, false)) - if err != nil { - if strings.Contains(err.Error(), "No such") { - fmt.Fprintf(cli.err, "Error: No such image or container: %s\n", name) - } else { - fmt.Fprintf(cli.err, "%s", err) - } - status = 1 - continue - } - } - - if tmpl == nil { - if err = json.Indent(indented, obj, "", " "); err != nil { - fmt.Fprintf(cli.err, "%s\n", err) - status = 1 - continue - } - } else { - // Has template, will render - var value interface{} - if err := json.Unmarshal(obj, &value); err != nil { - fmt.Fprintf(cli.err, "%s\n", err) - status = 1 - continue - } - if err := tmpl.Execute(cli.out, value); err != nil { - return err - } - cli.out.Write([]byte{'\n'}) - } - indented.WriteString(",") - } - - if indented.Len() > 1 { - // Remove trailing ',' - indented.Truncate(indented.Len() - 1) - } - indented.WriteString("]\n") - - if tmpl == nil { - if _, err := io.Copy(cli.out, indented); err != nil { - return err - } - } - - if status != 0 { - 
return &utils.StatusError{StatusCode: status} - } - return nil -} - -func (cli *DockerCli) CmdTop(args ...string) error { - cmd := cli.Subcmd("top", "CONTAINER [ps OPTIONS]", "Display the running processes of a container", true) - cmd.Require(flag.Min, 1) - - utils.ParseFlags(cmd, args, true) - - val := url.Values{} - if cmd.NArg() > 1 { - val.Set("ps_args", strings.Join(cmd.Args()[1:], " ")) - } - - stream, _, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/top?"+val.Encode(), nil, false) - if err != nil { - return err - } - var procs engine.Env - if err := procs.Decode(stream); err != nil { - return err - } - w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) - fmt.Fprintln(w, strings.Join(procs.GetList("Titles"), "\t")) - processes := [][]string{} - if err := procs.GetJson("Processes", &processes); err != nil { - return err - } - for _, proc := range processes { - fmt.Fprintln(w, strings.Join(proc, "\t")) - } - w.Flush() - return nil -} - -func (cli *DockerCli) CmdPort(args ...string) error { - cmd := cli.Subcmd("port", "CONTAINER [PRIVATE_PORT[/PROTO]]", "List port mappings for the CONTAINER, or lookup the public-facing port that\nis NAT-ed to the PRIVATE_PORT", true) - cmd.Require(flag.Min, 1) - utils.ParseFlags(cmd, args, true) - - stream, _, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil, false) - if err != nil { - return err - } - - env := engine.Env{} - if err := env.Decode(stream); err != nil { - return err - } - ports := nat.PortMap{} - if err := env.GetSubEnv("NetworkSettings").GetJson("Ports", &ports); err != nil { - return err - } - - if cmd.NArg() == 2 { - var ( - port = cmd.Arg(1) - proto = "tcp" - parts = strings.SplitN(port, "/", 2) - ) - - if len(parts) == 2 && len(parts[1]) != 0 { - port = parts[0] - proto = parts[1] - } - natPort := port + "/" + proto - if frontends, exists := ports[nat.Port(port+"/"+proto)]; exists && frontends != nil { - for _, frontend := range frontends { - fmt.Fprintf(cli.out, "%s:%s\n", frontend.HostIp, frontend.HostPort) - } - return nil - } - return fmt.Errorf("Error: No public port '%s' published for %s", natPort, cmd.Arg(0)) - } - - for from, frontends := range ports { - for _, frontend := range frontends { - fmt.Fprintf(cli.out, "%s -> %s:%s\n", from, frontend.HostIp, frontend.HostPort) - } - } - - return nil -} - -// 'docker rmi IMAGE' removes all images with the name IMAGE -func (cli *DockerCli) CmdRmi(args ...string) error { - var ( - cmd = cli.Subcmd("rmi", "IMAGE [IMAGE...]", "Remove one or more images", true) - force = cmd.Bool([]string{"f", "-force"}, false, "Force removal of the image") - noprune = cmd.Bool([]string{"-no-prune"}, false, "Do not delete untagged parents") - ) - cmd.Require(flag.Min, 1) - - utils.ParseFlags(cmd, args, true) - - v := url.Values{} - if *force { - v.Set("force", "1") - } - if *noprune { - v.Set("noprune", "1") - } - - var encounteredError error - for _, name := range cmd.Args() { - body, _, err := readBody(cli.call("DELETE", "/images/"+name+"?"+v.Encode(), nil, false)) - if err != nil { - fmt.Fprintf(cli.err, "%s\n", err) - encounteredError = fmt.Errorf("Error: failed to remove one or more images") - } else { - outs := engine.NewTable("Created", 0) - if _, err := outs.ReadListFrom(body); err != nil { - fmt.Fprintf(cli.err, "%s\n", err) - encounteredError = fmt.Errorf("Error: failed to remove one or more images") - continue - } - for _, out := range outs.Data { - if out.Get("Deleted") != "" { - fmt.Fprintf(cli.out, "Deleted: %s\n", out.Get("Deleted")) - } else { - fmt.Fprintf(cli.out, 
"Untagged: %s\n", out.Get("Untagged")) - } - } - } - } - return encounteredError -} - -func (cli *DockerCli) CmdHistory(args ...string) error { - cmd := cli.Subcmd("history", "IMAGE", "Show the history of an image", true) - quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only show numeric IDs") - noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output") - cmd.Require(flag.Exact, 1) - - utils.ParseFlags(cmd, args, true) - - body, _, err := readBody(cli.call("GET", "/images/"+cmd.Arg(0)+"/history", nil, false)) - if err != nil { - return err - } - - outs := engine.NewTable("Created", 0) - if _, err := outs.ReadListFrom(body); err != nil { - return err - } - - w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) - if !*quiet { - fmt.Fprintln(w, "IMAGE\tCREATED\tCREATED BY\tSIZE") - } - - for _, out := range outs.Data { - outID := out.Get("Id") - if !*quiet { - if *noTrunc { - fmt.Fprintf(w, "%s\t", outID) - } else { - fmt.Fprintf(w, "%s\t", common.TruncateID(outID)) - } - - fmt.Fprintf(w, "%s ago\t", units.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0)))) - - if *noTrunc { - fmt.Fprintf(w, "%s\t", out.Get("CreatedBy")) - } else { - fmt.Fprintf(w, "%s\t", utils.Trunc(out.Get("CreatedBy"), 45)) - } - fmt.Fprintf(w, "%s\n", units.HumanSize(float64(out.GetInt64("Size")))) - } else { - if *noTrunc { - fmt.Fprintln(w, outID) - } else { - fmt.Fprintln(w, common.TruncateID(outID)) - } - } - } - w.Flush() - return nil -} - -func (cli *DockerCli) CmdRm(args ...string) error { - cmd := cli.Subcmd("rm", "CONTAINER [CONTAINER...]", "Remove one or more containers", true) - v := cmd.Bool([]string{"v", "-volumes"}, false, "Remove the volumes associated with the container") - link := cmd.Bool([]string{"l", "#link", "-link"}, false, "Remove the specified link") - force := cmd.Bool([]string{"f", "-force"}, false, "Force the removal of a running container (uses SIGKILL)") - cmd.Require(flag.Min, 1) - - utils.ParseFlags(cmd, args, true) - - val := url.Values{} - if *v { - val.Set("v", "1") - } - if *link { - val.Set("link", "1") - } - - if *force { - val.Set("force", "1") - } - - var encounteredError error - for _, name := range cmd.Args() { - _, _, err := readBody(cli.call("DELETE", "/containers/"+name+"?"+val.Encode(), nil, false)) - if err != nil { - fmt.Fprintf(cli.err, "%s\n", err) - encounteredError = fmt.Errorf("Error: failed to remove one or more containers") - } else { - fmt.Fprintf(cli.out, "%s\n", name) - } - } - return encounteredError -} - -// 'docker kill NAME' kills a running container -func (cli *DockerCli) CmdKill(args ...string) error { - cmd := cli.Subcmd("kill", "CONTAINER [CONTAINER...]", "Kill a running container using SIGKILL or a specified signal", true) - signal := cmd.String([]string{"s", "-signal"}, "KILL", "Signal to send to the container") - cmd.Require(flag.Min, 1) - - utils.ParseFlags(cmd, args, true) - - var encounteredError error - for _, name := range cmd.Args() { - if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/kill?signal=%s", name, *signal), nil, false)); err != nil { - fmt.Fprintf(cli.err, "%s\n", err) - encounteredError = fmt.Errorf("Error: failed to kill one or more containers") - } else { - fmt.Fprintf(cli.out, "%s\n", name) - } - } - return encounteredError -} - -func (cli *DockerCli) CmdImport(args ...string) error { - cmd := cli.Subcmd("import", "URL|- [REPOSITORY[:TAG]]", "Create an empty filesystem image and import the contents of the\ntarball (.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz) 
into it, then\noptionally tag it.", true) - flChanges := opts.NewListOpts(nil) - cmd.Var(&flChanges, []string{"c", "-change"}, "Apply Dockerfile instruction to the created image") - cmd.Require(flag.Min, 1) - - utils.ParseFlags(cmd, args, true) - - var ( - v = url.Values{} - src = cmd.Arg(0) - repository = cmd.Arg(1) - ) - - v.Set("fromSrc", src) - v.Set("repo", repository) - for _, change := range flChanges.GetAll() { - v.Add("changes", change) - } - if cmd.NArg() == 3 { - fmt.Fprintf(cli.err, "[DEPRECATED] The format 'URL|- [REPOSITORY [TAG]]' has been deprecated. Please use URL|- [REPOSITORY[:TAG]]\n") - v.Set("tag", cmd.Arg(2)) - } - - if repository != "" { - //Check if the given image name can be resolved - repo, _ := parsers.ParseRepositoryTag(repository) - if err := registry.ValidateRepositoryName(repo); err != nil { - return err - } - } - - var in io.Reader - - if src == "-" { - in = cli.in - } - - return cli.stream("POST", "/images/create?"+v.Encode(), in, cli.out, nil) -} - -func (cli *DockerCli) CmdPush(args ...string) error { - cmd := cli.Subcmd("push", "NAME[:TAG]", "Push an image or a repository to the registry", true) - cmd.Require(flag.Exact, 1) - - utils.ParseFlags(cmd, args, true) - - name := cmd.Arg(0) - - cli.LoadConfigFile() - - remote, tag := parsers.ParseRepositoryTag(name) - - // Resolve the Repository name from fqn to RepositoryInfo - repoInfo, err := registry.ParseRepositoryInfo(remote) - if err != nil { - return err - } - // Resolve the Auth config relevant for this server - authConfig := cli.configFile.ResolveAuthConfig(repoInfo.Index) - // If we're not using a custom registry, we know the restrictions - // applied to repository names and can warn the user in advance. - // Custom repositories can have different rules, and we must also - // allow pushing by image ID. - if repoInfo.Official { - username := authConfig.Username - if username == "" { - username = "" - } - return fmt.Errorf("You cannot push a \"root\" repository. 
Please rename your repository to / (ex: %s/%s)", username, repoInfo.LocalName) - } - - v := url.Values{} - v.Set("tag", tag) - - push := func(authConfig registry.AuthConfig) error { - buf, err := json.Marshal(authConfig) - if err != nil { - return err - } - registryAuthHeader := []string{ - base64.URLEncoding.EncodeToString(buf), - } - - return cli.stream("POST", "/images/"+remote+"/push?"+v.Encode(), nil, cli.out, map[string][]string{ - "X-Registry-Auth": registryAuthHeader, - }) - } - - if err := push(authConfig); err != nil { - if strings.Contains(err.Error(), "Status 401") { - fmt.Fprintln(cli.out, "\nPlease login prior to push:") - if err := cli.CmdLogin(repoInfo.Index.GetAuthConfigKey()); err != nil { - return err - } - authConfig := cli.configFile.ResolveAuthConfig(repoInfo.Index) - return push(authConfig) - } - return err - } - return nil -} - -func (cli *DockerCli) CmdPull(args ...string) error { - cmd := cli.Subcmd("pull", "NAME[:TAG|@DIGEST]", "Pull an image or a repository from the registry", true) - allTags := cmd.Bool([]string{"a", "-all-tags"}, false, "Download all tagged images in the repository") - cmd.Require(flag.Exact, 1) - - utils.ParseFlags(cmd, args, true) - - var ( - v = url.Values{} - remote = cmd.Arg(0) - newRemote = remote - ) - taglessRemote, tag := parsers.ParseRepositoryTag(remote) - if tag == "" && !*allTags { - newRemote = utils.ImageReference(taglessRemote, graph.DEFAULTTAG) - } - if tag != "" && *allTags { - return fmt.Errorf("tag can't be used with --all-tags/-a") - } - - v.Set("fromImage", newRemote) - - // Resolve the Repository name from fqn to RepositoryInfo - repoInfo, err := registry.ParseRepositoryInfo(taglessRemote) - if err != nil { - return err - } - - cli.LoadConfigFile() - - // Resolve the Auth config relevant for this server - authConfig := cli.configFile.ResolveAuthConfig(repoInfo.Index) - - pull := func(authConfig registry.AuthConfig) error { - buf, err := json.Marshal(authConfig) - if err != nil { - return err - } - registryAuthHeader := []string{ - base64.URLEncoding.EncodeToString(buf), - } - - return cli.stream("POST", "/images/create?"+v.Encode(), nil, cli.out, map[string][]string{ - "X-Registry-Auth": registryAuthHeader, - }) - } - - if err := pull(authConfig); err != nil { - if strings.Contains(err.Error(), "Status 401") { - fmt.Fprintln(cli.out, "\nPlease login prior to pull:") - if err := cli.CmdLogin(repoInfo.Index.GetAuthConfigKey()); err != nil { - return err - } - authConfig := cli.configFile.ResolveAuthConfig(repoInfo.Index) - return pull(authConfig) - } - return err - } - - return nil -} - -func (cli *DockerCli) CmdImages(args ...string) error { - cmd := cli.Subcmd("images", "[REPOSITORY]", "List images", true) - quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only show numeric IDs") - all := cmd.Bool([]string{"a", "-all"}, false, "Show all images (default hides intermediate images)") - noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output") - showDigests := cmd.Bool([]string{"-digests"}, false, "Show digests") - // FIXME: --viz and --tree are deprecated. Remove them in a future version. 
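Both the push and pull paths above marshal the resolved registry credentials to JSON and send them base64-encoded in the X-Registry-Auth header. A minimal standalone sketch of that encoding, with abbreviated field names and invented credentials, might look like this:

package main

import (
    "encoding/base64"
    "encoding/json"
    "fmt"
    "net/http"
)

// authConfig mirrors only the fields relevant here; the real
// registry.AuthConfig type carries more. All values below are placeholders.
type authConfig struct {
    Username      string `json:"username"`
    Password      string `json:"password"`
    ServerAddress string `json:"serveraddress"`
}

func main() {
    auth := authConfig{Username: "someuser", Password: "secret", ServerAddress: "https://index.docker.io/v1/"}

    // JSON-encode, then base64 (URL-safe) encode, as the push/pull helpers do
    // before handing the request to cli.stream.
    buf, err := json.Marshal(auth)
    if err != nil {
        panic(err)
    }
    req, _ := http.NewRequest("POST", "http://daemon/images/create?fromImage=busybox", nil)
    req.Header.Set("X-Registry-Auth", base64.URLEncoding.EncodeToString(buf))
    fmt.Println(req.Header.Get("X-Registry-Auth"))
}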
- flViz := cmd.Bool([]string{"#v", "#viz", "#-viz"}, false, "Output graph in graphviz format") - flTree := cmd.Bool([]string{"#t", "#tree", "#-tree"}, false, "Output graph in tree format") - - flFilter := opts.NewListOpts(nil) - cmd.Var(&flFilter, []string{"f", "-filter"}, "Filter output based on conditions provided") - cmd.Require(flag.Max, 1) - - utils.ParseFlags(cmd, args, true) - - // Consolidate all filter flags, and sanity check them early. - // They'll get process in the daemon/server. - imageFilterArgs := filters.Args{} - for _, f := range flFilter.GetAll() { - var err error - imageFilterArgs, err = filters.ParseFlag(f, imageFilterArgs) - if err != nil { - return err - } - } - - matchName := cmd.Arg(0) - // FIXME: --viz and --tree are deprecated. Remove them in a future version. - if *flViz || *flTree { - v := url.Values{ - "all": []string{"1"}, - } - if len(imageFilterArgs) > 0 { - filterJson, err := filters.ToParam(imageFilterArgs) - if err != nil { - return err - } - v.Set("filters", filterJson) - } - - body, _, err := readBody(cli.call("GET", "/images/json?"+v.Encode(), nil, false)) - if err != nil { - return err - } - - outs := engine.NewTable("Created", 0) - if _, err := outs.ReadListFrom(body); err != nil { - return err - } - - var ( - printNode func(cli *DockerCli, noTrunc bool, image *engine.Env, prefix string) - startImage *engine.Env - - roots = engine.NewTable("Created", outs.Len()) - byParent = make(map[string]*engine.Table) - ) - - for _, image := range outs.Data { - if image.Get("ParentId") == "" { - roots.Add(image) - } else { - if children, exists := byParent[image.Get("ParentId")]; exists { - children.Add(image) - } else { - byParent[image.Get("ParentId")] = engine.NewTable("Created", 1) - byParent[image.Get("ParentId")].Add(image) - } - } - - if matchName != "" { - if matchName == image.Get("Id") || matchName == common.TruncateID(image.Get("Id")) { - startImage = image - } - - for _, repotag := range image.GetList("RepoTags") { - if repotag == matchName { - startImage = image - } - } - } - } - - if *flViz { - fmt.Fprintf(cli.out, "digraph docker {\n") - printNode = (*DockerCli).printVizNode - } else { - printNode = (*DockerCli).printTreeNode - } - - if startImage != nil { - root := engine.NewTable("Created", 1) - root.Add(startImage) - cli.WalkTree(*noTrunc, root, byParent, "", printNode) - } else if matchName == "" { - cli.WalkTree(*noTrunc, roots, byParent, "", printNode) - } - if *flViz { - fmt.Fprintf(cli.out, " base [style=invisible]\n}\n") - } - } else { - v := url.Values{} - if len(imageFilterArgs) > 0 { - filterJson, err := filters.ToParam(imageFilterArgs) - if err != nil { - return err - } - v.Set("filters", filterJson) - } - - if cmd.NArg() == 1 { - // FIXME rename this parameter, to not be confused with the filters flag - v.Set("filter", matchName) - } - if *all { - v.Set("all", "1") - } - - body, _, err := readBody(cli.call("GET", "/images/json?"+v.Encode(), nil, false)) - - if err != nil { - return err - } - - outs := engine.NewTable("Created", 0) - if _, err := outs.ReadListFrom(body); err != nil { - return err - } - - w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) - if !*quiet { - if *showDigests { - fmt.Fprintln(w, "REPOSITORY\tTAG\tDIGEST\tIMAGE ID\tCREATED\tVIRTUAL SIZE") - } else { - fmt.Fprintln(w, "REPOSITORY\tTAG\tIMAGE ID\tCREATED\tVIRTUAL SIZE") - } - } - - for _, out := range outs.Data { - outID := out.Get("Id") - if !*noTrunc { - outID = common.TruncateID(outID) - } - - repoTags := out.GetList("RepoTags") - repoDigests := 
out.GetList("RepoDigests") - - if len(repoTags) == 1 && repoTags[0] == ":" && len(repoDigests) == 1 && repoDigests[0] == "@" { - // dangling image - clear out either repoTags or repoDigsts so we only show it once below - repoDigests = []string{} - } - - // combine the tags and digests lists - tagsAndDigests := append(repoTags, repoDigests...) - for _, repoAndRef := range tagsAndDigests { - repo, ref := parsers.ParseRepositoryTag(repoAndRef) - // default tag and digest to none - if there's a value, it'll be set below - tag := "" - digest := "" - if utils.DigestReference(ref) { - digest = ref - } else { - tag = ref - } - - if !*quiet { - if *showDigests { - fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s ago\t%s\n", repo, tag, digest, outID, units.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0))), units.HumanSize(float64(out.GetInt64("VirtualSize")))) - } else { - fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\n", repo, tag, outID, units.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0))), units.HumanSize(float64(out.GetInt64("VirtualSize")))) - } - } else { - fmt.Fprintln(w, outID) - } - } - } - - if !*quiet { - w.Flush() - } - } - return nil -} - -// FIXME: --viz and --tree are deprecated. Remove them in a future version. -func (cli *DockerCli) WalkTree(noTrunc bool, images *engine.Table, byParent map[string]*engine.Table, prefix string, printNode func(cli *DockerCli, noTrunc bool, image *engine.Env, prefix string)) { - length := images.Len() - if length > 1 { - for index, image := range images.Data { - if index+1 == length { - printNode(cli, noTrunc, image, prefix+"└─") - if subimages, exists := byParent[image.Get("Id")]; exists { - cli.WalkTree(noTrunc, subimages, byParent, prefix+" ", printNode) - } - } else { - printNode(cli, noTrunc, image, prefix+"\u251C─") - if subimages, exists := byParent[image.Get("Id")]; exists { - cli.WalkTree(noTrunc, subimages, byParent, prefix+"\u2502 ", printNode) - } - } - } - } else { - for _, image := range images.Data { - printNode(cli, noTrunc, image, prefix+"└─") - if subimages, exists := byParent[image.Get("Id")]; exists { - cli.WalkTree(noTrunc, subimages, byParent, prefix+" ", printNode) - } - } - } -} - -// FIXME: --viz and --tree are deprecated. Remove them in a future version. -func (cli *DockerCli) printVizNode(noTrunc bool, image *engine.Env, prefix string) { - var ( - imageID string - parentID string - ) - if noTrunc { - imageID = image.Get("Id") - parentID = image.Get("ParentId") - } else { - imageID = common.TruncateID(image.Get("Id")) - parentID = common.TruncateID(image.Get("ParentId")) - } - if parentID == "" { - fmt.Fprintf(cli.out, " base -> \"%s\" [style=invis]\n", imageID) - } else { - fmt.Fprintf(cli.out, " \"%s\" -> \"%s\"\n", parentID, imageID) - } - if image.GetList("RepoTags")[0] != ":" { - fmt.Fprintf(cli.out, " \"%s\" [label=\"%s\\n%s\",shape=box,fillcolor=\"paleturquoise\",style=\"filled,rounded\"];\n", - imageID, imageID, strings.Join(image.GetList("RepoTags"), "\\n")) - } -} - -// FIXME: --viz and --tree are deprecated. Remove them in a future version. 
-func (cli *DockerCli) printTreeNode(noTrunc bool, image *engine.Env, prefix string) { - var imageID string - if noTrunc { - imageID = image.Get("Id") - } else { - imageID = common.TruncateID(image.Get("Id")) - } - - fmt.Fprintf(cli.out, "%s%s Virtual Size: %s", prefix, imageID, units.HumanSize(float64(image.GetInt64("VirtualSize")))) - if image.GetList("RepoTags")[0] != ":" { - fmt.Fprintf(cli.out, " Tags: %s\n", strings.Join(image.GetList("RepoTags"), ", ")) - } else { - fmt.Fprint(cli.out, "\n") - } -} - -func (cli *DockerCli) CmdPs(args ...string) error { - var ( - err error - - psFilterArgs = filters.Args{} - v = url.Values{} - - cmd = cli.Subcmd("ps", "", "List containers", true) - quiet = cmd.Bool([]string{"q", "-quiet"}, false, "Only display numeric IDs") - size = cmd.Bool([]string{"s", "-size"}, false, "Display total file sizes") - all = cmd.Bool([]string{"a", "-all"}, false, "Show all containers (default shows just running)") - noTrunc = cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output") - nLatest = cmd.Bool([]string{"l", "-latest"}, false, "Show the latest created container, include non-running") - since = cmd.String([]string{"#sinceId", "#-since-id", "-since"}, "", "Show created since Id or Name, include non-running") - before = cmd.String([]string{"#beforeId", "#-before-id", "-before"}, "", "Show only container created before Id or Name") - last = cmd.Int([]string{"n"}, -1, "Show n last created containers, include non-running") - flFilter = opts.NewListOpts(nil) - ) - cmd.Require(flag.Exact, 0) - - cmd.Var(&flFilter, []string{"f", "-filter"}, "Filter output based on conditions provided") - - utils.ParseFlags(cmd, args, true) - if *last == -1 && *nLatest { - *last = 1 - } - - if *all { - v.Set("all", "1") - } - - if *last != -1 { - v.Set("limit", strconv.Itoa(*last)) - } - - if *since != "" { - v.Set("since", *since) - } - - if *before != "" { - v.Set("before", *before) - } - - if *size { - v.Set("size", "1") - } - - // Consolidate all filter flags, and sanity check them. - // They'll get processed in the daemon/server. 
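As the comments above note, the -f/--filter values are only collected and sanity-checked on the client; what reaches the daemon is a JSON-encoded map in the filters query parameter. A rough sketch of that encoding, with invented filter values and assuming the classic map[string][]string shape:

package main

import (
    "encoding/json"
    "fmt"
    "net/url"
    "strings"
)

func main() {
    // What several -f/--filter flags might look like on the command line.
    raw := []string{"status=exited", "label=com.example.tier=backend"}

    // Accumulate them into name -> values, splitting only on the first '='.
    args := map[string][]string{}
    for _, f := range raw {
        parts := strings.SplitN(f, "=", 2)
        if len(parts) != 2 {
            continue // a real implementation would reject this
        }
        args[parts[0]] = append(args[parts[0]], parts[1])
    }

    // JSON-encode and attach as the "filters" query parameter.
    buf, err := json.Marshal(args)
    if err != nil {
        panic(err)
    }
    v := url.Values{}
    v.Set("all", "1")
    v.Set("filters", string(buf))
    fmt.Println("/containers/json?" + v.Encode())
}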
- for _, f := range flFilter.GetAll() { - if psFilterArgs, err = filters.ParseFlag(f, psFilterArgs); err != nil { - return err - } - } - - if len(psFilterArgs) > 0 { - filterJson, err := filters.ToParam(psFilterArgs) - if err != nil { - return err - } - - v.Set("filters", filterJson) - } - - body, _, err := readBody(cli.call("GET", "/containers/json?"+v.Encode(), nil, false)) - if err != nil { - return err - } - - outs := engine.NewTable("Created", 0) - if _, err := outs.ReadListFrom(body); err != nil { - return err - } - - w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) - if !*quiet { - fmt.Fprint(w, "CONTAINER ID\tIMAGE\tCOMMAND\tCREATED\tSTATUS\tPORTS\tNAMES") - - if *size { - fmt.Fprintln(w, "\tSIZE") - } else { - fmt.Fprint(w, "\n") - } - } - - stripNamePrefix := func(ss []string) []string { - for i, s := range ss { - ss[i] = s[1:] - } - - return ss - } - - for _, out := range outs.Data { - outID := out.Get("Id") - - if !*noTrunc { - outID = common.TruncateID(outID) - } - - if *quiet { - fmt.Fprintln(w, outID) - - continue - } - - var ( - outNames = stripNamePrefix(out.GetList("Names")) - outCommand = strconv.Quote(out.Get("Command")) - ports = engine.NewTable("", 0) - ) - - if !*noTrunc { - outCommand = utils.Trunc(outCommand, 20) - - // only display the default name for the container with notrunc is passed - for _, name := range outNames { - if len(strings.Split(name, "/")) == 1 { - outNames = []string{name} - - break - } - } - } - - ports.ReadListFrom([]byte(out.Get("Ports"))) - - image := out.Get("Image") - if image == "" { - image = "" - } - - fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\t%s\t%s\t", outID, image, outCommand, - units.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0))), - out.Get("Status"), api.DisplayablePorts(ports), strings.Join(outNames, ",")) - - if *size { - if out.GetInt("SizeRootFs") > 0 { - fmt.Fprintf(w, "%s (virtual %s)\n", units.HumanSize(float64(out.GetInt64("SizeRw"))), units.HumanSize(float64(out.GetInt64("SizeRootFs")))) - } else { - fmt.Fprintf(w, "%s\n", units.HumanSize(float64(out.GetInt64("SizeRw")))) - } - - continue - } - - fmt.Fprint(w, "\n") - } - - if !*quiet { - w.Flush() - } - - return nil -} - -func (cli *DockerCli) CmdCommit(args ...string) error { - cmd := cli.Subcmd("commit", "CONTAINER [REPOSITORY[:TAG]]", "Create a new image from a container's changes", true) - flPause := cmd.Bool([]string{"p", "-pause"}, true, "Pause container during commit") - flComment := cmd.String([]string{"m", "-message"}, "", "Commit message") - flAuthor := cmd.String([]string{"a", "#author", "-author"}, "", "Author (e.g., \"John Hannibal Smith \")") - flChanges := opts.NewListOpts(nil) - cmd.Var(&flChanges, []string{"c", "-change"}, "Apply Dockerfile instruction to the created image") - // FIXME: --run is deprecated, it will be replaced with inline Dockerfile commands. 
- flConfig := cmd.String([]string{"#run", "#-run"}, "", "This option is deprecated and will be removed in a future version in favor of inline Dockerfile-compatible commands") - cmd.Require(flag.Max, 2) - cmd.Require(flag.Min, 1) - utils.ParseFlags(cmd, args, true) - - var ( - name = cmd.Arg(0) - repository, tag = parsers.ParseRepositoryTag(cmd.Arg(1)) - ) - - //Check if the given image name can be resolved - if repository != "" { - if err := registry.ValidateRepositoryName(repository); err != nil { - return err - } - } - - v := url.Values{} - v.Set("container", name) - v.Set("repo", repository) - v.Set("tag", tag) - v.Set("comment", *flComment) - v.Set("author", *flAuthor) - for _, change := range flChanges.GetAll() { - v.Add("changes", change) - } - - if *flPause != true { - v.Set("pause", "0") - } - - var ( - config *runconfig.Config - env engine.Env - ) - if *flConfig != "" { - config = &runconfig.Config{} - if err := json.Unmarshal([]byte(*flConfig), config); err != nil { - return err - } - } - stream, _, err := cli.call("POST", "/commit?"+v.Encode(), config, false) - if err != nil { - return err - } - if err := env.Decode(stream); err != nil { - return err - } - - fmt.Fprintf(cli.out, "%s\n", env.Get("Id")) - return nil -} - -func (cli *DockerCli) CmdEvents(args ...string) error { - cmd := cli.Subcmd("events", "", "Get real time events from the server", true) - since := cmd.String([]string{"#since", "-since"}, "", "Show all events created since timestamp") - until := cmd.String([]string{"-until"}, "", "Stream events until this timestamp") - flFilter := opts.NewListOpts(nil) - cmd.Var(&flFilter, []string{"f", "-filter"}, "Filter output based on conditions provided") - cmd.Require(flag.Exact, 0) - - utils.ParseFlags(cmd, args, true) - - var ( - v = url.Values{} - loc = time.FixedZone(time.Now().Zone()) - eventFilterArgs = filters.Args{} - ) - - // Consolidate all filter flags, and sanity check them early. - // They'll get process in the daemon/server. - for _, f := range flFilter.GetAll() { - var err error - eventFilterArgs, err = filters.ParseFlag(f, eventFilterArgs) - if err != nil { - return err - } - } - var setTime = func(key, value string) { - format := timeutils.RFC3339NanoFixed - if len(value) < len(format) { - format = format[:len(value)] - } - if t, err := time.ParseInLocation(format, value, loc); err == nil { - v.Set(key, strconv.FormatInt(t.Unix(), 10)) - } else { - v.Set(key, value) - } - } - if *since != "" { - setTime("since", *since) - } - if *until != "" { - setTime("until", *until) - } - if len(eventFilterArgs) > 0 { - filterJson, err := filters.ToParam(eventFilterArgs) - if err != nil { - return err - } - v.Set("filters", filterJson) - } - if err := cli.stream("GET", "/events?"+v.Encode(), nil, cli.out, nil); err != nil { - return err - } - return nil -} - -func (cli *DockerCli) CmdExport(args ...string) error { - cmd := cli.Subcmd("export", "CONTAINER", "Export a filesystem as a tar archive (streamed to STDOUT by default)", true) - outfile := cmd.String([]string{"o", "-output"}, "", "Write to a file, instead of STDOUT") - cmd.Require(flag.Exact, 1) - - utils.ParseFlags(cmd, args, true) - - var ( - output io.Writer = cli.out - err error - ) - if *outfile != "" { - output, err = os.Create(*outfile) - if err != nil { - return err - } - } else if cli.isTerminalOut { - return errors.New("Cowardly refusing to save to a terminal. 
Use the -o flag or redirect.") - } - - if len(cmd.Args()) == 1 { - image := cmd.Arg(0) - if err := cli.stream("GET", "/containers/"+image+"/export", nil, output, nil); err != nil { - return err - } - } else { - v := url.Values{} - for _, arg := range cmd.Args() { - v.Add("names", arg) - } - if err := cli.stream("GET", "/containers/get?"+v.Encode(), nil, output, nil); err != nil { - return err - } - } - - return nil -} - -func (cli *DockerCli) CmdDiff(args ...string) error { - cmd := cli.Subcmd("diff", "CONTAINER", "Inspect changes on a container's filesystem", true) - cmd.Require(flag.Exact, 1) - - utils.ParseFlags(cmd, args, true) - - body, _, err := readBody(cli.call("GET", "/containers/"+cmd.Arg(0)+"/changes", nil, false)) - - if err != nil { - return err - } - - outs := engine.NewTable("", 0) - if _, err := outs.ReadListFrom(body); err != nil { - return err - } - for _, change := range outs.Data { - var kind string - switch change.GetInt("Kind") { - case archive.ChangeModify: - kind = "C" - case archive.ChangeAdd: - kind = "A" - case archive.ChangeDelete: - kind = "D" - } - fmt.Fprintf(cli.out, "%s %s\n", kind, change.Get("Path")) - } - return nil -} - -func (cli *DockerCli) CmdLogs(args ...string) error { - var ( - cmd = cli.Subcmd("logs", "CONTAINER", "Fetch the logs of a container", true) - follow = cmd.Bool([]string{"f", "-follow"}, false, "Follow log output") - times = cmd.Bool([]string{"t", "-timestamps"}, false, "Show timestamps") - tail = cmd.String([]string{"-tail"}, "all", "Number of lines to show from the end of the logs") - ) - cmd.Require(flag.Exact, 1) - - utils.ParseFlags(cmd, args, true) - - name := cmd.Arg(0) - - stream, _, err := cli.call("GET", "/containers/"+name+"/json", nil, false) - if err != nil { - return err - } - - env := engine.Env{} - if err := env.Decode(stream); err != nil { - return err - } - - if env.GetSubEnv("HostConfig").GetSubEnv("LogConfig").Get("Type") != "json-file" { - return fmt.Errorf("\"logs\" command is supported only for \"json-file\" logging driver") - } - - v := url.Values{} - v.Set("stdout", "1") - v.Set("stderr", "1") - - if *times { - v.Set("timestamps", "1") - } - - if *follow { - v.Set("follow", "1") - } - v.Set("tail", *tail) - - return cli.streamHelper("GET", "/containers/"+name+"/logs?"+v.Encode(), env.GetSubEnv("Config").GetBool("Tty"), nil, cli.out, cli.err, nil) -} - -func (cli *DockerCli) CmdAttach(args ...string) error { - var ( - cmd = cli.Subcmd("attach", "CONTAINER", "Attach to a running container", true) - noStdin = cmd.Bool([]string{"#nostdin", "-no-stdin"}, false, "Do not attach STDIN") - proxy = cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxy all received signals to the process") - ) - cmd.Require(flag.Exact, 1) - - utils.ParseFlags(cmd, args, true) - name := cmd.Arg(0) - - stream, _, err := cli.call("GET", "/containers/"+name+"/json", nil, false) - if err != nil { - return err - } - - env := engine.Env{} - if err := env.Decode(stream); err != nil { - return err - } - - if !env.GetSubEnv("State").GetBool("Running") { - return fmt.Errorf("You cannot attach to a stopped container, start it first") - } - - var ( - config = env.GetSubEnv("Config") - tty = config.GetBool("Tty") - ) - - if err := cli.CheckTtyInput(!*noStdin, tty); err != nil { - return err - } - - if tty && cli.isTerminalOut { - if err := cli.monitorTtySize(cmd.Arg(0), false); err != nil { - log.Debugf("Error monitoring TTY size: %s", err) - } - } - - var in io.ReadCloser - - v := url.Values{} - v.Set("stream", "1") - if !*noStdin && 
config.GetBool("OpenStdin") { - v.Set("stdin", "1") - in = cli.in - } - - v.Set("stdout", "1") - v.Set("stderr", "1") - - if *proxy && !tty { - sigc := cli.forwardAllSignals(cmd.Arg(0)) - defer signal.StopCatch(sigc) - } - - if err := cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), tty, in, cli.out, cli.err, nil, nil); err != nil { - return err - } - - _, status, err := getExitCode(cli, cmd.Arg(0)) - if err != nil { - return err - } - if status != 0 { - return &utils.StatusError{StatusCode: status} - } - - return nil -} - -func (cli *DockerCli) CmdSearch(args ...string) error { - cmd := cli.Subcmd("search", "TERM", "Search the Docker Hub for images", true) - noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output") - trusted := cmd.Bool([]string{"#t", "#trusted", "#-trusted"}, false, "Only show trusted builds") - automated := cmd.Bool([]string{"-automated"}, false, "Only show automated builds") - stars := cmd.Int([]string{"s", "#stars", "-stars"}, 0, "Only displays with at least x stars") - cmd.Require(flag.Exact, 1) - - utils.ParseFlags(cmd, args, true) - - v := url.Values{} - v.Set("term", cmd.Arg(0)) - - body, _, err := readBody(cli.call("GET", "/images/search?"+v.Encode(), nil, true)) - - if err != nil { - return err - } - outs := engine.NewTable("star_count", 0) - if _, err := outs.ReadListFrom(body); err != nil { - return err - } - w := tabwriter.NewWriter(cli.out, 10, 1, 3, ' ', 0) - fmt.Fprintf(w, "NAME\tDESCRIPTION\tSTARS\tOFFICIAL\tAUTOMATED\n") - for _, out := range outs.Data { - if ((*automated || *trusted) && (!out.GetBool("is_trusted") && !out.GetBool("is_automated"))) || (*stars > out.GetInt("star_count")) { - continue - } - desc := strings.Replace(out.Get("description"), "\n", " ", -1) - desc = strings.Replace(desc, "\r", " ", -1) - if !*noTrunc && len(desc) > 45 { - desc = utils.Trunc(desc, 42) + "..." 
- } - fmt.Fprintf(w, "%s\t%s\t%d\t", out.Get("name"), desc, out.GetInt("star_count")) - if out.GetBool("is_official") { - fmt.Fprint(w, "[OK]") - - } - fmt.Fprint(w, "\t") - if out.GetBool("is_automated") || out.GetBool("is_trusted") { - fmt.Fprint(w, "[OK]") - } - fmt.Fprint(w, "\n") - } - w.Flush() - return nil -} - -// Ports type - Used to parse multiple -p flags -type ports []int - -func (cli *DockerCli) CmdTag(args ...string) error { - cmd := cli.Subcmd("tag", "IMAGE[:TAG] [REGISTRYHOST/][USERNAME/]NAME[:TAG]", "Tag an image into a repository", true) - force := cmd.Bool([]string{"f", "#force", "-force"}, false, "Force") - cmd.Require(flag.Exact, 2) - - utils.ParseFlags(cmd, args, true) - - var ( - repository, tag = parsers.ParseRepositoryTag(cmd.Arg(1)) - v = url.Values{} - ) - - //Check if the given image name can be resolved - if err := registry.ValidateRepositoryName(repository); err != nil { - return err - } - v.Set("repo", repository) - v.Set("tag", tag) - - if *force { - v.Set("force", "1") - } - - if _, _, err := readBody(cli.call("POST", "/images/"+cmd.Arg(0)+"/tag?"+v.Encode(), nil, false)); err != nil { - return err - } - return nil -} - -func (cli *DockerCli) pullImage(image string) error { - return cli.pullImageCustomOut(image, cli.out) -} - -func (cli *DockerCli) pullImageCustomOut(image string, out io.Writer) error { - v := url.Values{} - repos, tag := parsers.ParseRepositoryTag(image) - // pull only the image tagged 'latest' if no tag was specified - if tag == "" { - tag = graph.DEFAULTTAG - } - v.Set("fromImage", repos) - v.Set("tag", tag) - - // Resolve the Repository name from fqn to RepositoryInfo - repoInfo, err := registry.ParseRepositoryInfo(repos) - if err != nil { - return err - } - - // Load the auth config file, to be able to pull the image - cli.LoadConfigFile() - - // Resolve the Auth config relevant for this server - authConfig := cli.configFile.ResolveAuthConfig(repoInfo.Index) - buf, err := json.Marshal(authConfig) - if err != nil { - return err - } - - registryAuthHeader := []string{ - base64.URLEncoding.EncodeToString(buf), - } - if err = cli.stream("POST", "/images/create?"+v.Encode(), nil, out, map[string][]string{"X-Registry-Auth": registryAuthHeader}); err != nil { - return err - } - return nil -} - -type cidFile struct { - path string - file *os.File - written bool -} - -func newCIDFile(path string) (*cidFile, error) { - if _, err := os.Stat(path); err == nil { - return nil, fmt.Errorf("Container ID file found, make sure the other container isn't running or delete %s", path) - } - - f, err := os.Create(path) - if err != nil { - return nil, fmt.Errorf("Failed to create the container ID file: %s", err) - } - - return &cidFile{path: path, file: f}, nil -} - -func (cid *cidFile) Close() error { - cid.file.Close() - - if !cid.written { - if err := os.Remove(cid.path); err != nil { - return fmt.Errorf("failed to remove the CID file '%s': %s \n", cid.path, err) - } - } - - return nil -} - -func (cid *cidFile) Write(id string) error { - if _, err := cid.file.Write([]byte(id)); err != nil { - return fmt.Errorf("Failed to write the container ID to the file: %s", err) - } - cid.written = true - return nil -} - -func (cli *DockerCli) createContainer(config *runconfig.Config, hostConfig *runconfig.HostConfig, cidfile, name string) (*types.ContainerCreateResponse, error) { - containerValues := url.Values{} - if name != "" { - containerValues.Set("name", name) - } - - mergedConfig := runconfig.MergeConfigs(config, hostConfig) - - var containerIDFile *cidFile - 
if cidfile != "" { - var err error - if containerIDFile, err = newCIDFile(cidfile); err != nil { - return nil, err - } - defer containerIDFile.Close() - } - - //create the container - stream, statusCode, err := cli.call("POST", "/containers/create?"+containerValues.Encode(), mergedConfig, false) - //if image not found try to pull it - if statusCode == 404 { - repo, tag := parsers.ParseRepositoryTag(config.Image) - if tag == "" { - tag = graph.DEFAULTTAG - } - fmt.Fprintf(cli.err, "Unable to find image '%s' locally\n", utils.ImageReference(repo, tag)) - - // we don't want to write to stdout anything apart from container.ID - if err = cli.pullImageCustomOut(config.Image, cli.err); err != nil { - return nil, err - } - // Retry - if stream, _, err = cli.call("POST", "/containers/create?"+containerValues.Encode(), mergedConfig, false); err != nil { - return nil, err - } - } else if err != nil { - return nil, err - } - - var response types.ContainerCreateResponse - if err := json.NewDecoder(stream).Decode(&response); err != nil { - return nil, err - } - for _, warning := range response.Warnings { - fmt.Fprintf(cli.err, "WARNING: %s\n", warning) - } - if containerIDFile != nil { - if err = containerIDFile.Write(response.ID); err != nil { - return nil, err - } - } - return &response, nil -} - -func (cli *DockerCli) CmdCreate(args ...string) error { - cmd := cli.Subcmd("create", "IMAGE [COMMAND] [ARG...]", "Create a new container", true) - - // These are flags not stored in Config/HostConfig - var ( - flName = cmd.String([]string{"-name"}, "", "Assign a name to the container") - ) - - config, hostConfig, cmd, err := runconfig.Parse(cmd, args) - if err != nil { - utils.ReportError(cmd, err.Error(), true) - } - if config.Image == "" { - cmd.Usage() - return nil - } - response, err := cli.createContainer(config, hostConfig, hostConfig.ContainerIDFile, *flName) - if err != nil { - return err - } - fmt.Fprintf(cli.out, "%s\n", response.ID) - return nil -} - -func (cli *DockerCli) CmdRun(args ...string) error { - // FIXME: just use runconfig.Parse already - cmd := cli.Subcmd("run", "IMAGE [COMMAND] [ARG...]", "Run a command in a new container", true) - - // These are flags not stored in Config/HostConfig - var ( - flAutoRemove = cmd.Bool([]string{"#rm", "-rm"}, false, "Automatically remove the container when it exits") - flDetach = cmd.Bool([]string{"d", "-detach"}, false, "Run container in background and print container ID") - flSigProxy = cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxy received signals to the process") - flName = cmd.String([]string{"#name", "-name"}, "", "Assign a name to the container") - flAttach *opts.ListOpts - - ErrConflictAttachDetach = fmt.Errorf("Conflicting options: -a and -d") - ErrConflictRestartPolicyAndAutoRemove = fmt.Errorf("Conflicting options: --restart and --rm") - ErrConflictDetachAutoRemove = fmt.Errorf("Conflicting options: --rm and -d") - ) - - config, hostConfig, cmd, err := runconfig.Parse(cmd, args) - // just in case the Parse does not exit - if err != nil { - utils.ReportError(cmd, err.Error(), true) - } - - if len(hostConfig.Dns) > 0 { - // check the DNS settings passed via --dns against - // localhost regexp to warn if they are trying to - // set a DNS to a localhost address - for _, dnsIP := range hostConfig.Dns { - if resolvconf.IsLocalhost(dnsIP) { - fmt.Fprintf(cli.err, "WARNING: Localhost DNS setting (--dns=%s) may fail in containers.\n", dnsIP) - break - } - } - } - if config.Image == "" { - cmd.Usage() - return nil - } - - if !*flDetach { 
- if err := cli.CheckTtyInput(config.AttachStdin, config.Tty); err != nil { - return err - } - } else { - if fl := cmd.Lookup("-attach"); fl != nil { - flAttach = fl.Value.(*opts.ListOpts) - if flAttach.Len() != 0 { - return ErrConflictAttachDetach - } - } - if *flAutoRemove { - return ErrConflictDetachAutoRemove - } - - config.AttachStdin = false - config.AttachStdout = false - config.AttachStderr = false - config.StdinOnce = false - } - - // Disable flSigProxy when in TTY mode - sigProxy := *flSigProxy - if config.Tty { - sigProxy = false - } - - createResponse, err := cli.createContainer(config, hostConfig, hostConfig.ContainerIDFile, *flName) - if err != nil { - return err - } - if sigProxy { - sigc := cli.forwardAllSignals(createResponse.ID) - defer signal.StopCatch(sigc) - } - var ( - waitDisplayId chan struct{} - errCh chan error - ) - if !config.AttachStdout && !config.AttachStderr { - // Make this asynchronous to allow the client to write to stdin before having to read the ID - waitDisplayId = make(chan struct{}) - go func() { - defer close(waitDisplayId) - fmt.Fprintf(cli.out, "%s\n", createResponse.ID) - }() - } - if *flAutoRemove && (hostConfig.RestartPolicy.Name == "always" || hostConfig.RestartPolicy.Name == "on-failure") { - return ErrConflictRestartPolicyAndAutoRemove - } - // We need to instantiate the chan because the select needs it. It can - // be closed but can't be uninitialized. - hijacked := make(chan io.Closer) - // Block the return until the chan gets closed - defer func() { - log.Debugf("End of CmdRun(), Waiting for hijack to finish.") - if _, ok := <-hijacked; ok { - log.Errorf("Hijack did not finish (chan still open)") - } - }() - if config.AttachStdin || config.AttachStdout || config.AttachStderr { - var ( - out, stderr io.Writer - in io.ReadCloser - v = url.Values{} - ) - v.Set("stream", "1") - if config.AttachStdin { - v.Set("stdin", "1") - in = cli.in - } - if config.AttachStdout { - v.Set("stdout", "1") - out = cli.out - } - if config.AttachStderr { - v.Set("stderr", "1") - if config.Tty { - stderr = cli.out - } else { - stderr = cli.err - } - } - errCh = promise.Go(func() error { - return cli.hijack("POST", "/containers/"+createResponse.ID+"/attach?"+v.Encode(), config.Tty, in, out, stderr, hijacked, nil) - }) - } else { - close(hijacked) - } - // Acknowledge the hijack before starting - select { - case closer := <-hijacked: - // Make sure that the hijack gets closed when returning (results - // in closing the hijack chan and freeing server's goroutines) - if closer != nil { - defer closer.Close() - } - case err := <-errCh: - if err != nil { - log.Debugf("Error hijack: %s", err) - return err - } - } - - defer func() { - if *flAutoRemove { - if _, _, err = readBody(cli.call("DELETE", "/containers/"+createResponse.ID+"?v=1", nil, false)); err != nil { - log.Errorf("Error deleting container: %s", err) - } - } - }() - - //start the container - if _, _, err = readBody(cli.call("POST", "/containers/"+createResponse.ID+"/start", nil, false)); err != nil { - return err - } - - if (config.AttachStdin || config.AttachStdout || config.AttachStderr) && config.Tty && cli.isTerminalOut { - if err := cli.monitorTtySize(createResponse.ID, false); err != nil { - log.Errorf("Error monitoring TTY size: %s", err) - } - } - - if errCh != nil { - if err := <-errCh; err != nil { - log.Debugf("Error hijack: %s", err) - return err - } - } - - // Detached mode: wait for the id to be displayed and return. 
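The attach logic above hands the hijacked stream back through a channel so the caller can wait for the attach to be established before starting the container, and can guarantee the stream is closed on every exit path. A simplified sketch of that handshake with the HTTP hijack replaced by a stub (illustrative only, standard library only):

package main

import (
    "fmt"
    "io"
    "strings"
    "time"
)

// fakeAttach stands in for cli.hijack: it "connects", hands a closer back on
// started, then pretends to stream output for a moment before returning.
func fakeAttach(started chan io.Closer) error {
    conn := io.NopCloser(strings.NewReader("container output"))
    started <- conn // acknowledge the hijack before streaming
    time.Sleep(50 * time.Millisecond)
    return nil
}

func main() {
    hijacked := make(chan io.Closer)
    errCh := make(chan error, 1)

    go func() { errCh <- fakeAttach(hijacked) }()

    // Acknowledge the hijack before starting the container: either the stream
    // is up, or attaching failed early and there is nothing to start.
    select {
    case closer := <-hijacked:
        if closer != nil {
            defer closer.Close()
        }
    case err := <-errCh:
        if err != nil {
            fmt.Println("attach failed:", err)
            return
        }
    }

    // ... the container would be started here ...

    // Wait for the streaming goroutine to finish before returning.
    if err := <-errCh; err != nil {
        fmt.Println("stream error:", err)
    }
    fmt.Println("attach finished")
}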
- if !config.AttachStdout && !config.AttachStderr { - // Detached mode - <-waitDisplayId - return nil - } - - var status int - - // Attached mode - if *flAutoRemove { - // Autoremove: wait for the container to finish, retrieve - // the exit code and remove the container - if _, _, err := readBody(cli.call("POST", "/containers/"+createResponse.ID+"/wait", nil, false)); err != nil { - return err - } - if _, status, err = getExitCode(cli, createResponse.ID); err != nil { - return err - } - } else { - // No Autoremove: Simply retrieve the exit code - if !config.Tty { - // In non-TTY mode, we can't detach, so we must wait for container exit - if status, err = waitForExit(cli, createResponse.ID); err != nil { - return err - } - } else { - // In TTY mode, there is a race: if the process dies too slowly, the state could - // be updated after the getExitCode call and result in the wrong exit code being reported - if _, status, err = getExitCode(cli, createResponse.ID); err != nil { - return err - } - } - } - if status != 0 { - return &utils.StatusError{StatusCode: status} - } - return nil -} - -func (cli *DockerCli) CmdCp(args ...string) error { - cmd := cli.Subcmd("cp", "CONTAINER:PATH HOSTDIR|-", "Copy files/folders from a PATH on the container to a HOSTDIR on the host\nrunning the command. Use '-' to write the data\nas a tar file to STDOUT.", true) - cmd.Require(flag.Exact, 2) - - utils.ParseFlags(cmd, args, true) - - var copyData engine.Env - info := strings.Split(cmd.Arg(0), ":") - - if len(info) != 2 { - return fmt.Errorf("Error: Path not specified") - } - - copyData.Set("Resource", info[1]) - copyData.Set("HostPath", cmd.Arg(1)) - - stream, statusCode, err := cli.call("POST", "/containers/"+info[0]+"/copy", copyData, false) - if stream != nil { - defer stream.Close() - } - if statusCode == 404 { - return fmt.Errorf("No such container: %v", info[0]) - } - if err != nil { - return err - } - - if statusCode == 200 { - dest := copyData.Get("HostPath") - - if dest == "-" { - _, err = io.Copy(cli.out, stream) - } else { - err = archive.Untar(stream, dest, &archive.TarOptions{NoLchown: true}) - } - if err != nil { - return err - } - } - return nil -} - -func (cli *DockerCli) CmdSave(args ...string) error { - cmd := cli.Subcmd("save", "IMAGE [IMAGE...]", "Save an image(s) to a tar archive (streamed to STDOUT by default)", true) - outfile := cmd.String([]string{"o", "-output"}, "", "Write to an file, instead of STDOUT") - cmd.Require(flag.Min, 1) - - utils.ParseFlags(cmd, args, true) - - var ( - output io.Writer = cli.out - err error - ) - if *outfile != "" { - output, err = os.Create(*outfile) - if err != nil { - return err - } - } else if cli.isTerminalOut { - return errors.New("Cowardly refusing to save to a terminal. 
Use the -o flag or redirect.") - } - - if len(cmd.Args()) == 1 { - image := cmd.Arg(0) - if err := cli.stream("GET", "/images/"+image+"/get", nil, output, nil); err != nil { - return err - } - } else { - v := url.Values{} - for _, arg := range cmd.Args() { - v.Add("names", arg) - } - if err := cli.stream("GET", "/images/get?"+v.Encode(), nil, output, nil); err != nil { - return err - } - } - return nil -} - -func (cli *DockerCli) CmdLoad(args ...string) error { - cmd := cli.Subcmd("load", "", "Load an image from a tar archive on STDIN", true) - infile := cmd.String([]string{"i", "-input"}, "", "Read from a tar archive file, instead of STDIN") - cmd.Require(flag.Exact, 0) - - utils.ParseFlags(cmd, args, true) - - var ( - input io.Reader = cli.in - err error - ) - if *infile != "" { - input, err = os.Open(*infile) - if err != nil { - return err - } - } - if err := cli.stream("POST", "/images/load", input, cli.out, nil); err != nil { - return err - } - return nil -} - -func (cli *DockerCli) CmdExec(args ...string) error { - cmd := cli.Subcmd("exec", "CONTAINER COMMAND [ARG...]", "Run a command in a running container", true) - - execConfig, err := runconfig.ParseExec(cmd, args) - // just in case the ParseExec does not exit - if execConfig.Container == "" || err != nil { - return &utils.StatusError{StatusCode: 1} - } - - stream, _, err := cli.call("POST", "/containers/"+execConfig.Container+"/exec", execConfig, false) - if err != nil { - return err - } - - var response types.ContainerExecCreateResponse - if err := json.NewDecoder(stream).Decode(&response); err != nil { - return err - } - for _, warning := range response.Warnings { - fmt.Fprintf(cli.err, "WARNING: %s\n", warning) - } - - execID := response.ID - - if execID == "" { - fmt.Fprintf(cli.out, "exec ID empty") - return nil - } - - if !execConfig.Detach { - if err := cli.CheckTtyInput(execConfig.AttachStdin, execConfig.Tty); err != nil { - return err - } - } else { - if _, _, err := readBody(cli.call("POST", "/exec/"+execID+"/start", execConfig, false)); err != nil { - return err - } - // For now don't print this - wait for when we support exec wait() - // fmt.Fprintf(cli.out, "%s\n", execID) - return nil - } - - // Interactive exec requested. - var ( - out, stderr io.Writer - in io.ReadCloser - hijacked = make(chan io.Closer) - errCh chan error - ) - - // Block the return until the chan gets closed - defer func() { - log.Debugf("End of CmdExec(), Waiting for hijack to finish.") - if _, ok := <-hijacked; ok { - log.Errorf("Hijack did not finish (chan still open)") - } - }() - - if execConfig.AttachStdin { - in = cli.in - } - if execConfig.AttachStdout { - out = cli.out - } - if execConfig.AttachStderr { - if execConfig.Tty { - stderr = cli.out - } else { - stderr = cli.err - } - } - errCh = promise.Go(func() error { - return cli.hijack("POST", "/exec/"+execID+"/start", execConfig.Tty, in, out, stderr, hijacked, execConfig) - }) - - // Acknowledge the hijack before starting - select { - case closer := <-hijacked: - // Make sure that hijack gets closed when returning. (result - // in closing hijack chan and freeing server's goroutines. 
- if closer != nil { - defer closer.Close() - } - case err := <-errCh: - if err != nil { - log.Debugf("Error hijack: %s", err) - return err - } - } - - if execConfig.Tty && cli.isTerminalIn { - if err := cli.monitorTtySize(execID, true); err != nil { - log.Errorf("Error monitoring TTY size: %s", err) - } - } - - if err := <-errCh; err != nil { - log.Debugf("Error hijack: %s", err) - return err - } - - var status int - if _, status, err = getExecExitCode(cli, execID); err != nil { - return err - } - - if status != 0 { - return &utils.StatusError{StatusCode: status} - } - - return nil -} - -type containerStats struct { - Name string - CpuPercentage float64 - Memory float64 - MemoryLimit float64 - MemoryPercentage float64 - NetworkRx float64 - NetworkTx float64 - mu sync.RWMutex - err error -} - -func (s *containerStats) Collect(cli *DockerCli) { - stream, _, err := cli.call("GET", "/containers/"+s.Name+"/stats", nil, false) - if err != nil { - s.err = err - return - } - defer stream.Close() - var ( - previousCpu uint64 - previousSystem uint64 - start = true - dec = json.NewDecoder(stream) - u = make(chan error, 1) - ) - go func() { - for { - var v *types.Stats - if err := dec.Decode(&v); err != nil { - u <- err - return - } - var ( - memPercent = float64(v.MemoryStats.Usage) / float64(v.MemoryStats.Limit) * 100.0 - cpuPercent = 0.0 - ) - if !start { - cpuPercent = calculateCpuPercent(previousCpu, previousSystem, v) - } - start = false - s.mu.Lock() - s.CpuPercentage = cpuPercent - s.Memory = float64(v.MemoryStats.Usage) - s.MemoryLimit = float64(v.MemoryStats.Limit) - s.MemoryPercentage = memPercent - s.NetworkRx = float64(v.Network.RxBytes) - s.NetworkTx = float64(v.Network.TxBytes) - s.mu.Unlock() - previousCpu = v.CpuStats.CpuUsage.TotalUsage - previousSystem = v.CpuStats.SystemUsage - u <- nil - } - }() - for { - select { - case <-time.After(2 * time.Second): - // zero out the values if we have not received an update within - // the specified duration. - s.mu.Lock() - s.CpuPercentage = 0 - s.Memory = 0 - s.MemoryPercentage = 0 - s.mu.Unlock() - case err := <-u: - if err != nil { - s.mu.Lock() - s.err = err - s.mu.Unlock() - return - } - } - } -} - -func (s *containerStats) Display(w io.Writer) error { - s.mu.RLock() - defer s.mu.RUnlock() - if s.err != nil { - return s.err - } - fmt.Fprintf(w, "%s\t%.2f%%\t%s/%s\t%.2f%%\t%s/%s\n", - s.Name, - s.CpuPercentage, - units.BytesSize(s.Memory), units.BytesSize(s.MemoryLimit), - s.MemoryPercentage, - units.BytesSize(s.NetworkRx), units.BytesSize(s.NetworkTx)) - return nil -} - -func (cli *DockerCli) CmdStats(args ...string) error { - cmd := cli.Subcmd("stats", "CONTAINER [CONTAINER...]", "Display a live stream of one or more containers' resource usage statistics", true) - cmd.Require(flag.Min, 1) - utils.ParseFlags(cmd, args, true) - - names := cmd.Args() - sort.Strings(names) - var ( - cStats []*containerStats - w = tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0) - ) - printHeader := func() { - fmt.Fprint(cli.out, "\033[2J") - fmt.Fprint(cli.out, "\033[H") - fmt.Fprintln(w, "CONTAINER\tCPU %\tMEM USAGE/LIMIT\tMEM %\tNET I/O") - } - for _, n := range names { - s := &containerStats{Name: n} - cStats = append(cStats, s) - go s.Collect(cli) - } - // do a quick pause so that any failed connections for containers that do not exist are able to be - // evicted before we display the initial or default values. 
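The stats collector above turns two consecutive samples into the CPU column with a simple delta ratio; a worked example of that arithmetic with made-up sample values:

package main

import "fmt"

func main() {
    // Two consecutive samples, in nanoseconds of CPU time; numbers invented.
    var (
        prevContainer uint64 = 4_000_000_000  // container CPU at sample 1
        curContainer  uint64 = 4_300_000_000  // container CPU at sample 2
        prevSystem    uint64 = 80_000_000_000 // host CPU (all cores) at sample 1
        curSystem     uint64 = 81_200_000_000 // host CPU (all cores) at sample 2
        numCPUs              = 4
    )
    cpuDelta := float64(curContainer - prevContainer) // 0.3s of container CPU
    systemDelta := float64(curSystem - prevSystem)    // 1.2s of host CPU
    percent := (cpuDelta / systemDelta) * float64(numCPUs) * 100.0
    fmt.Printf("%.1f%%\n", percent) // 100.0%, i.e. roughly one core kept busy
}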
- time.Sleep(500 * time.Millisecond) - var errs []string - for _, c := range cStats { - c.mu.Lock() - if c.err != nil { - errs = append(errs, fmt.Sprintf("%s: %v", c.Name, c.err)) - } - c.mu.Unlock() - } - if len(errs) > 0 { - return fmt.Errorf("%s", strings.Join(errs, ", ")) - } - for _ = range time.Tick(500 * time.Millisecond) { - printHeader() - toRemove := []int{} - for i, s := range cStats { - if err := s.Display(w); err != nil { - toRemove = append(toRemove, i) - } - } - for j := len(toRemove) - 1; j >= 0; j-- { - i := toRemove[j] - cStats = append(cStats[:i], cStats[i+1:]...) - } - if len(cStats) == 0 { - return nil - } - w.Flush() - } - return nil -} - -func calculateCpuPercent(previousCpu, previousSystem uint64, v *types.Stats) float64 { - var ( - cpuPercent = 0.0 - // calculate the change for the cpu usage of the container in between readings - cpuDelta = float64(v.CpuStats.CpuUsage.TotalUsage - previousCpu) - // calculate the change for the entire system between readings - systemDelta = float64(v.CpuStats.SystemUsage - previousSystem) - ) - - if systemDelta > 0.0 && cpuDelta > 0.0 { - cpuPercent = (cpuDelta / systemDelta) * float64(len(v.CpuStats.CpuUsage.PercpuUsage)) * 100.0 - } - return cpuPercent -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/hijack.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/hijack.go deleted file mode 100644 index 4f89c3a7..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/client/hijack.go +++ /dev/null @@ -1,250 +0,0 @@ -package client - -import ( - "crypto/tls" - "errors" - "fmt" - "io" - "net" - "net/http" - "net/http/httputil" - "os" - "runtime" - "strings" - "time" - - log "github.com/Sirupsen/logrus" - "github.com/docker/docker/api" - "github.com/docker/docker/autogen/dockerversion" - "github.com/docker/docker/pkg/promise" - "github.com/docker/docker/pkg/stdcopy" - "github.com/docker/docker/pkg/term" -) - -type tlsClientCon struct { - *tls.Conn - rawConn net.Conn -} - -func (c *tlsClientCon) CloseWrite() error { - // Go standard tls.Conn doesn't provide the CloseWrite() method so we do it - // on its underlying connection. - if cwc, ok := c.rawConn.(interface { - CloseWrite() error - }); ok { - return cwc.CloseWrite() - } - return nil -} - -func tlsDial(network, addr string, config *tls.Config) (net.Conn, error) { - return tlsDialWithDialer(new(net.Dialer), network, addr, config) -} - -// We need to copy Go's implementation of tls.Dial (pkg/cryptor/tls/tls.go) in -// order to return our custom tlsClientCon struct which holds both the tls.Conn -// object _and_ its underlying raw connection. The rationale for this is that -// we need to be able to close the write end of the connection when attaching, -// which tls.Conn does not provide. -func tlsDialWithDialer(dialer *net.Dialer, network, addr string, config *tls.Config) (net.Conn, error) { - // We want the Timeout and Deadline values from dialer to cover the - // whole process: TCP connection and TLS handshake. This means that we - // also need to start our own timers now. 
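
calculateCpuPercent above is a pure delta computation: the container CPU time consumed between two samples, divided by the total system CPU time over the same window, scaled by the number of CPUs. A standalone restatement with assumed sample values:

package main

import "fmt"

// cpuPercent restates the delta formula: (container delta / system delta) * CPUs * 100.
func cpuPercent(prevCPU, prevSystem, curCPU, curSystem uint64, numCPUs int) float64 {
	cpuDelta := float64(curCPU - prevCPU)
	systemDelta := float64(curSystem - prevSystem)
	if systemDelta > 0.0 && cpuDelta > 0.0 {
		return (cpuDelta / systemDelta) * float64(numCPUs) * 100.0
	}
	return 0.0
}

func main() {
	// 20ms of container CPU out of 1s of system CPU on 4 cores -> 8%.
	fmt.Println(cpuPercent(0, 0, 20000000, 1000000000, 4))
}

Guarding on both deltas being positive avoids a meaningless result on the first sample, which is why the Collect loop above skips the calculation until a previous reading exists.
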
- timeout := dialer.Timeout - - if !dialer.Deadline.IsZero() { - deadlineTimeout := dialer.Deadline.Sub(time.Now()) - if timeout == 0 || deadlineTimeout < timeout { - timeout = deadlineTimeout - } - } - - var errChannel chan error - - if timeout != 0 { - errChannel = make(chan error, 2) - time.AfterFunc(timeout, func() { - errChannel <- errors.New("") - }) - } - - rawConn, err := dialer.Dial(network, addr) - if err != nil { - return nil, err - } - // When we set up a TCP connection for hijack, there could be long periods - // of inactivity (a long running command with no output) that in certain - // network setups may cause ECONNTIMEOUT, leaving the client in an unknown - // state. Setting TCP KeepAlive on the socket connection will prohibit - // ECONNTIMEOUT unless the socket connection truly is broken - if tcpConn, ok := rawConn.(*net.TCPConn); ok { - tcpConn.SetKeepAlive(true) - tcpConn.SetKeepAlivePeriod(30 * time.Second) - } - - colonPos := strings.LastIndex(addr, ":") - if colonPos == -1 { - colonPos = len(addr) - } - hostname := addr[:colonPos] - - // If no ServerName is set, infer the ServerName - // from the hostname we're connecting to. - if config.ServerName == "" { - // Make a copy to avoid polluting argument or default. - c := *config - c.ServerName = hostname - config = &c - } - - conn := tls.Client(rawConn, config) - - if timeout == 0 { - err = conn.Handshake() - } else { - go func() { - errChannel <- conn.Handshake() - }() - - err = <-errChannel - } - - if err != nil { - rawConn.Close() - return nil, err - } - - // This is Docker difference with standard's crypto/tls package: returned a - // wrapper which holds both the TLS and raw connections. - return &tlsClientCon{conn, rawConn}, nil -} - -func (cli *DockerCli) dial() (net.Conn, error) { - if cli.tlsConfig != nil && cli.proto != "unix" { - // Notice this isn't Go standard's tls.Dial function - return tlsDial(cli.proto, cli.addr, cli.tlsConfig) - } - return net.Dial(cli.proto, cli.addr) -} - -func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.ReadCloser, stdout, stderr io.Writer, started chan io.Closer, data interface{}) error { - defer func() { - if started != nil { - close(started) - } - }() - - params, err := cli.encodeData(data) - if err != nil { - return err - } - req, err := http.NewRequest(method, fmt.Sprintf("/v%s%s", api.APIVERSION, path), params) - if err != nil { - return err - } - req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION) - req.Header.Set("Content-Type", "text/plain") - req.Header.Set("Connection", "Upgrade") - req.Header.Set("Upgrade", "tcp") - req.Host = cli.addr - - dial, err := cli.dial() - // When we set up a TCP connection for hijack, there could be long periods - // of inactivity (a long running command with no output) that in certain - // network setups may cause ECONNTIMEOUT, leaving the client in an unknown - // state. Setting TCP KeepAlive on the socket connection will prohibit - // ECONNTIMEOUT unless the socket connection truly is broken - if tcpConn, ok := dial.(*net.TCPConn); ok { - tcpConn.SetKeepAlive(true) - tcpConn.SetKeepAlivePeriod(30 * time.Second) - } - if err != nil { - if strings.Contains(err.Error(), "connection refused") { - return fmt.Errorf("Cannot connect to the Docker daemon. 
Is 'docker -d' running on this host?") - } - return err - } - clientconn := httputil.NewClientConn(dial, nil) - defer clientconn.Close() - - // Server hijacks the connection, error 'connection closed' expected - clientconn.Do(req) - - rwc, br := clientconn.Hijack() - defer rwc.Close() - - if started != nil { - started <- rwc - } - - var receiveStdout chan error - - var oldState *term.State - - if in != nil && setRawTerminal && cli.isTerminalIn && os.Getenv("NORAW") == "" { - oldState, err = term.SetRawTerminal(cli.inFd) - if err != nil { - return err - } - defer term.RestoreTerminal(cli.inFd, oldState) - } - - if stdout != nil || stderr != nil { - receiveStdout = promise.Go(func() (err error) { - defer func() { - if in != nil { - if setRawTerminal && cli.isTerminalIn { - term.RestoreTerminal(cli.inFd, oldState) - } - // For some reason this Close call blocks on darwin.. - // As the client exists right after, simply discard the close - // until we find a better solution. - if runtime.GOOS != "darwin" { - in.Close() - } - } - }() - - // When TTY is ON, use regular copy - if setRawTerminal && stdout != nil { - _, err = io.Copy(stdout, br) - } else { - _, err = stdcopy.StdCopy(stdout, stderr, br) - } - log.Debugf("[hijack] End of stdout") - return err - }) - } - - sendStdin := promise.Go(func() error { - if in != nil { - io.Copy(rwc, in) - log.Debugf("[hijack] End of stdin") - } - - if conn, ok := rwc.(interface { - CloseWrite() error - }); ok { - if err := conn.CloseWrite(); err != nil { - log.Debugf("Couldn't send EOF: %s", err) - } - } - // Discard errors due to pipe interruption - return nil - }) - - if stdout != nil || stderr != nil { - if err := <-receiveStdout; err != nil { - log.Debugf("Error receiveStdout: %s", err) - return err - } - } - - if !cli.isTerminalIn { - if err := <-sendStdin; err != nil { - log.Debugf("Error sendStdin: %s", err) - return err - } - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/client/utils.go b/Godeps/_workspace/src/github.com/docker/docker/api/client/utils.go deleted file mode 100644 index 103bfdec..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/client/utils.go +++ /dev/null @@ -1,296 +0,0 @@ -package client - -import ( - "bytes" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "os" - gosignal "os/signal" - "strconv" - "strings" - - log "github.com/Sirupsen/logrus" - "github.com/docker/docker/api" - "github.com/docker/docker/autogen/dockerversion" - "github.com/docker/docker/engine" - "github.com/docker/docker/pkg/signal" - "github.com/docker/docker/pkg/stdcopy" - "github.com/docker/docker/pkg/term" - "github.com/docker/docker/registry" - "github.com/docker/docker/utils" -) - -var ( - ErrConnectionRefused = errors.New("Cannot connect to the Docker daemon. 
Is 'docker -d' running on this host?") -) - -func (cli *DockerCli) HTTPClient() *http.Client { - return &http.Client{Transport: cli.transport} -} - -func (cli *DockerCli) encodeData(data interface{}) (*bytes.Buffer, error) { - params := bytes.NewBuffer(nil) - if data != nil { - if env, ok := data.(engine.Env); ok { - if err := env.Encode(params); err != nil { - return nil, err - } - } else { - buf, err := json.Marshal(data) - if err != nil { - return nil, err - } - if _, err := params.Write(buf); err != nil { - return nil, err - } - } - } - return params, nil -} - -func (cli *DockerCli) call(method, path string, data interface{}, passAuthInfo bool) (io.ReadCloser, int, error) { - params, err := cli.encodeData(data) - if err != nil { - return nil, -1, err - } - req, err := http.NewRequest(method, fmt.Sprintf("/v%s%s", api.APIVERSION, path), params) - if err != nil { - return nil, -1, err - } - if passAuthInfo { - cli.LoadConfigFile() - // Resolve the Auth config relevant for this server - authConfig := cli.configFile.Configs[registry.IndexServerAddress()] - getHeaders := func(authConfig registry.AuthConfig) (map[string][]string, error) { - buf, err := json.Marshal(authConfig) - if err != nil { - return nil, err - } - registryAuthHeader := []string{ - base64.URLEncoding.EncodeToString(buf), - } - return map[string][]string{"X-Registry-Auth": registryAuthHeader}, nil - } - if headers, err := getHeaders(authConfig); err == nil && headers != nil { - for k, v := range headers { - req.Header[k] = v - } - } - } - req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION) - req.URL.Host = cli.addr - req.URL.Scheme = cli.scheme - if data != nil { - req.Header.Set("Content-Type", "application/json") - } else if method == "POST" { - req.Header.Set("Content-Type", "text/plain") - } - resp, err := cli.HTTPClient().Do(req) - if err != nil { - if strings.Contains(err.Error(), "connection refused") { - return nil, -1, ErrConnectionRefused - } - - if cli.tlsConfig == nil { - return nil, -1, fmt.Errorf("%v. 
Are you trying to connect to a TLS-enabled daemon without TLS?", err) - } - return nil, -1, fmt.Errorf("An error occurred trying to connect: %v", err) - - } - - if resp.StatusCode < 200 || resp.StatusCode >= 400 { - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, -1, err - } - if len(body) == 0 { - return nil, resp.StatusCode, fmt.Errorf("Error: request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(resp.StatusCode), req.URL) - } - return nil, resp.StatusCode, fmt.Errorf("Error response from daemon: %s", bytes.TrimSpace(body)) - } - - return resp.Body, resp.StatusCode, nil -} - -func (cli *DockerCli) stream(method, path string, in io.Reader, out io.Writer, headers map[string][]string) error { - return cli.streamHelper(method, path, true, in, out, nil, headers) -} - -func (cli *DockerCli) streamHelper(method, path string, setRawTerminal bool, in io.Reader, stdout, stderr io.Writer, headers map[string][]string) error { - if (method == "POST" || method == "PUT") && in == nil { - in = bytes.NewReader([]byte{}) - } - - req, err := http.NewRequest(method, fmt.Sprintf("/v%s%s", api.APIVERSION, path), in) - if err != nil { - return err - } - req.Header.Set("User-Agent", "Docker-Client/"+dockerversion.VERSION) - req.URL.Host = cli.addr - req.URL.Scheme = cli.scheme - if method == "POST" { - req.Header.Set("Content-Type", "text/plain") - } - - if headers != nil { - for k, v := range headers { - req.Header[k] = v - } - } - resp, err := cli.HTTPClient().Do(req) - if err != nil { - if strings.Contains(err.Error(), "connection refused") { - return fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker -d' running on this host?") - } - return err - } - defer resp.Body.Close() - - if resp.StatusCode < 200 || resp.StatusCode >= 400 { - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return err - } - if len(body) == 0 { - return fmt.Errorf("Error :%s", http.StatusText(resp.StatusCode)) - } - return fmt.Errorf("Error: %s", bytes.TrimSpace(body)) - } - - if api.MatchesContentType(resp.Header.Get("Content-Type"), "application/json") { - return utils.DisplayJSONMessagesStream(resp.Body, stdout, cli.outFd, cli.isTerminalOut) - } - if stdout != nil || stderr != nil { - // When TTY is ON, use regular copy - if setRawTerminal { - _, err = io.Copy(stdout, resp.Body) - } else { - _, err = stdcopy.StdCopy(stdout, stderr, resp.Body) - } - log.Debugf("[stream] End of stdout") - return err - } - return nil -} - -func (cli *DockerCli) resizeTty(id string, isExec bool) { - height, width := cli.getTtySize() - if height == 0 && width == 0 { - return - } - v := url.Values{} - v.Set("h", strconv.Itoa(height)) - v.Set("w", strconv.Itoa(width)) - - path := "" - if !isExec { - path = "/containers/" + id + "/resize?" - } else { - path = "/exec/" + id + "/resize?" - } - - if _, _, err := readBody(cli.call("POST", path+v.Encode(), nil, false)); err != nil { - log.Debugf("Error resize: %s", err) - } -} - -func waitForExit(cli *DockerCli, containerId string) (int, error) { - stream, _, err := cli.call("POST", "/containers/"+containerId+"/wait", nil, false) - if err != nil { - return -1, err - } - - var out engine.Env - if err := out.Decode(stream); err != nil { - return -1, err - } - return out.GetInt("StatusCode"), nil -} - -// getExitCode perform an inspect on the container. It returns -// the running state and the exit code. 
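
resizeTty above boils down to query-string construction plus a status-code check on the response; a minimal sketch of the same request shape against a plain net/http client. The TCP address, API version prefix and container ID are illustrative assumptions, not the CLI's configured transport:

package main

import (
	"fmt"
	"net/http"
	"net/url"
	"strconv"
)

// postResize mirrors the shape of the resize call: h/w as query parameters,
// an empty POST body, and only the status code checked.
func postResize(base, containerID string, height, width int) error {
	v := url.Values{}
	v.Set("h", strconv.Itoa(height))
	v.Set("w", strconv.Itoa(width))
	resp, err := http.Post(base+"/containers/"+containerID+"/resize?"+v.Encode(), "text/plain", nil)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
		return fmt.Errorf("resize failed: %s", resp.Status)
	}
	return nil
}

func main() {
	// Example only; assumes a daemon reachable over TCP at this address.
	_ = postResize("http://127.0.0.1:2375/v1.18", "abcdef", 40, 120)
}
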
-func getExitCode(cli *DockerCli, containerId string) (bool, int, error) { - stream, _, err := cli.call("GET", "/containers/"+containerId+"/json", nil, false) - if err != nil { - // If we can't connect, then the daemon probably died. - if err != ErrConnectionRefused { - return false, -1, err - } - return false, -1, nil - } - - var result engine.Env - if err := result.Decode(stream); err != nil { - return false, -1, err - } - - state := result.GetSubEnv("State") - return state.GetBool("Running"), state.GetInt("ExitCode"), nil -} - -// getExecExitCode perform an inspect on the exec command. It returns -// the running state and the exit code. -func getExecExitCode(cli *DockerCli, execId string) (bool, int, error) { - stream, _, err := cli.call("GET", "/exec/"+execId+"/json", nil, false) - if err != nil { - // If we can't connect, then the daemon probably died. - if err != ErrConnectionRefused { - return false, -1, err - } - return false, -1, nil - } - - var result engine.Env - if err := result.Decode(stream); err != nil { - return false, -1, err - } - - return result.GetBool("Running"), result.GetInt("ExitCode"), nil -} - -func (cli *DockerCli) monitorTtySize(id string, isExec bool) error { - cli.resizeTty(id, isExec) - - sigchan := make(chan os.Signal, 1) - gosignal.Notify(sigchan, signal.SIGWINCH) - go func() { - for _ = range sigchan { - cli.resizeTty(id, isExec) - } - }() - return nil -} - -func (cli *DockerCli) getTtySize() (int, int) { - if !cli.isTerminalOut { - return 0, 0 - } - ws, err := term.GetWinsize(cli.outFd) - if err != nil { - log.Debugf("Error getting size: %s", err) - if ws == nil { - return 0, 0 - } - } - return int(ws.Height), int(ws.Width) -} - -func readBody(stream io.ReadCloser, statusCode int, err error) ([]byte, int, error) { - if stream != nil { - defer stream.Close() - } - if err != nil { - return nil, statusCode, err - } - body, err := ioutil.ReadAll(stream) - if err != nil { - return nil, -1, err - } - return body, statusCode, nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/common.go b/Godeps/_workspace/src/github.com/docker/docker/api/common.go deleted file mode 100644 index f6a0bc48..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/common.go +++ /dev/null @@ -1,132 +0,0 @@ -package api - -import ( - "fmt" - "mime" - "os" - "path/filepath" - "strings" - - log "github.com/Sirupsen/logrus" - "github.com/docker/docker/engine" - "github.com/docker/docker/pkg/parsers" - "github.com/docker/docker/pkg/version" - "github.com/docker/libtrust" -) - -const ( - APIVERSION version.Version = "1.18" - DEFAULTHTTPHOST = "127.0.0.1" - DEFAULTUNIXSOCKET = "/var/run/docker.sock" - DefaultDockerfileName string = "Dockerfile" -) - -func ValidateHost(val string) (string, error) { - host, err := parsers.ParseHost(DEFAULTHTTPHOST, DEFAULTUNIXSOCKET, val) - if err != nil { - return val, err - } - return host, nil -} - -// TODO remove, used on < 1.5 in getContainersJSON -func DisplayablePorts(ports *engine.Table) string { - var ( - result = []string{} - hostMappings = []string{} - firstInGroupMap map[string]int - lastInGroupMap map[string]int - ) - firstInGroupMap = make(map[string]int) - lastInGroupMap = make(map[string]int) - ports.SetKey("PrivatePort") - ports.Sort() - for _, port := range ports.Data { - var ( - current = port.GetInt("PrivatePort") - portKey = port.Get("Type") - firstInGroup int - lastInGroup int - ) - if port.Get("IP") != "" { - if port.GetInt("PublicPort") != current { - hostMappings = append(hostMappings, 
fmt.Sprintf("%s:%d->%d/%s", port.Get("IP"), port.GetInt("PublicPort"), port.GetInt("PrivatePort"), port.Get("Type"))) - continue - } - portKey = fmt.Sprintf("%s/%s", port.Get("IP"), port.Get("Type")) - } - firstInGroup = firstInGroupMap[portKey] - lastInGroup = lastInGroupMap[portKey] - - if firstInGroup == 0 { - firstInGroupMap[portKey] = current - lastInGroupMap[portKey] = current - continue - } - - if current == (lastInGroup + 1) { - lastInGroupMap[portKey] = current - continue - } - result = append(result, FormGroup(portKey, firstInGroup, lastInGroup)) - firstInGroupMap[portKey] = current - lastInGroupMap[portKey] = current - } - for portKey, firstInGroup := range firstInGroupMap { - result = append(result, FormGroup(portKey, firstInGroup, lastInGroupMap[portKey])) - } - result = append(result, hostMappings...) - return strings.Join(result, ", ") -} - -func FormGroup(key string, start, last int) string { - var ( - group string - parts = strings.Split(key, "/") - groupType = parts[0] - ip = "" - ) - if len(parts) > 1 { - ip = parts[0] - groupType = parts[1] - } - if start == last { - group = fmt.Sprintf("%d", start) - } else { - group = fmt.Sprintf("%d-%d", start, last) - } - if ip != "" { - group = fmt.Sprintf("%s:%s->%s", ip, group, group) - } - return fmt.Sprintf("%s/%s", group, groupType) -} - -func MatchesContentType(contentType, expectedType string) bool { - mimetype, _, err := mime.ParseMediaType(contentType) - if err != nil { - log.Errorf("Error parsing media type: %s error: %v", contentType, err) - } - return err == nil && mimetype == expectedType -} - -// LoadOrCreateTrustKey attempts to load the libtrust key at the given path, -// otherwise generates a new one -func LoadOrCreateTrustKey(trustKeyPath string) (libtrust.PrivateKey, error) { - err := os.MkdirAll(filepath.Dir(trustKeyPath), 0700) - if err != nil { - return nil, err - } - trustKey, err := libtrust.LoadKeyFile(trustKeyPath) - if err == libtrust.ErrKeyFileDoesNotExist { - trustKey, err = libtrust.GenerateECP256PrivateKey() - if err != nil { - return nil, fmt.Errorf("Error generating key: %s", err) - } - if err := libtrust.SaveKey(trustKeyPath, trustKey); err != nil { - return nil, fmt.Errorf("Error saving key file: %s", err) - } - } else if err != nil { - return nil, fmt.Errorf("Error loading key file %s: %s", trustKeyPath, err) - } - return trustKey, nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/server/server.go b/Godeps/_workspace/src/github.com/docker/docker/api/server/server.go deleted file mode 100644 index 1b9c5562..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/server/server.go +++ /dev/null @@ -1,1624 +0,0 @@ -package server - -import ( - "bufio" - "bytes" - - "encoding/base64" - "encoding/json" - "expvar" - "fmt" - "io" - "io/ioutil" - "net" - "net/http" - "net/http/pprof" - "os" - "strconv" - "strings" - - "crypto/tls" - "crypto/x509" - - "code.google.com/p/go.net/websocket" - "github.com/docker/libcontainer/user" - "github.com/gorilla/mux" - - log "github.com/Sirupsen/logrus" - "github.com/docker/docker/api" - "github.com/docker/docker/api/types" - "github.com/docker/docker/daemon/networkdriver/portallocator" - "github.com/docker/docker/engine" - "github.com/docker/docker/pkg/listenbuffer" - "github.com/docker/docker/pkg/parsers" - "github.com/docker/docker/pkg/stdcopy" - "github.com/docker/docker/pkg/version" - "github.com/docker/docker/registry" - "github.com/docker/docker/utils" -) - -var ( - activationLock chan struct{} -) - -type HttpServer struct { - srv 
*http.Server - l net.Listener -} - -func (s *HttpServer) Serve() error { - return s.srv.Serve(s.l) -} -func (s *HttpServer) Close() error { - return s.l.Close() -} - -type HttpApiFunc func(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error - -func hijackServer(w http.ResponseWriter) (io.ReadCloser, io.Writer, error) { - conn, _, err := w.(http.Hijacker).Hijack() - if err != nil { - return nil, nil, err - } - // Flush the options to make sure the client sets the raw mode - conn.Write([]byte{}) - return conn, conn, nil -} - -func closeStreams(streams ...interface{}) { - for _, stream := range streams { - if tcpc, ok := stream.(interface { - CloseWrite() error - }); ok { - tcpc.CloseWrite() - } else if closer, ok := stream.(io.Closer); ok { - closer.Close() - } - } -} - -// Check to make sure request's Content-Type is application/json -func checkForJson(r *http.Request) error { - ct := r.Header.Get("Content-Type") - - // No Content-Type header is ok as long as there's no Body - if ct == "" { - if r.Body == nil || r.ContentLength == 0 { - return nil - } - } - - // Otherwise it better be json - if api.MatchesContentType(ct, "application/json") { - return nil - } - return fmt.Errorf("Content-Type specified (%s) must be 'application/json'", ct) -} - -//If we don't do this, POST method without Content-type (even with empty body) will fail -func parseForm(r *http.Request) error { - if r == nil { - return nil - } - if err := r.ParseForm(); err != nil && !strings.HasPrefix(err.Error(), "mime:") { - return err - } - return nil -} - -func parseMultipartForm(r *http.Request) error { - if err := r.ParseMultipartForm(4096); err != nil && !strings.HasPrefix(err.Error(), "mime:") { - return err - } - return nil -} - -func httpError(w http.ResponseWriter, err error) { - statusCode := http.StatusInternalServerError - // FIXME: this is brittle and should not be necessary. - // If we need to differentiate between different possible error types, we should - // create appropriate error types with clearly defined meaning. - errStr := strings.ToLower(err.Error()) - if strings.Contains(errStr, "no such") { - statusCode = http.StatusNotFound - } else if strings.Contains(errStr, "bad parameter") { - statusCode = http.StatusBadRequest - } else if strings.Contains(errStr, "conflict") { - statusCode = http.StatusConflict - } else if strings.Contains(errStr, "impossible") { - statusCode = http.StatusNotAcceptable - } else if strings.Contains(errStr, "wrong login/password") { - statusCode = http.StatusUnauthorized - } else if strings.Contains(errStr, "hasn't been activated") { - statusCode = http.StatusForbidden - } - - if err != nil { - log.Errorf("HTTP Error: statusCode=%d %v", statusCode, err) - http.Error(w, err.Error(), statusCode) - } -} - -// writeJSONEnv writes the engine.Env values to the http response stream as a -// json encoded body. -func writeJSONEnv(w http.ResponseWriter, code int, v engine.Env) error { - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(code) - return v.Encode(w) -} - -// writeJSON writes the value v to the http response stream as json with standard -// json encoding. 
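
checkForJson above accepts any Content-Type whose media type parses to application/json, regardless of parameters such as charset; that is the same behaviour as the api.MatchesContentType helper earlier in this patch. A small self-contained sketch of that check (matchesContentType is an illustrative name):

package main

import (
	"fmt"
	"mime"
)

// matchesContentType reports whether a Content-Type header, ignoring any
// parameters such as charset, names the expected media type.
func matchesContentType(contentType, expected string) bool {
	mt, _, err := mime.ParseMediaType(contentType)
	return err == nil && mt == expected
}

func main() {
	fmt.Println(matchesContentType("application/json; charset=utf-8", "application/json")) // true
	fmt.Println(matchesContentType("text/plain", "application/json"))                      // false
}
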
-func writeJSON(w http.ResponseWriter, code int, v interface{}) error { - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(code) - return json.NewEncoder(w).Encode(v) -} - -func streamJSON(job *engine.Job, w http.ResponseWriter, flush bool) { - w.Header().Set("Content-Type", "application/json") - if flush { - job.Stdout.Add(utils.NewWriteFlusher(w)) - } else { - job.Stdout.Add(w) - } -} - -func getBoolParam(value string) (bool, error) { - if value == "" { - return false, nil - } - ret, err := strconv.ParseBool(value) - if err != nil { - return false, fmt.Errorf("Bad parameter") - } - return ret, nil -} - -func postAuth(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - var ( - authConfig, err = ioutil.ReadAll(r.Body) - job = eng.Job("auth") - stdoutBuffer = bytes.NewBuffer(nil) - ) - if err != nil { - return err - } - job.Setenv("authConfig", string(authConfig)) - job.Stdout.Add(stdoutBuffer) - if err = job.Run(); err != nil { - return err - } - if status := engine.Tail(stdoutBuffer, 1); status != "" { - var env engine.Env - env.Set("Status", status) - return writeJSONEnv(w, http.StatusOK, env) - } - w.WriteHeader(http.StatusNoContent) - return nil -} - -func getVersion(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - w.Header().Set("Content-Type", "application/json") - eng.ServeHTTP(w, r) - return nil -} - -func postContainersKill(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if vars == nil { - return fmt.Errorf("Missing parameter") - } - if err := parseForm(r); err != nil { - return err - } - job := eng.Job("kill", vars["name"]) - if sig := r.Form.Get("signal"); sig != "" { - job.Args = append(job.Args, sig) - } - if err := job.Run(); err != nil { - return err - } - w.WriteHeader(http.StatusNoContent) - return nil -} - -func postContainersPause(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if vars == nil { - return fmt.Errorf("Missing parameter") - } - if err := parseForm(r); err != nil { - return err - } - job := eng.Job("pause", vars["name"]) - if err := job.Run(); err != nil { - return err - } - w.WriteHeader(http.StatusNoContent) - return nil -} - -func postContainersUnpause(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if vars == nil { - return fmt.Errorf("Missing parameter") - } - if err := parseForm(r); err != nil { - return err - } - job := eng.Job("unpause", vars["name"]) - if err := job.Run(); err != nil { - return err - } - w.WriteHeader(http.StatusNoContent) - return nil -} - -func getContainersExport(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if vars == nil { - return fmt.Errorf("Missing parameter") - } - job := eng.Job("export", vars["name"]) - job.Stdout.Add(w) - if err := job.Run(); err != nil { - return err - } - return nil -} - -func getImagesJSON(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return err - } - - var ( - err error - outs *engine.Table - job = eng.Job("images") - ) - - job.Setenv("filters", r.Form.Get("filters")) - // FIXME this parameter could just be a match filter - job.Setenv("filter", 
r.Form.Get("filter")) - job.Setenv("all", r.Form.Get("all")) - - if version.GreaterThanOrEqualTo("1.7") { - streamJSON(job, w, false) - } else if outs, err = job.Stdout.AddListTable(); err != nil { - return err - } - - if err := job.Run(); err != nil { - return err - } - - if version.LessThan("1.7") && outs != nil { // Convert to legacy format - outsLegacy := engine.NewTable("Created", 0) - for _, out := range outs.Data { - for _, repoTag := range out.GetList("RepoTags") { - repo, tag := parsers.ParseRepositoryTag(repoTag) - outLegacy := &engine.Env{} - outLegacy.Set("Repository", repo) - outLegacy.SetJson("Tag", tag) - outLegacy.Set("Id", out.Get("Id")) - outLegacy.SetInt64("Created", out.GetInt64("Created")) - outLegacy.SetInt64("Size", out.GetInt64("Size")) - outLegacy.SetInt64("VirtualSize", out.GetInt64("VirtualSize")) - outsLegacy.Add(outLegacy) - } - } - w.Header().Set("Content-Type", "application/json") - if _, err := outsLegacy.WriteListTo(w); err != nil { - return err - } - } - return nil -} - -func getImagesViz(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if version.GreaterThan("1.6") { - w.WriteHeader(http.StatusNotFound) - return fmt.Errorf("This is now implemented in the client.") - } - eng.ServeHTTP(w, r) - return nil -} - -func getInfo(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - w.Header().Set("Content-Type", "application/json") - eng.ServeHTTP(w, r) - return nil -} - -func getEvents(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return err - } - - var job = eng.Job("events") - streamJSON(job, w, true) - job.Setenv("since", r.Form.Get("since")) - job.Setenv("until", r.Form.Get("until")) - job.Setenv("filters", r.Form.Get("filters")) - return job.Run() -} - -func getImagesHistory(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if vars == nil { - return fmt.Errorf("Missing parameter") - } - - var job = eng.Job("history", vars["name"]) - streamJSON(job, w, false) - - if err := job.Run(); err != nil { - return err - } - return nil -} - -func getContainersChanges(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if vars == nil { - return fmt.Errorf("Missing parameter") - } - var job = eng.Job("container_changes", vars["name"]) - streamJSON(job, w, false) - - return job.Run() -} - -func getContainersTop(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if version.LessThan("1.4") { - return fmt.Errorf("top was improved a lot since 1.3, Please upgrade your docker client.") - } - if vars == nil { - return fmt.Errorf("Missing parameter") - } - if err := parseForm(r); err != nil { - return err - } - - job := eng.Job("top", vars["name"], r.Form.Get("ps_args")) - streamJSON(job, w, false) - return job.Run() -} - -func getContainersJSON(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return err - } - var ( - err error - outs *engine.Table - job = eng.Job("containers") - ) - - job.Setenv("all", r.Form.Get("all")) - job.Setenv("size", r.Form.Get("size")) - job.Setenv("since", r.Form.Get("since")) - job.Setenv("before", 
r.Form.Get("before")) - job.Setenv("limit", r.Form.Get("limit")) - job.Setenv("filters", r.Form.Get("filters")) - - if version.GreaterThanOrEqualTo("1.5") { - streamJSON(job, w, false) - } else if outs, err = job.Stdout.AddTable(); err != nil { - return err - } - if err = job.Run(); err != nil { - return err - } - if version.LessThan("1.5") { // Convert to legacy format - for _, out := range outs.Data { - ports := engine.NewTable("", 0) - ports.ReadListFrom([]byte(out.Get("Ports"))) - out.Set("Ports", api.DisplayablePorts(ports)) - } - w.Header().Set("Content-Type", "application/json") - if _, err = outs.WriteListTo(w); err != nil { - return err - } - } - return nil -} - -func getContainersStats(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return err - } - if vars == nil { - return fmt.Errorf("Missing parameter") - } - name := vars["name"] - job := eng.Job("container_stats", name) - streamJSON(job, w, true) - return job.Run() -} - -func getContainersLogs(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return err - } - if vars == nil { - return fmt.Errorf("Missing parameter") - } - - var ( - inspectJob = eng.Job("container_inspect", vars["name"]) - logsJob = eng.Job("logs", vars["name"]) - c, err = inspectJob.Stdout.AddEnv() - ) - if err != nil { - return err - } - logsJob.Setenv("follow", r.Form.Get("follow")) - logsJob.Setenv("tail", r.Form.Get("tail")) - logsJob.Setenv("stdout", r.Form.Get("stdout")) - logsJob.Setenv("stderr", r.Form.Get("stderr")) - logsJob.Setenv("timestamps", r.Form.Get("timestamps")) - // Validate args here, because we can't return not StatusOK after job.Run() call - stdout, stderr := logsJob.GetenvBool("stdout"), logsJob.GetenvBool("stderr") - if !(stdout || stderr) { - return fmt.Errorf("Bad parameters: you must choose at least one stream") - } - if err = inspectJob.Run(); err != nil { - return err - } - - var outStream, errStream io.Writer - outStream = utils.NewWriteFlusher(w) - - if c.GetSubEnv("Config") != nil && !c.GetSubEnv("Config").GetBool("Tty") && version.GreaterThanOrEqualTo("1.6") { - errStream = stdcopy.NewStdWriter(outStream, stdcopy.Stderr) - outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) - } else { - errStream = outStream - } - - logsJob.Stdout.Add(outStream) - logsJob.Stderr.Set(errStream) - if err := logsJob.Run(); err != nil { - fmt.Fprintf(outStream, "Error running logs job: %s\n", err) - } - return nil -} - -func postImagesTag(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return err - } - if vars == nil { - return fmt.Errorf("Missing parameter") - } - - job := eng.Job("tag", vars["name"], r.Form.Get("repo"), r.Form.Get("tag")) - job.Setenv("force", r.Form.Get("force")) - if err := job.Run(); err != nil { - return err - } - w.WriteHeader(http.StatusCreated) - return nil -} - -func postCommit(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return err - } - var ( - config engine.Env - env engine.Env - job = eng.Job("commit", r.Form.Get("container")) - stdoutBuffer = bytes.NewBuffer(nil) - ) - - if err := checkForJson(r); err != nil { - return err - } - - if err := config.Decode(r.Body); err != nil 
{ - log.Errorf("%s", err) - } - - if r.FormValue("pause") == "" && version.GreaterThanOrEqualTo("1.13") { - job.Setenv("pause", "1") - } else { - job.Setenv("pause", r.FormValue("pause")) - } - - job.Setenv("repo", r.Form.Get("repo")) - job.Setenv("tag", r.Form.Get("tag")) - job.Setenv("author", r.Form.Get("author")) - job.Setenv("comment", r.Form.Get("comment")) - job.SetenvList("changes", r.Form["changes"]) - job.SetenvSubEnv("config", &config) - - job.Stdout.Add(stdoutBuffer) - if err := job.Run(); err != nil { - return err - } - env.Set("Id", engine.Tail(stdoutBuffer, 1)) - return writeJSONEnv(w, http.StatusCreated, env) -} - -// Creates an image from Pull or from Import -func postImagesCreate(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return err - } - - var ( - image = r.Form.Get("fromImage") - repo = r.Form.Get("repo") - tag = r.Form.Get("tag") - job *engine.Job - ) - authEncoded := r.Header.Get("X-Registry-Auth") - authConfig := ®istry.AuthConfig{} - if authEncoded != "" { - authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) - if err := json.NewDecoder(authJson).Decode(authConfig); err != nil { - // for a pull it is not an error if no auth was given - // to increase compatibility with the existing api it is defaulting to be empty - authConfig = ®istry.AuthConfig{} - } - } - if image != "" { //pull - if tag == "" { - image, tag = parsers.ParseRepositoryTag(image) - } - metaHeaders := map[string][]string{} - for k, v := range r.Header { - if strings.HasPrefix(k, "X-Meta-") { - metaHeaders[k] = v - } - } - job = eng.Job("pull", image, tag) - job.SetenvBool("parallel", version.GreaterThan("1.3")) - job.SetenvJson("metaHeaders", metaHeaders) - job.SetenvJson("authConfig", authConfig) - } else { //import - if tag == "" { - repo, tag = parsers.ParseRepositoryTag(repo) - } - job = eng.Job("import", r.Form.Get("fromSrc"), repo, tag) - job.Stdin.Add(r.Body) - job.SetenvList("changes", r.Form["changes"]) - } - - if version.GreaterThan("1.0") { - job.SetenvBool("json", true) - streamJSON(job, w, true) - } else { - job.Stdout.Add(utils.NewWriteFlusher(w)) - } - if err := job.Run(); err != nil { - if !job.Stdout.Used() { - return err - } - sf := utils.NewStreamFormatter(version.GreaterThan("1.0")) - w.Write(sf.FormatError(err)) - } - - return nil -} - -func getImagesSearch(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return err - } - var ( - authEncoded = r.Header.Get("X-Registry-Auth") - authConfig = ®istry.AuthConfig{} - metaHeaders = map[string][]string{} - ) - - if authEncoded != "" { - authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) - if err := json.NewDecoder(authJson).Decode(authConfig); err != nil { - // for a search it is not an error if no auth was given - // to increase compatibility with the existing api it is defaulting to be empty - authConfig = ®istry.AuthConfig{} - } - } - for k, v := range r.Header { - if strings.HasPrefix(k, "X-Meta-") { - metaHeaders[k] = v - } - } - - var job = eng.Job("search", r.Form.Get("term")) - job.SetenvJson("metaHeaders", metaHeaders) - job.SetenvJson("authConfig", authConfig) - streamJSON(job, w, false) - - return job.Run() -} - -func postImagesPush(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { 
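
The pull and search handlers above read credentials from the X-Registry-Auth header: base64url-encoded JSON, decoded leniently so that a malformed header degrades to empty credentials instead of failing the request. A reduced sketch of that decode step; authConfig here is a cut-down stand-in with assumed field names, not the registry package's type:

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"strings"
)

// authConfig is a stand-in for the registry auth payload carried in the header.
type authConfig struct {
	Username string `json:"username"`
	Password string `json:"password"`
}

func decodeAuthHeader(header string) *authConfig {
	cfg := &authConfig{}
	if header == "" {
		return cfg
	}
	dec := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, strings.NewReader(header)))
	if err := dec.Decode(cfg); err != nil {
		// The handlers above fall back to empty credentials on a bad header
		// rather than rejecting the request.
		return &authConfig{}
	}
	return cfg
}

func main() {
	raw := base64.URLEncoding.EncodeToString([]byte(`{"username":"demo","password":"secret"}`))
	fmt.Println(decodeAuthHeader(raw).Username)
}
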
- if vars == nil { - return fmt.Errorf("Missing parameter") - } - - metaHeaders := map[string][]string{} - for k, v := range r.Header { - if strings.HasPrefix(k, "X-Meta-") { - metaHeaders[k] = v - } - } - if err := parseForm(r); err != nil { - return err - } - authConfig := ®istry.AuthConfig{} - - authEncoded := r.Header.Get("X-Registry-Auth") - if authEncoded != "" { - // the new format is to handle the authConfig as a header - authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) - if err := json.NewDecoder(authJson).Decode(authConfig); err != nil { - // to increase compatibility to existing api it is defaulting to be empty - authConfig = ®istry.AuthConfig{} - } - } else { - // the old format is supported for compatibility if there was no authConfig header - if err := json.NewDecoder(r.Body).Decode(authConfig); err != nil { - return err - } - } - - job := eng.Job("push", vars["name"]) - job.SetenvJson("metaHeaders", metaHeaders) - job.SetenvJson("authConfig", authConfig) - job.Setenv("tag", r.Form.Get("tag")) - if version.GreaterThan("1.0") { - job.SetenvBool("json", true) - streamJSON(job, w, true) - } else { - job.Stdout.Add(utils.NewWriteFlusher(w)) - } - - if err := job.Run(); err != nil { - if !job.Stdout.Used() { - return err - } - sf := utils.NewStreamFormatter(version.GreaterThan("1.0")) - w.Write(sf.FormatError(err)) - } - return nil -} - -func getImagesGet(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if vars == nil { - return fmt.Errorf("Missing parameter") - } - if err := parseForm(r); err != nil { - return err - } - if version.GreaterThan("1.0") { - w.Header().Set("Content-Type", "application/x-tar") - } - var job *engine.Job - if name, ok := vars["name"]; ok { - job = eng.Job("image_export", name) - } else { - job = eng.Job("image_export", r.Form["names"]...) 
- } - job.Stdout.Add(w) - return job.Run() -} - -func postImagesLoad(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - job := eng.Job("load") - job.Stdin.Add(r.Body) - return job.Run() -} - -func postContainersCreate(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return nil - } - if err := checkForJson(r); err != nil { - return err - } - var ( - job = eng.Job("create", r.Form.Get("name")) - outWarnings []string - stdoutBuffer = bytes.NewBuffer(nil) - warnings = bytes.NewBuffer(nil) - ) - - if err := job.DecodeEnv(r.Body); err != nil { - return err - } - // Read container ID from the first line of stdout - job.Stdout.Add(stdoutBuffer) - // Read warnings from stderr - job.Stderr.Add(warnings) - if err := job.Run(); err != nil { - return err - } - // Parse warnings from stderr - scanner := bufio.NewScanner(warnings) - for scanner.Scan() { - outWarnings = append(outWarnings, scanner.Text()) - } - return writeJSON(w, http.StatusCreated, &types.ContainerCreateResponse{ - ID: engine.Tail(stdoutBuffer, 1), - Warnings: outWarnings, - }) -} - -func postContainersRestart(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return err - } - if vars == nil { - return fmt.Errorf("Missing parameter") - } - job := eng.Job("restart", vars["name"]) - job.Setenv("t", r.Form.Get("t")) - if err := job.Run(); err != nil { - return err - } - w.WriteHeader(http.StatusNoContent) - return nil -} - -func postContainerRename(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return err - } - if vars == nil { - return fmt.Errorf("Missing parameter") - } - - newName := r.URL.Query().Get("name") - job := eng.Job("container_rename", vars["name"], newName) - job.Setenv("t", r.Form.Get("t")) - if err := job.Run(); err != nil { - return err - } - w.WriteHeader(http.StatusNoContent) - return nil -} - -func deleteContainers(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return err - } - if vars == nil { - return fmt.Errorf("Missing parameter") - } - job := eng.Job("rm", vars["name"]) - - job.Setenv("forceRemove", r.Form.Get("force")) - - job.Setenv("removeVolume", r.Form.Get("v")) - job.Setenv("removeLink", r.Form.Get("link")) - if err := job.Run(); err != nil { - return err - } - w.WriteHeader(http.StatusNoContent) - return nil -} - -func deleteImages(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return err - } - if vars == nil { - return fmt.Errorf("Missing parameter") - } - var job = eng.Job("image_delete", vars["name"]) - streamJSON(job, w, false) - job.Setenv("force", r.Form.Get("force")) - job.Setenv("noprune", r.Form.Get("noprune")) - - return job.Run() -} - -func postContainersStart(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if vars == nil { - return fmt.Errorf("Missing parameter") - } - var ( - name = vars["name"] - job = eng.Job("start", name) - ) - - // If contentLength is -1, we can assumed chunked encoding - // or more technically that 
the length is unknown - // http://golang.org/src/pkg/net/http/request.go#L139 - // net/http otherwise seems to swallow any headers related to chunked encoding - // including r.TransferEncoding - // allow a nil body for backwards compatibility - if r.Body != nil && (r.ContentLength > 0 || r.ContentLength == -1) { - if err := checkForJson(r); err != nil { - return err - } - - if err := job.DecodeEnv(r.Body); err != nil { - return err - } - } - - if err := job.Run(); err != nil { - if err.Error() == "Container already started" { - w.WriteHeader(http.StatusNotModified) - return nil - } - return err - } - w.WriteHeader(http.StatusNoContent) - return nil -} - -func postContainersStop(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return err - } - if vars == nil { - return fmt.Errorf("Missing parameter") - } - job := eng.Job("stop", vars["name"]) - job.Setenv("t", r.Form.Get("t")) - if err := job.Run(); err != nil { - if err.Error() == "Container already stopped" { - w.WriteHeader(http.StatusNotModified) - return nil - } - return err - } - w.WriteHeader(http.StatusNoContent) - return nil -} - -func postContainersWait(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if vars == nil { - return fmt.Errorf("Missing parameter") - } - var ( - env engine.Env - stdoutBuffer = bytes.NewBuffer(nil) - job = eng.Job("wait", vars["name"]) - ) - job.Stdout.Add(stdoutBuffer) - if err := job.Run(); err != nil { - return err - } - - env.Set("StatusCode", engine.Tail(stdoutBuffer, 1)) - return writeJSONEnv(w, http.StatusOK, env) -} - -func postContainersResize(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return err - } - if vars == nil { - return fmt.Errorf("Missing parameter") - } - if err := eng.Job("resize", vars["name"], r.Form.Get("h"), r.Form.Get("w")).Run(); err != nil { - return err - } - return nil -} - -func postContainersAttach(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return err - } - if vars == nil { - return fmt.Errorf("Missing parameter") - } - - var ( - job = eng.Job("container_inspect", vars["name"]) - c, err = job.Stdout.AddEnv() - ) - if err != nil { - return err - } - if err = job.Run(); err != nil { - return err - } - - inStream, outStream, err := hijackServer(w) - if err != nil { - return err - } - defer closeStreams(inStream, outStream) - - var errStream io.Writer - - if _, ok := r.Header["Upgrade"]; ok { - fmt.Fprintf(outStream, "HTTP/1.1 101 UPGRADED\r\nContent-Type: application/vnd.docker.raw-stream\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n\r\n") - } else { - fmt.Fprintf(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n") - } - - if c.GetSubEnv("Config") != nil && !c.GetSubEnv("Config").GetBool("Tty") && version.GreaterThanOrEqualTo("1.6") { - errStream = stdcopy.NewStdWriter(outStream, stdcopy.Stderr) - outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) - } else { - errStream = outStream - } - - job = eng.Job("attach", vars["name"]) - job.Setenv("logs", r.Form.Get("logs")) - job.Setenv("stream", r.Form.Get("stream")) - job.Setenv("stdin", r.Form.Get("stdin")) - job.Setenv("stdout", r.Form.Get("stdout")) - job.Setenv("stderr", 
r.Form.Get("stderr")) - job.Stdin.Add(inStream) - job.Stdout.Add(outStream) - job.Stderr.Set(errStream) - if err := job.Run(); err != nil { - fmt.Fprintf(outStream, "Error attaching: %s\n", err) - - } - return nil -} - -func wsContainersAttach(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return err - } - if vars == nil { - return fmt.Errorf("Missing parameter") - } - - if err := eng.Job("container_inspect", vars["name"]).Run(); err != nil { - return err - } - - h := websocket.Handler(func(ws *websocket.Conn) { - defer ws.Close() - job := eng.Job("attach", vars["name"]) - job.Setenv("logs", r.Form.Get("logs")) - job.Setenv("stream", r.Form.Get("stream")) - job.Setenv("stdin", r.Form.Get("stdin")) - job.Setenv("stdout", r.Form.Get("stdout")) - job.Setenv("stderr", r.Form.Get("stderr")) - job.Stdin.Add(ws) - job.Stdout.Add(ws) - job.Stderr.Set(ws) - if err := job.Run(); err != nil { - log.Errorf("Error attaching websocket: %s", err) - } - }) - h.ServeHTTP(w, r) - - return nil -} - -func getContainersByName(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if vars == nil { - return fmt.Errorf("Missing parameter") - } - var job = eng.Job("container_inspect", vars["name"]) - if version.LessThan("1.12") { - job.SetenvBool("raw", true) - } - streamJSON(job, w, false) - return job.Run() -} - -func getExecByID(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if vars == nil { - return fmt.Errorf("Missing parameter 'id'") - } - var job = eng.Job("execInspect", vars["id"]) - streamJSON(job, w, false) - return job.Run() -} - -func getImagesByName(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if vars == nil { - return fmt.Errorf("Missing parameter") - } - var job = eng.Job("image_inspect", vars["name"]) - if version.LessThan("1.12") { - job.SetenvBool("raw", true) - } - streamJSON(job, w, false) - return job.Run() -} - -func postBuild(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if version.LessThan("1.3") { - return fmt.Errorf("Multipart upload for build is no longer supported. Please upgrade your docker client.") - } - var ( - authEncoded = r.Header.Get("X-Registry-Auth") - authConfig = ®istry.AuthConfig{} - configFileEncoded = r.Header.Get("X-Registry-Config") - configFile = ®istry.ConfigFile{} - job = eng.Job("build") - ) - - // This block can be removed when API versions prior to 1.9 are deprecated. - // Both headers will be parsed and sent along to the daemon, but if a non-empty - // ConfigFile is present, any value provided as an AuthConfig directly will - // be overridden. See BuildFile::CmdFrom for details. 
- if version.LessThan("1.9") && authEncoded != "" { - authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) - if err := json.NewDecoder(authJson).Decode(authConfig); err != nil { - // for a pull it is not an error if no auth was given - // to increase compatibility with the existing api it is defaulting to be empty - authConfig = ®istry.AuthConfig{} - } - } - - if configFileEncoded != "" { - configFileJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(configFileEncoded)) - if err := json.NewDecoder(configFileJson).Decode(configFile); err != nil { - // for a pull it is not an error if no auth was given - // to increase compatibility with the existing api it is defaulting to be empty - configFile = ®istry.ConfigFile{} - } - } - - if version.GreaterThanOrEqualTo("1.8") { - job.SetenvBool("json", true) - streamJSON(job, w, true) - } else { - job.Stdout.Add(utils.NewWriteFlusher(w)) - } - - if r.FormValue("forcerm") == "1" && version.GreaterThanOrEqualTo("1.12") { - job.Setenv("rm", "1") - } else if r.FormValue("rm") == "" && version.GreaterThanOrEqualTo("1.12") { - job.Setenv("rm", "1") - } else { - job.Setenv("rm", r.FormValue("rm")) - } - if r.FormValue("pull") == "1" && version.GreaterThanOrEqualTo("1.16") { - job.Setenv("pull", "1") - } - job.Stdin.Add(r.Body) - job.Setenv("remote", r.FormValue("remote")) - job.Setenv("dockerfile", r.FormValue("dockerfile")) - job.Setenv("t", r.FormValue("t")) - job.Setenv("q", r.FormValue("q")) - job.Setenv("nocache", r.FormValue("nocache")) - job.Setenv("forcerm", r.FormValue("forcerm")) - job.SetenvJson("authConfig", authConfig) - job.SetenvJson("configFile", configFile) - job.Setenv("memswap", r.FormValue("memswap")) - job.Setenv("memory", r.FormValue("memory")) - job.Setenv("cpusetcpus", r.FormValue("cpusetcpus")) - job.Setenv("cpushares", r.FormValue("cpushares")) - - // Job cancellation. Note: not all job types support this. 
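
The cancellation noted above pairs http.CloseNotifier with a completion channel: a side goroutine selects on either the client going away or the handler finishing, and only the first case cancels the work. A reduced sketch of that pattern with the long-running job simulated by a sleep (the endpoint and port are arbitrary):

package main

import (
	"fmt"
	"net/http"
	"time"
)

func slowHandler(w http.ResponseWriter, r *http.Request) {
	finished := make(chan struct{})
	defer close(finished)

	if cn, ok := w.(http.CloseNotifier); ok {
		go func() {
			select {
			case <-finished:
				// Handler returned normally; nothing to cancel.
			case <-cn.CloseNotify():
				// Client went away; this is where the real handler cancels its job.
				fmt.Println("client disconnected")
			}
		}()
	}

	time.Sleep(2 * time.Second) // stand-in for the long-running build
	fmt.Fprintln(w, "done")
}

func main() {
	http.HandleFunc("/slow", slowHandler)
	http.ListenAndServe("127.0.0.1:8080", nil)
}
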
- if closeNotifier, ok := w.(http.CloseNotifier); ok { - finished := make(chan struct{}) - defer close(finished) - go func() { - select { - case <-finished: - case <-closeNotifier.CloseNotify(): - log.Infof("Client disconnected, cancelling job: %v", job) - job.Cancel() - } - }() - } - - if err := job.Run(); err != nil { - if !job.Stdout.Used() { - return err - } - sf := utils.NewStreamFormatter(version.GreaterThanOrEqualTo("1.8")) - w.Write(sf.FormatError(err)) - } - return nil -} - -func postContainersCopy(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if vars == nil { - return fmt.Errorf("Missing parameter") - } - - var copyData engine.Env - - if err := checkForJson(r); err != nil { - return err - } - - if err := copyData.Decode(r.Body); err != nil { - return err - } - - if copyData.Get("Resource") == "" { - return fmt.Errorf("Path cannot be empty") - } - - origResource := copyData.Get("Resource") - - if copyData.Get("Resource")[0] == '/' { - copyData.Set("Resource", copyData.Get("Resource")[1:]) - } - - job := eng.Job("container_copy", vars["name"], copyData.Get("Resource")) - job.Stdout.Add(w) - w.Header().Set("Content-Type", "application/x-tar") - if err := job.Run(); err != nil { - log.Errorf("%v", err) - if strings.Contains(strings.ToLower(err.Error()), "no such id") { - w.WriteHeader(http.StatusNotFound) - } else if strings.Contains(err.Error(), "no such file or directory") { - return fmt.Errorf("Could not find the file %s in container %s", origResource, vars["name"]) - } - } - return nil -} - -func postContainerExecCreate(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return nil - } - var ( - name = vars["name"] - job = eng.Job("execCreate", name) - stdoutBuffer = bytes.NewBuffer(nil) - outWarnings []string - warnings = bytes.NewBuffer(nil) - ) - - if err := job.DecodeEnv(r.Body); err != nil { - return err - } - - job.Stdout.Add(stdoutBuffer) - // Read warnings from stderr - job.Stderr.Add(warnings) - // Register an instance of Exec in container. - if err := job.Run(); err != nil { - fmt.Fprintf(os.Stderr, "Error setting up exec command in container %s: %s\n", name, err) - return err - } - // Parse warnings from stderr - scanner := bufio.NewScanner(warnings) - for scanner.Scan() { - outWarnings = append(outWarnings, scanner.Text()) - } - - return writeJSON(w, http.StatusCreated, &types.ContainerExecCreateResponse{ - ID: engine.Tail(stdoutBuffer, 1), - Warnings: outWarnings, - }) -} - -// TODO(vishh): Refactor the code to avoid having to specify stream config as part of both create and start. -func postContainerExecStart(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return nil - } - var ( - name = vars["name"] - job = eng.Job("execStart", name) - errOut io.Writer = os.Stderr - ) - - if err := job.DecodeEnv(r.Body); err != nil { - return err - } - if !job.GetenvBool("Detach") { - // Setting up the streaming http interface. 
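
The attach and exec-start handlers in this file take over the HTTP connection through http.Hijacker (via hijackServer) and then write the 200 OK or 101 UPGRADED preamble by hand, after which the socket is a plain bidirectional byte stream. A minimal standalone sketch of that takeover; the route and echo behaviour are illustrative only:

package main

import (
	"fmt"
	"net/http"
)

func rawStream(w http.ResponseWriter, r *http.Request) {
	hj, ok := w.(http.Hijacker)
	if !ok {
		http.Error(w, "hijacking not supported", http.StatusInternalServerError)
		return
	}
	conn, buf, err := hj.Hijack()
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	defer conn.Close()

	// Hand-written preamble, as in the attach handlers; from here on the
	// connection is no longer managed by net/http.
	fmt.Fprintf(buf, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n")

	line, _ := buf.ReadString('\n')    // read one line from the client...
	fmt.Fprintf(buf, "echo: %s", line) // ...and echo it back over the raw stream
	buf.Flush()
}

func main() {
	http.HandleFunc("/attach-demo", rawStream)
	http.ListenAndServe("127.0.0.1:8080", nil)
}
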
- inStream, outStream, err := hijackServer(w) - if err != nil { - return err - } - defer closeStreams(inStream, outStream) - - var errStream io.Writer - - if _, ok := r.Header["Upgrade"]; ok { - fmt.Fprintf(outStream, "HTTP/1.1 101 UPGRADED\r\nContent-Type: application/vnd.docker.raw-stream\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n\r\n") - } else { - fmt.Fprintf(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n") - } - - if !job.GetenvBool("Tty") && version.GreaterThanOrEqualTo("1.6") { - errStream = stdcopy.NewStdWriter(outStream, stdcopy.Stderr) - outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) - } else { - errStream = outStream - } - job.Stdin.Add(inStream) - job.Stdout.Add(outStream) - job.Stderr.Set(errStream) - errOut = outStream - } - // Now run the user process in container. - job.SetCloseIO(false) - if err := job.Run(); err != nil { - fmt.Fprintf(errOut, "Error starting exec command in container %s: %s\n", name, err) - return err - } - w.WriteHeader(http.StatusNoContent) - - return nil -} - -func postContainerExecResize(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := parseForm(r); err != nil { - return err - } - if vars == nil { - return fmt.Errorf("Missing parameter") - } - if err := eng.Job("execResize", vars["name"], r.Form.Get("h"), r.Form.Get("w")).Run(); err != nil { - return err - } - return nil -} - -func optionsHandler(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - w.WriteHeader(http.StatusOK) - return nil -} -func writeCorsHeaders(w http.ResponseWriter, r *http.Request, corsHeaders string) { - log.Debugf("CORS header is enabled and set to: %s", corsHeaders) - w.Header().Add("Access-Control-Allow-Origin", corsHeaders) - w.Header().Add("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept, X-Registry-Auth") - w.Header().Add("Access-Control-Allow-Methods", "GET, POST, DELETE, PUT, OPTIONS") -} - -func ping(eng *engine.Engine, version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - _, err := w.Write([]byte{'O', 'K'}) - return err -} - -func makeHttpHandler(eng *engine.Engine, logging bool, localMethod string, localRoute string, handlerFunc HttpApiFunc, corsHeaders string, dockerVersion version.Version) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - // log the request - log.Debugf("Calling %s %s", localMethod, localRoute) - - if logging { - log.Infof("%s %s", r.Method, r.RequestURI) - } - - if strings.Contains(r.Header.Get("User-Agent"), "Docker-Client/") { - userAgent := strings.Split(r.Header.Get("User-Agent"), "/") - if len(userAgent) == 2 && !dockerVersion.Equal(version.Version(userAgent[1])) { - log.Debugf("Warning: client and server don't have the same version (client: %s, server: %s)", userAgent[1], dockerVersion) - } - } - version := version.Version(mux.Vars(r)["version"]) - if version == "" { - version = api.APIVERSION - } - if corsHeaders != "" { - writeCorsHeaders(w, r, corsHeaders) - } - - if version.GreaterThan(api.APIVERSION) { - http.Error(w, fmt.Errorf("client and server don't have same version (client : %s, server: %s)", version, api.APIVERSION).Error(), http.StatusNotFound) - return - } - - if err := handlerFunc(eng, version, w, r, mux.Vars(r)); err != nil { - log.Errorf("Handler for %s %s returned error: %s", localMethod, localRoute, err) - httpError(w, err) - 
} - } -} - -// Replicated from expvar.go as not public. -func expvarHandler(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json; charset=utf-8") - fmt.Fprintf(w, "{\n") - first := true - expvar.Do(func(kv expvar.KeyValue) { - if !first { - fmt.Fprintf(w, ",\n") - } - first = false - fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value) - }) - fmt.Fprintf(w, "\n}\n") -} - -func AttachProfiler(router *mux.Router) { - router.HandleFunc("/debug/vars", expvarHandler) - router.HandleFunc("/debug/pprof/", pprof.Index) - router.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline) - router.HandleFunc("/debug/pprof/profile", pprof.Profile) - router.HandleFunc("/debug/pprof/symbol", pprof.Symbol) - router.HandleFunc("/debug/pprof/block", pprof.Handler("block").ServeHTTP) - router.HandleFunc("/debug/pprof/heap", pprof.Handler("heap").ServeHTTP) - router.HandleFunc("/debug/pprof/goroutine", pprof.Handler("goroutine").ServeHTTP) - router.HandleFunc("/debug/pprof/threadcreate", pprof.Handler("threadcreate").ServeHTTP) -} - -// we keep enableCors just for legacy usage, need to be removed in the future -func createRouter(eng *engine.Engine, logging, enableCors bool, corsHeaders string, dockerVersion string) *mux.Router { - r := mux.NewRouter() - if os.Getenv("DEBUG") != "" { - AttachProfiler(r) - } - m := map[string]map[string]HttpApiFunc{ - "GET": { - "/_ping": ping, - "/events": getEvents, - "/info": getInfo, - "/version": getVersion, - "/images/json": getImagesJSON, - "/images/viz": getImagesViz, - "/images/search": getImagesSearch, - "/images/get": getImagesGet, - "/images/{name:.*}/get": getImagesGet, - "/images/{name:.*}/history": getImagesHistory, - "/images/{name:.*}/json": getImagesByName, - "/containers/ps": getContainersJSON, - "/containers/json": getContainersJSON, - "/containers/{name:.*}/export": getContainersExport, - "/containers/{name:.*}/changes": getContainersChanges, - "/containers/{name:.*}/json": getContainersByName, - "/containers/{name:.*}/top": getContainersTop, - "/containers/{name:.*}/logs": getContainersLogs, - "/containers/{name:.*}/stats": getContainersStats, - "/containers/{name:.*}/attach/ws": wsContainersAttach, - "/exec/{id:.*}/json": getExecByID, - }, - "POST": { - "/auth": postAuth, - "/commit": postCommit, - "/build": postBuild, - "/images/create": postImagesCreate, - "/images/load": postImagesLoad, - "/images/{name:.*}/push": postImagesPush, - "/images/{name:.*}/tag": postImagesTag, - "/containers/create": postContainersCreate, - "/containers/{name:.*}/kill": postContainersKill, - "/containers/{name:.*}/pause": postContainersPause, - "/containers/{name:.*}/unpause": postContainersUnpause, - "/containers/{name:.*}/restart": postContainersRestart, - "/containers/{name:.*}/start": postContainersStart, - "/containers/{name:.*}/stop": postContainersStop, - "/containers/{name:.*}/wait": postContainersWait, - "/containers/{name:.*}/resize": postContainersResize, - "/containers/{name:.*}/attach": postContainersAttach, - "/containers/{name:.*}/copy": postContainersCopy, - "/containers/{name:.*}/exec": postContainerExecCreate, - "/exec/{name:.*}/start": postContainerExecStart, - "/exec/{name:.*}/resize": postContainerExecResize, - "/containers/{name:.*}/rename": postContainerRename, - }, - "DELETE": { - "/containers/{name:.*}": deleteContainers, - "/images/{name:.*}": deleteImages, - }, - "OPTIONS": { - "": optionsHandler, - }, - } - - // If "api-cors-header" is not given, but "api-enable-cors" is true, we set cors to "*" - // otherwise, all head 
values will be passed to HTTP handler - if corsHeaders == "" && enableCors { - corsHeaders = "*" - } - - for method, routes := range m { - for route, fct := range routes { - log.Debugf("Registering %s, %s", method, route) - // NOTE: scope issue, make sure the variables are local and won't be changed - localRoute := route - localFct := fct - localMethod := method - - // build the handler function - f := makeHttpHandler(eng, logging, localMethod, localRoute, localFct, corsHeaders, version.Version(dockerVersion)) - - // add the new route - if localRoute == "" { - r.Methods(localMethod).HandlerFunc(f) - } else { - r.Path("/v{version:[0-9.]+}" + localRoute).Methods(localMethod).HandlerFunc(f) - r.Path(localRoute).Methods(localMethod).HandlerFunc(f) - } - } - } - - return r -} - -// ServeRequest processes a single http request to the docker remote api. -// FIXME: refactor this to be part of Server and not require re-creating a new -// router each time. This requires first moving ListenAndServe into Server. -func ServeRequest(eng *engine.Engine, apiversion version.Version, w http.ResponseWriter, req *http.Request) { - router := createRouter(eng, false, true, "", "") - // Insert APIVERSION into the request as a convenience - req.URL.Path = fmt.Sprintf("/v%s%s", apiversion, req.URL.Path) - router.ServeHTTP(w, req) -} - -func lookupGidByName(nameOrGid string) (int, error) { - groupFile, err := user.GetGroupPath() - if err != nil { - return -1, err - } - groups, err := user.ParseGroupFileFilter(groupFile, func(g user.Group) bool { - return g.Name == nameOrGid || strconv.Itoa(g.Gid) == nameOrGid - }) - if err != nil { - return -1, err - } - if groups != nil && len(groups) > 0 { - return groups[0].Gid, nil - } - gid, err := strconv.Atoi(nameOrGid) - if err == nil { - log.Warnf("Could not find GID %d", gid) - return gid, nil - } - return -1, fmt.Errorf("Group %s not found", nameOrGid) -} - -func setupTls(cert, key, ca string, l net.Listener) (net.Listener, error) { - tlsCert, err := tls.LoadX509KeyPair(cert, key) - if err != nil { - if os.IsNotExist(err) { - return nil, fmt.Errorf("Could not load X509 key pair (%s, %s): %v", cert, key, err) - } - return nil, fmt.Errorf("Error reading X509 key pair (%s, %s): %q. Make sure the key is encrypted.", - cert, key, err) - } - tlsConfig := &tls.Config{ - NextProtos: []string{"http/1.1"}, - Certificates: []tls.Certificate{tlsCert}, - // Avoid fallback on insecure SSL protocols - MinVersion: tls.VersionTLS10, - } - - if ca != "" { - certPool := x509.NewCertPool() - file, err := ioutil.ReadFile(ca) - if err != nil { - return nil, fmt.Errorf("Could not read CA certificate: %v", err) - } - certPool.AppendCertsFromPEM(file) - tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert - tlsConfig.ClientCAs = certPool - } - - return tls.NewListener(l, tlsConfig), nil -} - -func newListener(proto, addr string, bufferRequests bool) (net.Listener, error) { - if bufferRequests { - return listenbuffer.NewListenBuffer(proto, addr, activationLock) - } - - return net.Listen(proto, addr) -} - -func changeGroup(addr string, nameOrGid string) error { - gid, err := lookupGidByName(nameOrGid) - if err != nil { - return err - } - - log.Debugf("%s group found. 
gid: %d", nameOrGid, gid) - return os.Chown(addr, 0, gid) -} - -func setSocketGroup(addr, group string) error { - if group == "" { - return nil - } - - if err := changeGroup(addr, group); err != nil { - if group != "docker" { - return err - } - log.Debugf("Warning: could not chgrp %s to docker: %v", addr, err) - } - - return nil -} - -func allocateDaemonPort(addr string) error { - host, port, err := net.SplitHostPort(addr) - if err != nil { - return err - } - - intPort, err := strconv.Atoi(port) - if err != nil { - return err - } - - var hostIPs []net.IP - if parsedIP := net.ParseIP(host); parsedIP != nil { - hostIPs = append(hostIPs, parsedIP) - } else if hostIPs, err = net.LookupIP(host); err != nil { - return fmt.Errorf("failed to lookup %s address in host specification", host) - } - - for _, hostIP := range hostIPs { - if _, err := portallocator.RequestPort(hostIP, "tcp", intPort); err != nil { - return fmt.Errorf("failed to allocate daemon listening port %d (err: %v)", intPort, err) - } - } - return nil -} - -func setupTcpHttp(addr string, job *engine.Job) (*HttpServer, error) { - if !job.GetenvBool("TlsVerify") { - log.Infof("/!\\ DON'T BIND ON ANY IP ADDRESS WITHOUT setting -tlsverify IF YOU DON'T KNOW WHAT YOU'RE DOING /!\\") - } - - r := createRouter(job.Eng, job.GetenvBool("Logging"), job.GetenvBool("EnableCors"), job.Getenv("CorsHeaders"), job.Getenv("Version")) - - l, err := newListener("tcp", addr, job.GetenvBool("BufferRequests")) - if err != nil { - return nil, err - } - - if err := allocateDaemonPort(addr); err != nil { - return nil, err - } - - if job.GetenvBool("Tls") || job.GetenvBool("TlsVerify") { - var tlsCa string - if job.GetenvBool("TlsVerify") { - tlsCa = job.Getenv("TlsCa") - } - l, err = setupTls(job.Getenv("TlsCert"), job.Getenv("TlsKey"), tlsCa, l) - if err != nil { - return nil, err - } - } - return &HttpServer{&http.Server{Addr: addr, Handler: r}, l}, nil -} - -type Server interface { - Serve() error - Close() error -} - -// ServeApi loops through all of the protocols sent in to docker and spawns -// off a go routine to setup a serving http.Server for each. 
-func ServeApi(job *engine.Job) engine.Status { - if len(job.Args) == 0 { - return job.Errorf("usage: %s PROTO://ADDR [PROTO://ADDR ...]", job.Name) - } - var ( - protoAddrs = job.Args - chErrors = make(chan error, len(protoAddrs)) - ) - activationLock = make(chan struct{}) - - for _, protoAddr := range protoAddrs { - protoAddrParts := strings.SplitN(protoAddr, "://", 2) - if len(protoAddrParts) != 2 { - return job.Errorf("usage: %s PROTO://ADDR [PROTO://ADDR ...]", job.Name) - } - go func() { - log.Infof("Listening for HTTP on %s (%s)", protoAddrParts[0], protoAddrParts[1]) - srv, err := NewServer(protoAddrParts[0], protoAddrParts[1], job) - if err != nil { - chErrors <- err - return - } - job.Eng.OnShutdown(func() { - if err := srv.Close(); err != nil { - log.Error(err) - } - }) - if err = srv.Serve(); err != nil && strings.Contains(err.Error(), "use of closed network connection") { - err = nil - } - chErrors <- err - }() - } - - for i := 0; i < len(protoAddrs); i++ { - err := <-chErrors - if err != nil { - return job.Error(err) - } - } - - return engine.StatusOK -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/server/server_linux.go b/Godeps/_workspace/src/github.com/docker/docker/api/server/server_linux.go deleted file mode 100644 index fff803dd..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/server/server_linux.go +++ /dev/null @@ -1,103 +0,0 @@ -// +build linux - -package server - -import ( - "fmt" - "net/http" - "os" - "syscall" - - "github.com/docker/docker/engine" - "github.com/docker/docker/pkg/systemd" -) - -// NewServer sets up the required Server and does protocol specific checking. -func NewServer(proto, addr string, job *engine.Job) (Server, error) { - // Basic error and sanity checking - switch proto { - case "fd": - return nil, serveFd(addr, job) - case "tcp": - return setupTcpHttp(addr, job) - case "unix": - return setupUnixHttp(addr, job) - default: - return nil, fmt.Errorf("Invalid protocol format.") - } -} - -func setupUnixHttp(addr string, job *engine.Job) (*HttpServer, error) { - r := createRouter(job.Eng, job.GetenvBool("Logging"), job.GetenvBool("EnableCors"), job.Getenv("CorsHeaders"), job.Getenv("Version")) - - if err := syscall.Unlink(addr); err != nil && !os.IsNotExist(err) { - return nil, err - } - mask := syscall.Umask(0777) - defer syscall.Umask(mask) - - l, err := newListener("unix", addr, job.GetenvBool("BufferRequests")) - if err != nil { - return nil, err - } - - if err := setSocketGroup(addr, job.Getenv("SocketGroup")); err != nil { - return nil, err - } - - if err := os.Chmod(addr, 0660); err != nil { - return nil, err - } - - return &HttpServer{&http.Server{Addr: addr, Handler: r}, l}, nil -} - -// serveFd creates an http.Server and sets it up to serve given a socket activated -// argument. -func serveFd(addr string, job *engine.Job) error { - r := createRouter(job.Eng, job.GetenvBool("Logging"), job.GetenvBool("EnableCors"), job.Getenv("CorsHeaders"), job.Getenv("Version")) - - ls, e := systemd.ListenFD(addr) - if e != nil { - return e - } - - chErrors := make(chan error, len(ls)) - - // We don't want to start serving on these sockets until the - // daemon is initialized and installed. Otherwise required handlers - // won't be ready. 
- <-activationLock - - // Since ListenFD will return one or more sockets we have - // to create a go func to spawn off multiple serves - for i := range ls { - listener := ls[i] - go func() { - httpSrv := http.Server{Handler: r} - chErrors <- httpSrv.Serve(listener) - }() - } - - for i := 0; i < len(ls); i++ { - err := <-chErrors - if err != nil { - return err - } - } - - return nil -} - -// Called through eng.Job("acceptconnections") -func AcceptConnections(job *engine.Job) engine.Status { - // Tell the init daemon we are accepting requests - go systemd.SdNotify("READY=1") - - // close the lock so the listeners start accepting connections - if activationLock != nil { - close(activationLock) - } - - return engine.StatusOK -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/server/server_unit_test.go b/Godeps/_workspace/src/github.com/docker/docker/api/server/server_unit_test.go deleted file mode 100644 index b5ec7c89..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/server/server_unit_test.go +++ /dev/null @@ -1,553 +0,0 @@ -package server - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "net/http" - "net/http/httptest" - "reflect" - "strings" - "testing" - - "github.com/docker/docker/api" - "github.com/docker/docker/engine" - "github.com/docker/docker/pkg/version" -) - -func TestGetBoolParam(t *testing.T) { - if ret, err := getBoolParam("true"); err != nil || !ret { - t.Fatalf("true -> true, nil | got %t %s", ret, err) - } - if ret, err := getBoolParam("True"); err != nil || !ret { - t.Fatalf("True -> true, nil | got %t %s", ret, err) - } - if ret, err := getBoolParam("1"); err != nil || !ret { - t.Fatalf("1 -> true, nil | got %t %s", ret, err) - } - if ret, err := getBoolParam(""); err != nil || ret { - t.Fatalf("\"\" -> false, nil | got %t %s", ret, err) - } - if ret, err := getBoolParam("false"); err != nil || ret { - t.Fatalf("false -> false, nil | got %t %s", ret, err) - } - if ret, err := getBoolParam("0"); err != nil || ret { - t.Fatalf("0 -> false, nil | got %t %s", ret, err) - } - if ret, err := getBoolParam("faux"); err == nil || ret { - t.Fatalf("faux -> false, err | got %t %s", ret, err) - - } -} - -func TesthttpError(t *testing.T) { - r := httptest.NewRecorder() - - httpError(r, fmt.Errorf("No such method")) - if r.Code != http.StatusNotFound { - t.Fatalf("Expected %d, got %d", http.StatusNotFound, r.Code) - } - - httpError(r, fmt.Errorf("This accound hasn't been activated")) - if r.Code != http.StatusForbidden { - t.Fatalf("Expected %d, got %d", http.StatusForbidden, r.Code) - } - - httpError(r, fmt.Errorf("Some error")) - if r.Code != http.StatusInternalServerError { - t.Fatalf("Expected %d, got %d", http.StatusInternalServerError, r.Code) - } -} - -func TestGetVersion(t *testing.T) { - eng := engine.New() - var called bool - eng.Register("version", func(job *engine.Job) engine.Status { - called = true - v := &engine.Env{} - v.SetJson("Version", "42.1") - v.Set("ApiVersion", "1.1.1.1.1") - v.Set("GoVersion", "2.42") - v.Set("Os", "Linux") - v.Set("Arch", "x86_64") - if _, err := v.WriteTo(job.Stdout); err != nil { - return job.Error(err) - } - return engine.StatusOK - }) - r := serveRequest("GET", "/version", nil, eng, t) - if !called { - t.Fatalf("handler was not called") - } - v := readEnv(r.Body, t) - if v.Get("Version") != "42.1" { - t.Fatalf("%#v\n", v) - } - if r.HeaderMap.Get("Content-Type") != "application/json" { - t.Fatalf("%#v\n", r) - } -} - -func TestGetInfo(t *testing.T) { - eng := engine.New() - var called bool - 
eng.Register("info", func(job *engine.Job) engine.Status { - called = true - v := &engine.Env{} - v.SetInt("Containers", 1) - v.SetInt("Images", 42000) - if _, err := v.WriteTo(job.Stdout); err != nil { - return job.Error(err) - } - return engine.StatusOK - }) - r := serveRequest("GET", "/info", nil, eng, t) - if !called { - t.Fatalf("handler was not called") - } - v := readEnv(r.Body, t) - if v.GetInt("Images") != 42000 { - t.Fatalf("%#v\n", v) - } - if v.GetInt("Containers") != 1 { - t.Fatalf("%#v\n", v) - } - assertContentType(r, "application/json", t) -} - -func TestGetImagesJSON(t *testing.T) { - eng := engine.New() - var called bool - eng.Register("images", func(job *engine.Job) engine.Status { - called = true - v := createEnvFromGetImagesJSONStruct(sampleImage) - if _, err := v.WriteTo(job.Stdout); err != nil { - return job.Error(err) - } - return engine.StatusOK - }) - r := serveRequest("GET", "/images/json", nil, eng, t) - if !called { - t.Fatal("handler was not called") - } - assertHttpNotError(r, t) - assertContentType(r, "application/json", t) - var observed getImagesJSONStruct - if err := json.Unmarshal(r.Body.Bytes(), &observed); err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(observed, sampleImage) { - t.Errorf("Expected %#v but got %#v", sampleImage, observed) - } -} - -func TestGetImagesJSONFilter(t *testing.T) { - eng := engine.New() - filter := "nothing" - eng.Register("images", func(job *engine.Job) engine.Status { - filter = job.Getenv("filter") - return engine.StatusOK - }) - serveRequest("GET", "/images/json?filter=aaaa", nil, eng, t) - if filter != "aaaa" { - t.Errorf("%#v", filter) - } -} - -func TestGetImagesJSONFilters(t *testing.T) { - eng := engine.New() - filter := "nothing" - eng.Register("images", func(job *engine.Job) engine.Status { - filter = job.Getenv("filters") - return engine.StatusOK - }) - serveRequest("GET", "/images/json?filters=nnnn", nil, eng, t) - if filter != "nnnn" { - t.Errorf("%#v", filter) - } -} - -func TestGetImagesJSONAll(t *testing.T) { - eng := engine.New() - allFilter := "-1" - eng.Register("images", func(job *engine.Job) engine.Status { - allFilter = job.Getenv("all") - return engine.StatusOK - }) - serveRequest("GET", "/images/json?all=1", nil, eng, t) - if allFilter != "1" { - t.Errorf("%#v", allFilter) - } -} - -func TestGetImagesJSONLegacyFormat(t *testing.T) { - eng := engine.New() - var called bool - eng.Register("images", func(job *engine.Job) engine.Status { - called = true - outsLegacy := engine.NewTable("Created", 0) - outsLegacy.Add(createEnvFromGetImagesJSONStruct(sampleImage)) - if _, err := outsLegacy.WriteListTo(job.Stdout); err != nil { - return job.Error(err) - } - return engine.StatusOK - }) - r := serveRequestUsingVersion("GET", "/images/json", "1.6", nil, eng, t) - if !called { - t.Fatal("handler was not called") - } - assertHttpNotError(r, t) - assertContentType(r, "application/json", t) - images := engine.NewTable("Created", 0) - if _, err := images.ReadListFrom(r.Body.Bytes()); err != nil { - t.Fatal(err) - } - if images.Len() != 1 { - t.Fatalf("Expected 1 image, %d found", images.Len()) - } - image := images.Data[0] - if image.Get("Tag") != "test-tag" { - t.Errorf("Expected tag 'test-tag', found '%s'", image.Get("Tag")) - } - if image.Get("Repository") != "test-name" { - t.Errorf("Expected repository 'test-name', found '%s'", image.Get("Repository")) - } -} - -func TestGetContainersByName(t *testing.T) { - eng := engine.New() - name := "container_name" - var called bool - 
eng.Register("container_inspect", func(job *engine.Job) engine.Status { - called = true - if job.Args[0] != name { - t.Errorf("name != '%s': %#v", name, job.Args[0]) - } - if api.APIVERSION.LessThan("1.12") && !job.GetenvBool("dirty") { - t.Errorf("dirty env variable not set") - } else if api.APIVERSION.GreaterThanOrEqualTo("1.12") && job.GetenvBool("dirty") { - t.Errorf("dirty env variable set when it shouldn't") - } - v := &engine.Env{} - v.SetBool("dirty", true) - if _, err := v.WriteTo(job.Stdout); err != nil { - return job.Error(err) - } - return engine.StatusOK - }) - r := serveRequest("GET", "/containers/"+name+"/json", nil, eng, t) - if !called { - t.Fatal("handler was not called") - } - assertContentType(r, "application/json", t) - var stdoutJson interface{} - if err := json.Unmarshal(r.Body.Bytes(), &stdoutJson); err != nil { - t.Fatalf("%#v", err) - } - if stdoutJson.(map[string]interface{})["dirty"].(float64) != 1 { - t.Fatalf("%#v", stdoutJson) - } -} - -func TestGetEvents(t *testing.T) { - eng := engine.New() - var called bool - eng.Register("events", func(job *engine.Job) engine.Status { - called = true - since := job.Getenv("since") - if since != "1" { - t.Fatalf("'since' should be 1, found %#v instead", since) - } - until := job.Getenv("until") - if until != "0" { - t.Fatalf("'until' should be 0, found %#v instead", until) - } - v := &engine.Env{} - v.Set("since", since) - v.Set("until", until) - if _, err := v.WriteTo(job.Stdout); err != nil { - return job.Error(err) - } - return engine.StatusOK - }) - r := serveRequest("GET", "/events?since=1&until=0", nil, eng, t) - if !called { - t.Fatal("handler was not called") - } - assertContentType(r, "application/json", t) - var stdout_json struct { - Since int - Until int - } - if err := json.Unmarshal(r.Body.Bytes(), &stdout_json); err != nil { - t.Fatal(err) - } - if stdout_json.Since != 1 { - t.Errorf("since != 1: %#v", stdout_json.Since) - } - if stdout_json.Until != 0 { - t.Errorf("until != 0: %#v", stdout_json.Until) - } -} - -func TestLogs(t *testing.T) { - eng := engine.New() - var inspect bool - var logs bool - eng.Register("container_inspect", func(job *engine.Job) engine.Status { - inspect = true - if len(job.Args) == 0 { - t.Fatal("Job arguments is empty") - } - if job.Args[0] != "test" { - t.Fatalf("Container name %s, must be test", job.Args[0]) - } - return engine.StatusOK - }) - expected := "logs" - eng.Register("logs", func(job *engine.Job) engine.Status { - logs = true - if len(job.Args) == 0 { - t.Fatal("Job arguments is empty") - } - if job.Args[0] != "test" { - t.Fatalf("Container name %s, must be test", job.Args[0]) - } - follow := job.Getenv("follow") - if follow != "1" { - t.Fatalf("follow: %s, must be 1", follow) - } - stdout := job.Getenv("stdout") - if stdout != "1" { - t.Fatalf("stdout %s, must be 1", stdout) - } - stderr := job.Getenv("stderr") - if stderr != "" { - t.Fatalf("stderr %s, must be empty", stderr) - } - timestamps := job.Getenv("timestamps") - if timestamps != "1" { - t.Fatalf("timestamps %s, must be 1", timestamps) - } - job.Stdout.Write([]byte(expected)) - return engine.StatusOK - }) - r := serveRequest("GET", "/containers/test/logs?follow=1&stdout=1×tamps=1", nil, eng, t) - if r.Code != http.StatusOK { - t.Fatalf("Got status %d, expected %d", r.Code, http.StatusOK) - } - if !inspect { - t.Fatal("container_inspect job was not called") - } - if !logs { - t.Fatal("logs job was not called") - } - res := r.Body.String() - if res != expected { - t.Fatalf("Output %s, expected %s", res, 
expected) - } -} - -func TestLogsNoStreams(t *testing.T) { - eng := engine.New() - var inspect bool - var logs bool - eng.Register("container_inspect", func(job *engine.Job) engine.Status { - inspect = true - if len(job.Args) == 0 { - t.Fatal("Job arguments is empty") - } - if job.Args[0] != "test" { - t.Fatalf("Container name %s, must be test", job.Args[0]) - } - return engine.StatusOK - }) - eng.Register("logs", func(job *engine.Job) engine.Status { - logs = true - return engine.StatusOK - }) - r := serveRequest("GET", "/containers/test/logs", nil, eng, t) - if r.Code != http.StatusBadRequest { - t.Fatalf("Got status %d, expected %d", r.Code, http.StatusBadRequest) - } - if inspect { - t.Fatal("container_inspect job was called, but it shouldn't") - } - if logs { - t.Fatal("logs job was called, but it shouldn't") - } - res := strings.TrimSpace(r.Body.String()) - expected := "Bad parameters: you must choose at least one stream" - if !strings.Contains(res, expected) { - t.Fatalf("Output %s, expected %s in it", res, expected) - } -} - -func TestGetImagesHistory(t *testing.T) { - eng := engine.New() - imageName := "docker-test-image" - var called bool - eng.Register("history", func(job *engine.Job) engine.Status { - called = true - if len(job.Args) == 0 { - t.Fatal("Job arguments is empty") - } - if job.Args[0] != imageName { - t.Fatalf("name != '%s': %#v", imageName, job.Args[0]) - } - v := &engine.Env{} - if _, err := v.WriteTo(job.Stdout); err != nil { - return job.Error(err) - } - return engine.StatusOK - }) - r := serveRequest("GET", "/images/"+imageName+"/history", nil, eng, t) - if !called { - t.Fatalf("handler was not called") - } - if r.Code != http.StatusOK { - t.Fatalf("Got status %d, expected %d", r.Code, http.StatusOK) - } - if r.HeaderMap.Get("Content-Type") != "application/json" { - t.Fatalf("%#v\n", r) - } -} - -func TestGetImagesByName(t *testing.T) { - eng := engine.New() - name := "image_name" - var called bool - eng.Register("image_inspect", func(job *engine.Job) engine.Status { - called = true - if job.Args[0] != name { - t.Fatalf("name != '%s': %#v", name, job.Args[0]) - } - if api.APIVERSION.LessThan("1.12") && !job.GetenvBool("dirty") { - t.Fatal("dirty env variable not set") - } else if api.APIVERSION.GreaterThanOrEqualTo("1.12") && job.GetenvBool("dirty") { - t.Fatal("dirty env variable set when it shouldn't") - } - v := &engine.Env{} - v.SetBool("dirty", true) - if _, err := v.WriteTo(job.Stdout); err != nil { - return job.Error(err) - } - return engine.StatusOK - }) - r := serveRequest("GET", "/images/"+name+"/json", nil, eng, t) - if !called { - t.Fatal("handler was not called") - } - if r.HeaderMap.Get("Content-Type") != "application/json" { - t.Fatalf("%#v\n", r) - } - var stdoutJson interface{} - if err := json.Unmarshal(r.Body.Bytes(), &stdoutJson); err != nil { - t.Fatalf("%#v", err) - } - if stdoutJson.(map[string]interface{})["dirty"].(float64) != 1 { - t.Fatalf("%#v", stdoutJson) - } -} - -func TestDeleteContainers(t *testing.T) { - eng := engine.New() - name := "foo" - var called bool - eng.Register("rm", func(job *engine.Job) engine.Status { - called = true - if len(job.Args) == 0 { - t.Fatalf("Job arguments is empty") - } - if job.Args[0] != name { - t.Fatalf("name != '%s': %#v", name, job.Args[0]) - } - return engine.StatusOK - }) - r := serveRequest("DELETE", "/containers/"+name, nil, eng, t) - if !called { - t.Fatalf("handler was not called") - } - if r.Code != http.StatusNoContent { - t.Fatalf("Got status %d, expected %d", r.Code, 
http.StatusNoContent) - } -} - -func serveRequest(method, target string, body io.Reader, eng *engine.Engine, t *testing.T) *httptest.ResponseRecorder { - return serveRequestUsingVersion(method, target, api.APIVERSION, body, eng, t) -} - -func serveRequestUsingVersion(method, target string, version version.Version, body io.Reader, eng *engine.Engine, t *testing.T) *httptest.ResponseRecorder { - r := httptest.NewRecorder() - req, err := http.NewRequest(method, target, body) - if err != nil { - t.Fatal(err) - } - ServeRequest(eng, version, r, req) - return r -} - -func readEnv(src io.Reader, t *testing.T) *engine.Env { - out := engine.NewOutput() - v, err := out.AddEnv() - if err != nil { - t.Fatal(err) - } - if _, err := io.Copy(out, src); err != nil { - t.Fatal(err) - } - out.Close() - return v -} - -func toJson(data interface{}, t *testing.T) io.Reader { - var buf bytes.Buffer - if err := json.NewEncoder(&buf).Encode(data); err != nil { - t.Fatal(err) - } - return &buf -} - -func assertContentType(recorder *httptest.ResponseRecorder, content_type string, t *testing.T) { - if recorder.HeaderMap.Get("Content-Type") != content_type { - t.Fatalf("%#v\n", recorder) - } -} - -// XXX: Duplicated from integration/utils_test.go, but maybe that's OK as that -// should die as soon as we converted all integration tests? -// assertHttpNotError expect the given response to not have an error. -// Otherwise the it causes the test to fail. -func assertHttpNotError(r *httptest.ResponseRecorder, t *testing.T) { - // Non-error http status are [200, 400) - if r.Code < http.StatusOK || r.Code >= http.StatusBadRequest { - t.Fatal(fmt.Errorf("Unexpected http error: %v", r.Code)) - } -} - -func createEnvFromGetImagesJSONStruct(data getImagesJSONStruct) *engine.Env { - v := &engine.Env{} - v.SetList("RepoTags", data.RepoTags) - v.Set("Id", data.Id) - v.SetInt64("Created", data.Created) - v.SetInt64("Size", data.Size) - v.SetInt64("VirtualSize", data.VirtualSize) - return v -} - -type getImagesJSONStruct struct { - RepoTags []string - Id string - Created int64 - Size int64 - VirtualSize int64 -} - -var sampleImage getImagesJSONStruct = getImagesJSONStruct{ - RepoTags: []string{"test-name:test-tag"}, - Id: "ID", - Created: 999, - Size: 777, - VirtualSize: 666, -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/server/server_windows.go b/Godeps/_workspace/src/github.com/docker/docker/api/server/server_windows.go deleted file mode 100644 index c5d2c2ca..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/server/server_windows.go +++ /dev/null @@ -1,31 +0,0 @@ -// +build windows - -package server - -import ( - "fmt" - - "github.com/docker/docker/engine" -) - -// NewServer sets up the required Server and does protocol specific checking. -func NewServer(proto, addr string, job *engine.Job) (Server, error) { - // Basic error and sanity checking - switch proto { - case "tcp": - return setupTcpHttp(addr, job) - default: - return nil, errors.New("Invalid protocol format. 
Windows only supports tcp.") - } -} - -// Called through eng.Job("acceptconnections") -func AcceptConnections(job *engine.Job) engine.Status { - - // close the lock so the listeners start accepting connections - if activationLock != nil { - close(activationLock) - } - - return engine.StatusOK -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/types/stats.go b/Godeps/_workspace/src/github.com/docker/docker/api/types/stats.go deleted file mode 100644 index 97804e95..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/types/stats.go +++ /dev/null @@ -1,87 +0,0 @@ -// This package is used for API stability in the types and response to the -// consumers of the API stats endpoint. -package types - -import "time" - -type ThrottlingData struct { - // Number of periods with throttling active - Periods uint64 `json:"periods"` - // Number of periods when the container hit its throttling limit. - ThrottledPeriods uint64 `json:"throttled_periods"` - // Aggregate time the container was throttled for in nanoseconds. - ThrottledTime uint64 `json:"throttled_time"` -} - -// All CPU stats are aggregated since container inception. -type CpuUsage struct { - // Total CPU time consumed. - // Units: nanoseconds. - TotalUsage uint64 `json:"total_usage"` - // Total CPU time consumed per core. - // Units: nanoseconds. - PercpuUsage []uint64 `json:"percpu_usage"` - // Time spent by tasks of the cgroup in kernel mode. - // Units: nanoseconds. - UsageInKernelmode uint64 `json:"usage_in_kernelmode"` - // Time spent by tasks of the cgroup in user mode. - // Units: nanoseconds. - UsageInUsermode uint64 `json:"usage_in_usermode"` -} - -type CpuStats struct { - CpuUsage CpuUsage `json:"cpu_usage"` - SystemUsage uint64 `json:"system_cpu_usage"` - ThrottlingData ThrottlingData `json:"throttling_data,omitempty"` -} - -type MemoryStats struct { - // current res_counter usage for memory - Usage uint64 `json:"usage"` - // maximum usage ever recorded. - MaxUsage uint64 `json:"max_usage"` - // TODO(vishh): Export these as stronger types. - // all the stats exported via memory.stat. - Stats map[string]uint64 `json:"stats"` - // number of times memory usage hits limits. 
- Failcnt uint64 `json:"failcnt"` - Limit uint64 `json:"limit"` -} - -type BlkioStatEntry struct { - Major uint64 `json:"major"` - Minor uint64 `json:"minor"` - Op string `json:"op"` - Value uint64 `json:"value"` -} - -type BlkioStats struct { - // number of bytes tranferred to and from the block device - IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive"` - IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recursive"` - IoQueuedRecursive []BlkioStatEntry `json:"io_queue_recursive"` - IoServiceTimeRecursive []BlkioStatEntry `json:"io_service_time_recursive"` - IoWaitTimeRecursive []BlkioStatEntry `json:"io_wait_time_recursive"` - IoMergedRecursive []BlkioStatEntry `json:"io_merged_recursive"` - IoTimeRecursive []BlkioStatEntry `json:"io_time_recursive"` - SectorsRecursive []BlkioStatEntry `json:"sectors_recursive"` -} - -type Network struct { - RxBytes uint64 `json:"rx_bytes"` - RxPackets uint64 `json:"rx_packets"` - RxErrors uint64 `json:"rx_errors"` - RxDropped uint64 `json:"rx_dropped"` - TxBytes uint64 `json:"tx_bytes"` - TxPackets uint64 `json:"tx_packets"` - TxErrors uint64 `json:"tx_errors"` - TxDropped uint64 `json:"tx_dropped"` -} - -type Stats struct { - Read time.Time `json:"read"` - Network Network `json:"network,omitempty"` - CpuStats CpuStats `json:"cpu_stats,omitempty"` - MemoryStats MemoryStats `json:"memory_stats,omitempty"` - BlkioStats BlkioStats `json:"blkio_stats,omitempty"` -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/api/types/types.go b/Godeps/_workspace/src/github.com/docker/docker/api/types/types.go deleted file mode 100644 index 5531135b..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/api/types/types.go +++ /dev/null @@ -1,20 +0,0 @@ -package types - -// ContainerCreateResponse contains the information returned to a client on the -// creation of a new container. -type ContainerCreateResponse struct { - // ID is the ID of the created container. - ID string `json:"Id"` - - // Warnings are any warnings encountered during the creation of the container. - Warnings []string `json:"Warnings"` -} - -// POST /containers/{name:.*}/exec -type ContainerExecCreateResponse struct { - // ID is the exec ID. - ID string `json:"Id"` - - // Warnings are any warnings encountered during the execution of the command. 
- Warnings []string `json:"Warnings"` -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/autogen/dockerversion/dockerversion.go b/Godeps/_workspace/src/github.com/docker/docker/autogen/dockerversion/dockerversion.go deleted file mode 100644 index dcb5a928..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/autogen/dockerversion/dockerversion.go +++ /dev/null @@ -1,11 +0,0 @@ -// AUTOGENERATED FILE; see /go/src/github.com/docker/docker/hack/make/.go-autogen -package dockerversion - -var ( - GITCOMMIT string = "617f18b-dirty" - VERSION string = "1.5.0-dev" - - IAMSTATIC string = "true" - INITSHA1 string = "" - INITPATH string = "" -) diff --git a/Godeps/_workspace/src/github.com/docker/docker/engine/engine.go b/Godeps/_workspace/src/github.com/docker/docker/engine/engine.go deleted file mode 100644 index 60532349..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/engine/engine.go +++ /dev/null @@ -1,255 +0,0 @@ -package engine - -import ( - "bufio" - "fmt" - "io" - "os" - "sort" - "strings" - "sync" - "time" - - "github.com/docker/docker/pkg/common" - "github.com/docker/docker/pkg/ioutils" -) - -// Installer is a standard interface for objects which can "install" themselves -// on an engine by registering handlers. -// This can be used as an entrypoint for external plugins etc. -type Installer interface { - Install(*Engine) error -} - -type Handler func(*Job) Status - -var globalHandlers map[string]Handler - -func init() { - globalHandlers = make(map[string]Handler) -} - -func Register(name string, handler Handler) error { - _, exists := globalHandlers[name] - if exists { - return fmt.Errorf("Can't overwrite global handler for command %s", name) - } - globalHandlers[name] = handler - return nil -} - -func unregister(name string) { - delete(globalHandlers, name) -} - -// The Engine is the core of Docker. -// It acts as a store for *containers*, and allows manipulation of these -// containers by executing *jobs*. -type Engine struct { - handlers map[string]Handler - catchall Handler - hack Hack // data for temporary hackery (see hack.go) - id string - Stdout io.Writer - Stderr io.Writer - Stdin io.Reader - Logging bool - tasks sync.WaitGroup - l sync.RWMutex // lock for shutdown - shutdownWait sync.WaitGroup - shutdown bool - onShutdown []func() // shutdown handlers -} - -func (eng *Engine) Register(name string, handler Handler) error { - _, exists := eng.handlers[name] - if exists { - return fmt.Errorf("Can't overwrite handler for command %s", name) - } - eng.handlers[name] = handler - return nil -} - -func (eng *Engine) RegisterCatchall(catchall Handler) { - eng.catchall = catchall -} - -// New initializes a new engine. -func New() *Engine { - eng := &Engine{ - handlers: make(map[string]Handler), - id: common.RandomString(), - Stdout: os.Stdout, - Stderr: os.Stderr, - Stdin: os.Stdin, - Logging: true, - } - eng.Register("commands", func(job *Job) Status { - for _, name := range eng.commands() { - job.Printf("%s\n", name) - } - return StatusOK - }) - // Copy existing global handlers - for k, v := range globalHandlers { - eng.handlers[k] = v - } - return eng -} - -func (eng *Engine) String() string { - return fmt.Sprintf("%s", eng.id[:8]) -} - -// Commands returns a list of all currently registered commands, -// sorted alphabetically. 
-func (eng *Engine) commands() []string { - names := make([]string, 0, len(eng.handlers)) - for name := range eng.handlers { - names = append(names, name) - } - sort.Strings(names) - return names -} - -// Job creates a new job which can later be executed. -// This function mimics `Command` from the standard os/exec package. -func (eng *Engine) Job(name string, args ...string) *Job { - job := &Job{ - Eng: eng, - Name: name, - Args: args, - Stdin: NewInput(), - Stdout: NewOutput(), - Stderr: NewOutput(), - env: &Env{}, - closeIO: true, - - cancelled: make(chan struct{}), - } - if eng.Logging { - job.Stderr.Add(ioutils.NopWriteCloser(eng.Stderr)) - } - - // Catchall is shadowed by specific Register. - if handler, exists := eng.handlers[name]; exists { - job.handler = handler - } else if eng.catchall != nil && name != "" { - // empty job names are illegal, catchall or not. - job.handler = eng.catchall - } - return job -} - -// OnShutdown registers a new callback to be called by Shutdown. -// This is typically used by services to perform cleanup. -func (eng *Engine) OnShutdown(h func()) { - eng.l.Lock() - eng.onShutdown = append(eng.onShutdown, h) - eng.shutdownWait.Add(1) - eng.l.Unlock() -} - -// Shutdown permanently shuts down eng as follows: -// - It refuses all new jobs, permanently. -// - It waits for all active jobs to complete (with no timeout) -// - It calls all shutdown handlers concurrently (if any) -// - It returns when all handlers complete, or after 15 seconds, -// whichever happens first. -func (eng *Engine) Shutdown() { - eng.l.Lock() - if eng.shutdown { - eng.l.Unlock() - eng.shutdownWait.Wait() - return - } - eng.shutdown = true - eng.l.Unlock() - // We don't need to protect the rest with a lock, to allow - // for other calls to immediately fail with "shutdown" instead - // of hanging for 15 seconds. - // This requires all concurrent calls to check for shutdown, otherwise - // it might cause a race. - - // Wait for all jobs to complete. - // Timeout after 5 seconds. - tasksDone := make(chan struct{}) - go func() { - eng.tasks.Wait() - close(tasksDone) - }() - select { - case <-time.After(time.Second * 5): - case <-tasksDone: - } - - // Call shutdown handlers, if any. - // Timeout after 10 seconds. - for _, h := range eng.onShutdown { - go func(h func()) { - h() - eng.shutdownWait.Done() - }(h) - } - done := make(chan struct{}) - go func() { - eng.shutdownWait.Wait() - close(done) - }() - select { - case <-time.After(time.Second * 10): - case <-done: - } - return -} - -// IsShutdown returns true if the engine is in the process -// of shutting down, or already shut down. -// Otherwise it returns false. -func (eng *Engine) IsShutdown() bool { - eng.l.RLock() - defer eng.l.RUnlock() - return eng.shutdown -} - -// ParseJob creates a new job from a text description using a shell-like syntax. -// -// The following syntax is used to parse `input`: -// -// * Words are separated using standard whitespaces as separators. -// * Quotes and backslashes are not interpreted. -// * Words of the form 'KEY=[VALUE]' are added to the job environment. -// * All other words are added to the job arguments. 
-// -// For example: -// -// job, _ := eng.ParseJob("VERBOSE=1 echo hello TEST=true world") -// -// The resulting job will have: -// job.Args={"echo", "hello", "world"} -// job.Env={"VERBOSE":"1", "TEST":"true"} -// -func (eng *Engine) ParseJob(input string) (*Job, error) { - // FIXME: use a full-featured command parser - scanner := bufio.NewScanner(strings.NewReader(input)) - scanner.Split(bufio.ScanWords) - var ( - cmd []string - env Env - ) - for scanner.Scan() { - word := scanner.Text() - kv := strings.SplitN(word, "=", 2) - if len(kv) == 2 { - env.Set(kv[0], kv[1]) - } else { - cmd = append(cmd, word) - } - } - if len(cmd) == 0 { - return nil, fmt.Errorf("empty command: '%s'", input) - } - job := eng.Job(cmd[0], cmd[1:]...) - job.Env().Init(&env) - return job, nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/engine/engine_test.go b/Godeps/_workspace/src/github.com/docker/docker/engine/engine_test.go deleted file mode 100644 index 96c3f0df..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/engine/engine_test.go +++ /dev/null @@ -1,236 +0,0 @@ -package engine - -import ( - "bytes" - "strings" - "testing" - - "github.com/docker/docker/pkg/ioutils" -) - -func TestRegister(t *testing.T) { - if err := Register("dummy1", nil); err != nil { - t.Fatal(err) - } - - if err := Register("dummy1", nil); err == nil { - t.Fatalf("Expecting error, got none") - } - // Register is global so let's cleanup to avoid conflicts - defer unregister("dummy1") - - eng := New() - - //Should fail because global handlers are copied - //at the engine creation - if err := eng.Register("dummy1", nil); err == nil { - t.Fatalf("Expecting error, got none") - } - - if err := eng.Register("dummy2", nil); err != nil { - t.Fatal(err) - } - - if err := eng.Register("dummy2", nil); err == nil { - t.Fatalf("Expecting error, got none") - } - defer unregister("dummy2") -} - -func TestJob(t *testing.T) { - eng := New() - job1 := eng.Job("dummy1", "--level=awesome") - - if job1.handler != nil { - t.Fatalf("job1.handler should be empty") - } - - h := func(j *Job) Status { - j.Printf("%s\n", j.Name) - return 42 - } - - eng.Register("dummy2", h) - defer unregister("dummy2") - job2 := eng.Job("dummy2", "--level=awesome") - - if job2.handler == nil { - t.Fatalf("job2.handler shouldn't be nil") - } - - if job2.handler(job2) != 42 { - t.Fatalf("handler dummy2 was not found in job2") - } -} - -func TestEngineShutdown(t *testing.T) { - eng := New() - if eng.IsShutdown() { - t.Fatalf("Engine should not show as shutdown") - } - eng.Shutdown() - if !eng.IsShutdown() { - t.Fatalf("Engine should show as shutdown") - } -} - -func TestEngineCommands(t *testing.T) { - eng := New() - handler := func(job *Job) Status { return StatusOK } - eng.Register("foo", handler) - eng.Register("bar", handler) - eng.Register("echo", handler) - eng.Register("die", handler) - var output bytes.Buffer - commands := eng.Job("commands") - commands.Stdout.Add(&output) - commands.Run() - expected := "bar\ncommands\ndie\necho\nfoo\n" - if result := output.String(); result != expected { - t.Fatalf("Unexpected output:\nExpected = %v\nResult = %v\n", expected, result) - } -} - -func TestEngineString(t *testing.T) { - eng1 := New() - eng2 := New() - s1 := eng1.String() - s2 := eng2.String() - if eng1 == eng2 { - t.Fatalf("Different engines should have different names (%v == %v)", s1, s2) - } -} - -func TestParseJob(t *testing.T) { - eng := New() - // Verify that the resulting job calls to the right place - var called bool - eng.Register("echo", 
func(job *Job) Status { - called = true - return StatusOK - }) - input := "echo DEBUG=1 hello world VERBOSITY=42" - job, err := eng.ParseJob(input) - if err != nil { - t.Fatal(err) - } - if job.Name != "echo" { - t.Fatalf("Invalid job name: %v", job.Name) - } - if strings.Join(job.Args, ":::") != "hello:::world" { - t.Fatalf("Invalid job args: %v", job.Args) - } - if job.Env().Get("DEBUG") != "1" { - t.Fatalf("Invalid job env: %v", job.Env) - } - if job.Env().Get("VERBOSITY") != "42" { - t.Fatalf("Invalid job env: %v", job.Env) - } - if len(job.Env().Map()) != 2 { - t.Fatalf("Invalid job env: %v", job.Env) - } - if err := job.Run(); err != nil { - t.Fatal(err) - } - if !called { - t.Fatalf("Job was not called") - } -} - -func TestCatchallEmptyName(t *testing.T) { - eng := New() - var called bool - eng.RegisterCatchall(func(job *Job) Status { - called = true - return StatusOK - }) - err := eng.Job("").Run() - if err == nil { - t.Fatalf("Engine.Job(\"\").Run() should return an error") - } - if called { - t.Fatalf("Engine.Job(\"\").Run() should return an error") - } -} - -// Ensure that a job within a job both using the same underlying standard -// output writer does not close the output of the outer job when the inner -// job's stdout is wrapped with a NopCloser. When not wrapped, it should -// close the outer job's output. -func TestNestedJobSharedOutput(t *testing.T) { - var ( - outerHandler Handler - innerHandler Handler - wrapOutput bool - ) - - outerHandler = func(job *Job) Status { - job.Stdout.Write([]byte("outer1")) - - innerJob := job.Eng.Job("innerJob") - - if wrapOutput { - innerJob.Stdout.Add(ioutils.NopWriteCloser(job.Stdout)) - } else { - innerJob.Stdout.Add(job.Stdout) - } - - if err := innerJob.Run(); err != nil { - t.Fatal(err) - } - - // If wrapOutput was *false* this write will do nothing. - // FIXME (jlhawn): It should cause an error to write to - // closed output. 
- job.Stdout.Write([]byte(" outer2")) - - return StatusOK - } - - innerHandler = func(job *Job) Status { - job.Stdout.Write([]byte(" inner")) - - return StatusOK - } - - eng := New() - eng.Register("outerJob", outerHandler) - eng.Register("innerJob", innerHandler) - - // wrapOutput starts *false* so the expected - // output of running the outer job will be: - // - // "outer1 inner" - // - outBuf := new(bytes.Buffer) - outerJob := eng.Job("outerJob") - outerJob.Stdout.Add(outBuf) - - if err := outerJob.Run(); err != nil { - t.Fatal(err) - } - - expectedOutput := "outer1 inner" - if outBuf.String() != expectedOutput { - t.Fatalf("expected job output to be %q, got %q", expectedOutput, outBuf.String()) - } - - // Set wrapOutput to true so that the expected - // output of running the outer job will be: - // - // "outer1 inner outer2" - // - wrapOutput = true - outBuf.Reset() - outerJob = eng.Job("outerJob") - outerJob.Stdout.Add(outBuf) - - if err := outerJob.Run(); err != nil { - t.Fatal(err) - } - - expectedOutput = "outer1 inner outer2" - if outBuf.String() != expectedOutput { - t.Fatalf("expected job output to be %q, got %q", expectedOutput, outBuf.String()) - } -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/engine/env.go b/Godeps/_workspace/src/github.com/docker/docker/engine/env.go deleted file mode 100644 index a671f13c..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/engine/env.go +++ /dev/null @@ -1,310 +0,0 @@ -package engine - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "strconv" - "strings" - "time" - - "github.com/docker/docker/utils" -) - -type Env []string - -// Get returns the last value associated with the given key. If there are no -// values associated with the key, Get returns the empty string. -func (env *Env) Get(key string) (value string) { - // not using Map() because of the extra allocations https://github.com/docker/docker/pull/7488#issuecomment-51638315 - for _, kv := range *env { - if strings.Index(kv, "=") == -1 { - continue - } - parts := strings.SplitN(kv, "=", 2) - if parts[0] != key { - continue - } - if len(parts) < 2 { - value = "" - } else { - value = parts[1] - } - } - return -} - -func (env *Env) Exists(key string) bool { - _, exists := env.Map()[key] - return exists -} - -// Len returns the number of keys in the environment. -// Note that len(env) might be different from env.Len(), -// because the same key might be set multiple times. 
-func (env *Env) Len() int { - return len(env.Map()) -} - -func (env *Env) Init(src *Env) { - (*env) = make([]string, 0, len(*src)) - for _, val := range *src { - (*env) = append((*env), val) - } -} - -func (env *Env) GetBool(key string) (value bool) { - s := strings.ToLower(strings.Trim(env.Get(key), " \t")) - if s == "" || s == "0" || s == "no" || s == "false" || s == "none" { - return false - } - return true -} - -func (env *Env) SetBool(key string, value bool) { - if value { - env.Set(key, "1") - } else { - env.Set(key, "0") - } -} - -func (env *Env) GetTime(key string) (time.Time, error) { - t, err := time.Parse(time.RFC3339Nano, env.Get(key)) - return t, err -} - -func (env *Env) SetTime(key string, t time.Time) { - env.Set(key, t.Format(time.RFC3339Nano)) -} - -func (env *Env) GetInt(key string) int { - return int(env.GetInt64(key)) -} - -func (env *Env) GetInt64(key string) int64 { - s := strings.Trim(env.Get(key), " \t") - val, err := strconv.ParseInt(s, 10, 64) - if err != nil { - return 0 - } - return val -} - -func (env *Env) SetInt(key string, value int) { - env.Set(key, fmt.Sprintf("%d", value)) -} - -func (env *Env) SetInt64(key string, value int64) { - env.Set(key, fmt.Sprintf("%d", value)) -} - -// Returns nil if key not found -func (env *Env) GetList(key string) []string { - sval := env.Get(key) - if sval == "" { - return nil - } - l := make([]string, 0, 1) - if err := json.Unmarshal([]byte(sval), &l); err != nil { - l = append(l, sval) - } - return l -} - -func (env *Env) GetSubEnv(key string) *Env { - sval := env.Get(key) - if sval == "" { - return nil - } - buf := bytes.NewBufferString(sval) - var sub Env - if err := sub.Decode(buf); err != nil { - return nil - } - return &sub -} - -func (env *Env) SetSubEnv(key string, sub *Env) error { - var buf bytes.Buffer - if err := sub.Encode(&buf); err != nil { - return err - } - env.Set(key, string(buf.Bytes())) - return nil -} - -func (env *Env) GetJson(key string, iface interface{}) error { - sval := env.Get(key) - if sval == "" { - return nil - } - return json.Unmarshal([]byte(sval), iface) -} - -func (env *Env) SetJson(key string, value interface{}) error { - sval, err := json.Marshal(value) - if err != nil { - return err - } - env.Set(key, string(sval)) - return nil -} - -func (env *Env) SetList(key string, value []string) error { - return env.SetJson(key, value) -} - -func (env *Env) Set(key, value string) { - *env = append(*env, key+"="+value) -} - -func NewDecoder(src io.Reader) *Decoder { - return &Decoder{ - json.NewDecoder(src), - } -} - -type Decoder struct { - *json.Decoder -} - -func (decoder *Decoder) Decode() (*Env, error) { - m := make(map[string]interface{}) - if err := decoder.Decoder.Decode(&m); err != nil { - return nil, err - } - env := &Env{} - for key, value := range m { - env.SetAuto(key, value) - } - return env, nil -} - -// DecodeEnv decodes `src` as a json dictionary, and adds -// each decoded key-value pair to the environment. -// -// If `src` cannot be decoded as a json dictionary, an error -// is returned. -func (env *Env) Decode(src io.Reader) error { - m := make(map[string]interface{}) - if err := json.NewDecoder(src).Decode(&m); err != nil { - return err - } - for k, v := range m { - env.SetAuto(k, v) - } - return nil -} - -func (env *Env) SetAuto(k string, v interface{}) { - // Issue 7941 - if the value in the incoming JSON is null then treat it - // as if they never specified the property at all. 
- if v == nil { - return - } - - // FIXME: we fix-convert float values to int, because - // encoding/json decodes integers to float64, but cannot encode them back. - // (See http://golang.org/src/pkg/encoding/json/decode.go#L46) - if fval, ok := v.(float64); ok { - env.SetInt64(k, int64(fval)) - } else if sval, ok := v.(string); ok { - env.Set(k, sval) - } else if val, err := json.Marshal(v); err == nil { - env.Set(k, string(val)) - } else { - env.Set(k, fmt.Sprintf("%v", v)) - } -} - -func changeFloats(v interface{}) interface{} { - switch v := v.(type) { - case float64: - return int(v) - case map[string]interface{}: - for key, val := range v { - v[key] = changeFloats(val) - } - case []interface{}: - for idx, val := range v { - v[idx] = changeFloats(val) - } - } - return v -} - -func (env *Env) Encode(dst io.Writer) error { - m := make(map[string]interface{}) - for k, v := range env.Map() { - var val interface{} - if err := json.Unmarshal([]byte(v), &val); err == nil { - // FIXME: we fix-convert float values to int, because - // encoding/json decodes integers to float64, but cannot encode them back. - // (See http://golang.org/src/pkg/encoding/json/decode.go#L46) - m[k] = changeFloats(val) - } else { - m[k] = v - } - } - if err := json.NewEncoder(dst).Encode(&m); err != nil { - return err - } - return nil -} - -func (env *Env) WriteTo(dst io.Writer) (int64, error) { - wc := utils.NewWriteCounter(dst) - err := env.Encode(wc) - return wc.Count, err -} - -func (env *Env) Import(src interface{}) (err error) { - defer func() { - if err != nil { - err = fmt.Errorf("ImportEnv: %s", err) - } - }() - var buf bytes.Buffer - if err := json.NewEncoder(&buf).Encode(src); err != nil { - return err - } - if err := env.Decode(&buf); err != nil { - return err - } - return nil -} - -func (env *Env) Map() map[string]string { - m := make(map[string]string) - for _, kv := range *env { - parts := strings.SplitN(kv, "=", 2) - m[parts[0]] = parts[1] - } - return m -} - -// MultiMap returns a representation of env as a -// map of string arrays, keyed by string. -// This is the same structure as http headers for example, -// which allow each key to have multiple values. -func (env *Env) MultiMap() map[string][]string { - m := make(map[string][]string) - for _, kv := range *env { - parts := strings.SplitN(kv, "=", 2) - m[parts[0]] = append(m[parts[0]], parts[1]) - } - return m -} - -// InitMultiMap removes all values in env, then initializes -// new values from the contents of m. 
-func (env *Env) InitMultiMap(m map[string][]string) { - (*env) = make([]string, 0, len(m)) - for k, vals := range m { - for _, v := range vals { - env.Set(k, v) - } - } -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/engine/env_test.go b/Godeps/_workspace/src/github.com/docker/docker/engine/env_test.go deleted file mode 100644 index 5182783b..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/engine/env_test.go +++ /dev/null @@ -1,358 +0,0 @@ -package engine - -import ( - "bytes" - "encoding/json" - "math/rand" - "testing" - "time" -) - -const chars = "abcdefghijklmnopqrstuvwxyz" + - "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + - "~!@#$%^&*()-_+={}[]\\|<,>.?/\"';:` " - -// RandomString returns random string of specified length -func RandomString(length int) string { - res := make([]byte, length) - for i := 0; i < length; i++ { - res[i] = chars[rand.Intn(len(chars))] - } - return string(res) -} - -func TestEnvLenZero(t *testing.T) { - env := &Env{} - if env.Len() != 0 { - t.Fatalf("%d", env.Len()) - } -} - -func TestEnvLenNotZero(t *testing.T) { - env := &Env{} - env.Set("foo", "bar") - env.Set("ga", "bu") - if env.Len() != 2 { - t.Fatalf("%d", env.Len()) - } -} - -func TestEnvLenDup(t *testing.T) { - env := &Env{ - "foo=bar", - "foo=baz", - "a=b", - } - // len(env) != env.Len() - if env.Len() != 2 { - t.Fatalf("%d", env.Len()) - } -} - -func TestEnvGetDup(t *testing.T) { - env := &Env{ - "foo=bar", - "foo=baz", - "foo=bif", - } - expected := "bif" - if v := env.Get("foo"); v != expected { - t.Fatalf("expect %q, got %q", expected, v) - } -} - -func TestNewJob(t *testing.T) { - job := mkJob(t, "dummy", "--level=awesome") - if job.Name != "dummy" { - t.Fatalf("Wrong job name: %s", job.Name) - } - if len(job.Args) != 1 { - t.Fatalf("Wrong number of job arguments: %d", len(job.Args)) - } - if job.Args[0] != "--level=awesome" { - t.Fatalf("Wrong job arguments: %s", job.Args[0]) - } -} - -func TestSetenv(t *testing.T) { - job := mkJob(t, "dummy") - job.Setenv("foo", "bar") - if val := job.Getenv("foo"); val != "bar" { - t.Fatalf("Getenv returns incorrect value: %s", val) - } - - job.Setenv("bar", "") - if val := job.Getenv("bar"); val != "" { - t.Fatalf("Getenv returns incorrect value: %s", val) - } - if val := job.Getenv("nonexistent"); val != "" { - t.Fatalf("Getenv returns incorrect value: %s", val) - } -} - -func TestSetenvBool(t *testing.T) { - job := mkJob(t, "dummy") - job.SetenvBool("foo", true) - if val := job.GetenvBool("foo"); !val { - t.Fatalf("GetenvBool returns incorrect value: %t", val) - } - - job.SetenvBool("bar", false) - if val := job.GetenvBool("bar"); val { - t.Fatalf("GetenvBool returns incorrect value: %t", val) - } - - if val := job.GetenvBool("nonexistent"); val { - t.Fatalf("GetenvBool returns incorrect value: %t", val) - } -} - -func TestSetenvTime(t *testing.T) { - job := mkJob(t, "dummy") - - now := time.Now() - job.SetenvTime("foo", now) - if val, err := job.GetenvTime("foo"); err != nil { - t.Fatalf("GetenvTime failed to parse: %v", err) - } else { - nowStr := now.Format(time.RFC3339) - valStr := val.Format(time.RFC3339) - if nowStr != valStr { - t.Fatalf("GetenvTime returns incorrect value: %s, Expected: %s", valStr, nowStr) - } - } - - job.Setenv("bar", "Obviously I'm not a date") - if val, err := job.GetenvTime("bar"); err == nil { - t.Fatalf("GetenvTime was supposed to fail, instead returned: %s", val) - } -} - -func TestSetenvInt(t *testing.T) { - job := mkJob(t, "dummy") - - job.SetenvInt("foo", -42) - if val := job.GetenvInt("foo"); val != -42 { 
- t.Fatalf("GetenvInt returns incorrect value: %d", val) - } - - job.SetenvInt("bar", 42) - if val := job.GetenvInt("bar"); val != 42 { - t.Fatalf("GetenvInt returns incorrect value: %d", val) - } - if val := job.GetenvInt("nonexistent"); val != 0 { - t.Fatalf("GetenvInt returns incorrect value: %d", val) - } -} - -func TestSetenvList(t *testing.T) { - job := mkJob(t, "dummy") - - job.SetenvList("foo", []string{"bar"}) - if val := job.GetenvList("foo"); len(val) != 1 || val[0] != "bar" { - t.Fatalf("GetenvList returns incorrect value: %v", val) - } - - job.SetenvList("bar", nil) - if val := job.GetenvList("bar"); val != nil { - t.Fatalf("GetenvList returns incorrect value: %v", val) - } - if val := job.GetenvList("nonexistent"); val != nil { - t.Fatalf("GetenvList returns incorrect value: %v", val) - } -} - -func TestEnviron(t *testing.T) { - job := mkJob(t, "dummy") - job.Setenv("foo", "bar") - val, exists := job.Environ()["foo"] - if !exists { - t.Fatalf("foo not found in the environ") - } - if val != "bar" { - t.Fatalf("bar not found in the environ") - } -} - -func TestMultiMap(t *testing.T) { - e := &Env{} - e.Set("foo", "bar") - e.Set("bar", "baz") - e.Set("hello", "world") - m := e.MultiMap() - e2 := &Env{} - e2.Set("old_key", "something something something") - e2.InitMultiMap(m) - if v := e2.Get("old_key"); v != "" { - t.Fatalf("%#v", v) - } - if v := e2.Get("bar"); v != "baz" { - t.Fatalf("%#v", v) - } - if v := e2.Get("hello"); v != "world" { - t.Fatalf("%#v", v) - } -} - -func testMap(l int) [][2]string { - res := make([][2]string, l) - for i := 0; i < l; i++ { - t := [2]string{RandomString(5), RandomString(20)} - res[i] = t - } - return res -} - -func BenchmarkSet(b *testing.B) { - fix := testMap(100) - b.ResetTimer() - for i := 0; i < b.N; i++ { - env := &Env{} - for _, kv := range fix { - env.Set(kv[0], kv[1]) - } - } -} - -func BenchmarkSetJson(b *testing.B) { - fix := testMap(100) - type X struct { - f string - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - env := &Env{} - for _, kv := range fix { - if err := env.SetJson(kv[0], X{kv[1]}); err != nil { - b.Fatal(err) - } - } - } -} - -func BenchmarkGet(b *testing.B) { - fix := testMap(100) - env := &Env{} - for _, kv := range fix { - env.Set(kv[0], kv[1]) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - for _, kv := range fix { - env.Get(kv[0]) - } - } -} - -func BenchmarkGetJson(b *testing.B) { - fix := testMap(100) - env := &Env{} - type X struct { - f string - } - for _, kv := range fix { - env.SetJson(kv[0], X{kv[1]}) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - for _, kv := range fix { - if err := env.GetJson(kv[0], &X{}); err != nil { - b.Fatal(err) - } - } - } -} - -func BenchmarkEncode(b *testing.B) { - fix := testMap(100) - env := &Env{} - type X struct { - f string - } - // half a json - for i, kv := range fix { - if i%2 != 0 { - if err := env.SetJson(kv[0], X{kv[1]}); err != nil { - b.Fatal(err) - } - continue - } - env.Set(kv[0], kv[1]) - } - var writer bytes.Buffer - b.ResetTimer() - for i := 0; i < b.N; i++ { - env.Encode(&writer) - writer.Reset() - } -} - -func BenchmarkDecode(b *testing.B) { - fix := testMap(100) - env := &Env{} - type X struct { - f string - } - // half a json - for i, kv := range fix { - if i%2 != 0 { - if err := env.SetJson(kv[0], X{kv[1]}); err != nil { - b.Fatal(err) - } - continue - } - env.Set(kv[0], kv[1]) - } - var writer bytes.Buffer - env.Encode(&writer) - denv := &Env{} - reader := bytes.NewReader(writer.Bytes()) - b.ResetTimer() - for i := 0; i < b.N; i++ { - err := 
denv.Decode(reader) - if err != nil { - b.Fatal(err) - } - reader.Seek(0, 0) - } -} - -func TestLongNumbers(t *testing.T) { - type T struct { - TestNum int64 - } - v := T{67108864} - var buf bytes.Buffer - e := &Env{} - e.SetJson("Test", v) - if err := e.Encode(&buf); err != nil { - t.Fatal(err) - } - res := make(map[string]T) - if err := json.Unmarshal(buf.Bytes(), &res); err != nil { - t.Fatal(err) - } - if res["Test"].TestNum != v.TestNum { - t.Fatalf("TestNum %d, expected %d", res["Test"].TestNum, v.TestNum) - } -} - -func TestLongNumbersArray(t *testing.T) { - type T struct { - TestNum []int64 - } - v := T{[]int64{67108864}} - var buf bytes.Buffer - e := &Env{} - e.SetJson("Test", v) - if err := e.Encode(&buf); err != nil { - t.Fatal(err) - } - res := make(map[string]T) - if err := json.Unmarshal(buf.Bytes(), &res); err != nil { - t.Fatal(err) - } - if res["Test"].TestNum[0] != v.TestNum[0] { - t.Fatalf("TestNum %d, expected %d", res["Test"].TestNum, v.TestNum) - } -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/engine/hack.go b/Godeps/_workspace/src/github.com/docker/docker/engine/hack.go deleted file mode 100644 index be4fadbe..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/engine/hack.go +++ /dev/null @@ -1,21 +0,0 @@ -package engine - -type Hack map[string]interface{} - -func (eng *Engine) Hack_GetGlobalVar(key string) interface{} { - if eng.hack == nil { - return nil - } - val, exists := eng.hack[key] - if !exists { - return nil - } - return val -} - -func (eng *Engine) Hack_SetGlobalVar(key string, val interface{}) { - if eng.hack == nil { - eng.hack = make(Hack) - } - eng.hack[key] = val -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/engine/helpers_test.go b/Godeps/_workspace/src/github.com/docker/docker/engine/helpers_test.go deleted file mode 100644 index cfa11da7..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/engine/helpers_test.go +++ /dev/null @@ -1,11 +0,0 @@ -package engine - -import ( - "testing" -) - -var globalTestID string - -func mkJob(t *testing.T, name string, args ...string) *Job { - return New().Job(name, args...) -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/engine/http.go b/Godeps/_workspace/src/github.com/docker/docker/engine/http.go deleted file mode 100644 index 7e4dcd7b..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/engine/http.go +++ /dev/null @@ -1,42 +0,0 @@ -package engine - -import ( - "net/http" - "path" -) - -// ServeHTTP executes a job as specified by the http request `r`, and sends the -// result as an http response. -// This method allows an Engine instance to be passed as a standard http.Handler interface. -// -// Note that the protocol used in this method is a convenience wrapper and is not the canonical -// implementation of remote job execution. This is because HTTP/1 does not handle stream multiplexing, -// and so cannot differentiate stdout from stderr. Additionally, headers cannot be added to a response -// once data has been written to the body, which makes it inconvenient to return metadata such -// as the exit status. -// -func (eng *Engine) ServeHTTP(w http.ResponseWriter, r *http.Request) { - var ( - jobName = path.Base(r.URL.Path) - jobArgs, exists = r.URL.Query()["a"] - ) - if !exists { - jobArgs = []string{} - } - w.Header().Set("Job-Name", jobName) - for _, arg := range jobArgs { - w.Header().Add("Job-Args", arg) - } - job := eng.Job(jobName, jobArgs...) 
- job.Stdout.Add(w) - job.Stderr.Add(w) - // FIXME: distinguish job status from engine error in Run() - // The former should be passed as a special header, the former - // should cause a 500 status - w.WriteHeader(http.StatusOK) - // The exit status cannot be sent reliably with HTTP1, because headers - // can only be sent before the body. - // (we could possibly use http footers via chunked encoding, but I couldn't find - // how to use them in net/http) - job.Run() -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/engine/job.go b/Godeps/_workspace/src/github.com/docker/docker/engine/job.go deleted file mode 100644 index ecb68c3e..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/engine/job.go +++ /dev/null @@ -1,269 +0,0 @@ -package engine - -import ( - "bytes" - "fmt" - "io" - "strings" - "sync" - "time" - - log "github.com/Sirupsen/logrus" -) - -// A job is the fundamental unit of work in the docker engine. -// Everything docker can do should eventually be exposed as a job. -// For example: execute a process in a container, create a new container, -// download an archive from the internet, serve the http api, etc. -// -// The job API is designed after unix processes: a job has a name, arguments, -// environment variables, standard streams for input, output and error, and -// an exit status which can indicate success (0) or error (anything else). -// -// For status, 0 indicates success, and any other integers indicates an error. -// This allows for richer error reporting. -// -type Job struct { - Eng *Engine - Name string - Args []string - env *Env - Stdout *Output - Stderr *Output - Stdin *Input - handler Handler - status Status - end time.Time - closeIO bool - - // When closed, the job has been cancelled. - // Note: not all jobs implement cancellation. - // See Job.Cancel() and Job.WaitCancelled() - cancelled chan struct{} - cancelOnce sync.Once -} - -type Status int - -const ( - StatusOK Status = 0 - StatusErr Status = 1 - StatusNotFound Status = 127 -) - -// Run executes the job and blocks until the job completes. -// If the job returns a failure status, an error is returned -// which includes the status. -func (job *Job) Run() error { - if job.Eng.IsShutdown() && !job.GetenvBool("overrideShutdown") { - return fmt.Errorf("engine is shutdown") - } - // FIXME: this is a temporary workaround to avoid Engine.Shutdown - // waiting 5 seconds for server/api.ServeApi to complete (which it never will) - // everytime the daemon is cleanly restarted. - // The permanent fix is to implement Job.Stop and Job.OnStop so that - // ServeApi can cooperate and terminate cleanly. 
- if job.Name != "serveapi" { - job.Eng.l.Lock() - job.Eng.tasks.Add(1) - job.Eng.l.Unlock() - defer job.Eng.tasks.Done() - } - // FIXME: make this thread-safe - // FIXME: implement wait - if !job.end.IsZero() { - return fmt.Errorf("%s: job has already completed", job.Name) - } - // Log beginning and end of the job - if job.Eng.Logging { - log.Infof("+job %s", job.CallString()) - defer func() { - log.Infof("-job %s%s", job.CallString(), job.StatusString()) - }() - } - var errorMessage = bytes.NewBuffer(nil) - job.Stderr.Add(errorMessage) - if job.handler == nil { - job.Errorf("%s: command not found", job.Name) - job.status = 127 - } else { - job.status = job.handler(job) - job.end = time.Now() - } - if job.closeIO { - // Wait for all background tasks to complete - if err := job.Stdout.Close(); err != nil { - return err - } - if err := job.Stderr.Close(); err != nil { - return err - } - if err := job.Stdin.Close(); err != nil { - return err - } - } - if job.status != 0 { - return fmt.Errorf("%s", Tail(errorMessage, 1)) - } - - return nil -} - -func (job *Job) CallString() string { - return fmt.Sprintf("%s(%s)", job.Name, strings.Join(job.Args, ", ")) -} - -func (job *Job) StatusString() string { - // If the job hasn't completed, status string is empty - if job.end.IsZero() { - return "" - } - var okerr string - if job.status == StatusOK { - okerr = "OK" - } else { - okerr = "ERR" - } - return fmt.Sprintf(" = %s (%d)", okerr, job.status) -} - -// String returns a human-readable description of `job` -func (job *Job) String() string { - return fmt.Sprintf("%s.%s%s", job.Eng, job.CallString(), job.StatusString()) -} - -func (job *Job) Env() *Env { - return job.env -} - -func (job *Job) EnvExists(key string) (value bool) { - return job.env.Exists(key) -} - -func (job *Job) Getenv(key string) (value string) { - return job.env.Get(key) -} - -func (job *Job) GetenvBool(key string) (value bool) { - return job.env.GetBool(key) -} - -func (job *Job) SetenvBool(key string, value bool) { - job.env.SetBool(key, value) -} - -func (job *Job) GetenvTime(key string) (value time.Time, err error) { - return job.env.GetTime(key) -} - -func (job *Job) SetenvTime(key string, value time.Time) { - job.env.SetTime(key, value) -} - -func (job *Job) GetenvSubEnv(key string) *Env { - return job.env.GetSubEnv(key) -} - -func (job *Job) SetenvSubEnv(key string, value *Env) error { - return job.env.SetSubEnv(key, value) -} - -func (job *Job) GetenvInt64(key string) int64 { - return job.env.GetInt64(key) -} - -func (job *Job) GetenvInt(key string) int { - return job.env.GetInt(key) -} - -func (job *Job) SetenvInt64(key string, value int64) { - job.env.SetInt64(key, value) -} - -func (job *Job) SetenvInt(key string, value int) { - job.env.SetInt(key, value) -} - -// Returns nil if key not found -func (job *Job) GetenvList(key string) []string { - return job.env.GetList(key) -} - -func (job *Job) GetenvJson(key string, iface interface{}) error { - return job.env.GetJson(key, iface) -} - -func (job *Job) SetenvJson(key string, value interface{}) error { - return job.env.SetJson(key, value) -} - -func (job *Job) SetenvList(key string, value []string) error { - return job.env.SetJson(key, value) -} - -func (job *Job) Setenv(key, value string) { - job.env.Set(key, value) -} - -// DecodeEnv decodes `src` as a json dictionary, and adds -// each decoded key-value pair to the environment. -// -// If `src` cannot be decoded as a json dictionary, an error -// is returned. 
-func (job *Job) DecodeEnv(src io.Reader) error { - return job.env.Decode(src) -} - -func (job *Job) EncodeEnv(dst io.Writer) error { - return job.env.Encode(dst) -} - -func (job *Job) ImportEnv(src interface{}) (err error) { - return job.env.Import(src) -} - -func (job *Job) Environ() map[string]string { - return job.env.Map() -} - -func (job *Job) Logf(format string, args ...interface{}) (n int, err error) { - prefixedFormat := fmt.Sprintf("[%s] %s\n", job, strings.TrimRight(format, "\n")) - return fmt.Fprintf(job.Stderr, prefixedFormat, args...) -} - -func (job *Job) Printf(format string, args ...interface{}) (n int, err error) { - return fmt.Fprintf(job.Stdout, format, args...) -} - -func (job *Job) Errorf(format string, args ...interface{}) Status { - if format[len(format)-1] != '\n' { - format = format + "\n" - } - fmt.Fprintf(job.Stderr, format, args...) - return StatusErr -} - -func (job *Job) Error(err error) Status { - fmt.Fprintf(job.Stderr, "%s\n", err) - return StatusErr -} - -func (job *Job) StatusCode() int { - return int(job.status) -} - -func (job *Job) SetCloseIO(val bool) { - job.closeIO = val -} - -// When called, causes the Job.WaitCancelled channel to unblock. -func (job *Job) Cancel() { - job.cancelOnce.Do(func() { - close(job.cancelled) - }) -} - -// Returns a channel which is closed ("never blocks") when the job is cancelled. -func (job *Job) WaitCancelled() <-chan struct{} { - return job.cancelled -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/engine/job_test.go b/Godeps/_workspace/src/github.com/docker/docker/engine/job_test.go deleted file mode 100644 index 9f8c7609..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/engine/job_test.go +++ /dev/null @@ -1,75 +0,0 @@ -package engine - -import ( - "bytes" - "fmt" - "testing" -) - -func TestJobStatusOK(t *testing.T) { - eng := New() - eng.Register("return_ok", func(job *Job) Status { return StatusOK }) - err := eng.Job("return_ok").Run() - if err != nil { - t.Fatalf("Expected: err=%v\nReceived: err=%v", nil, err) - } -} - -func TestJobStatusErr(t *testing.T) { - eng := New() - eng.Register("return_err", func(job *Job) Status { return StatusErr }) - err := eng.Job("return_err").Run() - if err == nil { - t.Fatalf("When a job returns StatusErr, Run() should return an error") - } -} - -func TestJobStatusNotFound(t *testing.T) { - eng := New() - eng.Register("return_not_found", func(job *Job) Status { return StatusNotFound }) - err := eng.Job("return_not_found").Run() - if err == nil { - t.Fatalf("When a job returns StatusNotFound, Run() should return an error") - } -} - -func TestJobStdoutString(t *testing.T) { - eng := New() - // FIXME: test multiple combinations of output and status - eng.Register("say_something_in_stdout", func(job *Job) Status { - job.Printf("Hello world\n") - return StatusOK - }) - - job := eng.Job("say_something_in_stdout") - var outputBuffer = bytes.NewBuffer(nil) - job.Stdout.Add(outputBuffer) - if err := job.Run(); err != nil { - t.Fatal(err) - } - fmt.Println(outputBuffer) - var output = Tail(outputBuffer, 1) - if expectedOutput := "Hello world"; output != expectedOutput { - t.Fatalf("Stdout last line:\nExpected: %v\nReceived: %v", expectedOutput, output) - } -} - -func TestJobStderrString(t *testing.T) { - eng := New() - // FIXME: test multiple combinations of output and status - eng.Register("say_something_in_stderr", func(job *Job) Status { - job.Errorf("Something might happen\nHere it comes!\nOh no...\nSomething happened\n") - return StatusOK - }) - - job := 
eng.Job("say_something_in_stderr") - var outputBuffer = bytes.NewBuffer(nil) - job.Stderr.Add(outputBuffer) - if err := job.Run(); err != nil { - t.Fatal(err) - } - var output = Tail(outputBuffer, 1) - if expectedOutput := "Something happened"; output != expectedOutput { - t.Fatalf("Stderr last line:\nExpected: %v\nReceived: %v", expectedOutput, output) - } -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/engine/shutdown_test.go b/Godeps/_workspace/src/github.com/docker/docker/engine/shutdown_test.go deleted file mode 100644 index 13d80492..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/engine/shutdown_test.go +++ /dev/null @@ -1,80 +0,0 @@ -package engine - -import ( - "testing" - "time" -) - -func TestShutdownEmpty(t *testing.T) { - eng := New() - if eng.IsShutdown() { - t.Fatalf("IsShutdown should be false") - } - eng.Shutdown() - if !eng.IsShutdown() { - t.Fatalf("IsShutdown should be true") - } -} - -func TestShutdownAfterRun(t *testing.T) { - eng := New() - var called bool - eng.Register("foo", func(job *Job) Status { - called = true - return StatusOK - }) - if err := eng.Job("foo").Run(); err != nil { - t.Fatal(err) - } - eng.Shutdown() - if err := eng.Job("foo").Run(); err == nil { - t.Fatalf("%#v", *eng) - } -} - -// An approximate and racy, but better-than-nothing test that -// -func TestShutdownDuringRun(t *testing.T) { - var ( - jobDelay time.Duration = 500 * time.Millisecond - jobDelayLow time.Duration = 100 * time.Millisecond - jobDelayHigh time.Duration = 700 * time.Millisecond - ) - eng := New() - var completed bool - eng.Register("foo", func(job *Job) Status { - time.Sleep(jobDelay) - completed = true - return StatusOK - }) - go eng.Job("foo").Run() - time.Sleep(50 * time.Millisecond) - done := make(chan struct{}) - var startShutdown time.Time - go func() { - startShutdown = time.Now() - eng.Shutdown() - close(done) - }() - time.Sleep(50 * time.Millisecond) - if err := eng.Job("foo").Run(); err == nil { - t.Fatalf("run on shutdown should fail: %#v", *eng) - } - <-done - // Verify that Shutdown() blocks for roughly 500ms, instead - // of returning almost instantly. - // - // We use >100ms to leave ample margin for race conditions between - // goroutines. It's possible (but unlikely in reasonable testing - // conditions), that this test will cause a false positive or false - // negative. But it's probably better than not having any test - // for the 99.999% of time where testing conditions are reasonable. - if d := time.Since(startShutdown); d.Nanoseconds() < jobDelayLow.Nanoseconds() { - t.Fatalf("shutdown did not block long enough: %v", d) - } else if d.Nanoseconds() > jobDelayHigh.Nanoseconds() { - t.Fatalf("shutdown blocked too long: %v", d) - } - if !completed { - t.Fatalf("job did not complete") - } -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/engine/streams.go b/Godeps/_workspace/src/github.com/docker/docker/engine/streams.go deleted file mode 100644 index 216fb898..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/engine/streams.go +++ /dev/null @@ -1,225 +0,0 @@ -package engine - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "strings" - "sync" - "unicode" -) - -type Output struct { - sync.Mutex - dests []io.Writer - tasks sync.WaitGroup - used bool -} - -// Tail returns the n last lines of a buffer -// stripped out of trailing white spaces, if any. 
-// -// if n <= 0, returns an empty string -func Tail(buffer *bytes.Buffer, n int) string { - if n <= 0 { - return "" - } - s := strings.TrimRightFunc(buffer.String(), unicode.IsSpace) - i := len(s) - 1 - for ; i >= 0 && n > 0; i-- { - if s[i] == '\n' { - n-- - if n == 0 { - break - } - } - } - // when i == -1, return the whole string which is s[0:] - return s[i+1:] -} - -// NewOutput returns a new Output object with no destinations attached. -// Writing to an empty Output will cause the written data to be discarded. -func NewOutput() *Output { - return &Output{} -} - -// Return true if something was written on this output -func (o *Output) Used() bool { - o.Lock() - defer o.Unlock() - return o.used -} - -// Add attaches a new destination to the Output. Any data subsequently written -// to the output will be written to the new destination in addition to all the others. -// This method is thread-safe. -func (o *Output) Add(dst io.Writer) { - o.Lock() - defer o.Unlock() - o.dests = append(o.dests, dst) -} - -// Set closes and remove existing destination and then attaches a new destination to -// the Output. Any data subsequently written to the output will be written to the new -// destination in addition to all the others. This method is thread-safe. -func (o *Output) Set(dst io.Writer) { - o.Close() - o.Lock() - defer o.Unlock() - o.dests = []io.Writer{dst} -} - -// AddPipe creates an in-memory pipe with io.Pipe(), adds its writing end as a destination, -// and returns its reading end for consumption by the caller. -// This is a rough equivalent similar to Cmd.StdoutPipe() in the standard os/exec package. -// This method is thread-safe. -func (o *Output) AddPipe() (io.Reader, error) { - r, w := io.Pipe() - o.Add(w) - return r, nil -} - -// Write writes the same data to all registered destinations. -// This method is thread-safe. -func (o *Output) Write(p []byte) (n int, err error) { - o.Lock() - defer o.Unlock() - o.used = true - var firstErr error - for _, dst := range o.dests { - _, err := dst.Write(p) - if err != nil && firstErr == nil { - firstErr = err - } - } - return len(p), firstErr -} - -// Close unregisters all destinations and waits for all background -// AddTail and AddString tasks to complete. -// The Close method of each destination is called if it exists. -func (o *Output) Close() error { - o.Lock() - defer o.Unlock() - var firstErr error - for _, dst := range o.dests { - if closer, ok := dst.(io.Closer); ok { - err := closer.Close() - if err != nil && firstErr == nil { - firstErr = err - } - } - } - o.tasks.Wait() - o.dests = nil - return firstErr -} - -type Input struct { - src io.Reader - sync.Mutex -} - -// NewInput returns a new Input object with no source attached. -// Reading to an empty Input will return io.EOF. -func NewInput() *Input { - return &Input{} -} - -// Read reads from the input in a thread-safe way. -func (i *Input) Read(p []byte) (n int, err error) { - i.Mutex.Lock() - defer i.Mutex.Unlock() - if i.src == nil { - return 0, io.EOF - } - return i.src.Read(p) -} - -// Closes the src -// Not thread safe on purpose -func (i *Input) Close() error { - if i.src != nil { - if closer, ok := i.src.(io.Closer); ok { - return closer.Close() - } - } - return nil -} - -// Add attaches a new source to the input. -// Add can only be called once per input. Subsequent calls will -// return an error. 
-func (i *Input) Add(src io.Reader) error { - i.Mutex.Lock() - defer i.Mutex.Unlock() - if i.src != nil { - return fmt.Errorf("Maximum number of sources reached: 1") - } - i.src = src - return nil -} - -// AddEnv starts a new goroutine which will decode all subsequent data -// as a stream of json-encoded objects, and point `dst` to the last -// decoded object. -// The result `env` can be queried using the type-neutral Env interface. -// It is not safe to query `env` until the Output is closed. -func (o *Output) AddEnv() (dst *Env, err error) { - src, err := o.AddPipe() - if err != nil { - return nil, err - } - dst = &Env{} - o.tasks.Add(1) - go func() { - defer o.tasks.Done() - decoder := NewDecoder(src) - for { - env, err := decoder.Decode() - if err != nil { - return - } - *dst = *env - } - }() - return dst, nil -} - -func (o *Output) AddListTable() (dst *Table, err error) { - src, err := o.AddPipe() - if err != nil { - return nil, err - } - dst = NewTable("", 0) - o.tasks.Add(1) - go func() { - defer o.tasks.Done() - content, err := ioutil.ReadAll(src) - if err != nil { - return - } - if _, err := dst.ReadListFrom(content); err != nil { - return - } - }() - return dst, nil -} - -func (o *Output) AddTable() (dst *Table, err error) { - src, err := o.AddPipe() - if err != nil { - return nil, err - } - dst = NewTable("", 0) - o.tasks.Add(1) - go func() { - defer o.tasks.Done() - if _, err := dst.ReadFrom(src); err != nil { - return - } - }() - return dst, nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/engine/streams_test.go b/Godeps/_workspace/src/github.com/docker/docker/engine/streams_test.go deleted file mode 100644 index 476a721b..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/engine/streams_test.go +++ /dev/null @@ -1,215 +0,0 @@ -package engine - -import ( - "bufio" - "bytes" - "fmt" - "io" - "io/ioutil" - "strings" - "testing" -) - -type sentinelWriteCloser struct { - calledWrite bool - calledClose bool -} - -func (w *sentinelWriteCloser) Write(p []byte) (int, error) { - w.calledWrite = true - return len(p), nil -} - -func (w *sentinelWriteCloser) Close() error { - w.calledClose = true - return nil -} - -func TestOutputAddEnv(t *testing.T) { - input := "{\"foo\": \"bar\", \"answer_to_life_the_universe_and_everything\": 42}" - o := NewOutput() - result, err := o.AddEnv() - if err != nil { - t.Fatal(err) - } - o.Write([]byte(input)) - o.Close() - if v := result.Get("foo"); v != "bar" { - t.Errorf("Expected %v, got %v", "bar", v) - } - if v := result.GetInt("answer_to_life_the_universe_and_everything"); v != 42 { - t.Errorf("Expected %v, got %v", 42, v) - } - if v := result.Get("this-value-doesnt-exist"); v != "" { - t.Errorf("Expected %v, got %v", "", v) - } -} - -func TestOutputAddClose(t *testing.T) { - o := NewOutput() - var s sentinelWriteCloser - o.Add(&s) - if err := o.Close(); err != nil { - t.Fatal(err) - } - // Write data after the output is closed. - // Write should succeed, but no destination should receive it. 
- if _, err := o.Write([]byte("foo bar")); err != nil { - t.Fatal(err) - } - if !s.calledClose { - t.Fatal("Output.Close() didn't close the destination") - } -} - -func TestOutputAddPipe(t *testing.T) { - var testInputs = []string{ - "hello, world!", - "One\nTwo\nThree", - "", - "A line\nThen another nl-terminated line\n", - "A line followed by an empty line\n\n", - } - for _, input := range testInputs { - expectedOutput := input - o := NewOutput() - r, err := o.AddPipe() - if err != nil { - t.Fatal(err) - } - go func(o *Output) { - if n, err := o.Write([]byte(input)); err != nil { - t.Error(err) - } else if n != len(input) { - t.Errorf("Expected %d, got %d", len(input), n) - } - if err := o.Close(); err != nil { - t.Error(err) - } - }(o) - output, err := ioutil.ReadAll(r) - if err != nil { - t.Fatal(err) - } - if string(output) != expectedOutput { - t.Errorf("Last line is not stored as return string.\nExpected: '%s'\nGot: '%s'", expectedOutput, output) - } - } -} - -func TestTail(t *testing.T) { - var tests = make(map[string][]string) - tests["hello, world!"] = []string{ - "", - "hello, world!", - "hello, world!", - "hello, world!", - } - tests["One\nTwo\nThree"] = []string{ - "", - "Three", - "Two\nThree", - "One\nTwo\nThree", - } - tests["One\nTwo\n\n\n"] = []string{ - "", - "Two", - "One\nTwo", - } - for input, outputs := range tests { - for n, expectedOutput := range outputs { - output := Tail(bytes.NewBufferString(input), n) - if output != expectedOutput { - t.Errorf("Tail n=%d returned wrong result.\nExpected: '%s'\nGot : '%s'", n, expectedOutput, output) - } - } - } -} - -func lastLine(txt string) string { - scanner := bufio.NewScanner(strings.NewReader(txt)) - var lastLine string - for scanner.Scan() { - lastLine = scanner.Text() - } - return lastLine -} - -func TestOutputAdd(t *testing.T) { - o := NewOutput() - b := &bytes.Buffer{} - o.Add(b) - input := "hello, world!" 
- if n, err := o.Write([]byte(input)); err != nil { - t.Fatal(err) - } else if n != len(input) { - t.Fatalf("Expected %d, got %d", len(input), n) - } - if output := b.String(); output != input { - t.Fatalf("Received wrong data from Add.\nExpected: '%s'\nGot: '%s'", input, output) - } -} - -func TestOutputWriteError(t *testing.T) { - o := NewOutput() - buf := &bytes.Buffer{} - o.Add(buf) - r, w := io.Pipe() - input := "Hello there" - expectedErr := fmt.Errorf("This is an error") - r.CloseWithError(expectedErr) - o.Add(w) - n, err := o.Write([]byte(input)) - if err != expectedErr { - t.Fatalf("Output.Write() should return the first error encountered, if any") - } - if buf.String() != input { - t.Fatalf("Output.Write() should attempt write on all destinations, even after encountering an error") - } - if n != len(input) { - t.Fatalf("Output.Write() should return the size of the input if it successfully writes to at least one destination") - } -} - -func TestInputAddEmpty(t *testing.T) { - i := NewInput() - var b bytes.Buffer - if err := i.Add(&b); err != nil { - t.Fatal(err) - } - data, err := ioutil.ReadAll(i) - if err != nil { - t.Fatal(err) - } - if len(data) > 0 { - t.Fatalf("Read from empty input shoul yield no data") - } -} - -func TestInputAddTwo(t *testing.T) { - i := NewInput() - var b1 bytes.Buffer - // First add should succeed - if err := i.Add(&b1); err != nil { - t.Fatal(err) - } - var b2 bytes.Buffer - // Second add should fail - if err := i.Add(&b2); err == nil { - t.Fatalf("Adding a second source should return an error") - } -} - -func TestInputAddNotEmpty(t *testing.T) { - i := NewInput() - b := bytes.NewBufferString("hello world\nabc") - expectedResult := b.String() - i.Add(b) - result, err := ioutil.ReadAll(i) - if err != nil { - t.Fatal(err) - } - if string(result) != expectedResult { - t.Fatalf("Expected: %v\nReceived: %v", expectedResult, result) - } -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/engine/table.go b/Godeps/_workspace/src/github.com/docker/docker/engine/table.go deleted file mode 100644 index 4498bdf1..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/engine/table.go +++ /dev/null @@ -1,140 +0,0 @@ -package engine - -import ( - "bytes" - "encoding/json" - "io" - "sort" - "strconv" -) - -type Table struct { - Data []*Env - sortKey string - Chan chan *Env -} - -func NewTable(sortKey string, sizeHint int) *Table { - return &Table{ - make([]*Env, 0, sizeHint), - sortKey, - make(chan *Env), - } -} - -func (t *Table) SetKey(sortKey string) { - t.sortKey = sortKey -} - -func (t *Table) Add(env *Env) { - t.Data = append(t.Data, env) -} - -func (t *Table) Len() int { - return len(t.Data) -} - -func (t *Table) Less(a, b int) bool { - return t.lessBy(a, b, t.sortKey) -} - -func (t *Table) lessBy(a, b int, by string) bool { - keyA := t.Data[a].Get(by) - keyB := t.Data[b].Get(by) - intA, errA := strconv.ParseInt(keyA, 10, 64) - intB, errB := strconv.ParseInt(keyB, 10, 64) - if errA == nil && errB == nil { - return intA < intB - } - return keyA < keyB -} - -func (t *Table) Swap(a, b int) { - tmp := t.Data[a] - t.Data[a] = t.Data[b] - t.Data[b] = tmp -} - -func (t *Table) Sort() { - sort.Sort(t) -} - -func (t *Table) ReverseSort() { - sort.Sort(sort.Reverse(t)) -} - -func (t *Table) WriteListTo(dst io.Writer) (n int64, err error) { - if _, err := dst.Write([]byte{'['}); err != nil { - return -1, err - } - n = 1 - for i, env := range t.Data { - bytes, err := env.WriteTo(dst) - if err != nil { - return -1, err - } - n += bytes - if i != 
len(t.Data)-1 { - if _, err := dst.Write([]byte{','}); err != nil { - return -1, err - } - n++ - } - } - if _, err := dst.Write([]byte{']'}); err != nil { - return -1, err - } - return n + 1, nil -} - -func (t *Table) ToListString() (string, error) { - buffer := bytes.NewBuffer(nil) - if _, err := t.WriteListTo(buffer); err != nil { - return "", err - } - return buffer.String(), nil -} - -func (t *Table) WriteTo(dst io.Writer) (n int64, err error) { - for _, env := range t.Data { - bytes, err := env.WriteTo(dst) - if err != nil { - return -1, err - } - n += bytes - } - return n, nil -} - -func (t *Table) ReadListFrom(src []byte) (n int64, err error) { - var array []interface{} - - if err := json.Unmarshal(src, &array); err != nil { - return -1, err - } - - for _, item := range array { - if m, ok := item.(map[string]interface{}); ok { - env := &Env{} - for key, value := range m { - env.SetAuto(key, value) - } - t.Add(env) - } - } - - return int64(len(src)), nil -} - -func (t *Table) ReadFrom(src io.Reader) (n int64, err error) { - decoder := NewDecoder(src) - for { - env, err := decoder.Decode() - if err == io.EOF { - return 0, nil - } else if err != nil { - return -1, err - } - t.Add(env) - } -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/engine/table_test.go b/Godeps/_workspace/src/github.com/docker/docker/engine/table_test.go deleted file mode 100644 index 9a32ac9c..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/engine/table_test.go +++ /dev/null @@ -1,112 +0,0 @@ -package engine - -import ( - "bytes" - "encoding/json" - "testing" -) - -func TestTableWriteTo(t *testing.T) { - table := NewTable("", 0) - e := &Env{} - e.Set("foo", "bar") - table.Add(e) - var buf bytes.Buffer - if _, err := table.WriteTo(&buf); err != nil { - t.Fatal(err) - } - output := make(map[string]string) - if err := json.Unmarshal(buf.Bytes(), &output); err != nil { - t.Fatal(err) - } - if len(output) != 1 { - t.Fatalf("Incorrect output: %v", output) - } - if val, exists := output["foo"]; !exists || val != "bar" { - t.Fatalf("Inccorect output: %v", output) - } -} - -func TestTableSortStringValue(t *testing.T) { - table := NewTable("Key", 0) - - e := &Env{} - e.Set("Key", "A") - table.Add(e) - - e = &Env{} - e.Set("Key", "D") - table.Add(e) - - e = &Env{} - e.Set("Key", "B") - table.Add(e) - - e = &Env{} - e.Set("Key", "C") - table.Add(e) - - table.Sort() - - if len := table.Len(); len != 4 { - t.Fatalf("Expected 4, got %d", len) - } - - if value := table.Data[0].Get("Key"); value != "A" { - t.Fatalf("Expected A, got %s", value) - } - - if value := table.Data[1].Get("Key"); value != "B" { - t.Fatalf("Expected B, got %s", value) - } - - if value := table.Data[2].Get("Key"); value != "C" { - t.Fatalf("Expected C, got %s", value) - } - - if value := table.Data[3].Get("Key"); value != "D" { - t.Fatalf("Expected D, got %s", value) - } -} - -func TestTableReverseSortStringValue(t *testing.T) { - table := NewTable("Key", 0) - - e := &Env{} - e.Set("Key", "A") - table.Add(e) - - e = &Env{} - e.Set("Key", "D") - table.Add(e) - - e = &Env{} - e.Set("Key", "B") - table.Add(e) - - e = &Env{} - e.Set("Key", "C") - table.Add(e) - - table.ReverseSort() - - if len := table.Len(); len != 4 { - t.Fatalf("Expected 4, got %d", len) - } - - if value := table.Data[0].Get("Key"); value != "D" { - t.Fatalf("Expected D, got %s", value) - } - - if value := table.Data[1].Get("Key"); value != "C" { - t.Fatalf("Expected B, got %s", value) - } - - if value := table.Data[2].Get("Key"); value != "B" { - 
t.Fatalf("Expected C, got %s", value) - } - - if value := table.Data[3].Get("Key"); value != "A" { - t.Fatalf("Expected A, got %s", value) - } -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/nat/nat.go b/Godeps/_workspace/src/github.com/docker/docker/nat/nat.go index fdecf3f9..2cec2e86 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/nat/nat.go +++ b/Godeps/_workspace/src/github.com/docker/docker/nat/nat.go @@ -34,6 +34,9 @@ func NewPort(proto, port string) Port { } func ParsePort(rawPort string) (int, error) { + if len(rawPort) == 0 { + return 0, nil + } port, err := strconv.ParseUint(rawPort, 10, 16) if err != nil { return 0, err diff --git a/Godeps/_workspace/src/github.com/docker/docker/opts/opts.go b/Godeps/_workspace/src/github.com/docker/docker/opts/opts.go index df9decf6..1db45473 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/opts/opts.go +++ b/Godeps/_workspace/src/github.com/docker/docker/opts/opts.go @@ -8,16 +8,20 @@ import ( "regexp" "strings" - "github.com/docker/docker/api" flag "github.com/docker/docker/pkg/mflag" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/ulimit" - "github.com/docker/docker/utils" ) var ( - alphaRegexp = regexp.MustCompile(`[a-zA-Z]`) - domainRegexp = regexp.MustCompile(`^(:?(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9]))(:?\.(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])))*)\.?\s*$`) + alphaRegexp = regexp.MustCompile(`[a-zA-Z]`) + domainRegexp = regexp.MustCompile(`^(:?(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9]))(:?\.(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])))*)\.?\s*$`) + DefaultHTTPHost = "127.0.0.1" // Default HTTP Host used if only port is provided to -H flag e.g. docker -d -H tcp://:8080 + // TODO Windows. DefaultHTTPPort is only used on Windows if a -H parameter + // is not supplied. A better longer term solution would be to use a named + // pipe as the default on the Windows daemon. 
+ DefaultHTTPPort = 2375 // Default HTTP Port + DefaultUnixSocket = "/var/run/docker.sock" // Docker daemon by default always listens on the default unix socket ) func ListVar(values *[]string, names []string, usage string) { @@ -25,7 +29,7 @@ func ListVar(values *[]string, names []string, usage string) { } func HostListVar(values *[]string, names []string, usage string) { - flag.Var(newListOptsRef(values, api.ValidateHost), names, usage) + flag.Var(newListOptsRef(values, ValidateHost), names, usage) } func IPListVar(values *[]string, names []string, usage string) { @@ -174,7 +178,7 @@ func ValidateEnv(val string) (string, error) { if len(arr) > 1 { return val, nil } - if !utils.DoesEnvExist(val) { + if !doesEnvExist(val) { return val, nil } return fmt.Sprintf("%s=%s", val, os.Getenv(val)), nil @@ -234,3 +238,21 @@ func ValidateLabel(val string) (string, error) { } return val, nil } + +func ValidateHost(val string) (string, error) { + host, err := parsers.ParseHost(DefaultHTTPHost, DefaultUnixSocket, val) + if err != nil { + return val, err + } + return host, nil +} + +func doesEnvExist(name string) bool { + for _, entry := range os.Environ() { + parts := strings.SplitN(entry, "=", 2) + if parts[0] == name { + return true + } + } + return false +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive.go index bfa6e184..4d8d2600 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive.go +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive.go @@ -18,7 +18,7 @@ import ( "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" - log "github.com/Sirupsen/logrus" + "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/promise" @@ -78,7 +78,7 @@ func DetectCompression(source []byte) Compression { Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, } { if len(source) < len(m) { - log.Debugf("Len too short") + logrus.Debugf("Len too short") continue } if bytes.Compare(m, source[:len(m)]) == 0 { @@ -331,7 +331,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L } case tar.TypeXGlobalHeader: - log.Debugf("PAX Global Extended Headers found and ignored") + logrus.Debugf("PAX Global Extended Headers found and ignored") return nil default: @@ -350,7 +350,13 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L // There is no LChmod, so ignore mode for symlink. 
Also, this // must happen after chown, as that can modify the file mode - if hdr.Typeflag != tar.TypeSymlink { + if hdr.Typeflag == tar.TypeLink { + if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { + if err := os.Chmod(path, hdrInfo.Mode()); err != nil { + return err + } + } + } else if hdr.Typeflag != tar.TypeSymlink { if err := os.Chmod(path, hdrInfo.Mode()); err != nil { return err } @@ -358,7 +364,13 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)} // syscall.UtimesNano doesn't support a NOFOLLOW flag atm, and - if hdr.Typeflag != tar.TypeSymlink { + if hdr.Typeflag == tar.TypeLink { + if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { + if err := system.UtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { + return err + } + } + } else if hdr.Typeflag != tar.TypeSymlink { if err := system.UtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { return err } @@ -376,25 +388,16 @@ func Tar(path string, compression Compression) (io.ReadCloser, error) { return TarWithOptions(path, &TarOptions{Compression: compression}) } -func escapeName(name string) string { - escaped := make([]byte, 0) - for i, c := range []byte(name) { - if i == 0 && c == '/' { - continue - } - // all printable chars except "-" which is 0x2d - if (0x20 <= c && c <= 0x7E) && c != 0x2d { - escaped = append(escaped, c) - } else { - escaped = append(escaped, fmt.Sprintf("\\%03o", c)...) - } - } - return string(escaped) -} - // TarWithOptions creates an archive from the directory at `path`, only including files whose relative // paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. 
func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { + + patterns, patDirs, exceptions, err := fileutils.CleanPatterns(options.ExcludePatterns) + + if err != nil { + return nil, err + } + pipeReader, pipeWriter := io.Pipe() compressWriter, err := CompressStream(pipeWriter, options.Compression) @@ -426,7 +429,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) for _, include := range options.IncludeFiles { filepath.Walk(filepath.Join(srcPath, include), func(filePath string, f os.FileInfo, err error) error { if err != nil { - log.Debugf("Tar: Can't stat file %s to tar: %s", srcPath, err) + logrus.Debugf("Tar: Can't stat file %s to tar: %s", srcPath, err) return nil } @@ -445,15 +448,15 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) // is asking for that file no matter what - which is true // for some files, like .dockerignore and Dockerfile (sometimes) if include != relFilePath { - skip, err = fileutils.Matches(relFilePath, options.ExcludePatterns) + skip, err = fileutils.OptimizedMatches(relFilePath, patterns, patDirs) if err != nil { - log.Debugf("Error matching %s", relFilePath, err) + logrus.Debugf("Error matching %s", relFilePath, err) return err } } if skip { - if f.IsDir() { + if !exceptions && f.IsDir() { return filepath.SkipDir } return nil @@ -474,7 +477,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) } if err := ta.addTarFile(filePath, relFilePath); err != nil { - log.Debugf("Can't add file %s to tar: %s", filePath, err) + logrus.Debugf("Can't add file %s to tar: %s", filePath, err) } return nil }) @@ -482,13 +485,13 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) // Make sure to check the error on Close. 
if err := ta.TarWriter.Close(); err != nil { - log.Debugf("Can't close tar writer: %s", err) + logrus.Debugf("Can't close tar writer: %s", err) } if err := compressWriter.Close(); err != nil { - log.Debugf("Can't close compress writer: %s", err) + logrus.Debugf("Can't close compress writer: %s", err) } if err := pipeWriter.Close(); err != nil { - log.Debugf("Can't close pipe writer: %s", err) + logrus.Debugf("Can't close pipe writer: %s", err) } }() @@ -606,7 +609,7 @@ func Untar(archive io.Reader, dest string, options *TarOptions) error { } func (archiver *Archiver) TarUntar(src, dst string) error { - log.Debugf("TarUntar(%s %s)", src, dst) + logrus.Debugf("TarUntar(%s %s)", src, dst) archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed}) if err != nil { return err @@ -648,11 +651,11 @@ func (archiver *Archiver) CopyWithTar(src, dst string) error { return archiver.CopyFileWithTar(src, dst) } // Create dst, copy src's content into it - log.Debugf("Creating dest directory: %s", dst) + logrus.Debugf("Creating dest directory: %s", dst) if err := os.MkdirAll(dst, 0755); err != nil && !os.IsExist(err) { return err } - log.Debugf("Calling TarUntar(%s, %s)", src, dst) + logrus.Debugf("Calling TarUntar(%s, %s)", src, dst) return archiver.TarUntar(src, dst) } @@ -665,7 +668,7 @@ func CopyWithTar(src, dst string) error { } func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { - log.Debugf("CopyFileWithTar(%s, %s)", src, dst) + logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst) srcSt, err := os.Stat(src) if err != nil { return err diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_test.go index 6cd95d5a..ae9b5a8c 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_test.go +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_test.go @@ -14,9 +14,150 @@ import ( "testing" "time" + "github.com/docker/docker/pkg/system" "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" ) +func TestIsArchiveNilHeader(t *testing.T) { + out := IsArchive(nil) + if out { + t.Fatalf("isArchive should return false as nil is not a valid archive header") + } +} + +func TestIsArchiveInvalidHeader(t *testing.T) { + header := []byte{0x00, 0x01, 0x02} + out := IsArchive(header) + if out { + t.Fatalf("isArchive should return false as %s is not a valid archive header", header) + } +} + +func TestIsArchiveBzip2(t *testing.T) { + header := []byte{0x42, 0x5A, 0x68} + out := IsArchive(header) + if !out { + t.Fatalf("isArchive should return true as %s is a bz2 header", header) + } +} + +func TestIsArchive7zip(t *testing.T) { + header := []byte{0x50, 0x4b, 0x03, 0x04} + out := IsArchive(header) + if out { + t.Fatalf("isArchive should return false as %s is a 7z header and it is not supported", header) + } +} + +func TestDecompressStreamGzip(t *testing.T) { + cmd := exec.Command("/bin/sh", "-c", "touch /tmp/archive && gzip -f /tmp/archive") + output, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("Fail to create an archive file for test : %s.", output) + } + archive, err := os.Open("/tmp/archive.gz") + _, err = DecompressStream(archive) + if err != nil { + t.Fatalf("Failed to decompress a gzip file.") + } +} + +func TestDecompressStreamBzip2(t *testing.T) { + cmd := exec.Command("/bin/sh", "-c", "touch /tmp/archive && bzip2 -f /tmp/archive") + output, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("Fail 
to create an archive file for test : %s.", output) + } + archive, err := os.Open("/tmp/archive.bz2") + _, err = DecompressStream(archive) + if err != nil { + t.Fatalf("Failed to decompress a bzip2 file.") + } +} + +func TestDecompressStreamXz(t *testing.T) { + cmd := exec.Command("/bin/sh", "-c", "touch /tmp/archive && xz -f /tmp/archive") + output, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("Fail to create an archive file for test : %s.", output) + } + archive, err := os.Open("/tmp/archive.xz") + _, err = DecompressStream(archive) + if err != nil { + t.Fatalf("Failed to decompress a xz file.") + } +} + +func TestCompressStreamXzUnsuported(t *testing.T) { + dest, err := os.Create("/tmp/dest") + if err != nil { + t.Fatalf("Fail to create the destination file") + } + _, err = CompressStream(dest, Xz) + if err == nil { + t.Fatalf("Should fail as xz is unsupported for compression format.") + } +} + +func TestCompressStreamBzip2Unsupported(t *testing.T) { + dest, err := os.Create("/tmp/dest") + if err != nil { + t.Fatalf("Fail to create the destination file") + } + _, err = CompressStream(dest, Xz) + if err == nil { + t.Fatalf("Should fail as xz is unsupported for compression format.") + } +} + +func TestCompressStreamInvalid(t *testing.T) { + dest, err := os.Create("/tmp/dest") + if err != nil { + t.Fatalf("Fail to create the destination file") + } + _, err = CompressStream(dest, -1) + if err == nil { + t.Fatalf("Should fail as xz is unsupported for compression format.") + } +} + +func TestExtensionInvalid(t *testing.T) { + compression := Compression(-1) + output := compression.Extension() + if output != "" { + t.Fatalf("The extension of an invalid compression should be an empty string.") + } +} + +func TestExtensionUncompressed(t *testing.T) { + compression := Uncompressed + output := compression.Extension() + if output != "tar" { + t.Fatalf("The extension of a uncompressed archive should be 'tar'.") + } +} +func TestExtensionBzip2(t *testing.T) { + compression := Bzip2 + output := compression.Extension() + if output != "tar.bz2" { + t.Fatalf("The extension of a bzip2 archive should be 'tar.bz2'") + } +} +func TestExtensionGzip(t *testing.T) { + compression := Gzip + output := compression.Extension() + if output != "tar.gz" { + t.Fatalf("The extension of a bzip2 archive should be 'tar.gz'") + } +} +func TestExtensionXz(t *testing.T) { + compression := Xz + output := compression.Extension() + if output != "tar.xz" { + t.Fatalf("The extension of a bzip2 archive should be 'tar.xz'") + } +} + func TestCmdStreamLargeStderr(t *testing.T) { cmd := exec.Command("/bin/sh", "-c", "dd if=/dev/zero bs=1k count=1000 of=/dev/stderr; echo hello") out, err := CmdStream(cmd, nil) @@ -66,6 +207,315 @@ func TestCmdStreamGood(t *testing.T) { } } +func TestUntarPathWithInvalidDest(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tempFolder) + invalidDestFolder := path.Join(tempFolder, "invalidDest") + // Create a src file + srcFile := path.Join(tempFolder, "src") + _, err = os.Create(srcFile) + if err != nil { + t.Fatalf("Fail to create the source file") + } + err = UntarPath(srcFile, invalidDestFolder) + if err == nil { + t.Fatalf("UntarPath with invalid destination path should throw an error.") + } +} + +func TestUntarPathWithInvalidSrc(t *testing.T) { + dest, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatalf("Fail to create the destination file") + } + defer os.RemoveAll(dest) + err 
= UntarPath("/invalid/path", dest) + if err == nil { + t.Fatalf("UntarPath with invalid src path should throw an error.") + } +} + +func TestUntarPath(t *testing.T) { + tmpFolder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpFolder) + srcFile := path.Join(tmpFolder, "src") + tarFile := path.Join(tmpFolder, "src.tar") + os.Create(path.Join(tmpFolder, "src")) + cmd := exec.Command("/bin/sh", "-c", "tar cf "+tarFile+" "+srcFile) + _, err = cmd.CombinedOutput() + if err != nil { + t.Fatal(err) + } + destFolder := path.Join(tmpFolder, "dest") + err = os.MkdirAll(destFolder, 0740) + if err != nil { + t.Fatalf("Fail to create the destination file") + } + err = UntarPath(tarFile, destFolder) + if err != nil { + t.Fatalf("UntarPath shouldn't throw an error, %s.", err) + } + expectedFile := path.Join(destFolder, srcFile) + _, err = os.Stat(expectedFile) + if err != nil { + t.Fatalf("Destination folder should contain the source file but did not.") + } +} + +// Do the same test as above but with the destination as file, it should fail +func TestUntarPathWithDestinationFile(t *testing.T) { + tmpFolder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpFolder) + srcFile := path.Join(tmpFolder, "src") + tarFile := path.Join(tmpFolder, "src.tar") + os.Create(path.Join(tmpFolder, "src")) + cmd := exec.Command("/bin/sh", "-c", "tar cf "+tarFile+" "+srcFile) + _, err = cmd.CombinedOutput() + if err != nil { + t.Fatal(err) + } + destFile := path.Join(tmpFolder, "dest") + _, err = os.Create(destFile) + if err != nil { + t.Fatalf("Fail to create the destination file") + } + err = UntarPath(tarFile, destFile) + if err == nil { + t.Fatalf("UntarPath should throw an error if the destination if a file") + } +} + +// Do the same test as above but with the destination folder already exists +// and the destination file is a directory +// It's working, see https://github.com/docker/docker/issues/10040 +func TestUntarPathWithDestinationSrcFileAsFolder(t *testing.T) { + tmpFolder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpFolder) + srcFile := path.Join(tmpFolder, "src") + tarFile := path.Join(tmpFolder, "src.tar") + os.Create(srcFile) + cmd := exec.Command("/bin/sh", "-c", "tar cf "+tarFile+" "+srcFile) + _, err = cmd.CombinedOutput() + if err != nil { + t.Fatal(err) + } + destFolder := path.Join(tmpFolder, "dest") + err = os.MkdirAll(destFolder, 0740) + if err != nil { + t.Fatalf("Fail to create the destination folder") + } + // Let's create a folder that will has the same path as the extracted file (from tar) + destSrcFileAsFolder := path.Join(destFolder, srcFile) + err = os.MkdirAll(destSrcFileAsFolder, 0740) + if err != nil { + t.Fatal(err) + } + err = UntarPath(tarFile, destFolder) + if err != nil { + t.Fatalf("UntarPath should throw not throw an error if the extracted file already exists and is a folder") + } +} + +func TestCopyWithTarInvalidSrc(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(nil) + } + destFolder := path.Join(tempFolder, "dest") + invalidSrc := path.Join(tempFolder, "doesnotexists") + err = os.MkdirAll(destFolder, 0740) + if err != nil { + t.Fatal(err) + } + err = CopyWithTar(invalidSrc, destFolder) + if err == nil { + t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.") + } +} + +func 
TestCopyWithTarInexistentDestWillCreateIt(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(nil) + } + srcFolder := path.Join(tempFolder, "src") + inexistentDestFolder := path.Join(tempFolder, "doesnotexists") + err = os.MkdirAll(srcFolder, 0740) + if err != nil { + t.Fatal(err) + } + err = CopyWithTar(srcFolder, inexistentDestFolder) + if err != nil { + t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.") + } + _, err = os.Stat(inexistentDestFolder) + if err != nil { + t.Fatalf("CopyWithTar with an inexistent folder should create it.") + } +} + +// Test CopyWithTar with a file as src +func TestCopyWithTarSrcFile(t *testing.T) { + folder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(folder) + dest := path.Join(folder, "dest") + srcFolder := path.Join(folder, "src") + src := path.Join(folder, path.Join("src", "src")) + err = os.MkdirAll(srcFolder, 0740) + if err != nil { + t.Fatal(err) + } + err = os.MkdirAll(dest, 0740) + if err != nil { + t.Fatal(err) + } + ioutil.WriteFile(src, []byte("content"), 0777) + err = CopyWithTar(src, dest) + if err != nil { + t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err) + } + _, err = os.Stat(dest) + // FIXME Check the content + if err != nil { + t.Fatalf("Destination file should be the same as the source.") + } +} + +// Test CopyWithTar with a folder as src +func TestCopyWithTarSrcFolder(t *testing.T) { + folder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(folder) + dest := path.Join(folder, "dest") + src := path.Join(folder, path.Join("src", "folder")) + err = os.MkdirAll(src, 0740) + if err != nil { + t.Fatal(err) + } + err = os.MkdirAll(dest, 0740) + if err != nil { + t.Fatal(err) + } + ioutil.WriteFile(path.Join(src, "file"), []byte("content"), 0777) + err = CopyWithTar(src, dest) + if err != nil { + t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err) + } + _, err = os.Stat(dest) + // FIXME Check the content (the file inside) + if err != nil { + t.Fatalf("Destination folder should contain the source file but did not.") + } +} + +func TestCopyFileWithTarInvalidSrc(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tempFolder) + destFolder := path.Join(tempFolder, "dest") + err = os.MkdirAll(destFolder, 0740) + if err != nil { + t.Fatal(err) + } + invalidFile := path.Join(tempFolder, "doesnotexists") + err = CopyFileWithTar(invalidFile, destFolder) + if err == nil { + t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.") + } +} + +func TestCopyFileWithTarInexistentDestWillCreateIt(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(nil) + } + defer os.RemoveAll(tempFolder) + srcFile := path.Join(tempFolder, "src") + inexistentDestFolder := path.Join(tempFolder, "doesnotexists") + _, err = os.Create(srcFile) + if err != nil { + t.Fatal(err) + } + err = CopyFileWithTar(srcFile, inexistentDestFolder) + if err != nil { + t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.") + } + _, err = os.Stat(inexistentDestFolder) + if err != nil { + t.Fatalf("CopyWithTar with an inexistent folder should create it.") + } + // FIXME Test the src file and content +} + +func TestCopyFileWithTarSrcFolder(t *testing.T) { + folder, err := ioutil.TempDir("", 
"docker-archive-copyfilewithtar-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(folder) + dest := path.Join(folder, "dest") + src := path.Join(folder, "srcfolder") + err = os.MkdirAll(src, 0740) + if err != nil { + t.Fatal(err) + } + err = os.MkdirAll(dest, 0740) + if err != nil { + t.Fatal(err) + } + err = CopyFileWithTar(src, dest) + if err == nil { + t.Fatalf("CopyFileWithTar should throw an error with a folder.") + } +} + +func TestCopyFileWithTarSrcFile(t *testing.T) { + folder, err := ioutil.TempDir("", "docker-archive-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(folder) + dest := path.Join(folder, "dest") + srcFolder := path.Join(folder, "src") + src := path.Join(folder, path.Join("src", "src")) + err = os.MkdirAll(srcFolder, 0740) + if err != nil { + t.Fatal(err) + } + err = os.MkdirAll(dest, 0740) + if err != nil { + t.Fatal(err) + } + ioutil.WriteFile(src, []byte("content"), 0777) + err = CopyWithTar(src, dest+"/") + if err != nil { + t.Fatalf("archiver.CopyFileWithTar shouldn't throw an error, %s.", err) + } + _, err = os.Stat(dest) + if err != nil { + t.Fatalf("Destination folder should contain the source file but did not.") + } +} + func TestTarFiles(t *testing.T) { // try without hardlinks if err := checkNoChanges(1000, false); err != nil { @@ -179,11 +629,56 @@ func TestTarUntar(t *testing.T) { } } +func TestTarUntarWithXattr(t *testing.T) { + origin, err := ioutil.TempDir("", "docker-test-untar-origin") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(origin) + if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(path.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(path.Join(origin, "3"), []byte("will be ignored"), 0700); err != nil { + t.Fatal(err) + } + if err := system.Lsetxattr(path.Join(origin, "2"), "security.capability", []byte{0x00}, 0); err != nil { + t.Fatal(err) + } + + for _, c := range []Compression{ + Uncompressed, + Gzip, + } { + changes, err := tarUntar(t, origin, &TarOptions{ + Compression: c, + ExcludePatterns: []string{"3"}, + }) + + if err != nil { + t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err) + } + + if len(changes) != 1 || changes[0].Path != "/3" { + t.Fatalf("Unexpected differences after tarUntar: %v", changes) + } + capability, _ := system.Lgetxattr(path.Join(origin, "2"), "security.capability") + if capability == nil && capability[0] != 0x00 { + t.Fatalf("Untar should have kept the 'security.capability' xattr.") + } + } +} + func TestTarWithOptions(t *testing.T) { origin, err := ioutil.TempDir("", "docker-test-untar-origin") if err != nil { t.Fatal(err) } + if _, err := ioutil.TempDir(origin, "folder"); err != nil { + t.Fatal(err) + } defer os.RemoveAll(origin) if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil { t.Fatal(err) @@ -196,8 +691,11 @@ func TestTarWithOptions(t *testing.T) { opts *TarOptions numChanges int }{ - {&TarOptions{IncludeFiles: []string{"1"}}, 1}, + {&TarOptions{IncludeFiles: []string{"1"}}, 2}, {&TarOptions{ExcludePatterns: []string{"2"}}, 1}, + {&TarOptions{ExcludePatterns: []string{"1", "folder*"}}, 2}, + {&TarOptions{IncludeFiles: []string{"1", "1"}}, 2}, + {&TarOptions{Name: "test", IncludeFiles: []string{"1"}}, 4}, } for _, testCase := range cases { changes, err := tarUntar(t, origin, testCase.opts) @@ -256,6 +754,58 @@ func TestUntarUstarGnuConflict(t *testing.T) { 
} } +func TestTarWithBlockCharFifo(t *testing.T) { + origin, err := ioutil.TempDir("", "docker-test-tar-hardlink") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(origin) + if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil { + t.Fatal(err) + } + if err := system.Mknod(path.Join(origin, "2"), syscall.S_IFBLK, int(system.Mkdev(int64(12), int64(5)))); err != nil { + t.Fatal(err) + } + if err := system.Mknod(path.Join(origin, "3"), syscall.S_IFCHR, int(system.Mkdev(int64(12), int64(5)))); err != nil { + t.Fatal(err) + } + if err := system.Mknod(path.Join(origin, "4"), syscall.S_IFIFO, int(system.Mkdev(int64(12), int64(5)))); err != nil { + t.Fatal(err) + } + + dest, err := ioutil.TempDir("", "docker-test-tar-hardlink-dest") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dest) + + // we'll do this in two steps to separate failure + fh, err := Tar(origin, Uncompressed) + if err != nil { + t.Fatal(err) + } + + // ensure we can read the whole thing with no error, before writing back out + buf, err := ioutil.ReadAll(fh) + if err != nil { + t.Fatal(err) + } + + bRdr := bytes.NewReader(buf) + err = Untar(bRdr, dest, &TarOptions{Compression: Uncompressed}) + if err != nil { + t.Fatal(err) + } + + changes, err := ChangesDirs(origin, dest) + if err != nil { + t.Fatal(err) + } + if len(changes) > 0 { + t.Fatalf("Tar with special device (block, char, fifo) should keep them (recreate them when untar) : %v", changes) + } +} + func TestTarWithHardLink(t *testing.T) { origin, err := ioutil.TempDir("", "docker-test-tar-hardlink") if err != nil { @@ -435,6 +985,34 @@ func TestUntarInvalidFilenames(t *testing.T) { } } +func TestUntarHardlinkToSymlink(t *testing.T) { + for i, headers := range [][]*tar.Header{ + { + { + Name: "symlink1", + Typeflag: tar.TypeSymlink, + Linkname: "regfile", + Mode: 0644, + }, + { + Name: "symlink2", + Typeflag: tar.TypeLink, + Linkname: "symlink1", + Mode: 0644, + }, + { + Name: "regfile", + Typeflag: tar.TypeReg, + Mode: 0644, + }, + }, + } { + if err := testBreakout("untar", "docker-TestUntarHardlinkToSymlink", headers); err != nil { + t.Fatalf("i=%d. 
%v", i, err) + } + } +} + func TestUntarInvalidHardlink(t *testing.T) { for i, headers := range [][]*tar.Header{ { // try reading victim/hello (../) diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_unix.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_unix.go index cbce65e3..82c9a82c 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_unix.go +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_unix.go @@ -36,8 +36,8 @@ func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, st inode = uint64(s.Ino) // Currently go does not fil in the major/minors - if s.Mode&syscall.S_IFBLK == syscall.S_IFBLK || - s.Mode&syscall.S_IFCHR == syscall.S_IFCHR { + if s.Mode&syscall.S_IFBLK != 0 || + s.Mode&syscall.S_IFCHR != 0 { hdr.Devmajor = int64(major(uint64(s.Rdev))) hdr.Devminor = int64(minor(uint64(s.Rdev))) } diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_windows_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_windows_test.go index b33e0fb0..72bc71e0 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_windows_test.go +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_windows_test.go @@ -20,7 +20,7 @@ func TestCanonicalTarNameForPath(t *testing.T) { if out, err := CanonicalTarNameForPath(v.in); err != nil && !v.shouldFail { t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err) } else if v.shouldFail && err == nil { - t.Fatalf("canonical path call should have pailed with error. in=%s out=%s", v.in, out) + t.Fatalf("canonical path call should have failed with error. in=%s out=%s", v.in, out) } else if !v.shouldFail && out != v.expected { t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out) } diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes.go index c3cb4ebe..06fad8eb 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes.go +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes.go @@ -13,7 +13,7 @@ import ( "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar" - log "github.com/Sirupsen/logrus" + "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/system" ) @@ -176,7 +176,7 @@ func (info *FileInfo) path() string { } func (info *FileInfo) isDir() bool { - return info.parent == nil || info.stat.Mode()&syscall.S_IFDIR == syscall.S_IFDIR + return info.parent == nil || info.stat.Mode()&syscall.S_IFDIR != 0 } func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { @@ -401,22 +401,22 @@ func ExportChanges(dir string, changes []Change) (Archive, error) { ChangeTime: timestamp, } if err := ta.TarWriter.WriteHeader(hdr); err != nil { - log.Debugf("Can't write whiteout header: %s", err) + logrus.Debugf("Can't write whiteout header: %s", err) } } else { path := filepath.Join(dir, change.Path) if err := ta.addTarFile(path, change.Path[1:]); err != nil { - log.Debugf("Can't add file %s to tar: %s", path, err) + logrus.Debugf("Can't add file %s to tar: %s", path, err) } } } // Make sure to check the error on Close. 
if err := ta.TarWriter.Close(); err != nil { - log.Debugf("Can't close layer: %s", err) + logrus.Debugf("Can't close layer: %s", err) } if err := writer.Close(); err != nil { - log.Debugf("failed close Changes writer: %s", err) + logrus.Debugf("failed close Changes writer: %s", err) } }() return reader, nil diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_test.go index 53ec575b..290b2dd4 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_test.go +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_test.go @@ -6,6 +6,7 @@ import ( "os/exec" "path" "sort" + "syscall" "testing" "time" ) @@ -91,17 +92,130 @@ func createSampleDir(t *testing.T, root string) { } } +func TestChangeString(t *testing.T) { + modifiyChange := Change{"change", ChangeModify} + toString := modifiyChange.String() + if toString != "C change" { + t.Fatalf("String() of a change with ChangeModifiy Kind should have been %s but was %s", "C change", toString) + } + addChange := Change{"change", ChangeAdd} + toString = addChange.String() + if toString != "A change" { + t.Fatalf("String() of a change with ChangeAdd Kind should have been %s but was %s", "A change", toString) + } + deleteChange := Change{"change", ChangeDelete} + toString = deleteChange.String() + if toString != "D change" { + t.Fatalf("String() of a change with ChangeDelete Kind should have been %s but was %s", "D change", toString) + } +} + +func TestChangesWithNoChanges(t *testing.T) { + rwLayer, err := ioutil.TempDir("", "docker-changes-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(rwLayer) + layer, err := ioutil.TempDir("", "docker-changes-test-layer") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(layer) + createSampleDir(t, layer) + changes, err := Changes([]string{layer}, rwLayer) + if err != nil { + t.Fatal(err) + } + if len(changes) != 0 { + t.Fatalf("Changes with no difference should have detect no changes, but detected %d", len(changes)) + } +} + +func TestChangesWithChanges(t *testing.T) { + rwLayer, err := ioutil.TempDir("", "docker-changes-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(rwLayer) + // Create a folder + dir1 := path.Join(rwLayer, "dir1") + os.MkdirAll(dir1, 0740) + deletedFile := path.Join(dir1, ".wh.file1-2") + ioutil.WriteFile(deletedFile, []byte{}, 0600) + modifiedFile := path.Join(dir1, "file1-1") + ioutil.WriteFile(modifiedFile, []byte{0x00}, 01444) + // Let's add a subfolder for a newFile + subfolder := path.Join(dir1, "subfolder") + os.MkdirAll(subfolder, 0740) + newFile := path.Join(subfolder, "newFile") + ioutil.WriteFile(newFile, []byte{}, 0740) + // Let's create folders that with have the role of layers with the same data + layer, err := ioutil.TempDir("", "docker-changes-test-layer") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(layer) + createSampleDir(t, layer) + os.MkdirAll(path.Join(layer, "dir1/subfolder"), 0740) + + // Let's modify modtime for dir1 to be sure it's the same for the two layer (to not having false positive) + fi, err := os.Stat(dir1) + if err != nil { + return + } + mtime := fi.ModTime() + stat := fi.Sys().(*syscall.Stat_t) + atime := time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) + + layerDir1 := path.Join(layer, "dir1") + os.Chtimes(layerDir1, atime, mtime) + + changes, err := Changes([]string{layer}, rwLayer) + if err != nil { + t.Fatal(err) + } + + sort.Sort(changesByPath(changes)) 
+ + expectedChanges := []Change{ + {"/dir1/file1-1", ChangeModify}, + {"/dir1/file1-2", ChangeDelete}, + {"/dir1/subfolder", ChangeModify}, + {"/dir1/subfolder/newFile", ChangeAdd}, + } + + for i := 0; i < max(len(changes), len(expectedChanges)); i++ { + if i >= len(expectedChanges) { + t.Fatalf("unexpected change %s\n", changes[i].String()) + } + if i >= len(changes) { + t.Fatalf("no change for expected change %s\n", expectedChanges[i].String()) + } + if changes[i].Path == expectedChanges[i].Path { + if changes[i] != expectedChanges[i] { + t.Fatalf("Wrong change for %s, expected %s, got %s\n", changes[i].Path, changes[i].String(), expectedChanges[i].String()) + } + } else if changes[i].Path < expectedChanges[i].Path { + t.Fatalf("unexpected change %s\n", changes[i].String()) + } else { + t.Fatalf("no change for expected change %s != %s\n", expectedChanges[i].String(), changes[i].String()) + } + } +} + // Create an directory, copy it, make sure we report no changes between the two func TestChangesDirsEmpty(t *testing.T) { src, err := ioutil.TempDir("", "docker-changes-test") if err != nil { t.Fatal(err) } + defer os.RemoveAll(src) createSampleDir(t, src) dst := src + "-copy" if err := copyDir(src, dst); err != nil { t.Fatal(err) } + defer os.RemoveAll(dst) changes, err := ChangesDirs(dst, src) if err != nil { t.Fatal(err) @@ -291,3 +405,41 @@ func TestApplyLayer(t *testing.T) { t.Fatalf("Unexpected differences after reapplying mutation: %v", changes2) } } + +func TestChangesSizeWithNoChanges(t *testing.T) { + size := ChangesSize("/tmp", nil) + if size != 0 { + t.Fatalf("ChangesSizes with no changes should be 0, was %d", size) + } +} + +func TestChangesSizeWithOnlyDeleteChanges(t *testing.T) { + changes := []Change{ + {Path: "deletedPath", Kind: ChangeDelete}, + } + size := ChangesSize("/tmp", changes) + if size != 0 { + t.Fatalf("ChangesSizes with only delete changes should be 0, was %d", size) + } +} + +func TestChangesSize(t *testing.T) { + parentPath, err := ioutil.TempDir("", "docker-changes-test") + defer os.RemoveAll(parentPath) + addition := path.Join(parentPath, "addition") + if err := ioutil.WriteFile(addition, []byte{0x01, 0x01, 0x01}, 0744); err != nil { + t.Fatal(err) + } + modification := path.Join(parentPath, "modification") + if err = ioutil.WriteFile(modification, []byte{0x01, 0x01, 0x01}, 0744); err != nil { + t.Fatal(err) + } + changes := []Change{ + {Path: "addition", Kind: ChangeAdd}, + {Path: "modification", Kind: ChangeModify}, + } + size := ChangesSize(parentPath, changes) + if size != 6 { + t.Fatalf("ChangesSizes with only delete changes should be 0, was %d", size) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/wrap_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/wrap_test.go new file mode 100644 index 00000000..46ab3669 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/wrap_test.go @@ -0,0 +1,98 @@ +package archive + +import ( + "archive/tar" + "bytes" + "io" + "testing" +) + +func TestGenerateEmptyFile(t *testing.T) { + archive, err := Generate("emptyFile") + if err != nil { + t.Fatal(err) + } + if archive == nil { + t.Fatal("The generated archive should not be nil.") + } + + expectedFiles := [][]string{ + {"emptyFile", ""}, + } + + tr := tar.NewReader(archive) + actualFiles := make([][]string, 0, 10) + i := 0 + for { + hdr, err := tr.Next() + if err == io.EOF { + break + } + if err != nil { + t.Fatal(err) + } + buf := new(bytes.Buffer) + buf.ReadFrom(tr) + content := buf.String() + 
actualFiles = append(actualFiles, []string{hdr.Name, content}) + i++ + } + if len(actualFiles) != len(expectedFiles) { + t.Fatalf("Number of expected file %d, got %d.", len(expectedFiles), len(actualFiles)) + } + for i := 0; i < len(expectedFiles); i++ { + actual := actualFiles[i] + expected := expectedFiles[i] + if actual[0] != expected[0] { + t.Fatalf("Expected name '%s', Actual name '%s'", expected[0], actual[0]) + } + if actual[1] != expected[1] { + t.Fatalf("Expected content '%s', Actual content '%s'", expected[1], actual[1]) + } + } +} + +func TestGenerateWithContent(t *testing.T) { + archive, err := Generate("file", "content") + if err != nil { + t.Fatal(err) + } + if archive == nil { + t.Fatal("The generated archive should not be nil.") + } + + expectedFiles := [][]string{ + {"file", "content"}, + } + + tr := tar.NewReader(archive) + actualFiles := make([][]string, 0, 10) + i := 0 + for { + hdr, err := tr.Next() + if err == io.EOF { + break + } + if err != nil { + t.Fatal(err) + } + buf := new(bytes.Buffer) + buf.ReadFrom(tr) + content := buf.String() + actualFiles = append(actualFiles, []string{hdr.Name, content}) + i++ + } + if len(actualFiles) != len(expectedFiles) { + t.Fatalf("Number of expected file %d, got %d.", len(expectedFiles), len(actualFiles)) + } + for i := 0; i < len(expectedFiles); i++ { + actual := actualFiles[i] + expected := expectedFiles[i] + if actual[0] != expected[0] { + t.Fatalf("Expected name '%s', Actual name '%s'", expected[0], actual[0]) + } + if actual[1] != expected[1] { + t.Fatalf("Expected content '%s', Actual content '%s'", expected[1], actual[1]) + } + } +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/common/randomid.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/common/randomid.go deleted file mode 100644 index 5c6d5920..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/common/randomid.go +++ /dev/null @@ -1,47 +0,0 @@ -package common - -import ( - "crypto/rand" - "encoding/hex" - "io" - "strconv" -) - -// TruncateID returns a shorthand version of a string identifier for convenience. -// A collision with other shorthands is very unlikely, but possible. -// In case of a collision a lookup with TruncIndex.Get() will fail, and the caller -// will need to use a langer prefix, or the full-length Id. -func TruncateID(id string) string { - shortLen := 12 - if len(id) < shortLen { - shortLen = len(id) - } - return id[:shortLen] -} - -// GenerateRandomID returns an unique id -func GenerateRandomID() string { - for { - id := make([]byte, 32) - if _, err := io.ReadFull(rand.Reader, id); err != nil { - panic(err) // This shouldn't happen - } - value := hex.EncodeToString(id) - // if we try to parse the truncated for as an int and we don't have - // an error then the value is all numberic and causes issues when - // used as a hostname. 
ref #3869 - if _, err := strconv.ParseInt(TruncateID(value), 10, 64); err == nil { - continue - } - return value - } -} - -func RandomString() string { - id := make([]byte, 32) - - if _, err := io.ReadFull(rand.Reader, id); err != nil { - panic(err) // This shouldn't happen - } - return hex.EncodeToString(id) -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/common/randomid_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/common/randomid_test.go deleted file mode 100644 index 1dba4125..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/common/randomid_test.go +++ /dev/null @@ -1,59 +0,0 @@ -package common - -import ( - "testing" -) - -func TestShortenId(t *testing.T) { - id := GenerateRandomID() - truncID := TruncateID(id) - if len(truncID) != 12 { - t.Fatalf("Id returned is incorrect: truncate on %s returned %s", id, truncID) - } -} - -func TestShortenIdEmpty(t *testing.T) { - id := "" - truncID := TruncateID(id) - if len(truncID) > len(id) { - t.Fatalf("Id returned is incorrect: truncate on %s returned %s", id, truncID) - } -} - -func TestShortenIdInvalid(t *testing.T) { - id := "1234" - truncID := TruncateID(id) - if len(truncID) != len(id) { - t.Fatalf("Id returned is incorrect: truncate on %s returned %s", id, truncID) - } -} - -func TestGenerateRandomID(t *testing.T) { - id := GenerateRandomID() - - if len(id) != 64 { - t.Fatalf("Id returned is incorrect: %s", id) - } -} - -func TestRandomString(t *testing.T) { - id := RandomString() - if len(id) != 64 { - t.Fatalf("Id returned is incorrect: %s", id) - } -} - -func TestRandomStringUniqueness(t *testing.T) { - repeats := 25 - set := make(map[string]struct{}, repeats) - for i := 0; i < repeats; i = i + 1 { - id := RandomString() - if len(id) != 64 { - t.Fatalf("Id returned is incorrect: %s", id) - } - if _, ok := set[id]; ok { - t.Fatalf("Random number is repeated") - } - set[id] = struct{}{} - } -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/fileutils/fileutils.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/fileutils/fileutils.go index 4e4a91b9..fdafb53c 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/fileutils/fileutils.go +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/fileutils/fileutils.go @@ -1,26 +1,170 @@ package fileutils import ( - log "github.com/Sirupsen/logrus" + "errors" + "fmt" + "io" + "io/ioutil" + "os" "path/filepath" + "strings" + + "github.com/Sirupsen/logrus" ) -// Matches returns true if relFilePath matches any of the patterns -func Matches(relFilePath string, patterns []string) (bool, error) { - for _, exclude := range patterns { - matched, err := filepath.Match(exclude, relFilePath) +func Exclusion(pattern string) bool { + return pattern[0] == '!' +} + +func Empty(pattern string) bool { + return pattern == "" +} + +// Cleanpatterns takes a slice of patterns returns a new +// slice of patterns cleaned with filepath.Clean, stripped +// of any empty patterns and lets the caller know whether the +// slice contains any exception patterns (prefixed with !). +func CleanPatterns(patterns []string) ([]string, [][]string, bool, error) { + // Loop over exclusion patterns and: + // 1. Clean them up. + // 2. Indicate whether we are dealing with any exception rules. + // 3. Error if we see a single exclusion marker on it's own (!). + cleanedPatterns := []string{} + patternDirs := [][]string{} + exceptions := false + for _, pattern := range patterns { + // Eliminate leading and trailing whitespace. 
+ pattern = strings.TrimSpace(pattern) + if Empty(pattern) { + continue + } + if Exclusion(pattern) { + if len(pattern) == 1 { + logrus.Errorf("Illegal exclusion pattern: %s", pattern) + return nil, nil, false, errors.New("Illegal exclusion pattern: !") + } + exceptions = true + } + pattern = filepath.Clean(pattern) + cleanedPatterns = append(cleanedPatterns, pattern) + if Exclusion(pattern) { + pattern = pattern[1:] + } + patternDirs = append(patternDirs, strings.Split(pattern, "/")) + } + + return cleanedPatterns, patternDirs, exceptions, nil +} + +// Matches returns true if file matches any of the patterns +// and isn't excluded by any of the subsequent patterns. +func Matches(file string, patterns []string) (bool, error) { + file = filepath.Clean(file) + + if file == "." { + // Don't let them exclude everything, kind of silly. + return false, nil + } + + patterns, patDirs, _, err := CleanPatterns(patterns) + if err != nil { + return false, err + } + + return OptimizedMatches(file, patterns, patDirs) +} + +// Matches is basically the same as fileutils.Matches() but optimized for archive.go. +// It will assume that the inputs have been preprocessed and therefore the function +// doen't need to do as much error checking and clean-up. This was done to avoid +// repeating these steps on each file being checked during the archive process. +// The more generic fileutils.Matches() can't make these assumptions. +func OptimizedMatches(file string, patterns []string, patDirs [][]string) (bool, error) { + matched := false + parentPath := filepath.Dir(file) + parentPathDirs := strings.Split(parentPath, "/") + + for i, pattern := range patterns { + negative := false + + if Exclusion(pattern) { + negative = true + pattern = pattern[1:] + } + + match, err := filepath.Match(pattern, file) if err != nil { - log.Errorf("Error matching: %s (pattern: %s)", relFilePath, exclude) + logrus.Errorf("Error matching: %s (pattern: %s)", file, pattern) return false, err } - if matched { - if filepath.Clean(relFilePath) == "." { - log.Errorf("Can't exclude whole path, excluding pattern: %s", exclude) - continue + + if !match && parentPath != "." { + // Check to see if the pattern matches one of our parent dirs. + if len(patDirs[i]) <= len(parentPathDirs) { + match, _ = filepath.Match(strings.Join(patDirs[i], "/"), + strings.Join(parentPathDirs[:len(patDirs[i])], "/")) } - log.Debugf("Skipping excluded path: %s", relFilePath) - return true, nil + } + + if match { + matched = !negative } } - return false, nil + + if matched { + logrus.Debugf("Skipping excluded path: %s", file) + } + return matched, nil +} + +func CopyFile(src, dst string) (int64, error) { + cleanSrc := filepath.Clean(src) + cleanDst := filepath.Clean(dst) + if cleanSrc == cleanDst { + return 0, nil + } + sf, err := os.Open(cleanSrc) + if err != nil { + return 0, err + } + defer sf.Close() + if err := os.Remove(cleanDst); err != nil && !os.IsNotExist(err) { + return 0, err + } + df, err := os.Create(cleanDst) + if err != nil { + return 0, err + } + defer df.Close() + return io.Copy(df, sf) +} + +func GetTotalUsedFds() int { + if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil { + logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err) + } else { + return len(fds) + } + return -1 +} + +// ReadSymlinkedDirectory returns the target directory of a symlink. +// The target of the symbolic link may not be a file. 
+func ReadSymlinkedDirectory(path string) (string, error) { + var realPath string + var err error + if realPath, err = filepath.Abs(path); err != nil { + return "", fmt.Errorf("unable to get absolute path for %s: %s", path, err) + } + if realPath, err = filepath.EvalSymlinks(realPath); err != nil { + return "", fmt.Errorf("failed to canonicalise path for %s: %s", path, err) + } + realPathInfo, err := os.Stat(realPath) + if err != nil { + return "", fmt.Errorf("failed to stat target '%s' of '%s': %s", realPath, path, err) + } + if !realPathInfo.Mode().IsDir() { + return "", fmt.Errorf("canonical path points to a file '%s'", realPath) + } + return realPath, nil } diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/fileutils/fileutils_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/fileutils/fileutils_test.go new file mode 100644 index 00000000..ef931684 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/fileutils/fileutils_test.go @@ -0,0 +1,357 @@ +package fileutils + +import ( + "io/ioutil" + "os" + "path" + "testing" +) + +// CopyFile with invalid src +func TestCopyFileWithInvalidSrc(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + defer os.RemoveAll(tempFolder) + if err != nil { + t.Fatal(err) + } + bytes, err := CopyFile("/invalid/file/path", path.Join(tempFolder, "dest")) + if err == nil { + t.Fatal("Should have fail to copy an invalid src file") + } + if bytes != 0 { + t.Fatal("Should have written 0 bytes") + } + +} + +// CopyFile with invalid dest +func TestCopyFileWithInvalidDest(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + defer os.RemoveAll(tempFolder) + if err != nil { + t.Fatal(err) + } + src := path.Join(tempFolder, "file") + err = ioutil.WriteFile(src, []byte("content"), 0740) + if err != nil { + t.Fatal(err) + } + bytes, err := CopyFile(src, path.Join(tempFolder, "/invalid/dest/path")) + if err == nil { + t.Fatal("Should have fail to copy an invalid src file") + } + if bytes != 0 { + t.Fatal("Should have written 0 bytes") + } + +} + +// CopyFile with same src and dest +func TestCopyFileWithSameSrcAndDest(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + defer os.RemoveAll(tempFolder) + if err != nil { + t.Fatal(err) + } + file := path.Join(tempFolder, "file") + err = ioutil.WriteFile(file, []byte("content"), 0740) + if err != nil { + t.Fatal(err) + } + bytes, err := CopyFile(file, file) + if err != nil { + t.Fatal(err) + } + if bytes != 0 { + t.Fatal("Should have written 0 bytes as it is the same file.") + } +} + +// CopyFile with same src and dest but path is different and not clean +func TestCopyFileWithSameSrcAndDestWithPathNameDifferent(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + defer os.RemoveAll(tempFolder) + if err != nil { + t.Fatal(err) + } + testFolder := path.Join(tempFolder, "test") + err = os.MkdirAll(testFolder, 0740) + if err != nil { + t.Fatal(err) + } + file := path.Join(testFolder, "file") + sameFile := testFolder + "/../test/file" + err = ioutil.WriteFile(file, []byte("content"), 0740) + if err != nil { + t.Fatal(err) + } + bytes, err := CopyFile(file, sameFile) + if err != nil { + t.Fatal(err) + } + if bytes != 0 { + t.Fatal("Should have written 0 bytes as it is the same file.") + } +} + +func TestCopyFile(t *testing.T) { + tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") + defer os.RemoveAll(tempFolder) + if err != nil { + t.Fatal(err) + } + src := 
path.Join(tempFolder, "src") + dest := path.Join(tempFolder, "dest") + ioutil.WriteFile(src, []byte("content"), 0777) + ioutil.WriteFile(dest, []byte("destContent"), 0777) + bytes, err := CopyFile(src, dest) + if err != nil { + t.Fatal(err) + } + if bytes != 7 { + t.Fatalf("Should have written %d bytes but wrote %d", 7, bytes) + } + actual, err := ioutil.ReadFile(dest) + if err != nil { + t.Fatal(err) + } + if string(actual) != "content" { + t.Fatalf("Dest content was '%s', expected '%s'", string(actual), "content") + } +} + +// Reading a symlink to a directory must return the directory +func TestReadSymlinkedDirectoryExistingDirectory(t *testing.T) { + var err error + if err = os.Mkdir("/tmp/testReadSymlinkToExistingDirectory", 0777); err != nil { + t.Errorf("failed to create directory: %s", err) + } + + if err = os.Symlink("/tmp/testReadSymlinkToExistingDirectory", "/tmp/dirLinkTest"); err != nil { + t.Errorf("failed to create symlink: %s", err) + } + + var path string + if path, err = ReadSymlinkedDirectory("/tmp/dirLinkTest"); err != nil { + t.Fatalf("failed to read symlink to directory: %s", err) + } + + if path != "/tmp/testReadSymlinkToExistingDirectory" { + t.Fatalf("symlink returned unexpected directory: %s", path) + } + + if err = os.Remove("/tmp/testReadSymlinkToExistingDirectory"); err != nil { + t.Errorf("failed to remove temporary directory: %s", err) + } + + if err = os.Remove("/tmp/dirLinkTest"); err != nil { + t.Errorf("failed to remove symlink: %s", err) + } +} + +// Reading a non-existing symlink must fail +func TestReadSymlinkedDirectoryNonExistingSymlink(t *testing.T) { + var path string + var err error + if path, err = ReadSymlinkedDirectory("/tmp/test/foo/Non/ExistingPath"); err == nil { + t.Fatalf("error expected for non-existing symlink") + } + + if path != "" { + t.Fatalf("expected empty path, but '%s' was returned", path) + } +} + +// Reading a symlink to a file must fail +func TestReadSymlinkedDirectoryToFile(t *testing.T) { + var err error + var file *os.File + + if file, err = os.Create("/tmp/testReadSymlinkToFile"); err != nil { + t.Fatalf("failed to create file: %s", err) + } + + file.Close() + + if err = os.Symlink("/tmp/testReadSymlinkToFile", "/tmp/fileLinkTest"); err != nil { + t.Errorf("failed to create symlink: %s", err) + } + + var path string + if path, err = ReadSymlinkedDirectory("/tmp/fileLinkTest"); err == nil { + t.Fatalf("ReadSymlinkedDirectory on a symlink to a file should've failed") + } + + if path != "" { + t.Fatalf("path should've been empty: %s", path) + } + + if err = os.Remove("/tmp/testReadSymlinkToFile"); err != nil { + t.Errorf("failed to remove file: %s", err) + } + + if err = os.Remove("/tmp/fileLinkTest"); err != nil { + t.Errorf("failed to remove symlink: %s", err) + } +} + +func TestWildcardMatches(t *testing.T) { + match, _ := Matches("fileutils.go", []string{"*"}) + if match != true { + t.Errorf("failed to get a wildcard match, got %v", match) + } +} + +// A simple pattern match should return true. +func TestPatternMatches(t *testing.T) { + match, _ := Matches("fileutils.go", []string{"*.go"}) + if match != true { + t.Errorf("failed to get a match, got %v", match) + } +} + +// An exclusion followed by an inclusion should return true. 
+func TestExclusionPatternMatchesPatternBefore(t *testing.T) { + match, _ := Matches("fileutils.go", []string{"!fileutils.go", "*.go"}) + if match != true { + t.Errorf("failed to get true match on exclusion pattern, got %v", match) + } +} + +// A folder pattern followed by an exception should return false. +func TestPatternMatchesFolderExclusions(t *testing.T) { + match, _ := Matches("docs/README.md", []string{"docs", "!docs/README.md"}) + if match != false { + t.Errorf("failed to get a false match on exclusion pattern, got %v", match) + } +} + +// A folder pattern followed by an exception should return false. +func TestPatternMatchesFolderWithSlashExclusions(t *testing.T) { + match, _ := Matches("docs/README.md", []string{"docs/", "!docs/README.md"}) + if match != false { + t.Errorf("failed to get a false match on exclusion pattern, got %v", match) + } +} + +// A folder pattern followed by an exception should return false. +func TestPatternMatchesFolderWildcardExclusions(t *testing.T) { + match, _ := Matches("docs/README.md", []string{"docs/*", "!docs/README.md"}) + if match != false { + t.Errorf("failed to get a false match on exclusion pattern, got %v", match) + } +} + +// A pattern followed by an exclusion should return false. +func TestExclusionPatternMatchesPatternAfter(t *testing.T) { + match, _ := Matches("fileutils.go", []string{"*.go", "!fileutils.go"}) + if match != false { + t.Errorf("failed to get false match on exclusion pattern, got %v", match) + } +} + +// A filename evaluating to . should return false. +func TestExclusionPatternMatchesWholeDirectory(t *testing.T) { + match, _ := Matches(".", []string{"*.go"}) + if match != false { + t.Errorf("failed to get false match on ., got %v", match) + } +} + +// A single ! pattern should return an error. +func TestSingleExclamationError(t *testing.T) { + _, err := Matches("fileutils.go", []string{"!"}) + if err == nil { + t.Errorf("failed to get an error for a single exclamation point, got %v", err) + } +} + +// A string preceded with a ! should return true from Exclusion. +func TestExclusion(t *testing.T) { + exclusion := Exclusion("!") + if !exclusion { + t.Errorf("failed to get true for a single !, got %v", exclusion) + } +} + +// Matches with no patterns +func TestMatchesWithNoPatterns(t *testing.T) { + matches, err := Matches("/any/path/there", []string{}) + if err != nil { + t.Fatal(err) + } + if matches { + t.Fatalf("Should not have match anything") + } +} + +// Matches with malformed patterns +func TestMatchesWithMalformedPatterns(t *testing.T) { + matches, err := Matches("/any/path/there", []string{"["}) + if err == nil { + t.Fatal("Should have failed because of a malformed syntax in the pattern") + } + if matches { + t.Fatalf("Should not have match anything") + } +} + +// An empty string should return true from Empty. 
+func TestEmpty(t *testing.T) { + empty := Empty("") + if !empty { + t.Errorf("failed to get true for an empty string, got %v", empty) + } +} + +func TestCleanPatterns(t *testing.T) { + cleaned, _, _, _ := CleanPatterns([]string{"docs", "config"}) + if len(cleaned) != 2 { + t.Errorf("expected 2 element slice, got %v", len(cleaned)) + } +} + +func TestCleanPatternsStripEmptyPatterns(t *testing.T) { + cleaned, _, _, _ := CleanPatterns([]string{"docs", "config", ""}) + if len(cleaned) != 2 { + t.Errorf("expected 2 element slice, got %v", len(cleaned)) + } +} + +func TestCleanPatternsExceptionFlag(t *testing.T) { + _, _, exceptions, _ := CleanPatterns([]string{"docs", "!docs/README.md"}) + if !exceptions { + t.Errorf("expected exceptions to be true, got %v", exceptions) + } +} + +func TestCleanPatternsLeadingSpaceTrimmed(t *testing.T) { + _, _, exceptions, _ := CleanPatterns([]string{"docs", " !docs/README.md"}) + if !exceptions { + t.Errorf("expected exceptions to be true, got %v", exceptions) + } +} + +func TestCleanPatternsTrailingSpaceTrimmed(t *testing.T) { + _, _, exceptions, _ := CleanPatterns([]string{"docs", "!docs/README.md "}) + if !exceptions { + t.Errorf("expected exceptions to be true, got %v", exceptions) + } +} + +func TestCleanPatternsErrorSingleException(t *testing.T) { + _, _, _, err := CleanPatterns([]string{"!"}) + if err == nil { + t.Errorf("expected error on single exclamation point, got %v", err) + } +} + +func TestCleanPatternsFolderSplit(t *testing.T) { + _, dirs, _, _ := CleanPatterns([]string{"docs/config/CONFIG.md"}) + if dirs[0][0] != "docs" { + t.Errorf("expected first element in dirs slice to be docs, got %v", dirs[0][1]) + } + if dirs[0][1] != "config" { + t.Errorf("expected first element in dirs slice to be config, got %v", dirs[0][1]) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/readers.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/readers.go index 58ff1af6..0e542cba 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/readers.go +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/readers.go @@ -3,6 +3,8 @@ package ioutils import ( "bytes" "crypto/rand" + "crypto/sha256" + "encoding/hex" "io" "math/big" "sync" @@ -215,3 +217,11 @@ func (r *bufReader) Close() error { } return closer.Close() } + +func HashData(src io.Reader) (string, error) { + h := sha256.New() + if _, err := io.Copy(h, src); err != nil { + return "", err + } + return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/writers.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/writers.go index c0b3608f..43fdc44e 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/writers.go +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/writers.go @@ -37,3 +37,24 @@ func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser { closer: closer, } } + +// Wrap a concrete io.Writer and hold a count of the number +// of bytes written to the writer during a "session". 
+// This can be convenient when write return is masked +// (e.g., json.Encoder.Encode()) +type WriteCounter struct { + Count int64 + Writer io.Writer +} + +func NewWriteCounter(w io.Writer) *WriteCounter { + return &WriteCounter{ + Writer: w, + } +} + +func (wc *WriteCounter) Write(p []byte) (count int, err error) { + count, err = wc.Writer.Write(p) + wc.Count += int64(count) + return +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/writers_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/writers_test.go new file mode 100644 index 00000000..80d7f7f7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/ioutils/writers_test.go @@ -0,0 +1,41 @@ +package ioutils + +import ( + "bytes" + "strings" + "testing" +) + +func TestNopWriter(t *testing.T) { + nw := &NopWriter{} + l, err := nw.Write([]byte{'c'}) + if err != nil { + t.Fatal(err) + } + if l != 1 { + t.Fatalf("Expected 1 got %d", l) + } +} + +func TestWriteCounter(t *testing.T) { + dummy1 := "This is a dummy string." + dummy2 := "This is another dummy string." + totalLength := int64(len(dummy1) + len(dummy2)) + + reader1 := strings.NewReader(dummy1) + reader2 := strings.NewReader(dummy2) + + var buffer bytes.Buffer + wc := NewWriteCounter(&buffer) + + reader1.WriteTo(wc) + reader2.WriteTo(wc) + + if wc.Count != totalLength { + t.Errorf("Wrong count: %d vs. %d", wc.Count, totalLength) + } + + if buffer.String() != dummy1+dummy2 { + t.Error("Wrong message written") + } +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/flag.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/flag.go index b35692bf..f0d20d99 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/flag.go +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/flag.go @@ -486,8 +486,7 @@ func (f *FlagSet) Set(name, value string) error { if !ok { return fmt.Errorf("no such flag -%v", name) } - err := flag.Value.Set(value) - if err != nil { + if err := flag.Value.Set(value); err != nil { return err } if f.actual == nil { @@ -941,11 +940,11 @@ func (f *FlagSet) parseOne() (bool, string, error) { // it's a flag. does it have an argument? f.args = f.args[1:] - has_value := false + hasValue := false value := "" if i := strings.Index(name, "="); i != -1 { value = trimQuotes(name[i+1:]) - has_value = true + hasValue = true name = name[:i] } @@ -962,7 +961,7 @@ func (f *FlagSet) parseOne() (bool, string, error) { return false, name, ErrRetry } if fv, ok := flag.Value.(boolFlag); ok && fv.IsBoolFlag() { // special case: doesn't need an arg - if has_value { + if hasValue { if err := fv.Set(value); err != nil { return false, "", f.failf("invalid boolean value %q for -%s: %v", value, name, err) } @@ -971,12 +970,12 @@ func (f *FlagSet) parseOne() (bool, string, error) { } } else { // It must have a value, which might be the next argument. - if !has_value && len(f.args) > 0 { + if !hasValue && len(f.args) > 0 { // value is the next arg - has_value = true + hasValue = true value, f.args = f.args[0], f.args[1:] } - if !has_value { + if !hasValue { return false, "", f.failf("flag needs an argument: -%s", name) } if err := flag.Value.Set(value); err != nil { @@ -1054,6 +1053,42 @@ func (f *FlagSet) Parse(arguments []string) error { return nil } +// ParseFlags is a utility function that adds a help flag if withHelp is true, +// calls cmd.Parse(args) and prints a relevant error message if there are +// incorrect number of arguments. 
It returns error only if error handling is +// set to ContinueOnError and parsing fails. If error handling is set to +// ExitOnError, it's safe to ignore the return value. +func (cmd *FlagSet) ParseFlags(args []string, withHelp bool) error { + var help *bool + if withHelp { + help = cmd.Bool([]string{"#help", "-help"}, false, "Print usage") + } + if err := cmd.Parse(args); err != nil { + return err + } + if help != nil && *help { + cmd.Usage() + // just in case Usage does not exit + os.Exit(0) + } + if str := cmd.CheckArgs(); str != "" { + cmd.ReportError(str, withHelp) + } + return nil +} + +func (cmd *FlagSet) ReportError(str string, withHelp bool) { + if withHelp { + if os.Args[0] == cmd.Name() { + str += ". See '" + os.Args[0] + " --help'" + } else { + str += ". See '" + os.Args[0] + " " + cmd.Name() + " --help'" + } + } + fmt.Fprintf(cmd.Out(), "docker: %s.\n", str) + os.Exit(1) +} + // Parsed reports whether f.Parse has been called. func (f *FlagSet) Parsed() bool { return f.parsed diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/flags_freebsd.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/flags_freebsd.go index a59b5896..f166cb2f 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/flags_freebsd.go +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/flags_freebsd.go @@ -8,12 +8,25 @@ package mount import "C" const ( - RDONLY = C.MNT_RDONLY - NOSUID = C.MNT_NOSUID - NOEXEC = C.MNT_NOEXEC - SYNCHRONOUS = C.MNT_SYNCHRONOUS - NOATIME = C.MNT_NOATIME + // RDONLY will mount the filesystem as read-only. + RDONLY = C.MNT_RDONLY + // NOSUID will not allow set-user-identifier or set-group-identifier bits to + // take effect. + NOSUID = C.MNT_NOSUID + + // NOEXEC will not allow execution of any binaries on the mounted file system. + NOEXEC = C.MNT_NOEXEC + + // SYNCHRONOUS will allow any I/O to the file system to be done synchronously. + SYNCHRONOUS = C.MNT_SYNCHRONOUS + + // NOATIME will not update the file access time when reading from a file. + NOATIME = C.MNT_NOATIME +) + +// These flags are unsupported. +const ( BIND = 0 DIRSYNC = 0 MANDLOCK = 0 diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/flags_linux.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/flags_linux.go index 9986621c..2f9f5c58 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/flags_linux.go +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/flags_linux.go @@ -5,26 +5,81 @@ import ( ) const ( - RDONLY = syscall.MS_RDONLY - NOSUID = syscall.MS_NOSUID - NODEV = syscall.MS_NODEV - NOEXEC = syscall.MS_NOEXEC + // RDONLY will mount the file system read-only. + RDONLY = syscall.MS_RDONLY + + // NOSUID will not allow set-user-identifier or set-group-identifier bits to + // take effect. + NOSUID = syscall.MS_NOSUID + + // NODEV will not interpret character or block special devices on the file + // system. + NODEV = syscall.MS_NODEV + + // NOEXEC will not allow execution of any binaries on the mounted file system. + NOEXEC = syscall.MS_NOEXEC + + // SYNCHRONOUS will allow I/O to the file system to be done synchronously. 
SYNCHRONOUS = syscall.MS_SYNCHRONOUS - DIRSYNC = syscall.MS_DIRSYNC - REMOUNT = syscall.MS_REMOUNT - MANDLOCK = syscall.MS_MANDLOCK - NOATIME = syscall.MS_NOATIME - NODIRATIME = syscall.MS_NODIRATIME - BIND = syscall.MS_BIND - RBIND = syscall.MS_BIND | syscall.MS_REC - UNBINDABLE = syscall.MS_UNBINDABLE + + // DIRSYNC will force all directory updates within the file system to be done + // synchronously. This affects the following system calls: creat, link, + // unlink, symlink, mkdir, rmdir, mknod and rename. + DIRSYNC = syscall.MS_DIRSYNC + + // REMOUNT will attempt to remount an already-mounted file system. This is + // commonly used to change the mount flags for a file system, especially to + // make a readonly file system writeable. It does not change device or mount + // point. + REMOUNT = syscall.MS_REMOUNT + + // MANDLOCK will force mandatory locks on a filesystem. + MANDLOCK = syscall.MS_MANDLOCK + + // NOATIME will not update the file access time when reading from a file. + NOATIME = syscall.MS_NOATIME + + // NODIRATIME will not update the directory access time. + NODIRATIME = syscall.MS_NODIRATIME + + // BIND remounts a subtree somewhere else. + BIND = syscall.MS_BIND + + // RBIND remounts a subtree and all possible submounts somewhere else. + RBIND = syscall.MS_BIND | syscall.MS_REC + + // UNBINDABLE creates a mount which cannot be cloned through a bind operation. + UNBINDABLE = syscall.MS_UNBINDABLE + + // RUNBINDABLE marks the entire mount tree as UNBINDABLE. RUNBINDABLE = syscall.MS_UNBINDABLE | syscall.MS_REC - PRIVATE = syscall.MS_PRIVATE - RPRIVATE = syscall.MS_PRIVATE | syscall.MS_REC - SLAVE = syscall.MS_SLAVE - RSLAVE = syscall.MS_SLAVE | syscall.MS_REC - SHARED = syscall.MS_SHARED - RSHARED = syscall.MS_SHARED | syscall.MS_REC - RELATIME = syscall.MS_RELATIME + + // PRIVATE creates a mount which carries no propagation abilities. + PRIVATE = syscall.MS_PRIVATE + + // RPRIVATE marks the entire mount tree as PRIVATE. + RPRIVATE = syscall.MS_PRIVATE | syscall.MS_REC + + // SLAVE creates a mount which receives propagation from its master, but not + // vice versa. + SLAVE = syscall.MS_SLAVE + + // RSLAVE marks the entire mount tree as SLAVE. + RSLAVE = syscall.MS_SLAVE | syscall.MS_REC + + // SHARED creates a mount which provides the ability to create mirrors of + // that mount such that mounts and unmounts within any of the mirrors + // propagate to the other mirrors. + SHARED = syscall.MS_SHARED + + // RSHARED marks the entire mount tree as SHARED. + RSHARED = syscall.MS_SHARED | syscall.MS_REC + + // RELATIME updates inode access times relative to modify or change time. + RELATIME = syscall.MS_RELATIME + + // STRICTATIME allows to explicitly request full atime updates. This makes + // it possible for the kernel to default to relatime or noatime but still + // allow userspace to override it. STRICTATIME = syscall.MS_STRICTATIME ) diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/flags_unsupported.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/flags_unsupported.go index c4f82176..a90d3d11 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/flags_unsupported.go +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/flags_unsupported.go @@ -2,6 +2,7 @@ package mount +// These flags are unsupported. 
const ( BIND = 0 DIRSYNC = 0 diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mount.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mount.go index 5ca73160..9a20df21 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mount.go +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mount.go @@ -4,11 +4,12 @@ import ( "time" ) +// GetMounts retrieves a list of mounts for the current running process. func GetMounts() ([]*MountInfo, error) { return parseMountTable() } -// Looks at /proc/self/mountinfo to determine of the specified +// Mounted looks at /proc/self/mountinfo to determine of the specified // mountpoint has been mounted func Mounted(mountpoint string) (bool, error) { entries, err := parseMountTable() @@ -25,9 +26,10 @@ func Mounted(mountpoint string) (bool, error) { return false, nil } -// Mount the specified options at the target path only if -// the target is not mounted -// Options must be specified as fstab style +// Mount will mount filesystem according to the specified configuration, on the +// condition that the target path is *not* already mounted. Options must be +// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See +// flags.go for supported option flags. func Mount(device, target, mType, options string) error { flag, _ := parseOptions(options) if flag&REMOUNT != REMOUNT { @@ -38,9 +40,10 @@ func Mount(device, target, mType, options string) error { return ForceMount(device, target, mType, options) } -// Mount the specified options at the target path -// reguardless if the target is mounted or not -// Options must be specified as fstab style +// ForceMount will mount a filesystem according to the specified configuration, +// *regardless* if the target path is not already mounted. Options must be +// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See +// flags.go for supported option flags. func ForceMount(device, target, mType, options string) error { flag, data := parseOptions(options) if err := mount(device, target, mType, uintptr(flag), data); err != nil { @@ -49,7 +52,7 @@ func ForceMount(device, target, mType, options string) error { return nil } -// Unmount the target only if it is mounted +// Unmount will unmount the target filesystem, so long as it is mounted. func Unmount(target string) error { if mounted, err := Mounted(target); err != nil || !mounted { return err @@ -57,7 +60,8 @@ func Unmount(target string) error { return ForceUnmount(target) } -// Unmount the target reguardless if it is mounted or not +// ForceUnmount will force an unmount of the target filesystem, regardless if +// it is mounted or not. func ForceUnmount(target string) (err error) { // Simple retry logic for unmount for i := 0; i < 10; i++ { diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo.go index ec8e8bca..8ea08648 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo.go +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo.go @@ -1,7 +1,40 @@ package mount +// MountInfo reveals information about a particular mounted filesystem. This +// struct is populated from the content in the /proc//mountinfo file. type MountInfo struct { - Id, Parent, Major, Minor int - Root, Mountpoint, Opts, Optional string - Fstype, Source, VfsOpts string + // Id is a unique identifier of the mount (may be reused after umount). 
+ Id int + + // Parent indicates the ID of the mount parent (or of self for the top of the + // mount tree). + Parent int + + // Major indicates one half of the device ID which identifies the device class. + Major int + + // Minor indicates one half of the device ID which identifies a specific + // instance of device. + Minor int + + // Root of the mount within the filesystem. + Root string + + // Mountpoint indicates the mount point relative to the process's root. + Mountpoint string + + // Opts represents mount-specific options. + Opts string + + // Optional represents optional fields. + Optional string + + // Fstype indicates the type of filesystem, such as EXT3. + Fstype string + + // Source indicates filesystem specific information or "none". + Source string + + // VfsOpts represents per super block options. + VfsOpts string } diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go index 2fe91862..add7c3b0 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go @@ -13,7 +13,8 @@ import ( "unsafe" ) -// Parse /proc/self/mountinfo because comparing Dev and ino does not work from bind mounts +// Parse /proc/self/mountinfo because comparing Dev and ino does not work from +// bind mounts. func parseMountTable() ([]*MountInfo, error) { var rawEntries *C.struct_statfs diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_linux.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_linux.go index 0eb018e2..351a58ea 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_linux.go +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_linux.go @@ -28,7 +28,8 @@ const ( mountinfoFormat = "%d %d %d:%d %s %s %s %s" ) -// Parse /proc/self/mountinfo because comparing Dev and ino does not work from bind mounts +// Parse /proc/self/mountinfo because comparing Dev and ino does not work from +// bind mounts func parseMountTable() ([]*MountInfo, error) { f, err := os.Open("/proc/self/mountinfo") if err != nil { @@ -80,7 +81,9 @@ func parseInfoFile(r io.Reader) ([]*MountInfo, error) { return out, nil } -// PidMountInfo collects the mounts for a specific Pid +// PidMountInfo collects the mounts for a specific process ID. If the process +// ID is unknown, it is better to use `GetMounts` which will inspect +// "/proc/self/mountinfo" instead. func PidMountInfo(pid int) ([]*MountInfo, error) { f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid)) if err != nil { diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go index cd9b86ce..47303bbc 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go @@ -2,34 +2,50 @@ package mount +// MakeShared ensures a mounted filesystem has the SHARED mount option enabled. +// See the supported options in flags.go for further reference. func MakeShared(mountPoint string) error { return ensureMountedAs(mountPoint, "shared") } +// MakeRShared ensures a mounted filesystem has the RSHARED mount option enabled. +// See the supported options in flags.go for further reference. 
func MakeRShared(mountPoint string) error { return ensureMountedAs(mountPoint, "rshared") } +// MakePrivate ensures a mounted filesystem has the PRIVATE mount option enabled. +// See the supported options in flags.go for further reference. func MakePrivate(mountPoint string) error { return ensureMountedAs(mountPoint, "private") } +// MakeRPrivate ensures a mounted filesystem has the RPRIVATE mount option +// enabled. See the supported options in flags.go for further reference. func MakeRPrivate(mountPoint string) error { return ensureMountedAs(mountPoint, "rprivate") } +// MakeSlave ensures a mounted filesystem has the SLAVE mount option enabled. +// See the supported options in flags.go for further reference. func MakeSlave(mountPoint string) error { return ensureMountedAs(mountPoint, "slave") } +// MakeRSlave ensures a mounted filesystem has the RSLAVE mount option enabled. +// See the supported options in flags.go for further reference. func MakeRSlave(mountPoint string) error { return ensureMountedAs(mountPoint, "rslave") } +// MakeUnbindable ensures a mounted filesystem has the UNBINDABLE mount option +// enabled. See the supported options in flags.go for further reference. func MakeUnbindable(mountPoint string) error { return ensureMountedAs(mountPoint, "unbindable") } +// MakeRUnbindable ensures a mounted filesystem has the RUNBINDABLE mount +// option enabled. See the supported options in flags.go for further reference. func MakeRUnbindable(mountPoint string) error { return ensureMountedAs(mountPoint, "runbindable") } diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/filters/parse.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/filters/parse.go index 9c056bb3..df5486d5 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/filters/parse.go +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/parsers/filters/parse.go @@ -58,8 +58,7 @@ func FromParam(p string) (Args, error) { if len(p) == 0 { return args, nil } - err := json.Unmarshal([]byte(p), &args) - if err != nil { + if err := json.NewDecoder(strings.NewReader(p)).Decode(&args); err != nil { return nil, err } return args, nil diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/pools/pools.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/pools/pools.go index 5338a0cf..f366fa67 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/pools/pools.go +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/pools/pools.go @@ -1,5 +1,3 @@ -// +build go1.3 - // Package pools provides a collection of pools which provide various // data types with buffers. These can be used to lower the number of // memory allocations and reuse buffers. 
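The pkg/mount hunks above are documentation-only: they spell out what each propagation helper and mount flag does without changing behavior. As a quick orientation (not part of the patch), here is a minimal, hypothetical Linux-only sketch of how those helpers compose, using only the functions shown in mount.go and sharedsubtree_linux.go; the bind-mount source and target paths are invented purely for illustration:

    package main

    import (
    	"log"

    	"github.com/docker/docker/pkg/mount"
    )

    func main() {
    	// Hypothetical paths, chosen only for illustration.
    	src := "/var/lib/rancher"
    	target := "/mnt/rancher"

    	// Mount only acts if target is not already a mountpoint; "bind,rw"
    	// follows the fstab-style option syntax described in mount.go.
    	if err := mount.Mount(src, target, "none", "bind,rw"); err != nil {
    		log.Fatal(err)
    	}

    	// MakePrivate remounts target with the PRIVATE propagation flag so
    	// mount events no longer propagate to or from peers (see flags.go).
    	if err := mount.MakePrivate(target); err != nil {
    		log.Fatal(err)
    	}

    	if mounted, err := mount.Mounted(target); err != nil {
    		log.Fatal(err)
    	} else {
    		log.Printf("%s mounted: %v", target, mounted)
    	}

    	// Unmount is a no-op if target is not currently mounted.
    	if err := mount.Unmount(target); err != nil {
    		log.Fatal(err)
    	}
    }

This is a sketch under the assumption that the vendored package is imported by its upstream path; the patch itself only adds comments to these helpers.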
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/pools/pools_nopool.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/pools/pools_nopool.go deleted file mode 100644 index 48903c23..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/pools/pools_nopool.go +++ /dev/null @@ -1,73 +0,0 @@ -// +build !go1.3 - -package pools - -import ( - "bufio" - "io" - - "github.com/docker/docker/pkg/ioutils" -) - -var ( - BufioReader32KPool *BufioReaderPool - BufioWriter32KPool *BufioWriterPool -) - -const buffer32K = 32 * 1024 - -type BufioReaderPool struct { - size int -} - -func init() { - BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K) - BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K) -} - -func newBufioReaderPoolWithSize(size int) *BufioReaderPool { - return &BufioReaderPool{size: size} -} - -func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader { - return bufio.NewReaderSize(r, bufPool.size) -} - -func (bufPool *BufioReaderPool) Put(b *bufio.Reader) { - b.Reset(nil) -} - -func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser { - return ioutils.NewReadCloserWrapper(r, func() error { - if readCloser, ok := r.(io.ReadCloser); ok { - return readCloser.Close() - } - return nil - }) -} - -type BufioWriterPool struct { - size int -} - -func newBufioWriterPoolWithSize(size int) *BufioWriterPool { - return &BufioWriterPool{size: size} -} - -func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer { - return bufio.NewWriterSize(w, bufPool.size) -} - -func (bufPool *BufioWriterPool) Put(b *bufio.Writer) { - b.Reset(nil) -} - -func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser { - return ioutils.NewWriteCloserWrapper(w, func() error { - buf.Flush() - if writeCloser, ok := w.(io.WriteCloser); ok { - return writeCloser.Close() - } - return nil - }) -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/progressreader/progressreader.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/progressreader/progressreader.go deleted file mode 100644 index 730559e9..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/progressreader/progressreader.go +++ /dev/null @@ -1,69 +0,0 @@ -package progressreader - -import ( - "io" -) - -type StreamFormatter interface { - FormatProg(string, string, interface{}) []byte - FormatStatus(string, string, ...interface{}) []byte - FormatError(error) []byte -} - -type PR_JSONProgress interface { - GetCurrent() int - GetTotal() int -} - -type JSONProg struct { - Current int - Total int -} - -func (j *JSONProg) GetCurrent() int { - return j.Current -} -func (j *JSONProg) GetTotal() int { - return j.Total -} - -// Reader with progress bar -type Config struct { - In io.ReadCloser // Stream to read from - Out io.Writer // Where to send progress bar to - Formatter StreamFormatter - Size int - Current int - LastUpdate int - NewLines bool - ID string - Action string -} - -func New(newReader Config) *Config { - return &newReader -} -func (config *Config) Read(p []byte) (n int, err error) { - read, err := config.In.Read(p) - config.Current += read - updateEvery := 1024 * 512 //512kB - if config.Size > 0 { - // Update progress for every 1% read if 1% < 512kB - if increment := int(0.01 * float64(config.Size)); increment < updateEvery { - updateEvery = increment - } - } - if config.Current-config.LastUpdate > updateEvery || err != nil { - config.Out.Write(config.Formatter.FormatProg(config.ID, config.Action, 
&JSONProg{Current: config.Current, Total: config.Size})) - config.LastUpdate = config.Current - } - // Send newline when complete - if config.NewLines && err != nil && read == 0 { - config.Out.Write(config.Formatter.FormatStatus("", "")) - } - return read, err -} -func (config *Config) Close() error { - config.Out.Write(config.Formatter.FormatProg(config.ID, config.Action, &JSONProg{Current: config.Current, Total: config.Size})) - return config.In.Close() -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat.go index 6c1ed2e3..d0e43b37 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat.go +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat.go @@ -6,10 +6,13 @@ import ( "syscall" ) +// Lstat takes a path to a file and returns +// a system.Stat_t type pertaining to that file. +// +// Throws an error if the file does not exist func Lstat(path string) (*Stat_t, error) { s := &syscall.Stat_t{} - err := syscall.Lstat(path, s) - if err != nil { + if err := syscall.Lstat(path, s); err != nil { return nil, err } return fromStatT(s) diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat_test.go index 9bab4d7b..6bac492e 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat_test.go +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/lstat_test.go @@ -5,6 +5,7 @@ import ( "testing" ) +// TestLstat tests Lstat for existing and non existing files func TestLstat(t *testing.T) { file, invalid, _, dir := prepareFiles(t) defer os.RemoveAll(dir) diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_linux.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_linux.go index b7de3ff7..e2ca1400 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_linux.go +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_linux.go @@ -15,8 +15,8 @@ var ( ErrMalformed = errors.New("malformed file") ) -// Retrieve memory statistics of the host system and parse them into a MemInfo -// type. +// ReadMemInfo retrieves memory statistics of the host system and returns a +// MemInfo type. func ReadMemInfo() (*MemInfo, error) { file, err := os.Open("/proc/meminfo") if err != nil { @@ -26,6 +26,10 @@ func ReadMemInfo() (*MemInfo, error) { return parseMemInfo(file) } +// parseMemInfo parses the /proc/meminfo file into +// a MemInfo object given a io.Reader to the file. 
+// +// Throws error if there are problems reading from the file func parseMemInfo(reader io.Reader) (*MemInfo, error) { meminfo := &MemInfo{} scanner := bufio.NewScanner(reader) diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_linux_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_linux_test.go index 377405ea..10ddf796 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_linux_test.go +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/meminfo_linux_test.go @@ -7,6 +7,7 @@ import ( "github.com/docker/docker/pkg/units" ) +// TestMemInfo tests parseMemInfo with a static meminfo string func TestMemInfo(t *testing.T) { const input = ` MemTotal: 1 kB diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/mknod.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/mknod.go index 06f9c6af..26617eb0 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/mknod.go +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/mknod.go @@ -6,6 +6,8 @@ import ( "syscall" ) +// Mknod creates a filesystem node (file, device special file or named pipe) named path +// with attributes specified by mode and dev func Mknod(path string, mode uint32, dev int) error { return syscall.Mknod(path, mode, dev) } diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat.go index 186e8528..ba22b4dd 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat.go +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat.go @@ -4,6 +4,8 @@ import ( "syscall" ) +// Stat_t type contains status of a file. It contains metadata +// like permission, owner, group, size, etc about a file type Stat_t struct { mode uint32 uid uint32 diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_linux.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_linux.go index 072728d0..3899b3e0 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_linux.go +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_linux.go @@ -4,6 +4,7 @@ import ( "syscall" ) +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type func fromStatT(s *syscall.Stat_t) (*Stat_t, error) { return &Stat_t{size: s.Size, mode: s.Mode, @@ -13,10 +14,13 @@ func fromStatT(s *syscall.Stat_t) (*Stat_t, error) { mtim: s.Mtim}, nil } +// Stat takes a path to a file and returns +// a system.Stat_t type pertaining to that file. 
+// +// Throws an error if the file does not exist func Stat(path string) (*Stat_t, error) { s := &syscall.Stat_t{} - err := syscall.Stat(path, s) - if err != nil { + if err := syscall.Stat(path, s); err != nil { return nil, err } return fromStatT(s) diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_test.go index abcc8ea7..45341292 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_test.go +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_test.go @@ -6,6 +6,7 @@ import ( "testing" ) +// TestFromStatT tests fromStatT for a tempfile func TestFromStatT(t *testing.T) { file, _, _, dir := prepareFiles(t) defer os.RemoveAll(dir) diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_unsupported.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_unsupported.go index 66323eee..7e0d0348 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_unsupported.go +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/stat_unsupported.go @@ -6,6 +6,7 @@ import ( "syscall" ) +// fromStatT creates a system.Stat_t type from a syscall.Stat_t type func fromStatT(s *syscall.Stat_t) (*Stat_t, error) { return &Stat_t{size: s.Size, mode: uint32(s.Mode), diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_test.go index 1dea47cc..350cce1e 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_test.go +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/system/utimes_test.go @@ -8,6 +8,7 @@ import ( "testing" ) +// prepareFiles creates files for testing in the temp directory func prepareFiles(t *testing.T) (string, string, string, string) { dir, err := ioutil.TempDir("", "docker-system-test") if err != nil { diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/term/tc_linux_cgo.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/tc_linux_cgo.go deleted file mode 100644 index ae9516c9..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/term/tc_linux_cgo.go +++ /dev/null @@ -1,47 +0,0 @@ -// +build linux,cgo - -package term - -import ( - "syscall" - "unsafe" -) - -// #include -import "C" - -type Termios syscall.Termios - -// MakeRaw put the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. 
-func MakeRaw(fd uintptr) (*State, error) { - var oldState State - if err := tcget(fd, &oldState.termios); err != 0 { - return nil, err - } - - newState := oldState.termios - - C.cfmakeraw((*C.struct_termios)(unsafe.Pointer(&newState))) - if err := tcset(fd, &newState); err != 0 { - return nil, err - } - return &oldState, nil -} - -func tcget(fd uintptr, p *Termios) syscall.Errno { - ret, err := C.tcgetattr(C.int(fd), (*C.struct_termios)(unsafe.Pointer(p))) - if ret != 0 { - return err.(syscall.Errno) - } - return 0 -} - -func tcset(fd uintptr, p *Termios) syscall.Errno { - ret, err := C.tcsetattr(C.int(fd), C.TCSANOW, (*C.struct_termios)(unsafe.Pointer(p))) - if ret != 0 { - return err.(syscall.Errno) - } - return 0 -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/term/tc_other.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/tc_other.go deleted file mode 100644 index 266039ba..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/term/tc_other.go +++ /dev/null @@ -1,19 +0,0 @@ -// +build !windows -// +build !linux !cgo - -package term - -import ( - "syscall" - "unsafe" -) - -func tcget(fd uintptr, p *Termios) syscall.Errno { - _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(p))) - return err -} - -func tcset(fd uintptr, p *Termios) syscall.Errno { - _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(p))) - return err -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/term/term.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/term.go deleted file mode 100644 index b945a3dc..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/term/term.go +++ /dev/null @@ -1,118 +0,0 @@ -// +build !windows - -package term - -import ( - "errors" - "io" - "os" - "os/signal" - "syscall" - "unsafe" -) - -var ( - ErrInvalidState = errors.New("Invalid terminal state") -) - -type State struct { - termios Termios -} - -type Winsize struct { - Height uint16 - Width uint16 - x uint16 - y uint16 -} - -func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { - return os.Stdin, os.Stdout, os.Stderr -} - -func GetFdInfo(in interface{}) (uintptr, bool) { - var inFd uintptr - var isTerminalIn bool - if file, ok := in.(*os.File); ok { - inFd = file.Fd() - isTerminalIn = IsTerminal(inFd) - } - return inFd, isTerminalIn -} - -func GetWinsize(fd uintptr) (*Winsize, error) { - ws := &Winsize{} - _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(ws))) - // Skipp errno = 0 - if err == 0 { - return ws, nil - } - return ws, err -} - -func SetWinsize(fd uintptr, ws *Winsize) error { - _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(syscall.TIOCSWINSZ), uintptr(unsafe.Pointer(ws))) - // Skipp errno = 0 - if err == 0 { - return nil - } - return err -} - -// IsTerminal returns true if the given file descriptor is a terminal. -func IsTerminal(fd uintptr) bool { - var termios Termios - return tcget(fd, &termios) == 0 -} - -// Restore restores the terminal connected to the given file descriptor to a -// previous state. 
-func RestoreTerminal(fd uintptr, state *State) error { - if state == nil { - return ErrInvalidState - } - if err := tcset(fd, &state.termios); err != 0 { - return err - } - return nil -} - -func SaveState(fd uintptr) (*State, error) { - var oldState State - if err := tcget(fd, &oldState.termios); err != 0 { - return nil, err - } - - return &oldState, nil -} - -func DisableEcho(fd uintptr, state *State) error { - newState := state.termios - newState.Lflag &^= syscall.ECHO - - if err := tcset(fd, &newState); err != 0 { - return err - } - handleInterrupt(fd, state) - return nil -} - -func SetRawTerminal(fd uintptr) (*State, error) { - oldState, err := MakeRaw(fd) - if err != nil { - return nil, err - } - handleInterrupt(fd, oldState) - return oldState, err -} - -func handleInterrupt(fd uintptr, state *State) { - sigchan := make(chan os.Signal, 1) - signal.Notify(sigchan, os.Interrupt) - - go func() { - _ = <-sigchan - RestoreTerminal(fd, state) - os.Exit(0) - }() -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/term/term_windows.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/term_windows.go deleted file mode 100644 index abda841c..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/term/term_windows.go +++ /dev/null @@ -1,137 +0,0 @@ -// +build windows -package term - -import ( - "io" - "os" - - "github.com/docker/docker/pkg/term/winconsole" -) - -// State holds the console mode for the terminal. -type State struct { - mode uint32 -} - -// Winsize is used for window size. -type Winsize struct { - Height uint16 - Width uint16 - x uint16 - y uint16 -} - -// GetWinsize gets the window size of the given terminal -func GetWinsize(fd uintptr) (*Winsize, error) { - ws := &Winsize{} - var info *winconsole.CONSOLE_SCREEN_BUFFER_INFO - info, err := winconsole.GetConsoleScreenBufferInfo(fd) - if err != nil { - return nil, err - } - - ws.Width = uint16(info.Window.Right - info.Window.Left + 1) - ws.Height = uint16(info.Window.Bottom - info.Window.Top + 1) - - ws.x = 0 // todo azlinux -- this is the pixel size of the Window, and not currently used by any caller - ws.y = 0 - - return ws, nil -} - -// SetWinsize sets the terminal connected to the given file descriptor to a -// given size. -func SetWinsize(fd uintptr, ws *Winsize) error { - return nil -} - -// IsTerminal returns true if the given file descriptor is a terminal. -func IsTerminal(fd uintptr) bool { - _, e := winconsole.GetConsoleMode(fd) - return e == nil -} - -// RestoreTerminal restores the terminal connected to the given file descriptor to a -// previous state. -func RestoreTerminal(fd uintptr, state *State) error { - return winconsole.SetConsoleMode(fd, state.mode) -} - -// SaveState saves the state of the given console -func SaveState(fd uintptr) (*State, error) { - mode, e := winconsole.GetConsoleMode(fd) - if e != nil { - return nil, e - } - return &State{mode}, nil -} - -// DisableEcho disbales the echo for given file descriptor and returns previous state -// see http://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx for these flag settings -func DisableEcho(fd uintptr, state *State) error { - state.mode &^= (winconsole.ENABLE_ECHO_INPUT) - state.mode |= (winconsole.ENABLE_PROCESSED_INPUT | winconsole.ENABLE_LINE_INPUT) - return winconsole.SetConsoleMode(fd, state.mode) -} - -// SetRawTerminal puts the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. 
-func SetRawTerminal(fd uintptr) (*State, error) { - oldState, err := MakeRaw(fd) - if err != nil { - return nil, err - } - // TODO (azlinux): implement handling interrupt and restore state of terminal - return oldState, err -} - -// MakeRaw puts the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. -func MakeRaw(fd uintptr) (*State, error) { - var state *State - state, err := SaveState(fd) - if err != nil { - return nil, err - } - - // https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx - // All three input modes, along with processed output mode, are designed to work together. - // It is best to either enable or disable all of these modes as a group. - // When all are enabled, the application is said to be in "cooked" mode, which means that most of the processing is handled for the application. - // When all are disabled, the application is in "raw" mode, which means that input is unfiltered and any processing is left to the application. - state.mode = 0 - err = winconsole.SetConsoleMode(fd, state.mode) - if err != nil { - return nil, err - } - return state, nil -} - -// GetFdInfo returns file descriptor and bool indicating whether the file is a terminal -func GetFdInfo(in interface{}) (uintptr, bool) { - return winconsole.GetHandleInfo(in) -} - -func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { - var shouldEmulateANSI bool - switch { - case os.Getenv("ConEmuANSI") == "ON": - // ConEmu shell, ansi emulated by default and ConEmu does an extensively - // good emulation. - shouldEmulateANSI = false - case os.Getenv("MSYSTEM") != "": - // MSYS (mingw) cannot fully emulate well and still shows escape characters - // mostly because it's still running on cmd.exe window. - shouldEmulateANSI = true - default: - shouldEmulateANSI = true - } - - if shouldEmulateANSI { - return winconsole.StdStreams() - } - - return os.Stdin, os.Stdout, os.Stderr -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/term/termios_darwin.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/termios_darwin.go deleted file mode 100644 index 11cd70d1..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/term/termios_darwin.go +++ /dev/null @@ -1,65 +0,0 @@ -package term - -import ( - "syscall" - "unsafe" -) - -const ( - getTermios = syscall.TIOCGETA - setTermios = syscall.TIOCSETA - - IGNBRK = syscall.IGNBRK - PARMRK = syscall.PARMRK - INLCR = syscall.INLCR - IGNCR = syscall.IGNCR - ECHONL = syscall.ECHONL - CSIZE = syscall.CSIZE - ICRNL = syscall.ICRNL - ISTRIP = syscall.ISTRIP - PARENB = syscall.PARENB - ECHO = syscall.ECHO - ICANON = syscall.ICANON - ISIG = syscall.ISIG - IXON = syscall.IXON - BRKINT = syscall.BRKINT - INPCK = syscall.INPCK - OPOST = syscall.OPOST - CS8 = syscall.CS8 - IEXTEN = syscall.IEXTEN -) - -type Termios struct { - Iflag uint64 - Oflag uint64 - Cflag uint64 - Lflag uint64 - Cc [20]byte - Ispeed uint64 - Ospeed uint64 -} - -// MakeRaw put the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. 
-func MakeRaw(fd uintptr) (*State, error) { - var oldState State - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { - return nil, err - } - - newState := oldState.termios - newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON) - newState.Oflag &^= OPOST - newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN) - newState.Cflag &^= (CSIZE | PARENB) - newState.Cflag |= CS8 - newState.Cc[syscall.VMIN] = 1 - newState.Cc[syscall.VTIME] = 0 - - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 { - return nil, err - } - - return &oldState, nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/term/termios_freebsd.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/termios_freebsd.go deleted file mode 100644 index ed365957..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/term/termios_freebsd.go +++ /dev/null @@ -1,65 +0,0 @@ -package term - -import ( - "syscall" - "unsafe" -) - -const ( - getTermios = syscall.TIOCGETA - setTermios = syscall.TIOCSETA - - IGNBRK = syscall.IGNBRK - PARMRK = syscall.PARMRK - INLCR = syscall.INLCR - IGNCR = syscall.IGNCR - ECHONL = syscall.ECHONL - CSIZE = syscall.CSIZE - ICRNL = syscall.ICRNL - ISTRIP = syscall.ISTRIP - PARENB = syscall.PARENB - ECHO = syscall.ECHO - ICANON = syscall.ICANON - ISIG = syscall.ISIG - IXON = syscall.IXON - BRKINT = syscall.BRKINT - INPCK = syscall.INPCK - OPOST = syscall.OPOST - CS8 = syscall.CS8 - IEXTEN = syscall.IEXTEN -) - -type Termios struct { - Iflag uint32 - Oflag uint32 - Cflag uint32 - Lflag uint32 - Cc [20]byte - Ispeed uint32 - Ospeed uint32 -} - -// MakeRaw put the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. -func MakeRaw(fd uintptr) (*State, error) { - var oldState State - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { - return nil, err - } - - newState := oldState.termios - newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON) - newState.Oflag &^= OPOST - newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN) - newState.Cflag &^= (CSIZE | PARENB) - newState.Cflag |= CS8 - newState.Cc[syscall.VMIN] = 1 - newState.Cc[syscall.VTIME] = 0 - - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 { - return nil, err - } - - return &oldState, nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/term/termios_linux.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/termios_linux.go deleted file mode 100644 index 024187ff..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/term/termios_linux.go +++ /dev/null @@ -1,46 +0,0 @@ -// +build !cgo - -package term - -import ( - "syscall" - "unsafe" -) - -const ( - getTermios = syscall.TCGETS - setTermios = syscall.TCSETS -) - -type Termios struct { - Iflag uint32 - Oflag uint32 - Cflag uint32 - Lflag uint32 - Cc [20]byte - Ispeed uint32 - Ospeed uint32 -} - -// MakeRaw put the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. 
-func MakeRaw(fd uintptr) (*State, error) { - var oldState State - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, getTermios, uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { - return nil, err - } - - newState := oldState.termios - - newState.Iflag &^= (syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON) - newState.Oflag &^= syscall.OPOST - newState.Lflag &^= (syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN) - newState.Cflag &^= (syscall.CSIZE | syscall.PARENB) - newState.Cflag |= syscall.CS8 - - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(&newState))); err != 0 { - return nil, err - } - return &oldState, nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/term/winconsole/console_windows.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/winconsole/console_windows.go deleted file mode 100644 index 19977b10..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/term/winconsole/console_windows.go +++ /dev/null @@ -1,1042 +0,0 @@ -// +build windows - -package winconsole - -import ( - "bytes" - "fmt" - "io" - "os" - "strconv" - "strings" - "sync" - "syscall" - "unsafe" -) - -const ( - // Consts for Get/SetConsoleMode function - // see http://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx - ENABLE_ECHO_INPUT = 0x0004 - ENABLE_INSERT_MODE = 0x0020 - ENABLE_LINE_INPUT = 0x0002 - ENABLE_MOUSE_INPUT = 0x0010 - ENABLE_PROCESSED_INPUT = 0x0001 - ENABLE_QUICK_EDIT_MODE = 0x0040 - ENABLE_WINDOW_INPUT = 0x0008 - // If parameter is a screen buffer handle, additional values - ENABLE_PROCESSED_OUTPUT = 0x0001 - ENABLE_WRAP_AT_EOL_OUTPUT = 0x0002 - - //http://msdn.microsoft.com/en-us/library/windows/desktop/ms682088(v=vs.85).aspx#_win32_character_attributes - FOREGROUND_BLUE = 1 - FOREGROUND_GREEN = 2 - FOREGROUND_RED = 4 - FOREGROUND_INTENSITY = 8 - FOREGROUND_MASK_SET = 0x000F - FOREGROUND_MASK_UNSET = 0xFFF0 - - BACKGROUND_BLUE = 16 - BACKGROUND_GREEN = 32 - BACKGROUND_RED = 64 - BACKGROUND_INTENSITY = 128 - BACKGROUND_MASK_SET = 0x00F0 - BACKGROUND_MASK_UNSET = 0xFF0F - - COMMON_LVB_REVERSE_VIDEO = 0x4000 - COMMON_LVB_UNDERSCORE = 0x8000 - - // http://man7.org/linux/man-pages/man4/console_codes.4.html - // ECMA-48 Set Graphics Rendition - ANSI_ATTR_RESET = 0 - ANSI_ATTR_BOLD = 1 - ANSI_ATTR_DIM = 2 - ANSI_ATTR_UNDERLINE = 4 - ANSI_ATTR_BLINK = 5 - ANSI_ATTR_REVERSE = 7 - ANSI_ATTR_INVISIBLE = 8 - - ANSI_ATTR_UNDERLINE_OFF = 24 - ANSI_ATTR_BLINK_OFF = 25 - ANSI_ATTR_REVERSE_OFF = 27 - ANSI_ATTR_INVISIBLE_OFF = 8 - - ANSI_FOREGROUND_BLACK = 30 - ANSI_FOREGROUND_RED = 31 - ANSI_FOREGROUND_GREEN = 32 - ANSI_FOREGROUND_YELLOW = 33 - ANSI_FOREGROUND_BLUE = 34 - ANSI_FOREGROUND_MAGENTA = 35 - ANSI_FOREGROUND_CYAN = 36 - ANSI_FOREGROUND_WHITE = 37 - ANSI_FOREGROUND_DEFAULT = 39 - - ANSI_BACKGROUND_BLACK = 40 - ANSI_BACKGROUND_RED = 41 - ANSI_BACKGROUND_GREEN = 42 - ANSI_BACKGROUND_YELLOW = 43 - ANSI_BACKGROUND_BLUE = 44 - ANSI_BACKGROUND_MAGENTA = 45 - ANSI_BACKGROUND_CYAN = 46 - ANSI_BACKGROUND_WHITE = 47 - ANSI_BACKGROUND_DEFAULT = 49 - - ANSI_MAX_CMD_LENGTH = 256 - - MAX_INPUT_EVENTS = 128 - MAX_INPUT_BUFFER = 1024 - DEFAULT_WIDTH = 80 - DEFAULT_HEIGHT = 24 -) - -// http://msdn.microsoft.com/en-us/library/windows/desktop/dd375731(v=vs.85).aspx -const ( - VK_PRIOR = 0x21 // PAGE UP key - VK_NEXT = 0x22 // PAGE DOWN key - VK_END = 0x23 // END key - VK_HOME = 0x24 // HOME key - 
VK_LEFT = 0x25 // LEFT ARROW key - VK_UP = 0x26 // UP ARROW key - VK_RIGHT = 0x27 //RIGHT ARROW key - VK_DOWN = 0x28 //DOWN ARROW key - VK_SELECT = 0x29 //SELECT key - VK_PRINT = 0x2A //PRINT key - VK_EXECUTE = 0x2B //EXECUTE key - VK_SNAPSHOT = 0x2C //PRINT SCREEN key - VK_INSERT = 0x2D //INS key - VK_DELETE = 0x2E //DEL key - VK_HELP = 0x2F //HELP key - VK_F1 = 0x70 //F1 key - VK_F2 = 0x71 //F2 key - VK_F3 = 0x72 //F3 key - VK_F4 = 0x73 //F4 key - VK_F5 = 0x74 //F5 key - VK_F6 = 0x75 //F6 key - VK_F7 = 0x76 //F7 key - VK_F8 = 0x77 //F8 key - VK_F9 = 0x78 //F9 key - VK_F10 = 0x79 //F10 key - VK_F11 = 0x7A //F11 key - VK_F12 = 0x7B //F12 key -) - -var kernel32DLL = syscall.NewLazyDLL("kernel32.dll") - -var ( - setConsoleModeProc = kernel32DLL.NewProc("SetConsoleMode") - getConsoleScreenBufferInfoProc = kernel32DLL.NewProc("GetConsoleScreenBufferInfo") - setConsoleCursorPositionProc = kernel32DLL.NewProc("SetConsoleCursorPosition") - setConsoleTextAttributeProc = kernel32DLL.NewProc("SetConsoleTextAttribute") - fillConsoleOutputCharacterProc = kernel32DLL.NewProc("FillConsoleOutputCharacterW") - writeConsoleOutputProc = kernel32DLL.NewProc("WriteConsoleOutputW") - readConsoleInputProc = kernel32DLL.NewProc("ReadConsoleInputW") - getNumberOfConsoleInputEventsProc = kernel32DLL.NewProc("GetNumberOfConsoleInputEvents") - getConsoleCursorInfoProc = kernel32DLL.NewProc("GetConsoleCursorInfo") - setConsoleCursorInfoProc = kernel32DLL.NewProc("SetConsoleCursorInfo") - setConsoleWindowInfoProc = kernel32DLL.NewProc("SetConsoleWindowInfo") - setConsoleScreenBufferSizeProc = kernel32DLL.NewProc("SetConsoleScreenBufferSize") -) - -// types for calling various windows API -// see http://msdn.microsoft.com/en-us/library/windows/desktop/ms682093(v=vs.85).aspx -type ( - SHORT int16 - SMALL_RECT struct { - Left SHORT - Top SHORT - Right SHORT - Bottom SHORT - } - - COORD struct { - X SHORT - Y SHORT - } - - BOOL int32 - WORD uint16 - WCHAR uint16 - DWORD uint32 - - CONSOLE_SCREEN_BUFFER_INFO struct { - Size COORD - CursorPosition COORD - Attributes WORD - Window SMALL_RECT - MaximumWindowSize COORD - } - - CONSOLE_CURSOR_INFO struct { - Size DWORD - Visible BOOL - } - - // http://msdn.microsoft.com/en-us/library/windows/desktop/ms684166(v=vs.85).aspx - KEY_EVENT_RECORD struct { - KeyDown BOOL - RepeatCount WORD - VirtualKeyCode WORD - VirtualScanCode WORD - UnicodeChar WCHAR - ControlKeyState DWORD - } - - INPUT_RECORD struct { - EventType WORD - KeyEvent KEY_EVENT_RECORD - } - - CHAR_INFO struct { - UnicodeChar WCHAR - Attributes WORD - } -) - -// Implements the TerminalEmulator interface -type WindowsTerminal struct { - outMutex sync.Mutex - inMutex sync.Mutex - inputBuffer []byte - inputSize int - inputEvents []INPUT_RECORD - screenBufferInfo *CONSOLE_SCREEN_BUFFER_INFO - inputEscapeSequence []byte -} - -func getStdHandle(stdhandle int) uintptr { - handle, err := syscall.GetStdHandle(stdhandle) - if err != nil { - panic(fmt.Errorf("could not get standard io handle %d", stdhandle)) - } - return uintptr(handle) -} - -func StdStreams() (stdIn io.ReadCloser, stdOut io.Writer, stdErr io.Writer) { - handler := &WindowsTerminal{ - inputBuffer: make([]byte, MAX_INPUT_BUFFER), - inputEscapeSequence: []byte(KEY_ESC_CSI), - inputEvents: make([]INPUT_RECORD, MAX_INPUT_EVENTS), - } - - if IsTerminal(os.Stdin.Fd()) { - stdIn = &terminalReader{ - wrappedReader: os.Stdin, - emulator: handler, - command: make([]byte, 0, ANSI_MAX_CMD_LENGTH), - fd: getStdHandle(syscall.STD_INPUT_HANDLE), - } - } else { - stdIn = os.Stdin 
- } - - if IsTerminal(os.Stdout.Fd()) { - stdoutHandle := getStdHandle(syscall.STD_OUTPUT_HANDLE) - - // Save current screen buffer info - screenBufferInfo, err := GetConsoleScreenBufferInfo(stdoutHandle) - if err != nil { - // If GetConsoleScreenBufferInfo returns a nil error, it usually means that stdout is not a TTY. - // However, this is in the branch where stdout is a TTY, hence the panic. - panic("could not get console screen buffer info") - } - handler.screenBufferInfo = screenBufferInfo - - // Set the window size - SetWindowSize(stdoutHandle, DEFAULT_WIDTH, DEFAULT_HEIGHT, DEFAULT_HEIGHT) - buffer = make([]CHAR_INFO, screenBufferInfo.MaximumWindowSize.X*screenBufferInfo.MaximumWindowSize.Y) - - stdOut = &terminalWriter{ - wrappedWriter: os.Stdout, - emulator: handler, - command: make([]byte, 0, ANSI_MAX_CMD_LENGTH), - fd: stdoutHandle, - } - } else { - stdOut = os.Stdout - } - - if IsTerminal(os.Stderr.Fd()) { - stdErr = &terminalWriter{ - wrappedWriter: os.Stderr, - emulator: handler, - command: make([]byte, 0, ANSI_MAX_CMD_LENGTH), - fd: getStdHandle(syscall.STD_ERROR_HANDLE), - } - } else { - stdErr = os.Stderr - } - - return stdIn, stdOut, stdErr -} - -// GetHandleInfo returns file descriptor and bool indicating whether the file is a terminal -func GetHandleInfo(in interface{}) (uintptr, bool) { - var inFd uintptr - var isTerminalIn bool - if file, ok := in.(*os.File); ok { - inFd = file.Fd() - isTerminalIn = IsTerminal(inFd) - } - if tr, ok := in.(*terminalReader); ok { - if file, ok := tr.wrappedReader.(*os.File); ok { - inFd = file.Fd() - isTerminalIn = IsTerminal(inFd) - } - } - return inFd, isTerminalIn -} - -func getError(r1, r2 uintptr, lastErr error) error { - // If the function fails, the return value is zero. - if r1 == 0 { - if lastErr != nil { - return lastErr - } - return syscall.EINVAL - } - return nil -} - -// GetConsoleMode gets the console mode for given file descriptor -// http://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx -func GetConsoleMode(handle uintptr) (uint32, error) { - var mode uint32 - err := syscall.GetConsoleMode(syscall.Handle(handle), &mode) - return mode, err -} - -// SetConsoleMode sets the console mode for given file descriptor -// http://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx -func SetConsoleMode(handle uintptr, mode uint32) error { - return getError(setConsoleModeProc.Call(handle, uintptr(mode), 0)) -} - -// SetCursorVisible sets the cursor visbility -// http://msdn.microsoft.com/en-us/library/windows/desktop/ms686019(v=vs.85).aspx -func SetCursorVisible(handle uintptr, isVisible BOOL) (bool, error) { - var cursorInfo CONSOLE_CURSOR_INFO - if err := getError(getConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(&cursorInfo)), 0)); err != nil { - return false, err - } - cursorInfo.Visible = isVisible - if err := getError(setConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(&cursorInfo)), 0)); err != nil { - return false, err - } - return true, nil -} - -// SetWindowSize sets the size of the console window. 
-func SetWindowSize(handle uintptr, width, height, max SHORT) (bool, error) { - window := SMALL_RECT{Left: 0, Top: 0, Right: width - 1, Bottom: height - 1} - coord := COORD{X: width - 1, Y: max} - if err := getError(setConsoleWindowInfoProc.Call(handle, uintptr(1), uintptr(unsafe.Pointer(&window)))); err != nil { - return false, err - } - if err := getError(setConsoleScreenBufferSizeProc.Call(handle, marshal(coord))); err != nil { - return false, err - } - return true, nil -} - -// GetConsoleScreenBufferInfo retrieves information about the specified console screen buffer. -// http://msdn.microsoft.com/en-us/library/windows/desktop/ms683171(v=vs.85).aspx -func GetConsoleScreenBufferInfo(handle uintptr) (*CONSOLE_SCREEN_BUFFER_INFO, error) { - var info CONSOLE_SCREEN_BUFFER_INFO - if err := getError(getConsoleScreenBufferInfoProc.Call(handle, uintptr(unsafe.Pointer(&info)), 0)); err != nil { - return nil, err - } - return &info, nil -} - -// setConsoleTextAttribute sets the attributes of characters written to the -// console screen buffer by the WriteFile or WriteConsole function, -// http://msdn.microsoft.com/en-us/library/windows/desktop/ms686047(v=vs.85).aspx -func setConsoleTextAttribute(handle uintptr, attribute WORD) error { - return getError(setConsoleTextAttributeProc.Call(handle, uintptr(attribute), 0)) -} - -func writeConsoleOutput(handle uintptr, buffer []CHAR_INFO, bufferSize COORD, bufferCoord COORD, writeRegion *SMALL_RECT) (bool, error) { - if err := getError(writeConsoleOutputProc.Call(handle, uintptr(unsafe.Pointer(&buffer[0])), marshal(bufferSize), marshal(bufferCoord), uintptr(unsafe.Pointer(writeRegion)))); err != nil { - return false, err - } - return true, nil -} - -// http://msdn.microsoft.com/en-us/library/windows/desktop/ms682663(v=vs.85).aspx -func fillConsoleOutputCharacter(handle uintptr, fillChar byte, length uint32, writeCord COORD) (bool, error) { - out := int64(0) - if err := getError(fillConsoleOutputCharacterProc.Call(handle, uintptr(fillChar), uintptr(length), marshal(writeCord), uintptr(unsafe.Pointer(&out)))); err != nil { - return false, err - } - return true, nil -} - -// Gets the number of space characters to write for "clearing" the section of terminal -func getNumberOfChars(fromCoord COORD, toCoord COORD, screenSize COORD) uint32 { - // must be valid cursor position - if fromCoord.X < 0 || fromCoord.Y < 0 || toCoord.X < 0 || toCoord.Y < 0 { - return 0 - } - if fromCoord.X >= screenSize.X || fromCoord.Y >= screenSize.Y || toCoord.X >= screenSize.X || toCoord.Y >= screenSize.Y { - return 0 - } - // can't be backwards - if fromCoord.Y > toCoord.Y { - return 0 - } - // same line - if fromCoord.Y == toCoord.Y { - return uint32(toCoord.X-fromCoord.X) + 1 - } - // spans more than one line - if fromCoord.Y < toCoord.Y { - // from start till end of line for first line + from start of line till end - retValue := uint32(screenSize.X-fromCoord.X) + uint32(toCoord.X) + 1 - // don't count first and last line - linesBetween := toCoord.Y - fromCoord.Y - 1 - if linesBetween > 0 { - retValue = retValue + uint32(linesBetween*screenSize.X) - } - return retValue - } - return 0 -} - -var buffer []CHAR_INFO - -func clearDisplayRect(handle uintptr, fillChar rune, attributes WORD, fromCoord COORD, toCoord COORD, windowSize COORD) (uint32, error) { - var writeRegion SMALL_RECT - writeRegion.Top = fromCoord.Y - writeRegion.Left = fromCoord.X - writeRegion.Right = toCoord.X - writeRegion.Bottom = toCoord.Y - - // allocate and initialize buffer - width := toCoord.X - fromCoord.X 
+ 1 - height := toCoord.Y - fromCoord.Y + 1 - size := width * height - if size > 0 { - for i := 0; i < int(size); i++ { - buffer[i].UnicodeChar = WCHAR(fillChar) - buffer[i].Attributes = attributes - } - - // Write to buffer - r, err := writeConsoleOutput(handle, buffer[:size], windowSize, COORD{X: 0, Y: 0}, &writeRegion) - if !r { - if err != nil { - return 0, err - } - return 0, syscall.EINVAL - } - } - return uint32(size), nil -} - -func clearDisplayRange(handle uintptr, fillChar rune, attributes WORD, fromCoord COORD, toCoord COORD, windowSize COORD) (uint32, error) { - nw := uint32(0) - // start and end on same line - if fromCoord.Y == toCoord.Y { - return clearDisplayRect(handle, fillChar, attributes, fromCoord, toCoord, windowSize) - } - // TODO(azlinux): if full screen, optimize - - // spans more than one line - if fromCoord.Y < toCoord.Y { - // from start position till end of line for first line - n, err := clearDisplayRect(handle, fillChar, attributes, fromCoord, COORD{X: windowSize.X - 1, Y: fromCoord.Y}, windowSize) - if err != nil { - return nw, err - } - nw += n - // lines between - linesBetween := toCoord.Y - fromCoord.Y - 1 - if linesBetween > 0 { - n, err = clearDisplayRect(handle, fillChar, attributes, COORD{X: 0, Y: fromCoord.Y + 1}, COORD{X: windowSize.X - 1, Y: toCoord.Y - 1}, windowSize) - if err != nil { - return nw, err - } - nw += n - } - // lines at end - n, err = clearDisplayRect(handle, fillChar, attributes, COORD{X: 0, Y: toCoord.Y}, toCoord, windowSize) - if err != nil { - return nw, err - } - nw += n - } - return nw, nil -} - -// setConsoleCursorPosition sets the console cursor position -// Note The X and Y are zero based -// If relative is true then the new position is relative to current one -func setConsoleCursorPosition(handle uintptr, isRelative bool, column int16, line int16) error { - screenBufferInfo, err := GetConsoleScreenBufferInfo(handle) - if err != nil { - return err - } - var position COORD - if isRelative { - position.X = screenBufferInfo.CursorPosition.X + SHORT(column) - position.Y = screenBufferInfo.CursorPosition.Y + SHORT(line) - } else { - position.X = SHORT(column) - position.Y = SHORT(line) - } - return getError(setConsoleCursorPositionProc.Call(handle, marshal(position), 0)) -} - -// http://msdn.microsoft.com/en-us/library/windows/desktop/ms683207(v=vs.85).aspx -func getNumberOfConsoleInputEvents(handle uintptr) (uint16, error) { - var n WORD - if err := getError(getNumberOfConsoleInputEventsProc.Call(handle, uintptr(unsafe.Pointer(&n)))); err != nil { - return 0, err - } - return uint16(n), nil -} - -//http://msdn.microsoft.com/en-us/library/windows/desktop/ms684961(v=vs.85).aspx -func readConsoleInputKey(handle uintptr, inputBuffer []INPUT_RECORD) (int, error) { - var nr WORD - if err := getError(readConsoleInputProc.Call(handle, uintptr(unsafe.Pointer(&inputBuffer[0])), uintptr(len(inputBuffer)), uintptr(unsafe.Pointer(&nr)))); err != nil { - return 0, err - } - return int(nr), nil -} - -func getWindowsTextAttributeForAnsiValue(originalFlag WORD, defaultValue WORD, ansiValue int16) (WORD, error) { - flag := WORD(originalFlag) - if flag == 0 { - flag = defaultValue - } - switch ansiValue { - case ANSI_ATTR_RESET: - flag &^= COMMON_LVB_UNDERSCORE - flag &^= BACKGROUND_INTENSITY - flag = flag | FOREGROUND_INTENSITY - case ANSI_ATTR_INVISIBLE: - // TODO: how do you reset reverse? 
- case ANSI_ATTR_UNDERLINE: - flag = flag | COMMON_LVB_UNDERSCORE - case ANSI_ATTR_BLINK: - // seems like background intenisty is blink - flag = flag | BACKGROUND_INTENSITY - case ANSI_ATTR_UNDERLINE_OFF: - flag &^= COMMON_LVB_UNDERSCORE - case ANSI_ATTR_BLINK_OFF: - // seems like background intenisty is blink - flag &^= BACKGROUND_INTENSITY - case ANSI_ATTR_BOLD: - flag = flag | FOREGROUND_INTENSITY - case ANSI_ATTR_DIM: - flag &^= FOREGROUND_INTENSITY - case ANSI_ATTR_REVERSE, ANSI_ATTR_REVERSE_OFF: - // swap forground and background bits - foreground := flag & FOREGROUND_MASK_SET - background := flag & BACKGROUND_MASK_SET - flag = (flag & BACKGROUND_MASK_UNSET & FOREGROUND_MASK_UNSET) | (foreground << 4) | (background >> 4) - - // FOREGROUND - case ANSI_FOREGROUND_DEFAULT: - flag = (flag & FOREGROUND_MASK_UNSET) | (defaultValue & FOREGROUND_MASK_SET) - case ANSI_FOREGROUND_BLACK: - flag = flag ^ (FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE) - case ANSI_FOREGROUND_RED: - flag = (flag & FOREGROUND_MASK_UNSET) | FOREGROUND_RED - case ANSI_FOREGROUND_GREEN: - flag = (flag & FOREGROUND_MASK_UNSET) | FOREGROUND_GREEN - case ANSI_FOREGROUND_YELLOW: - flag = (flag & FOREGROUND_MASK_UNSET) | FOREGROUND_RED | FOREGROUND_GREEN - case ANSI_FOREGROUND_BLUE: - flag = (flag & FOREGROUND_MASK_UNSET) | FOREGROUND_BLUE - case ANSI_FOREGROUND_MAGENTA: - flag = (flag & FOREGROUND_MASK_UNSET) | FOREGROUND_RED | FOREGROUND_BLUE - case ANSI_FOREGROUND_CYAN: - flag = (flag & FOREGROUND_MASK_UNSET) | FOREGROUND_GREEN | FOREGROUND_BLUE - case ANSI_FOREGROUND_WHITE: - flag = (flag & FOREGROUND_MASK_UNSET) | FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE - - // Background - case ANSI_BACKGROUND_DEFAULT: - // Black with no intensity - flag = (flag & BACKGROUND_MASK_UNSET) | (defaultValue & BACKGROUND_MASK_SET) - case ANSI_BACKGROUND_BLACK: - flag = (flag & BACKGROUND_MASK_UNSET) - case ANSI_BACKGROUND_RED: - flag = (flag & BACKGROUND_MASK_UNSET) | BACKGROUND_RED - case ANSI_BACKGROUND_GREEN: - flag = (flag & BACKGROUND_MASK_UNSET) | BACKGROUND_GREEN - case ANSI_BACKGROUND_YELLOW: - flag = (flag & BACKGROUND_MASK_UNSET) | BACKGROUND_RED | BACKGROUND_GREEN - case ANSI_BACKGROUND_BLUE: - flag = (flag & BACKGROUND_MASK_UNSET) | BACKGROUND_BLUE - case ANSI_BACKGROUND_MAGENTA: - flag = (flag & BACKGROUND_MASK_UNSET) | BACKGROUND_RED | BACKGROUND_BLUE - case ANSI_BACKGROUND_CYAN: - flag = (flag & BACKGROUND_MASK_UNSET) | BACKGROUND_GREEN | BACKGROUND_BLUE - case ANSI_BACKGROUND_WHITE: - flag = (flag & BACKGROUND_MASK_UNSET) | BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE - } - return flag, nil -} - -// HandleOutputCommand interpretes the Ansi commands and then makes appropriate Win32 calls -func (term *WindowsTerminal) HandleOutputCommand(handle uintptr, command []byte) (n int, err error) { - // always consider all the bytes in command, processed - n = len(command) - - parsedCommand := parseAnsiCommand(command) - - // console settings changes need to happen in atomic way - term.outMutex.Lock() - defer term.outMutex.Unlock() - - switch parsedCommand.Command { - case "m": - // [Value;...;Valuem - // Set Graphics Mode: - // Calls the graphics functions specified by the following values. - // These specified functions remain active until the next occurrence of this escape sequence. - // Graphics mode changes the colors and attributes of text (such as bold and underline) displayed on the screen. 
- screenBufferInfo, err := GetConsoleScreenBufferInfo(handle) - if err != nil { - return n, err - } - flag := screenBufferInfo.Attributes - for _, e := range parsedCommand.Parameters { - value, _ := strconv.ParseInt(e, 10, 16) // base 10, 16 bit - if value == ANSI_ATTR_RESET { - flag = term.screenBufferInfo.Attributes // reset - } else { - flag, err = getWindowsTextAttributeForAnsiValue(flag, term.screenBufferInfo.Attributes, int16(value)) - if err != nil { - return n, err - } - } - } - if err := setConsoleTextAttribute(handle, flag); err != nil { - return n, err - } - case "H", "f": - // [line;columnH - // [line;columnf - // Moves the cursor to the specified position (coordinates). - // If you do not specify a position, the cursor moves to the home position at the upper-left corner of the screen (line 0, column 0). - screenBufferInfo, err := GetConsoleScreenBufferInfo(handle) - if err != nil { - return n, err - } - line, err := parseInt16OrDefault(parsedCommand.getParam(0), 1) - if err != nil { - return n, err - } - if line > int16(screenBufferInfo.Window.Bottom) { - line = int16(screenBufferInfo.Window.Bottom) - } - column, err := parseInt16OrDefault(parsedCommand.getParam(1), 1) - if err != nil { - return n, err - } - if column > int16(screenBufferInfo.Window.Right) { - column = int16(screenBufferInfo.Window.Right) - } - // The numbers are not 0 based, but 1 based - if err := setConsoleCursorPosition(handle, false, column-1, line-1); err != nil { - return n, err - } - - case "A": - // [valueA - // Moves the cursor up by the specified number of lines without changing columns. - // If the cursor is already on the top line, ignores this sequence. - value, err := parseInt16OrDefault(parsedCommand.getParam(0), 1) - if err != nil { - return len(command), err - } - if err := setConsoleCursorPosition(handle, true, 0, -value); err != nil { - return n, err - } - case "B": - // [valueB - // Moves the cursor down by the specified number of lines without changing columns. - // If the cursor is already on the bottom line, ignores this sequence. - value, err := parseInt16OrDefault(parsedCommand.getParam(0), 1) - if err != nil { - return n, err - } - if err := setConsoleCursorPosition(handle, true, 0, value); err != nil { - return n, err - } - case "C": - // [valueC - // Moves the cursor forward by the specified number of columns without changing lines. - // If the cursor is already in the rightmost column, ignores this sequence. - value, err := parseInt16OrDefault(parsedCommand.getParam(0), 1) - if err != nil { - return n, err - } - if err := setConsoleCursorPosition(handle, true, value, 0); err != nil { - return n, err - } - case "D": - // [valueD - // Moves the cursor back by the specified number of columns without changing lines. - // If the cursor is already in the leftmost column, ignores this sequence. - value, err := parseInt16OrDefault(parsedCommand.getParam(0), 1) - if err != nil { - return n, err - } - if err := setConsoleCursorPosition(handle, true, -value, 0); err != nil { - return n, err - } - case "J": - // [J Erases from the cursor to the end of the screen, including the cursor position. - // [1J Erases from the beginning of the screen to the cursor, including the cursor position. - // [2J Erases the complete display. The cursor does not move. - // Clears the screen and moves the cursor to the home position (line 0, column 0). 
- value, err := parseInt16OrDefault(parsedCommand.getParam(0), 0) - if err != nil { - return n, err - } - var start COORD - var cursor COORD - var end COORD - screenBufferInfo, err := GetConsoleScreenBufferInfo(handle) - if err != nil { - return n, err - } - switch value { - case 0: - start = screenBufferInfo.CursorPosition - // end of the screen - end.X = screenBufferInfo.MaximumWindowSize.X - 1 - end.Y = screenBufferInfo.MaximumWindowSize.Y - 1 - // cursor - cursor = screenBufferInfo.CursorPosition - case 1: - - // start of the screen - start.X = 0 - start.Y = 0 - // end of the screen - end = screenBufferInfo.CursorPosition - // cursor - cursor = screenBufferInfo.CursorPosition - case 2: - // start of the screen - start.X = 0 - start.Y = 0 - // end of the screen - end.X = screenBufferInfo.MaximumWindowSize.X - 1 - end.Y = screenBufferInfo.MaximumWindowSize.Y - 1 - // cursor - cursor.X = 0 - cursor.Y = 0 - } - if _, err := clearDisplayRange(uintptr(handle), ' ', term.screenBufferInfo.Attributes, start, end, screenBufferInfo.MaximumWindowSize); err != nil { - return n, err - } - // remember the the cursor position is 1 based - if err := setConsoleCursorPosition(handle, false, int16(cursor.X), int16(cursor.Y)); err != nil { - return n, err - } - case "K": - // [K - // Clears all characters from the cursor position to the end of the line (including the character at the cursor position). - // [K Erases from the cursor to the end of the line, including the cursor position. - // [1K Erases from the beginning of the line to the cursor, including the cursor position. - // [2K Erases the complete line. - value, err := parseInt16OrDefault(parsedCommand.getParam(0), 0) - var start COORD - var cursor COORD - var end COORD - screenBufferInfo, err := GetConsoleScreenBufferInfo(uintptr(handle)) - if err != nil { - return n, err - } - switch value { - case 0: - // start is where cursor is - start = screenBufferInfo.CursorPosition - // end of line - end.X = screenBufferInfo.MaximumWindowSize.X - 1 - end.Y = screenBufferInfo.CursorPosition.Y - // cursor remains the same - cursor = screenBufferInfo.CursorPosition - - case 1: - // beginning of line - start.X = 0 - start.Y = screenBufferInfo.CursorPosition.Y - // until cursor - end = screenBufferInfo.CursorPosition - // cursor remains the same - cursor = screenBufferInfo.CursorPosition - case 2: - // start of the line - start.X = 0 - start.Y = screenBufferInfo.MaximumWindowSize.Y - 1 - // end of the line - end.X = screenBufferInfo.MaximumWindowSize.X - 1 - end.Y = screenBufferInfo.MaximumWindowSize.Y - 1 - // cursor - cursor.X = 0 - cursor.Y = screenBufferInfo.MaximumWindowSize.Y - 1 - } - if _, err := clearDisplayRange(uintptr(handle), ' ', term.screenBufferInfo.Attributes, start, end, screenBufferInfo.MaximumWindowSize); err != nil { - return n, err - } - // remember the the cursor position is 1 based - if err := setConsoleCursorPosition(uintptr(handle), false, int16(cursor.X), int16(cursor.Y)); err != nil { - return n, err - } - - case "l": - for _, value := range parsedCommand.Parameters { - switch value { - case "?25", "25": - SetCursorVisible(uintptr(handle), BOOL(0)) - case "?1049", "1049": - // TODO (azlinux): Restore terminal - case "?1", "1": - // If the DECCKM function is reset, then the arrow keys send ANSI cursor sequences to the host. 
- term.inputEscapeSequence = []byte(KEY_ESC_CSI) - } - } - case "h": - for _, value := range parsedCommand.Parameters { - switch value { - case "?25", "25": - SetCursorVisible(uintptr(handle), BOOL(1)) - case "?1049", "1049": - // TODO (azlinux): Save terminal - case "?1", "1": - // If the DECCKM function is set, then the arrow keys send application sequences to the host. - // DECCKM (default off): When set, the cursor keys send an ESC O prefix, rather than ESC [. - term.inputEscapeSequence = []byte(KEY_ESC_O) - } - } - - case "]": - /* - TODO (azlinux): - Linux Console Private CSI Sequences - - The following sequences are neither ECMA-48 nor native VT102. They are - native to the Linux console driver. Colors are in SGR parameters: 0 = - black, 1 = red, 2 = green, 3 = brown, 4 = blue, 5 = magenta, 6 = cyan, - 7 = white. - - ESC [ 1 ; n ] Set color n as the underline color - ESC [ 2 ; n ] Set color n as the dim color - ESC [ 8 ] Make the current color pair the default attributes. - ESC [ 9 ; n ] Set screen blank timeout to n minutes. - ESC [ 10 ; n ] Set bell frequency in Hz. - ESC [ 11 ; n ] Set bell duration in msec. - ESC [ 12 ; n ] Bring specified console to the front. - ESC [ 13 ] Unblank the screen. - ESC [ 14 ; n ] Set the VESA powerdown interval in minutes. - - */ - } - return n, nil -} - -// WriteChars writes the bytes to given writer. -func (term *WindowsTerminal) WriteChars(fd uintptr, w io.Writer, p []byte) (n int, err error) { - if len(p) == 0 { - return 0, nil - } - return w.Write(p) -} - -const ( - CAPSLOCK_ON = 0x0080 //The CAPS LOCK light is on. - ENHANCED_KEY = 0x0100 //The key is enhanced. - LEFT_ALT_PRESSED = 0x0002 //The left ALT key is pressed. - LEFT_CTRL_PRESSED = 0x0008 //The left CTRL key is pressed. - NUMLOCK_ON = 0x0020 //The NUM LOCK light is on. - RIGHT_ALT_PRESSED = 0x0001 //The right ALT key is pressed. - RIGHT_CTRL_PRESSED = 0x0004 //The right CTRL key is pressed. - SCROLLLOCK_ON = 0x0040 //The SCROLL LOCK light is on. - SHIFT_PRESSED = 0x0010 // The SHIFT key is pressed. 
-) - -const ( - KEY_CONTROL_PARAM_2 = ";2" - KEY_CONTROL_PARAM_3 = ";3" - KEY_CONTROL_PARAM_4 = ";4" - KEY_CONTROL_PARAM_5 = ";5" - KEY_CONTROL_PARAM_6 = ";6" - KEY_CONTROL_PARAM_7 = ";7" - KEY_CONTROL_PARAM_8 = ";8" - KEY_ESC_CSI = "\x1B[" - KEY_ESC_N = "\x1BN" - KEY_ESC_O = "\x1BO" -) - -var keyMapPrefix = map[WORD]string{ - VK_UP: "\x1B[%sA", - VK_DOWN: "\x1B[%sB", - VK_RIGHT: "\x1B[%sC", - VK_LEFT: "\x1B[%sD", - VK_HOME: "\x1B[1%s~", // showkey shows ^[[1 - VK_END: "\x1B[4%s~", // showkey shows ^[[4 - VK_INSERT: "\x1B[2%s~", - VK_DELETE: "\x1B[3%s~", - VK_PRIOR: "\x1B[5%s~", - VK_NEXT: "\x1B[6%s~", - VK_F1: "", - VK_F2: "", - VK_F3: "\x1B[13%s~", - VK_F4: "\x1B[14%s~", - VK_F5: "\x1B[15%s~", - VK_F6: "\x1B[17%s~", - VK_F7: "\x1B[18%s~", - VK_F8: "\x1B[19%s~", - VK_F9: "\x1B[20%s~", - VK_F10: "\x1B[21%s~", - VK_F11: "\x1B[23%s~", - VK_F12: "\x1B[24%s~", -} - -var arrowKeyMapPrefix = map[WORD]string{ - VK_UP: "%s%sA", - VK_DOWN: "%s%sB", - VK_RIGHT: "%s%sC", - VK_LEFT: "%s%sD", -} - -func getControlStateParameter(shift, alt, control, meta bool) string { - if shift && alt && control { - return KEY_CONTROL_PARAM_8 - } - if alt && control { - return KEY_CONTROL_PARAM_7 - } - if shift && control { - return KEY_CONTROL_PARAM_6 - } - if control { - return KEY_CONTROL_PARAM_5 - } - if shift && alt { - return KEY_CONTROL_PARAM_4 - } - if alt { - return KEY_CONTROL_PARAM_3 - } - if shift { - return KEY_CONTROL_PARAM_2 - } - return "" -} - -func getControlKeys(controlState DWORD) (shift, alt, control bool) { - shift = 0 != (controlState & SHIFT_PRESSED) - alt = 0 != (controlState & (LEFT_ALT_PRESSED | RIGHT_ALT_PRESSED)) - control = 0 != (controlState & (LEFT_CTRL_PRESSED | RIGHT_CTRL_PRESSED)) - return shift, alt, control -} - -func charSequenceForKeys(key WORD, controlState DWORD, escapeSequence []byte) string { - i, ok := arrowKeyMapPrefix[key] - if ok { - shift, alt, control := getControlKeys(controlState) - modifier := getControlStateParameter(shift, alt, control, false) - return fmt.Sprintf(i, escapeSequence, modifier) - } - - i, ok = keyMapPrefix[key] - if ok { - shift, alt, control := getControlKeys(controlState) - modifier := getControlStateParameter(shift, alt, control, false) - return fmt.Sprintf(i, modifier) - } - - return "" -} - -// mapKeystokeToTerminalString maps the given input event record to string -func mapKeystokeToTerminalString(keyEvent *KEY_EVENT_RECORD, escapeSequence []byte) string { - _, alt, control := getControlKeys(keyEvent.ControlKeyState) - if keyEvent.UnicodeChar == 0 { - return charSequenceForKeys(keyEvent.VirtualKeyCode, keyEvent.ControlKeyState, escapeSequence) - } - if control { - // TODO(azlinux): Implement following control sequences - // -D Signals the end of input from the keyboard; also exits current shell. - // -H Deletes the first character to the left of the cursor. Also called the ERASE key. - // -Q Restarts printing after it has been stopped with -s. - // -S Suspends printing on the screen (does not stop the program). - // -U Deletes all characters on the current line. Also called the KILL key. - // -E Quits current command and creates a core - - } - // +Key generates ESC N Key - if !control && alt { - return KEY_ESC_N + strings.ToLower(string(keyEvent.UnicodeChar)) - } - return string(keyEvent.UnicodeChar) -} - -// getAvailableInputEvents polls the console for availble events -// The function does not return until at least one input record has been read. 
-func getAvailableInputEvents(handle uintptr, inputEvents []INPUT_RECORD) (n int, err error) { - // TODO(azlinux): Why is there a for loop? Seems to me, that `n` cannot be negative. - tibor - for { - // Read number of console events available - n, err = readConsoleInputKey(handle, inputEvents) - if err != nil || n >= 0 { - return n, err - } - } -} - -// getTranslatedKeyCodes converts the input events into the string of characters -// The ansi escape sequence are used to map key strokes to the strings -func getTranslatedKeyCodes(inputEvents []INPUT_RECORD, escapeSequence []byte) string { - var buf bytes.Buffer - for i := 0; i < len(inputEvents); i++ { - input := inputEvents[i] - if input.EventType == KEY_EVENT && input.KeyEvent.KeyDown != 0 { - keyString := mapKeystokeToTerminalString(&input.KeyEvent, escapeSequence) - buf.WriteString(keyString) - } - } - return buf.String() -} - -// ReadChars reads the characters from the given reader -func (term *WindowsTerminal) ReadChars(fd uintptr, r io.Reader, p []byte) (n int, err error) { - for term.inputSize == 0 { - nr, err := getAvailableInputEvents(fd, term.inputEvents) - if nr == 0 && nil != err { - return n, err - } - if nr > 0 { - keyCodes := getTranslatedKeyCodes(term.inputEvents[:nr], term.inputEscapeSequence) - term.inputSize = copy(term.inputBuffer, keyCodes) - } - } - n = copy(p, term.inputBuffer[:term.inputSize]) - term.inputSize -= n - return n, nil -} - -// HandleInputSequence interprets the input sequence command -func (term *WindowsTerminal) HandleInputSequence(fd uintptr, command []byte) (n int, err error) { - return 0, nil -} - -func marshal(c COORD) uintptr { - // works only on intel-endian machines - return uintptr(uint32(uint32(uint16(c.Y))<<16 | uint32(uint16(c.X)))) -} - -// IsTerminal returns true if the given file descriptor is a terminal. 
-func IsTerminal(fd uintptr) bool { - _, e := GetConsoleMode(fd) - return e == nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/term/winconsole/console_windows_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/winconsole/console_windows_test.go deleted file mode 100644 index ee9d9683..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/term/winconsole/console_windows_test.go +++ /dev/null @@ -1,232 +0,0 @@ -// +build windows - -package winconsole - -import ( - "fmt" - "testing" -) - -func helpsTestParseInt16OrDefault(t *testing.T, expectedValue int16, shouldFail bool, input string, defaultValue int16, format string, args ...string) { - value, err := parseInt16OrDefault(input, defaultValue) - if nil != err && !shouldFail { - t.Errorf("Unexpected error returned %v", err) - t.Errorf(format, args) - } - if nil == err && shouldFail { - t.Errorf("Should have failed as expected\n\tReturned value = %d", value) - t.Errorf(format, args) - } - if expectedValue != value { - t.Errorf("The value returned does not macth expected\n\tExpected:%v\n\t:Actual%v", expectedValue, value) - t.Errorf(format, args) - } -} - -func TestParseInt16OrDefault(t *testing.T) { - // empty string - helpsTestParseInt16OrDefault(t, 0, false, "", 0, "Empty string returns default") - helpsTestParseInt16OrDefault(t, 2, false, "", 2, "Empty string returns default") - - // normal case - helpsTestParseInt16OrDefault(t, 0, false, "0", 0, "0 handled correctly") - helpsTestParseInt16OrDefault(t, 111, false, "111", 2, "Normal") - helpsTestParseInt16OrDefault(t, 111, false, "+111", 2, "+N") - helpsTestParseInt16OrDefault(t, -111, false, "-111", 2, "-N") - helpsTestParseInt16OrDefault(t, 0, false, "+0", 11, "+0") - helpsTestParseInt16OrDefault(t, 0, false, "-0", 12, "-0") - - // ill formed strings - helpsTestParseInt16OrDefault(t, 0, true, "abc", 0, "Invalid string") - helpsTestParseInt16OrDefault(t, 42, true, "+= 23", 42, "Invalid string") - helpsTestParseInt16OrDefault(t, 42, true, "123.45", 42, "float like") - -} - -func helpsTestGetNumberOfChars(t *testing.T, expected uint32, fromCoord COORD, toCoord COORD, screenSize COORD, format string, args ...interface{}) { - actual := getNumberOfChars(fromCoord, toCoord, screenSize) - mesg := fmt.Sprintf(format, args) - assertTrue(t, expected == actual, fmt.Sprintf("%s Expected=%d, Actual=%d, Parameters = { fromCoord=%+v, toCoord=%+v, screenSize=%+v", mesg, expected, actual, fromCoord, toCoord, screenSize)) -} - -func TestGetNumberOfChars(t *testing.T) { - // Note: The columns and lines are 0 based - // Also that interval is "inclusive" means will have both start and end chars - // This test only tests the number opf characters being written - - // all four corners - maxWindow := COORD{X: 80, Y: 50} - leftTop := COORD{X: 0, Y: 0} - rightTop := COORD{X: 79, Y: 0} - leftBottom := COORD{X: 0, Y: 49} - rightBottom := COORD{X: 79, Y: 49} - - // same position - helpsTestGetNumberOfChars(t, 1, COORD{X: 1, Y: 14}, COORD{X: 1, Y: 14}, COORD{X: 80, Y: 50}, "Same position random line") - - // four corners - helpsTestGetNumberOfChars(t, 1, leftTop, leftTop, maxWindow, "Same position- leftTop") - helpsTestGetNumberOfChars(t, 1, rightTop, rightTop, maxWindow, "Same position- rightTop") - helpsTestGetNumberOfChars(t, 1, leftBottom, leftBottom, maxWindow, "Same position- leftBottom") - helpsTestGetNumberOfChars(t, 1, rightBottom, rightBottom, maxWindow, "Same position- rightBottom") - - // from this char to next char on same line - helpsTestGetNumberOfChars(t, 
2, COORD{X: 0, Y: 0}, COORD{X: 1, Y: 0}, maxWindow, "Next position on same line") - helpsTestGetNumberOfChars(t, 2, COORD{X: 1, Y: 14}, COORD{X: 2, Y: 14}, maxWindow, "Next position on same line") - - // from this char to next 10 chars on same line - helpsTestGetNumberOfChars(t, 11, COORD{X: 0, Y: 0}, COORD{X: 10, Y: 0}, maxWindow, "Next position on same line") - helpsTestGetNumberOfChars(t, 11, COORD{X: 1, Y: 14}, COORD{X: 11, Y: 14}, maxWindow, "Next position on same line") - - helpsTestGetNumberOfChars(t, 5, COORD{X: 3, Y: 11}, COORD{X: 7, Y: 11}, maxWindow, "To and from on same line") - - helpsTestGetNumberOfChars(t, 8, COORD{X: 0, Y: 34}, COORD{X: 7, Y: 34}, maxWindow, "Start of line to middle") - helpsTestGetNumberOfChars(t, 4, COORD{X: 76, Y: 34}, COORD{X: 79, Y: 34}, maxWindow, "Middle to end of line") - - // multiple lines - 1 - helpsTestGetNumberOfChars(t, 81, COORD{X: 0, Y: 0}, COORD{X: 0, Y: 1}, maxWindow, "one line below same X") - helpsTestGetNumberOfChars(t, 81, COORD{X: 10, Y: 10}, COORD{X: 10, Y: 11}, maxWindow, "one line below same X") - - // multiple lines - 2 - helpsTestGetNumberOfChars(t, 161, COORD{X: 0, Y: 0}, COORD{X: 0, Y: 2}, maxWindow, "one line below same X") - helpsTestGetNumberOfChars(t, 161, COORD{X: 10, Y: 10}, COORD{X: 10, Y: 12}, maxWindow, "one line below same X") - - // multiple lines - 3 - helpsTestGetNumberOfChars(t, 241, COORD{X: 0, Y: 0}, COORD{X: 0, Y: 3}, maxWindow, "one line below same X") - helpsTestGetNumberOfChars(t, 241, COORD{X: 10, Y: 10}, COORD{X: 10, Y: 13}, maxWindow, "one line below same X") - - // full line - helpsTestGetNumberOfChars(t, 80, COORD{X: 0, Y: 0}, COORD{X: 79, Y: 0}, maxWindow, "Full line - first") - helpsTestGetNumberOfChars(t, 80, COORD{X: 0, Y: 23}, COORD{X: 79, Y: 23}, maxWindow, "Full line - random") - helpsTestGetNumberOfChars(t, 80, COORD{X: 0, Y: 49}, COORD{X: 79, Y: 49}, maxWindow, "Full line - last") - - // full screen - helpsTestGetNumberOfChars(t, 80*50, leftTop, rightBottom, maxWindow, "full screen") - - helpsTestGetNumberOfChars(t, 80*50-1, COORD{X: 1, Y: 0}, rightBottom, maxWindow, "dropping first char to, end of screen") - helpsTestGetNumberOfChars(t, 80*50-2, COORD{X: 2, Y: 0}, rightBottom, maxWindow, "dropping first two char to, end of screen") - - helpsTestGetNumberOfChars(t, 80*50-1, leftTop, COORD{X: 78, Y: 49}, maxWindow, "from start of screen, till last char-1") - helpsTestGetNumberOfChars(t, 80*50-2, leftTop, COORD{X: 77, Y: 49}, maxWindow, "from start of screen, till last char-2") - - helpsTestGetNumberOfChars(t, 80*50-5, COORD{X: 4, Y: 0}, COORD{X: 78, Y: 49}, COORD{X: 80, Y: 50}, "from start of screen+4, till last char-1") - helpsTestGetNumberOfChars(t, 80*50-6, COORD{X: 4, Y: 0}, COORD{X: 77, Y: 49}, COORD{X: 80, Y: 50}, "from start of screen+4, till last char-2") -} - -var allForeground = []int16{ - ANSI_FOREGROUND_BLACK, - ANSI_FOREGROUND_RED, - ANSI_FOREGROUND_GREEN, - ANSI_FOREGROUND_YELLOW, - ANSI_FOREGROUND_BLUE, - ANSI_FOREGROUND_MAGENTA, - ANSI_FOREGROUND_CYAN, - ANSI_FOREGROUND_WHITE, - ANSI_FOREGROUND_DEFAULT, -} -var allBackground = []int16{ - ANSI_BACKGROUND_BLACK, - ANSI_BACKGROUND_RED, - ANSI_BACKGROUND_GREEN, - ANSI_BACKGROUND_YELLOW, - ANSI_BACKGROUND_BLUE, - ANSI_BACKGROUND_MAGENTA, - ANSI_BACKGROUND_CYAN, - ANSI_BACKGROUND_WHITE, - ANSI_BACKGROUND_DEFAULT, -} - -func maskForeground(flag WORD) WORD { - return flag & FOREGROUND_MASK_UNSET -} - -func onlyForeground(flag WORD) WORD { - return flag & FOREGROUND_MASK_SET -} - -func maskBackground(flag WORD) WORD { - return flag & 
BACKGROUND_MASK_UNSET -} - -func onlyBackground(flag WORD) WORD { - return flag & BACKGROUND_MASK_SET -} - -func helpsTestGetWindowsTextAttributeForAnsiValue(t *testing.T, oldValue WORD /*, expected WORD*/, ansi int16, onlyMask WORD, restMask WORD) WORD { - actual, err := getWindowsTextAttributeForAnsiValue(oldValue, FOREGROUND_MASK_SET, ansi) - assertTrue(t, nil == err, "Should be no error") - // assert that other bits are not affected - if 0 != oldValue { - assertTrue(t, (actual&restMask) == (oldValue&restMask), "The operation should not have affected other bits actual=%X oldValue=%X ansi=%d", actual, oldValue, ansi) - } - return actual -} - -func TestBackgroundForAnsiValue(t *testing.T) { - // Check that nothing else changes - // background changes - for _, state1 := range allBackground { - for _, state2 := range allBackground { - flag := WORD(0) - flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state1, BACKGROUND_MASK_SET, BACKGROUND_MASK_UNSET) - flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state2, BACKGROUND_MASK_SET, BACKGROUND_MASK_UNSET) - } - } - // cummulative bcakground changes - for _, state1 := range allBackground { - flag := WORD(0) - for _, state2 := range allBackground { - flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state1, BACKGROUND_MASK_SET, BACKGROUND_MASK_UNSET) - flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state2, BACKGROUND_MASK_SET, BACKGROUND_MASK_UNSET) - } - } - // change background after foreground - for _, state1 := range allForeground { - for _, state2 := range allBackground { - flag := WORD(0) - flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state1, FOREGROUND_MASK_SET, FOREGROUND_MASK_UNSET) - flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state2, BACKGROUND_MASK_SET, BACKGROUND_MASK_UNSET) - } - } - // change background after change cumulative - for _, state1 := range allForeground { - flag := WORD(0) - for _, state2 := range allBackground { - flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state1, FOREGROUND_MASK_SET, FOREGROUND_MASK_UNSET) - flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state2, BACKGROUND_MASK_SET, BACKGROUND_MASK_UNSET) - } - } -} - -func TestForegroundForAnsiValue(t *testing.T) { - // Check that nothing else changes - for _, state1 := range allForeground { - for _, state2 := range allForeground { - flag := WORD(0) - flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state1, FOREGROUND_MASK_SET, FOREGROUND_MASK_UNSET) - flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state2, FOREGROUND_MASK_SET, FOREGROUND_MASK_UNSET) - } - } - - for _, state1 := range allForeground { - flag := WORD(0) - for _, state2 := range allForeground { - flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state1, FOREGROUND_MASK_SET, FOREGROUND_MASK_UNSET) - flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state2, FOREGROUND_MASK_SET, FOREGROUND_MASK_UNSET) - } - } - for _, state1 := range allBackground { - for _, state2 := range allForeground { - flag := WORD(0) - flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state1, BACKGROUND_MASK_SET, BACKGROUND_MASK_UNSET) - flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state2, FOREGROUND_MASK_SET, FOREGROUND_MASK_UNSET) - } - } - for _, state1 := range allBackground { - flag := WORD(0) - for _, state2 := range allForeground { - flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state1, BACKGROUND_MASK_SET, 
BACKGROUND_MASK_UNSET) - flag = helpsTestGetWindowsTextAttributeForAnsiValue(t, flag, state2, FOREGROUND_MASK_SET, FOREGROUND_MASK_UNSET) - } - } -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/term/winconsole/term_emulator.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/winconsole/term_emulator.go deleted file mode 100644 index 8c9f3428..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/term/winconsole/term_emulator.go +++ /dev/null @@ -1,218 +0,0 @@ -package winconsole - -import ( - "io" - "strconv" - "strings" -) - -// http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html -const ( - ANSI_ESCAPE_PRIMARY = 0x1B - ANSI_ESCAPE_SECONDARY = 0x5B - ANSI_COMMAND_FIRST = 0x40 - ANSI_COMMAND_LAST = 0x7E - ANSI_PARAMETER_SEP = ";" - ANSI_CMD_G0 = '(' - ANSI_CMD_G1 = ')' - ANSI_CMD_G2 = '*' - ANSI_CMD_G3 = '+' - ANSI_CMD_DECPNM = '>' - ANSI_CMD_DECPAM = '=' - ANSI_CMD_OSC = ']' - ANSI_CMD_STR_TERM = '\\' - ANSI_BEL = 0x07 - KEY_EVENT = 1 -) - -// Interface that implements terminal handling -type terminalEmulator interface { - HandleOutputCommand(fd uintptr, command []byte) (n int, err error) - HandleInputSequence(fd uintptr, command []byte) (n int, err error) - WriteChars(fd uintptr, w io.Writer, p []byte) (n int, err error) - ReadChars(fd uintptr, w io.Reader, p []byte) (n int, err error) -} - -type terminalWriter struct { - wrappedWriter io.Writer - emulator terminalEmulator - command []byte - inSequence bool - fd uintptr -} - -type terminalReader struct { - wrappedReader io.ReadCloser - emulator terminalEmulator - command []byte - inSequence bool - fd uintptr -} - -// http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html -func isAnsiCommandChar(b byte) bool { - switch { - case ANSI_COMMAND_FIRST <= b && b <= ANSI_COMMAND_LAST && b != ANSI_ESCAPE_SECONDARY: - return true - case b == ANSI_CMD_G1 || b == ANSI_CMD_OSC || b == ANSI_CMD_DECPAM || b == ANSI_CMD_DECPNM: - // non-CSI escape sequence terminator - return true - case b == ANSI_CMD_STR_TERM || b == ANSI_BEL: - // String escape sequence terminator - return true - } - return false -} - -func isCharacterSelectionCmdChar(b byte) bool { - return (b == ANSI_CMD_G0 || b == ANSI_CMD_G1 || b == ANSI_CMD_G2 || b == ANSI_CMD_G3) -} - -func isXtermOscSequence(command []byte, current byte) bool { - return (len(command) >= 2 && command[0] == ANSI_ESCAPE_PRIMARY && command[1] == ANSI_CMD_OSC && current != ANSI_BEL) -} - -// Write writes len(p) bytes from p to the underlying data stream. -// http://golang.org/pkg/io/#Writer -func (tw *terminalWriter) Write(p []byte) (n int, err error) { - if len(p) == 0 { - return 0, nil - } - if tw.emulator == nil { - return tw.wrappedWriter.Write(p) - } - // Emulate terminal by extracting commands and executing them - totalWritten := 0 - start := 0 // indicates start of the next chunk - end := len(p) - for current := 0; current < end; current++ { - if tw.inSequence { - // inside escape sequence - tw.command = append(tw.command, p[current]) - if isAnsiCommandChar(p[current]) { - if !isXtermOscSequence(tw.command, p[current]) { - // found the last command character. - // Now we have a complete command. 
- nchar, err := tw.emulator.HandleOutputCommand(tw.fd, tw.command) - totalWritten += nchar - if err != nil { - return totalWritten, err - } - - // clear the command - // don't include current character again - tw.command = tw.command[:0] - start = current + 1 - tw.inSequence = false - } - } - } else { - if p[current] == ANSI_ESCAPE_PRIMARY { - // entering escape sequnce - tw.inSequence = true - // indicates end of "normal sequence", write whatever you have so far - if len(p[start:current]) > 0 { - nw, err := tw.emulator.WriteChars(tw.fd, tw.wrappedWriter, p[start:current]) - totalWritten += nw - if err != nil { - return totalWritten, err - } - } - // include the current character as part of the next sequence - tw.command = append(tw.command, p[current]) - } - } - } - // note that so far, start of the escape sequence triggers writing out of bytes to console. - // For the part _after_ the end of last escape sequence, it is not written out yet. So write it out - if !tw.inSequence { - // assumption is that we can't be inside sequence and therefore command should be empty - if len(p[start:]) > 0 { - nw, err := tw.emulator.WriteChars(tw.fd, tw.wrappedWriter, p[start:]) - totalWritten += nw - if err != nil { - return totalWritten, err - } - } - } - return totalWritten, nil - -} - -// Read reads up to len(p) bytes into p. -// http://golang.org/pkg/io/#Reader -func (tr *terminalReader) Read(p []byte) (n int, err error) { - //Implementations of Read are discouraged from returning a zero byte count - // with a nil error, except when len(p) == 0. - if len(p) == 0 { - return 0, nil - } - if nil == tr.emulator { - return tr.readFromWrappedReader(p) - } - return tr.emulator.ReadChars(tr.fd, tr.wrappedReader, p) -} - -// Close the underlying stream -func (tr *terminalReader) Close() (err error) { - return tr.wrappedReader.Close() -} - -func (tr *terminalReader) readFromWrappedReader(p []byte) (n int, err error) { - return tr.wrappedReader.Read(p) -} - -type ansiCommand struct { - CommandBytes []byte - Command string - Parameters []string - IsSpecial bool -} - -func parseAnsiCommand(command []byte) *ansiCommand { - if isCharacterSelectionCmdChar(command[1]) { - // Is Character Set Selection commands - return &ansiCommand{ - CommandBytes: command, - Command: string(command), - IsSpecial: true, - } - } - // last char is command character - lastCharIndex := len(command) - 1 - - retValue := &ansiCommand{ - CommandBytes: command, - Command: string(command[lastCharIndex]), - IsSpecial: false, - } - // more than a single escape - if lastCharIndex != 0 { - start := 1 - // skip if double char escape sequence - if command[0] == ANSI_ESCAPE_PRIMARY && command[1] == ANSI_ESCAPE_SECONDARY { - start++ - } - // convert this to GetNextParam method - retValue.Parameters = strings.Split(string(command[start:lastCharIndex]), ANSI_PARAMETER_SEP) - } - return retValue -} - -func (c *ansiCommand) getParam(index int) string { - if len(c.Parameters) > index { - return c.Parameters[index] - } - return "" -} - -func parseInt16OrDefault(s string, defaultValue int16) (n int16, err error) { - if s == "" { - return defaultValue, nil - } - parsedValue, err := strconv.ParseInt(s, 10, 16) - if err != nil { - return defaultValue, err - } - return int16(parsedValue), nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/term/winconsole/term_emulator_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/term/winconsole/term_emulator_test.go deleted file mode 100644 index 65de5a79..00000000 --- 
a/Godeps/_workspace/src/github.com/docker/docker/pkg/term/winconsole/term_emulator_test.go +++ /dev/null @@ -1,388 +0,0 @@ -package winconsole - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "testing" -) - -const ( - WRITE_OPERATION = iota - COMMAND_OPERATION = iota -) - -var languages = []string{ - "Български", - "Català", - "Čeština", - "Ελληνικά", - "Español", - "Esperanto", - "Euskara", - "Français", - "Galego", - "한국어", - "ქართული", - "Latviešu", - "Lietuvių", - "Magyar", - "Nederlands", - "日本語", - "Norsk bokmål", - "Norsk nynorsk", - "Polski", - "Português", - "Română", - "Русский", - "Slovenčina", - "Slovenščina", - "Српски", - "српскохрватски", - "Suomi", - "Svenska", - "ไทย", - "Tiếng Việt", - "Türkçe", - "Українська", - "中文", -} - -// Mock terminal handler object -type mockTerminal struct { - OutputCommandSequence []terminalOperation -} - -// Used for recording the callback data -type terminalOperation struct { - Operation int - Data []byte - Str string -} - -func (mt *mockTerminal) record(operation int, data []byte) { - op := terminalOperation{ - Operation: operation, - Data: make([]byte, len(data)), - } - copy(op.Data, data) - op.Str = string(op.Data) - mt.OutputCommandSequence = append(mt.OutputCommandSequence, op) -} - -func (mt *mockTerminal) HandleOutputCommand(fd uintptr, command []byte) (n int, err error) { - mt.record(COMMAND_OPERATION, command) - return len(command), nil -} - -func (mt *mockTerminal) HandleInputSequence(fd uintptr, command []byte) (n int, err error) { - return 0, nil -} - -func (mt *mockTerminal) WriteChars(fd uintptr, w io.Writer, p []byte) (n int, err error) { - mt.record(WRITE_OPERATION, p) - return len(p), nil -} - -func (mt *mockTerminal) ReadChars(fd uintptr, w io.Reader, p []byte) (n int, err error) { - return len(p), nil -} - -func assertTrue(t *testing.T, cond bool, format string, args ...interface{}) { - if !cond { - t.Errorf(format, args...) - } -} - -// reflect.DeepEqual does not provide detailed information as to what excatly failed. 
-func assertBytesEqual(t *testing.T, expected, actual []byte, format string, args ...interface{}) { - match := true - mismatchIndex := 0 - if len(expected) == len(actual) { - for i := 0; i < len(expected); i++ { - if expected[i] != actual[i] { - match = false - mismatchIndex = i - break - } - } - } else { - match = false - t.Errorf("Lengths don't match Expected=%d Actual=%d", len(expected), len(actual)) - } - if !match { - t.Errorf("Mismatch at index %d ", mismatchIndex) - t.Errorf("\tActual String = %s", string(actual)) - t.Errorf("\tExpected String = %s", string(expected)) - t.Errorf("\tActual = %v", actual) - t.Errorf("\tExpected = %v", expected) - t.Errorf(format, args) - } -} - -// Just to make sure :) -func TestAssertEqualBytes(t *testing.T) { - data := []byte{9, 9, 1, 1, 1, 9, 9} - assertBytesEqual(t, data, data, "Self") - assertBytesEqual(t, data[1:4], data[1:4], "Self") - assertBytesEqual(t, []byte{1, 1}, []byte{1, 1}, "Simple match") - assertBytesEqual(t, []byte{1, 2, 3}, []byte{1, 2, 3}, "content mismatch") - assertBytesEqual(t, []byte{1, 1, 1}, data[2:5], "slice match") -} - -/* -func TestAssertEqualBytesNegative(t *testing.T) { - AssertBytesEqual(t, []byte{1, 1}, []byte{1}, "Length mismatch") - AssertBytesEqual(t, []byte{1, 1}, []byte{1}, "Length mismatch") - AssertBytesEqual(t, []byte{1, 2, 3}, []byte{1, 1, 1}, "content mismatch") -}*/ - -// Checks that the calls recieved -func assertHandlerOutput(t *testing.T, mock *mockTerminal, plainText string, commands ...string) { - text := make([]byte, 0, 3*len(plainText)) - cmdIndex := 0 - for opIndex := 0; opIndex < len(mock.OutputCommandSequence); opIndex++ { - op := mock.OutputCommandSequence[opIndex] - if op.Operation == WRITE_OPERATION { - t.Logf("\nThe data is[%d] == %s", opIndex, string(op.Data)) - text = append(text[:], op.Data...) 
- } else { - assertTrue(t, mock.OutputCommandSequence[opIndex].Operation == COMMAND_OPERATION, "Operation should be command : %s", fmt.Sprintf("%+v", mock)) - assertBytesEqual(t, StringToBytes(commands[cmdIndex]), mock.OutputCommandSequence[opIndex].Data, "Command data should match") - cmdIndex++ - } - } - assertBytesEqual(t, StringToBytes(plainText), text, "Command data should match %#v", mock) -} - -func StringToBytes(str string) []byte { - bytes := make([]byte, len(str)) - copy(bytes[:], str) - return bytes -} - -func TestParseAnsiCommand(t *testing.T) { - // Note: if the parameter does not exist then the empty value is returned - - c := parseAnsiCommand(StringToBytes("\x1Bm")) - assertTrue(t, c.Command == "m", "Command should be m") - assertTrue(t, "" == c.getParam(0), "should return empty string") - assertTrue(t, "" == c.getParam(1), "should return empty string") - - // Escape sequence - ESC[ - c = parseAnsiCommand(StringToBytes("\x1B[m")) - assertTrue(t, c.Command == "m", "Command should be m") - assertTrue(t, "" == c.getParam(0), "should return empty string") - assertTrue(t, "" == c.getParam(1), "should return empty string") - - // Escape sequence With empty parameters- ESC[ - c = parseAnsiCommand(StringToBytes("\x1B[;m")) - assertTrue(t, c.Command == "m", "Command should be m") - assertTrue(t, "" == c.getParam(0), "should return empty string") - assertTrue(t, "" == c.getParam(1), "should return empty string") - assertTrue(t, "" == c.getParam(2), "should return empty string") - - // Escape sequence With empty muliple parameters- ESC[ - c = parseAnsiCommand(StringToBytes("\x1B[;;m")) - assertTrue(t, c.Command == "m", "Command should be m") - assertTrue(t, "" == c.getParam(0), "") - assertTrue(t, "" == c.getParam(1), "") - assertTrue(t, "" == c.getParam(2), "") - - // Escape sequence With muliple parameters- ESC[ - c = parseAnsiCommand(StringToBytes("\x1B[1;2;3m")) - assertTrue(t, c.Command == "m", "Command should be m") - assertTrue(t, "1" == c.getParam(0), "") - assertTrue(t, "2" == c.getParam(1), "") - assertTrue(t, "3" == c.getParam(2), "") - - // Escape sequence With muliple parameters- some missing - c = parseAnsiCommand(StringToBytes("\x1B[1;;3;;;6m")) - assertTrue(t, c.Command == "m", "Command should be m") - assertTrue(t, "1" == c.getParam(0), "") - assertTrue(t, "" == c.getParam(1), "") - assertTrue(t, "3" == c.getParam(2), "") - assertTrue(t, "" == c.getParam(3), "") - assertTrue(t, "" == c.getParam(4), "") - assertTrue(t, "6" == c.getParam(5), "") -} - -func newBufferedMockTerm() (stdOut io.Writer, stdErr io.Writer, stdIn io.ReadCloser, mock *mockTerminal) { - var input bytes.Buffer - var output bytes.Buffer - var err bytes.Buffer - - mock = &mockTerminal{ - OutputCommandSequence: make([]terminalOperation, 0, 256), - } - - stdOut = &terminalWriter{ - wrappedWriter: &output, - emulator: mock, - command: make([]byte, 0, 256), - } - stdErr = &terminalWriter{ - wrappedWriter: &err, - emulator: mock, - command: make([]byte, 0, 256), - } - stdIn = &terminalReader{ - wrappedReader: ioutil.NopCloser(&input), - emulator: mock, - command: make([]byte, 0, 256), - } - - return -} - -func TestOutputSimple(t *testing.T) { - stdOut, _, _, mock := newBufferedMockTerm() - - stdOut.Write(StringToBytes("Hello world")) - stdOut.Write(StringToBytes("\x1BmHello again")) - - assertTrue(t, mock.OutputCommandSequence[0].Operation == WRITE_OPERATION, "Operation should be Write : %#v", mock) - assertBytesEqual(t, StringToBytes("Hello world"), mock.OutputCommandSequence[0].Data, "Write data should 
match") - - assertTrue(t, mock.OutputCommandSequence[1].Operation == COMMAND_OPERATION, "Operation should be command : %+v", mock) - assertBytesEqual(t, StringToBytes("\x1Bm"), mock.OutputCommandSequence[1].Data, "Command data should match") - - assertTrue(t, mock.OutputCommandSequence[2].Operation == WRITE_OPERATION, "Operation should be Write : %#v", mock) - assertBytesEqual(t, StringToBytes("Hello again"), mock.OutputCommandSequence[2].Data, "Write data should match") -} - -func TestOutputSplitCommand(t *testing.T) { - stdOut, _, _, mock := newBufferedMockTerm() - - stdOut.Write(StringToBytes("Hello world\x1B[1;2;3")) - stdOut.Write(StringToBytes("mHello again")) - - assertTrue(t, mock.OutputCommandSequence[0].Operation == WRITE_OPERATION, "Operation should be Write : %#v", mock) - assertBytesEqual(t, StringToBytes("Hello world"), mock.OutputCommandSequence[0].Data, "Write data should match") - - assertTrue(t, mock.OutputCommandSequence[1].Operation == COMMAND_OPERATION, "Operation should be command : %+v", mock) - assertBytesEqual(t, StringToBytes("\x1B[1;2;3m"), mock.OutputCommandSequence[1].Data, "Command data should match") - - assertTrue(t, mock.OutputCommandSequence[2].Operation == WRITE_OPERATION, "Operation should be Write : %#v", mock) - assertBytesEqual(t, StringToBytes("Hello again"), mock.OutputCommandSequence[2].Data, "Write data should match") -} - -func TestOutputMultipleCommands(t *testing.T) { - stdOut, _, _, mock := newBufferedMockTerm() - - stdOut.Write(StringToBytes("Hello world")) - stdOut.Write(StringToBytes("\x1B[1;2;3m")) - stdOut.Write(StringToBytes("\x1B[J")) - stdOut.Write(StringToBytes("Hello again")) - - assertTrue(t, mock.OutputCommandSequence[0].Operation == WRITE_OPERATION, "Operation should be Write : %#v", mock) - assertBytesEqual(t, StringToBytes("Hello world"), mock.OutputCommandSequence[0].Data, "Write data should match") - - assertTrue(t, mock.OutputCommandSequence[1].Operation == COMMAND_OPERATION, "Operation should be command : %+v", mock) - assertBytesEqual(t, StringToBytes("\x1B[1;2;3m"), mock.OutputCommandSequence[1].Data, "Command data should match") - - assertTrue(t, mock.OutputCommandSequence[2].Operation == COMMAND_OPERATION, "Operation should be command : %+v", mock) - assertBytesEqual(t, StringToBytes("\x1B[J"), mock.OutputCommandSequence[2].Data, "Command data should match") - - assertTrue(t, mock.OutputCommandSequence[3].Operation == WRITE_OPERATION, "Operation should be Write : %#v", mock) - assertBytesEqual(t, StringToBytes("Hello again"), mock.OutputCommandSequence[3].Data, "Write data should match") -} - -// Splits the given data in two chunks , makes two writes and checks the split data is parsed correctly -// checks output write/command is passed to handler correctly -func helpsTestOutputSplitChunksAtIndex(t *testing.T, i int, data []byte) { - t.Logf("\ni=%d", i) - stdOut, _, _, mock := newBufferedMockTerm() - - t.Logf("\nWriting chunk[0] == %s", string(data[:i])) - t.Logf("\nWriting chunk[1] == %s", string(data[i:])) - stdOut.Write(data[:i]) - stdOut.Write(data[i:]) - - assertTrue(t, mock.OutputCommandSequence[0].Operation == WRITE_OPERATION, "Operation should be Write : %#v", mock) - assertBytesEqual(t, data[:i], mock.OutputCommandSequence[0].Data, "Write data should match") - - assertTrue(t, mock.OutputCommandSequence[1].Operation == WRITE_OPERATION, "Operation should be Write : %#v", mock) - assertBytesEqual(t, data[i:], mock.OutputCommandSequence[1].Data, "Write data should match") -} - -// Splits the given data in three 
chunks , makes three writes and checks the split data is parsed correctly -// checks output write/command is passed to handler correctly -func helpsTestOutputSplitThreeChunksAtIndex(t *testing.T, data []byte, i int, j int) { - stdOut, _, _, mock := newBufferedMockTerm() - - t.Logf("\nWriting chunk[0] == %s", string(data[:i])) - t.Logf("\nWriting chunk[1] == %s", string(data[i:j])) - t.Logf("\nWriting chunk[2] == %s", string(data[j:])) - stdOut.Write(data[:i]) - stdOut.Write(data[i:j]) - stdOut.Write(data[j:]) - - assertTrue(t, mock.OutputCommandSequence[0].Operation == WRITE_OPERATION, "Operation should be Write : %#v", mock) - assertBytesEqual(t, data[:i], mock.OutputCommandSequence[0].Data, "Write data should match") - - assertTrue(t, mock.OutputCommandSequence[1].Operation == WRITE_OPERATION, "Operation should be Write : %#v", mock) - assertBytesEqual(t, data[i:j], mock.OutputCommandSequence[1].Data, "Write data should match") - - assertTrue(t, mock.OutputCommandSequence[2].Operation == WRITE_OPERATION, "Operation should be Write : %#v", mock) - assertBytesEqual(t, data[j:], mock.OutputCommandSequence[2].Data, "Write data should match") -} - -// Splits the output into two parts and tests all such possible pairs -func helpsTestOutputSplitChunks(t *testing.T, data []byte) { - for i := 1; i < len(data)-1; i++ { - helpsTestOutputSplitChunksAtIndex(t, i, data) - } -} - -// Splits the output in three parts and tests all such possible triples -func helpsTestOutputSplitThreeChunks(t *testing.T, data []byte) { - for i := 1; i < len(data)-2; i++ { - for j := i + 1; j < len(data)-1; j++ { - helpsTestOutputSplitThreeChunksAtIndex(t, data, i, j) - } - } -} - -func helpsTestOutputSplitCommandsAtIndex(t *testing.T, data []byte, i int, plainText string, commands ...string) { - t.Logf("\ni=%d", i) - stdOut, _, _, mock := newBufferedMockTerm() - - stdOut.Write(data[:i]) - stdOut.Write(data[i:]) - assertHandlerOutput(t, mock, plainText, commands...) -} - -func helpsTestOutputSplitCommands(t *testing.T, data []byte, plainText string, commands ...string) { - for i := 1; i < len(data)-1; i++ { - helpsTestOutputSplitCommandsAtIndex(t, data, i, plainText, commands...) - } -} - -func injectCommandAt(data string, i int, command string) string { - retValue := make([]byte, len(data)+len(command)+4) - retValue = append(retValue, data[:i]...) - retValue = append(retValue, data[i:]...) 
- return string(retValue) -} - -func TestOutputSplitChunks(t *testing.T) { - data := StringToBytes("qwertyuiopasdfghjklzxcvbnm") - helpsTestOutputSplitChunks(t, data) - helpsTestOutputSplitChunks(t, StringToBytes("BBBBB")) - helpsTestOutputSplitThreeChunks(t, StringToBytes("ABCDE")) -} - -func TestOutputSplitChunksIncludingCommands(t *testing.T) { - helpsTestOutputSplitCommands(t, StringToBytes("Hello world.\x1B[mHello again."), "Hello world.Hello again.", "\x1B[m") - helpsTestOutputSplitCommandsAtIndex(t, StringToBytes("Hello world.\x1B[mHello again."), 2, "Hello world.Hello again.", "\x1B[m") -} - -func TestSplitChunkUnicode(t *testing.T) { - for _, l := range languages { - data := StringToBytes(l) - helpsTestOutputSplitChunks(t, data) - helpsTestOutputSplitThreeChunks(t, data) - } -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/timeutils/json.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/timeutils/json.go deleted file mode 100644 index 8043d69d..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/timeutils/json.go +++ /dev/null @@ -1,26 +0,0 @@ -package timeutils - -import ( - "errors" - "time" -) - -const ( - // RFC3339NanoFixed is our own version of RFC339Nano because we want one - // that pads the nano seconds part with zeros to ensure - // the timestamps are aligned in the logs. - RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00" - // JSONFormat is the format used by FastMarshalJSON - JSONFormat = `"` + time.RFC3339Nano + `"` -) - -// FastMarshalJSON avoids one of the extra allocations that -// time.MarshalJSON is making. -func FastMarshalJSON(t time.Time) (string, error) { - if y := t.Year(); y < 0 || y >= 10000 { - // RFC 3339 is clear that years are 4 digits exactly. - // See golang.org/issue/4556#c15 for more discussion. 
- return "", errors.New("time.MarshalJSON: year outside of range [0,9999]") - } - return t.Format(JSONFormat), nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/ulimit/ulimit.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/ulimit/ulimit.go index 2375315e..eb2ae4e8 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/ulimit/ulimit.go +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/ulimit/ulimit.go @@ -102,5 +102,5 @@ func (u *Ulimit) GetRlimit() (*Rlimit, error) { } func (u *Ulimit) String() string { - return fmt.Sprintf("%s=%s:%s", u.Name, u.Soft, u.Hard) + return fmt.Sprintf("%s=%d:%d", u.Name, u.Soft, u.Hard) } diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/ulimit/ulimit_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/ulimit/ulimit_test.go index 419b5e04..1e8c881f 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/ulimit/ulimit_test.go +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/ulimit/ulimit_test.go @@ -2,6 +2,13 @@ package ulimit import "testing" +func TestParseValid(t *testing.T) { + u1 := &Ulimit{"nofile", 1024, 512} + if u2, _ := Parse("nofile=512:1024"); *u1 != *u2 { + t.Fatalf("expected %q, but got %q", u1, u2) + } +} + func TestParseInvalidLimitType(t *testing.T) { if _, err := Parse("notarealtype=1024:1024"); err == nil { t.Fatalf("expected error on invalid ulimit type") @@ -39,3 +46,10 @@ func TestParseInvalidValueType(t *testing.T) { t.Fatal("expected error on bad value type") } } + +func TestStringOutput(t *testing.T) { + u := &Ulimit{"nofile", 1024, 512} + if s := u.String(); s != "nofile=512:1024" { + t.Fatal("expected String to return nofile=512:1024, but got", s) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/units/size.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/units/size.go index 7cfb57ba..d7850ad0 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/units/size.go +++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/units/size.go @@ -37,23 +37,25 @@ var ( var decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} var binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"} +// CustomSize returns a human-readable approximation of a size +// using custom format +func CustomSize(format string, size float64, base float64, _map []string) string { + i := 0 + for size >= base { + size = size / base + i++ + } + return fmt.Sprintf(format, size, _map[i]) +} + // HumanSize returns a human-readable approximation of a size // using SI standard (eg. 
"44kB", "17MB") func HumanSize(size float64) string { - return intToString(float64(size), 1000.0, decimapAbbrs) + return CustomSize("%.4g %s", float64(size), 1000.0, decimapAbbrs) } func BytesSize(size float64) string { - return intToString(size, 1024.0, binaryAbbrs) -} - -func intToString(size, unit float64, _map []string) string { - i := 0 - for size >= unit { - size = size / unit - i++ - } - return fmt.Sprintf("%.4g %s", size, _map[i]) + return CustomSize("%.4g %s", size, 1024.0, binaryAbbrs) } // FromHumanSize returns an integer from a human-readable specification of a diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/version/version.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/version/version.go deleted file mode 100644 index cc802a65..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/version/version.go +++ /dev/null @@ -1,63 +0,0 @@ -package version - -import ( - "strconv" - "strings" -) - -// Version provides utility methods for comparing versions. -type Version string - -func (v Version) compareTo(other Version) int { - var ( - currTab = strings.Split(string(v), ".") - otherTab = strings.Split(string(other), ".") - ) - - max := len(currTab) - if len(otherTab) > max { - max = len(otherTab) - } - for i := 0; i < max; i++ { - var currInt, otherInt int - - if len(currTab) > i { - currInt, _ = strconv.Atoi(currTab[i]) - } - if len(otherTab) > i { - otherInt, _ = strconv.Atoi(otherTab[i]) - } - if currInt > otherInt { - return 1 - } - if otherInt > currInt { - return -1 - } - } - return 0 -} - -// LessThan checks if a version is less than another version -func (v Version) LessThan(other Version) bool { - return v.compareTo(other) == -1 -} - -// LessThanOrEqualTo checks if a version is less than or equal to another -func (v Version) LessThanOrEqualTo(other Version) bool { - return v.compareTo(other) <= 0 -} - -// GreaterThan checks if a version is greater than another one -func (v Version) GreaterThan(other Version) bool { - return v.compareTo(other) == 1 -} - -// GreaterThanOrEqualTo checks ia version is greater than or equal to another -func (v Version) GreaterThanOrEqualTo(other Version) bool { - return v.compareTo(other) >= 0 -} - -// Equal checks if a version is equal to another -func (v Version) Equal(other Version) bool { - return v.compareTo(other) == 0 -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/version/version_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/version/version_test.go deleted file mode 100644 index c02ec40f..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/pkg/version/version_test.go +++ /dev/null @@ -1,27 +0,0 @@ -package version - -import ( - "testing" -) - -func assertVersion(t *testing.T, a, b string, result int) { - if r := Version(a).compareTo(Version(b)); r != result { - t.Fatalf("Unexpected version comparison result. 
Found %d, expected %d", r, result) - } -} - -func TestCompareVersion(t *testing.T) { - assertVersion(t, "1.12", "1.12", 0) - assertVersion(t, "1.0.0", "1", 0) - assertVersion(t, "1", "1.0.0", 0) - assertVersion(t, "1.05.00.0156", "1.0.221.9289", 1) - assertVersion(t, "1", "1.0.1", -1) - assertVersion(t, "1.0.1", "1", 1) - assertVersion(t, "1.0.1", "1.0.2", -1) - assertVersion(t, "1.0.2", "1.0.3", -1) - assertVersion(t, "1.0.3", "1.1", -1) - assertVersion(t, "1.1", "1.1.1", -1) - assertVersion(t, "1.1.1", "1.1.2", -1) - assertVersion(t, "1.1.2", "1.2", -1) - -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/runconfig/compare.go b/Godeps/_workspace/src/github.com/docker/docker/runconfig/compare.go index 60a21a79..1d969e9b 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/runconfig/compare.go +++ b/Godeps/_workspace/src/github.com/docker/docker/runconfig/compare.go @@ -10,25 +10,25 @@ func Compare(a, b *Config) bool { if a.AttachStdout != b.AttachStdout || a.AttachStderr != b.AttachStderr || a.User != b.User || - a.Memory != b.Memory || - a.MemorySwap != b.MemorySwap || - a.CpuShares != b.CpuShares || a.OpenStdin != b.OpenStdin || a.Tty != b.Tty { return false } - if len(a.Cmd) != len(b.Cmd) || + + if a.Cmd.Len() != b.Cmd.Len() || len(a.Env) != len(b.Env) || len(a.Labels) != len(b.Labels) || len(a.PortSpecs) != len(b.PortSpecs) || len(a.ExposedPorts) != len(b.ExposedPorts) || - len(a.Entrypoint) != len(b.Entrypoint) || + a.Entrypoint.Len() != b.Entrypoint.Len() || len(a.Volumes) != len(b.Volumes) { return false } - for i := 0; i < len(a.Cmd); i++ { - if a.Cmd[i] != b.Cmd[i] { + aCmd := a.Cmd.Slice() + bCmd := b.Cmd.Slice() + for i := 0; i < len(aCmd); i++ { + if aCmd[i] != bCmd[i] { return false } } @@ -52,8 +52,11 @@ func Compare(a, b *Config) bool { return false } } - for i := 0; i < len(a.Entrypoint); i++ { - if a.Entrypoint[i] != b.Entrypoint[i] { + + aEntrypoint := a.Entrypoint.Slice() + bEntrypoint := b.Entrypoint.Slice() + for i := 0; i < len(aEntrypoint); i++ { + if aEntrypoint[i] != bEntrypoint[i] { return false } } diff --git a/Godeps/_workspace/src/github.com/docker/docker/runconfig/config.go b/Godeps/_workspace/src/github.com/docker/docker/runconfig/config.go index 3e32a1e3..844958be 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/runconfig/config.go +++ b/Godeps/_workspace/src/github.com/docker/docker/runconfig/config.go @@ -1,10 +1,103 @@ package runconfig import ( - "github.com/docker/docker/engine" + "encoding/json" + "io" + "github.com/docker/docker/nat" ) +// Entrypoint encapsulates the container entrypoint. +// It might be represented as a string or an array of strings. +// We need to override the json decoder to accept both options. +// The JSON decoder will fail if the api sends an string and +// we try to decode it into an array of string. +type Entrypoint struct { + parts []string +} + +func (e *Entrypoint) MarshalJSON() ([]byte, error) { + if e == nil { + return []byte{}, nil + } + return json.Marshal(e.Slice()) +} + +// UnmarshalJSON decoded the entrypoint whether it's a string or an array of strings. 
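The Entrypoint type above (and the Command type that follows it) exists so the API can accept either a plain JSON string or an array of strings for these fields. A minimal sketch of that behaviour, assuming the vendored package is importable as github.com/docker/docker/runconfig and using only the Len and Slice helpers from this patch:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/docker/runconfig"
)

func main() {
	var asArray, asString runconfig.Entrypoint

	// The array form decodes element by element.
	if err := json.Unmarshal([]byte(`["bash", "-c"]`), &asArray); err != nil {
		panic(err)
	}
	fmt.Println(asArray.Len(), asArray.Slice()) // 2 [bash -c]

	// A bare JSON string is accepted too and yields a single part,
	// which is what TestDecodeContainerConfig below relies on.
	if err := json.Unmarshal([]byte(`"bash"`), &asString); err != nil {
		panic(err)
	}
	fmt.Println(asString.Len()) // 1
}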
+func (e *Entrypoint) UnmarshalJSON(b []byte) error { + if len(b) == 0 { + return nil + } + + p := make([]string, 0, 1) + if err := json.Unmarshal(b, &p); err != nil { + p = append(p, string(b)) + } + e.parts = p + return nil +} + +func (e *Entrypoint) Len() int { + if e == nil { + return 0 + } + return len(e.parts) +} + +func (e *Entrypoint) Slice() []string { + if e == nil { + return nil + } + return e.parts +} + +func NewEntrypoint(parts ...string) *Entrypoint { + return &Entrypoint{parts} +} + +type Command struct { + parts []string +} + +func (e *Command) MarshalJSON() ([]byte, error) { + if e == nil { + return []byte{}, nil + } + return json.Marshal(e.Slice()) +} + +// UnmarshalJSON decoded the entrypoint whether it's a string or an array of strings. +func (e *Command) UnmarshalJSON(b []byte) error { + if len(b) == 0 { + return nil + } + + p := make([]string, 0, 1) + if err := json.Unmarshal(b, &p); err != nil { + p = append(p, string(b)) + } + e.parts = p + return nil +} + +func (e *Command) Len() int { + if e == nil { + return 0 + } + return len(e.parts) +} + +func (e *Command) Slice() []string { + if e == nil { + return nil + } + return e.parts +} + +func NewCommand(parts ...string) *Command { + return &Command{parts} +} + // Note: the Config structure should hold only portable information about the container. // Here, "portable" means "independent from the host we are running on". // Non-portable information *should* appear in HostConfig. @@ -12,10 +105,6 @@ type Config struct { Hostname string Domainname string User string - Memory int64 // FIXME: we keep it for backward compatibility, it has been moved to hostConfig. - MemorySwap int64 // FIXME: it has been moved to hostConfig. - CpuShares int64 // FIXME: it has been moved to hostConfig. - Cpuset string // FIXME: it has been moved to hostConfig and renamed to CpusetCpus. AttachStdin bool AttachStdout bool AttachStderr bool @@ -25,54 +114,37 @@ type Config struct { OpenStdin bool // Open stdin StdinOnce bool // If true, close stdin after the 1 attached client disconnects. Env []string - Cmd []string + Cmd *Command Image string // Name of the image as it was passed by the operator (eg. 
could be symbolic) Volumes map[string]struct{} WorkingDir string - Entrypoint []string + Entrypoint *Entrypoint NetworkDisabled bool MacAddress string OnBuild []string - SecurityOpt []string Labels map[string]string } -func ContainerConfigFromJob(job *engine.Job) *Config { - config := &Config{ - Hostname: job.Getenv("Hostname"), - Domainname: job.Getenv("Domainname"), - User: job.Getenv("User"), - Memory: job.GetenvInt64("Memory"), - MemorySwap: job.GetenvInt64("MemorySwap"), - CpuShares: job.GetenvInt64("CpuShares"), - Cpuset: job.Getenv("Cpuset"), - AttachStdin: job.GetenvBool("AttachStdin"), - AttachStdout: job.GetenvBool("AttachStdout"), - AttachStderr: job.GetenvBool("AttachStderr"), - Tty: job.GetenvBool("Tty"), - OpenStdin: job.GetenvBool("OpenStdin"), - StdinOnce: job.GetenvBool("StdinOnce"), - Image: job.Getenv("Image"), - WorkingDir: job.Getenv("WorkingDir"), - NetworkDisabled: job.GetenvBool("NetworkDisabled"), - MacAddress: job.Getenv("MacAddress"), - } - job.GetenvJson("ExposedPorts", &config.ExposedPorts) - job.GetenvJson("Volumes", &config.Volumes) - if PortSpecs := job.GetenvList("PortSpecs"); PortSpecs != nil { - config.PortSpecs = PortSpecs - } - if Env := job.GetenvList("Env"); Env != nil { - config.Env = Env - } - if Cmd := job.GetenvList("Cmd"); Cmd != nil { - config.Cmd = Cmd - } - - job.GetenvJson("Labels", &config.Labels) - - if Entrypoint := job.GetenvList("Entrypoint"); Entrypoint != nil { - config.Entrypoint = Entrypoint - } - return config +type ContainerConfigWrapper struct { + *Config + *hostConfigWrapper +} + +func (c ContainerConfigWrapper) HostConfig() *HostConfig { + if c.hostConfigWrapper == nil { + return new(HostConfig) + } + + return c.hostConfigWrapper.GetHostConfig() +} + +func DecodeContainerConfig(src io.Reader) (*Config, *HostConfig, error) { + decoder := json.NewDecoder(src) + + var w ContainerConfigWrapper + if err := decoder.Decode(&w); err != nil { + return nil, nil, err + } + + return w.Config, w.HostConfig(), nil } diff --git a/Godeps/_workspace/src/github.com/docker/docker/runconfig/config_test.go b/Godeps/_workspace/src/github.com/docker/docker/runconfig/config_test.go index accbd910..87fc6c6a 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/runconfig/config_test.go +++ b/Godeps/_workspace/src/github.com/docker/docker/runconfig/config_test.go @@ -1,7 +1,9 @@ package runconfig import ( + "bytes" "fmt" + "io/ioutil" "strings" "testing" @@ -102,7 +104,7 @@ func TestParseRunVolumes(t *testing.T) { if config, hostConfig := mustParse(t, "-v /tmp -v /var"); hostConfig.Binds != nil { t.Fatalf("Error parsing volume flags, `-v /tmp -v /var` should not mount-bind anything. Received %v", hostConfig.Binds) } else if _, exists := config.Volumes["/tmp"]; !exists { - t.Fatalf("Error parsing volume flags, `-v /tmp` is missing from volumes. Recevied %v", config.Volumes) + t.Fatalf("Error parsing volume flags, `-v /tmp` is missing from volumes. Received %v", config.Volumes) } else if _, exists := config.Volumes["/var"]; !exists { t.Fatalf("Error parsing volume flags, `-v /var` is missing from volumes. 
Received %v", config.Volumes) } @@ -260,5 +262,39 @@ func TestMerge(t *testing.T) { t.Fatalf("Expected %q or %q or %q or %q, found %s", 0, 1111, 2222, 3333, portSpecs) } } - +} + +func TestDecodeContainerConfig(t *testing.T) { + fixtures := []struct { + file string + entrypoint *Entrypoint + }{ + {"fixtures/container_config_1_14.json", NewEntrypoint()}, + {"fixtures/container_config_1_17.json", NewEntrypoint("bash")}, + {"fixtures/container_config_1_19.json", NewEntrypoint("bash")}, + } + + for _, f := range fixtures { + b, err := ioutil.ReadFile(f.file) + if err != nil { + t.Fatal(err) + } + + c, h, err := DecodeContainerConfig(bytes.NewReader(b)) + if err != nil { + t.Fatal(fmt.Errorf("Error parsing %s: %v", f, err)) + } + + if c.Image != "ubuntu" { + t.Fatalf("Expected ubuntu image, found %s\n", c.Image) + } + + if c.Entrypoint.Len() != f.entrypoint.Len() { + t.Fatalf("Expected %v, found %v\n", f.entrypoint, c.Entrypoint) + } + + if h.Memory != 1000 { + t.Fatalf("Expected memory to be 1000, found %d\n", h.Memory) + } + } } diff --git a/Godeps/_workspace/src/github.com/docker/docker/runconfig/exec.go b/Godeps/_workspace/src/github.com/docker/docker/runconfig/exec.go index 9390781a..8fe05be1 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/runconfig/exec.go +++ b/Godeps/_workspace/src/github.com/docker/docker/runconfig/exec.go @@ -1,11 +1,7 @@ package runconfig import ( - "fmt" - - "github.com/docker/docker/engine" flag "github.com/docker/docker/pkg/mflag" - "github.com/docker/docker/utils" ) type ExecConfig struct { @@ -20,37 +16,18 @@ type ExecConfig struct { Cmd []string } -func ExecConfigFromJob(job *engine.Job) (*ExecConfig, error) { - execConfig := &ExecConfig{ - // TODO(vishh): Expose 'User' once it is supported. - //User: job.Getenv("User"), - // TODO(vishh): Expose 'Privileged' once it is supported. - //Privileged: job.GetenvBool("Privileged"), - Tty: job.GetenvBool("Tty"), - AttachStdin: job.GetenvBool("AttachStdin"), - AttachStderr: job.GetenvBool("AttachStderr"), - AttachStdout: job.GetenvBool("AttachStdout"), - } - cmd := job.GetenvList("Cmd") - if len(cmd) == 0 { - return nil, fmt.Errorf("No exec command specified") - } - - execConfig.Cmd = cmd - - return execConfig, nil -} - func ParseExec(cmd *flag.FlagSet, args []string) (*ExecConfig, error) { var ( - flStdin = cmd.Bool([]string{"i", "-interactive"}, false, "Keep STDIN open even if not attached") - flTty = cmd.Bool([]string{"t", "-tty"}, false, "Allocate a pseudo-TTY") - flDetach = cmd.Bool([]string{"d", "-detach"}, false, "Detached mode: run command in the background") - execCmd []string - container string + flStdin = cmd.Bool([]string{"i", "-interactive"}, false, "Keep STDIN open even if not attached") + flTty = cmd.Bool([]string{"t", "-tty"}, false, "Allocate a pseudo-TTY") + flDetach = cmd.Bool([]string{"d", "-detach"}, false, "Detached mode: run command in the background") + flUser = cmd.String([]string{"u", "-user"}, "", "Username or UID (format: [:])") + flPrivileged = cmd.Bool([]string{"-privileged"}, false, "Give extended privileges to the command") + execCmd []string + container string ) cmd.Require(flag.Min, 2) - if err := utils.ParseFlags(cmd, args, true); err != nil { + if err := cmd.ParseFlags(args, true); err != nil { return nil, err } container = cmd.Arg(0) @@ -58,10 +35,8 @@ func ParseExec(cmd *flag.FlagSet, args []string) (*ExecConfig, error) { execCmd = parsedArgs[1:] execConfig := &ExecConfig{ - // TODO(vishh): Expose '-u' flag once it is supported. 
- User: "", - // TODO(vishh): Expose '-p' flag once it is supported. - Privileged: false, + User: *flUser, + Privileged: *flPrivileged, Tty: *flTty, Cmd: execCmd, Container: container, diff --git a/Godeps/_workspace/src/github.com/docker/docker/runconfig/fixtures/container_config_1_14.json b/Godeps/_workspace/src/github.com/docker/docker/runconfig/fixtures/container_config_1_14.json new file mode 100644 index 00000000..b08334c0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/runconfig/fixtures/container_config_1_14.json @@ -0,0 +1,30 @@ +{ + "Hostname":"", + "Domainname": "", + "User":"", + "Memory": 1000, + "MemorySwap":0, + "CpuShares": 512, + "Cpuset": "0,1", + "AttachStdin":false, + "AttachStdout":true, + "AttachStderr":true, + "PortSpecs":null, + "Tty":false, + "OpenStdin":false, + "StdinOnce":false, + "Env":null, + "Cmd":[ + "bash" + ], + "Image":"ubuntu", + "Volumes":{ + "/tmp": {} + }, + "WorkingDir":"", + "NetworkDisabled": false, + "ExposedPorts":{ + "22/tcp": {} + }, + "RestartPolicy": { "Name": "always" } +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/runconfig/fixtures/container_config_1_17.json b/Godeps/_workspace/src/github.com/docker/docker/runconfig/fixtures/container_config_1_17.json new file mode 100644 index 00000000..60fc6e25 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/runconfig/fixtures/container_config_1_17.json @@ -0,0 +1,49 @@ +{ + "Hostname": "", + "Domainname": "", + "User": "", + "Memory": 1000, + "MemorySwap": 0, + "CpuShares": 512, + "Cpuset": "0,1", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Entrypoint": "bash", + "Image": "ubuntu", + "Volumes": { + "/tmp": {} + }, + "WorkingDir": "", + "NetworkDisabled": false, + "MacAddress": "12:34:56:78:9a:bc", + "ExposedPorts": { + "22/tcp": {} + }, + "SecurityOpt": [""], + "HostConfig": { + "Binds": ["/tmp:/tmp"], + "Links": ["redis3:redis"], + "LxcConf": {"lxc.utsname":"docker"}, + "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts": false, + "Privileged": false, + "ReadonlyRootfs": false, + "Dns": ["8.8.8.8"], + "DnsSearch": [""], + "ExtraHosts": null, + "VolumesFrom": ["parent", "other:ro"], + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"], + "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, + "NetworkMode": "bridge", + "Devices": [] + } +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/runconfig/fixtures/container_config_1_19.json b/Godeps/_workspace/src/github.com/docker/docker/runconfig/fixtures/container_config_1_19.json new file mode 100644 index 00000000..9a3ce205 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/docker/runconfig/fixtures/container_config_1_19.json @@ -0,0 +1,57 @@ +{ + "Hostname": "", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": true, + "AttachStderr": true, + "Tty": false, + "OpenStdin": false, + "StdinOnce": false, + "Env": null, + "Cmd": [ + "date" + ], + "Entrypoint": "bash", + "Image": "ubuntu", + "Labels": { + "com.example.vendor": "Acme", + "com.example.license": "GPL", + "com.example.version": "1.0" + }, + "Volumes": { + "/tmp": {} + }, + "WorkingDir": "", + "NetworkDisabled": false, + "MacAddress": "12:34:56:78:9a:bc", + "ExposedPorts": { + "22/tcp": {} + }, + "HostConfig": { + "Binds": ["/tmp:/tmp"], + "Links": ["redis3:redis"], + "LxcConf": {"lxc.utsname":"docker"}, + "Memory": 1000, + "MemorySwap": 0, + 
"CpuShares": 512, + "CpusetCpus": "0,1", + "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, + "PublishAllPorts": false, + "Privileged": false, + "ReadonlyRootfs": false, + "Dns": ["8.8.8.8"], + "DnsSearch": [""], + "ExtraHosts": null, + "VolumesFrom": ["parent", "other:ro"], + "CapAdd": ["NET_ADMIN"], + "CapDrop": ["MKNOD"], + "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, + "NetworkMode": "bridge", + "Devices": [], + "Ulimits": [{}], + "LogConfig": { "Type": "json-file", "Config": {} }, + "SecurityOpt": [""], + "CgroupParent": "" + } +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/runconfig/hostconfig.go b/Godeps/_workspace/src/github.com/docker/docker/runconfig/hostconfig.go index 84d636b5..171671b6 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/runconfig/hostconfig.go +++ b/Godeps/_workspace/src/github.com/docker/docker/runconfig/hostconfig.go @@ -1,14 +1,19 @@ package runconfig import ( + "encoding/json" + "io" "strings" - "github.com/docker/docker/engine" "github.com/docker/docker/nat" "github.com/docker/docker/pkg/ulimit" - "github.com/docker/docker/utils" ) +type KeyValuePair struct { + Key string + Value string +} + type NetworkMode string // IsPrivate indicates whether container use it's private network stack @@ -104,14 +109,65 @@ type LogConfig struct { Config map[string]string } +type LxcConfig struct { + values []KeyValuePair +} + +func (c *LxcConfig) MarshalJSON() ([]byte, error) { + if c == nil { + return []byte{}, nil + } + return json.Marshal(c.Slice()) +} + +func (c *LxcConfig) UnmarshalJSON(b []byte) error { + if len(b) == 0 { + return nil + } + + var kv []KeyValuePair + if err := json.Unmarshal(b, &kv); err != nil { + var h map[string]string + if err := json.Unmarshal(b, &h); err != nil { + return err + } + for k, v := range h { + kv = append(kv, KeyValuePair{k, v}) + } + } + c.values = kv + + return nil +} + +func (c *LxcConfig) Len() int { + if c == nil { + return 0 + } + return len(c.values) +} + +func (c *LxcConfig) Slice() []KeyValuePair { + if c == nil { + return nil + } + return c.values +} + +func NewLxcConfig(values []KeyValuePair) *LxcConfig { + return &LxcConfig{values} +} + type HostConfig struct { Binds []string ContainerIDFile string - LxcConf []utils.KeyValuePair + LxcConf *LxcConfig Memory int64 // Memory limit (in bytes) MemorySwap int64 // Total memory usage (memory + swap); set `-1` to disable swap CpuShares int64 // CPU shares (relative weight vs. other containers) CpusetCpus string // CpusetCpus 0-2, 0,1 + CpusetMems string // CpusetMems 0-2, 0,1 + CpuQuota int64 Privileged bool PortBindings nat.PortMap Links []string @@ -134,96 +190,55 @@ type HostConfig struct { CgroupParent string // Parent cgroup. 
} -// This is used by the create command when you want to set both the -// Config and the HostConfig in the same call -type ConfigAndHostConfig struct { - Config - HostConfig HostConfig -} - -func MergeConfigs(config *Config, hostConfig *HostConfig) *ConfigAndHostConfig { - return &ConfigAndHostConfig{ - *config, - *hostConfig, +func MergeConfigs(config *Config, hostConfig *HostConfig) *ContainerConfigWrapper { + return &ContainerConfigWrapper{ + config, + &hostConfigWrapper{InnerHostConfig: hostConfig}, } } -func ContainerHostConfigFromJob(job *engine.Job) *HostConfig { - if job.EnvExists("HostConfig") { - hostConfig := HostConfig{} - job.GetenvJson("HostConfig", &hostConfig) +type hostConfigWrapper struct { + InnerHostConfig *HostConfig `json:"HostConfig,omitempty"` + Cpuset string `json:",omitempty"` // Deprecated. Exported for backwards compatibility. - // FIXME: These are for backward compatibility, if people use these - // options with `HostConfig`, we should still make them workable. - if job.EnvExists("Memory") && hostConfig.Memory == 0 { - hostConfig.Memory = job.GetenvInt64("Memory") - } - if job.EnvExists("MemorySwap") && hostConfig.MemorySwap == 0 { - hostConfig.MemorySwap = job.GetenvInt64("MemorySwap") - } - if job.EnvExists("CpuShares") && hostConfig.CpuShares == 0 { - hostConfig.CpuShares = job.GetenvInt64("CpuShares") - } - if job.EnvExists("Cpuset") && hostConfig.CpusetCpus == "" { - hostConfig.CpusetCpus = job.Getenv("Cpuset") - } - - return &hostConfig - } - - hostConfig := &HostConfig{ - ContainerIDFile: job.Getenv("ContainerIDFile"), - Memory: job.GetenvInt64("Memory"), - MemorySwap: job.GetenvInt64("MemorySwap"), - CpuShares: job.GetenvInt64("CpuShares"), - CpusetCpus: job.Getenv("CpusetCpus"), - Privileged: job.GetenvBool("Privileged"), - PublishAllPorts: job.GetenvBool("PublishAllPorts"), - NetworkMode: NetworkMode(job.Getenv("NetworkMode")), - IpcMode: IpcMode(job.Getenv("IpcMode")), - PidMode: PidMode(job.Getenv("PidMode")), - ReadonlyRootfs: job.GetenvBool("ReadonlyRootfs"), - CgroupParent: job.Getenv("CgroupParent"), - } - - // FIXME: This is for backward compatibility, if people use `Cpuset` - // in json, make it workable, we will only pass hostConfig.CpusetCpus - // to execDriver. - if job.EnvExists("Cpuset") && hostConfig.CpusetCpus == "" { - hostConfig.CpusetCpus = job.Getenv("Cpuset") - } - - job.GetenvJson("LxcConf", &hostConfig.LxcConf) - job.GetenvJson("PortBindings", &hostConfig.PortBindings) - job.GetenvJson("Devices", &hostConfig.Devices) - job.GetenvJson("RestartPolicy", &hostConfig.RestartPolicy) - job.GetenvJson("Ulimits", &hostConfig.Ulimits) - job.GetenvJson("LogConfig", &hostConfig.LogConfig) - hostConfig.SecurityOpt = job.GetenvList("SecurityOpt") - if Binds := job.GetenvList("Binds"); Binds != nil { - hostConfig.Binds = Binds - } - if Links := job.GetenvList("Links"); Links != nil { - hostConfig.Links = Links - } - if Dns := job.GetenvList("Dns"); Dns != nil { - hostConfig.Dns = Dns - } - if DnsSearch := job.GetenvList("DnsSearch"); DnsSearch != nil { - hostConfig.DnsSearch = DnsSearch - } - if ExtraHosts := job.GetenvList("ExtraHosts"); ExtraHosts != nil { - hostConfig.ExtraHosts = ExtraHosts - } - if VolumesFrom := job.GetenvList("VolumesFrom"); VolumesFrom != nil { - hostConfig.VolumesFrom = VolumesFrom - } - if CapAdd := job.GetenvList("CapAdd"); CapAdd != nil { - hostConfig.CapAdd = CapAdd - } - if CapDrop := job.GetenvList("CapDrop"); CapDrop != nil { - hostConfig.CapDrop = CapDrop - } - - return hostConfig + *HostConfig // Deprecated. 
Exported to read attrubutes from json that are not in the inner host config structure. +} + +func (w hostConfigWrapper) GetHostConfig() *HostConfig { + hc := w.HostConfig + + if hc == nil && w.InnerHostConfig != nil { + hc = w.InnerHostConfig + } else if w.InnerHostConfig != nil { + if hc.Memory != 0 && w.InnerHostConfig.Memory == 0 { + w.InnerHostConfig.Memory = hc.Memory + } + if hc.MemorySwap != 0 && w.InnerHostConfig.MemorySwap == 0 { + w.InnerHostConfig.MemorySwap = hc.MemorySwap + } + if hc.CpuShares != 0 && w.InnerHostConfig.CpuShares == 0 { + w.InnerHostConfig.CpuShares = hc.CpuShares + } + + hc = w.InnerHostConfig + } + + if hc != nil && w.Cpuset != "" && hc.CpusetCpus == "" { + hc.CpusetCpus = w.Cpuset + } + + return hc +} + +func DecodeHostConfig(src io.Reader) (*HostConfig, error) { + decoder := json.NewDecoder(src) + + var w hostConfigWrapper + if err := decoder.Decode(&w); err != nil { + return nil, err + } + + hc := w.GetHostConfig() + + return hc, nil } diff --git a/Godeps/_workspace/src/github.com/docker/docker/runconfig/merge.go b/Godeps/_workspace/src/github.com/docker/docker/runconfig/merge.go index 9bbdc6ad..9c9a3b43 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/runconfig/merge.go +++ b/Godeps/_workspace/src/github.com/docker/docker/runconfig/merge.go @@ -3,7 +3,7 @@ package runconfig import ( "strings" - log "github.com/Sirupsen/logrus" + "github.com/Sirupsen/logrus" "github.com/docker/docker/nat" ) @@ -11,15 +11,6 @@ func Merge(userConf, imageConf *Config) error { if userConf.User == "" { userConf.User = imageConf.User } - if userConf.Memory == 0 { - userConf.Memory = imageConf.Memory - } - if userConf.MemorySwap == 0 { - userConf.MemorySwap = imageConf.MemorySwap - } - if userConf.CpuShares == 0 { - userConf.CpuShares = imageConf.CpuShares - } if len(userConf.ExposedPorts) == 0 { userConf.ExposedPorts = imageConf.ExposedPorts } else if imageConf.ExposedPorts != nil { @@ -50,7 +41,7 @@ func Merge(userConf, imageConf *Config) error { } if len(imageConf.PortSpecs) > 0 { // FIXME: I think we can safely remove this. Leaving it for now for the sake of reverse-compat paranoia. - log.Debugf("Migrating image port specs to containter: %s", strings.Join(imageConf.PortSpecs, ", ")) + logrus.Debugf("Migrating image port specs to container: %s", strings.Join(imageConf.PortSpecs, ", ")) if userConf.ExposedPorts == nil { userConf.ExposedPorts = make(nat.PortSet) } @@ -94,8 +85,8 @@ func Merge(userConf, imageConf *Config) error { userConf.Labels = imageConf.Labels } - if len(userConf.Entrypoint) == 0 { - if len(userConf.Cmd) == 0 { + if userConf.Entrypoint.Len() == 0 { + if userConf.Cmd.Len() == 0 { userConf.Cmd = imageConf.Cmd } diff --git a/Godeps/_workspace/src/github.com/docker/docker/runconfig/parse.go b/Godeps/_workspace/src/github.com/docker/docker/runconfig/parse.go index ccd8056c..4ab40698 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/runconfig/parse.go +++ b/Godeps/_workspace/src/github.com/docker/docker/runconfig/parse.go @@ -2,7 +2,6 @@ package runconfig import ( "fmt" - "path" "strconv" "strings" @@ -12,11 +11,9 @@ import ( "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/ulimit" "github.com/docker/docker/pkg/units" - "github.com/docker/docker/utils" ) var ( - ErrInvalidWorkingDirectory = fmt.Errorf("The working directory is invalid. It needs to be an absolute path.") ErrConflictContainerNetworkAndLinks = fmt.Errorf("Conflicting options: --net=container can't be used with links. 
This would result in undefined behavior.") ErrConflictContainerNetworkAndDns = fmt.Errorf("Conflicting options: --net=container can't be used with --dns. This configuration is invalid.") ErrConflictNetworkHostname = fmt.Errorf("Conflicting options: -h and the network mode (--net)") @@ -65,6 +62,8 @@ func Parse(cmd *flag.FlagSet, args []string) (*Config, *HostConfig, *flag.FlagSe flWorkingDir = cmd.String([]string{"w", "-workdir"}, "", "Working directory inside the container") flCpuShares = cmd.Int64([]string{"c", "-cpu-shares"}, 0, "CPU shares (relative weight)") flCpusetCpus = cmd.String([]string{"#-cpuset", "-cpuset-cpus"}, "", "CPUs in which to allow execution (0-3, 0,1)") + flCpusetMems = cmd.String([]string{"-cpuset-mems"}, "", "MEMs in which to allow execution (0-3, 0,1)") + flCpuQuota = cmd.Int64([]string{"-cpu-quota"}, 0, "Limit the CPU CFS (Completely Fair Scheduler) quota") flNetMode = cmd.String([]string{"-net"}, "bridge", "Set the Network mode for the container") flMacAddress = cmd.String([]string{"-mac-address"}, "", "Container MAC address (e.g. 92:d0:c6:0a:29:33)") flIpcMode = cmd.String([]string{"-ipc"}, "", "IPC namespace to use") @@ -96,16 +95,11 @@ func Parse(cmd *flag.FlagSet, args []string) (*Config, *HostConfig, *flag.FlagSe cmd.Require(flag.Min, 1) - if err := utils.ParseFlags(cmd, args, true); err != nil { + if err := cmd.ParseFlags(args, true); err != nil { return nil, nil, cmd, err } - // Validate input params - if *flWorkingDir != "" && !path.IsAbs(*flWorkingDir) { - return nil, nil, cmd, ErrInvalidWorkingDirectory - } - - // Validate the input mac address + // Validate input params starting with the input mac address if *flMacAddress != "" { if _, err := opts.ValidateMACAddress(*flMacAddress); err != nil { return nil, nil, cmd, fmt.Errorf("%s is not a valid mac address", *flMacAddress) @@ -125,7 +119,7 @@ func Parse(cmd *flag.FlagSet, args []string) (*Config, *HostConfig, *flag.FlagSe return nil, nil, cmd, ErrConflictHostNetworkAndLinks } - if *flNetMode == "container" && flLinks.Len() > 0 { + if strings.HasPrefix(*flNetMode, "container") && flLinks.Len() > 0 { return nil, nil, cmd, ErrConflictContainerNetworkAndLinks } @@ -133,7 +127,7 @@ func Parse(cmd *flag.FlagSet, args []string) (*Config, *HostConfig, *flag.FlagSe return nil, nil, cmd, ErrConflictHostNetworkAndDns } - if *flNetMode == "container" && flDns.Len() > 0 { + if strings.HasPrefix(*flNetMode, "container") && flDns.Len() > 0 { return nil, nil, cmd, ErrConflictContainerNetworkAndDns } @@ -186,21 +180,22 @@ func Parse(cmd *flag.FlagSet, args []string) (*Config, *HostConfig, *flag.FlagSe var ( parsedArgs = cmd.Args() - runCmd []string - entrypoint []string + runCmd *Command + entrypoint *Entrypoint image = cmd.Arg(0) ) if len(parsedArgs) > 1 { - runCmd = parsedArgs[1:] + runCmd = NewCommand(parsedArgs[1:]...) 
} if *flEntrypoint != "" { - entrypoint = []string{*flEntrypoint} + entrypoint = NewEntrypoint(*flEntrypoint) } - lxcConf, err := parseKeyValueOpts(flLxcOpts) + lc, err := parseKeyValueOpts(flLxcOpts) if err != nil { return nil, nil, cmd, err } + lxcConf := NewLxcConfig(lc) var ( domainname string @@ -275,7 +270,7 @@ func Parse(cmd *flag.FlagSet, args []string) (*Config, *HostConfig, *flag.FlagSe return nil, nil, cmd, fmt.Errorf("--net: invalid net mode: %v", err) } - restartPolicy, err := parseRestartPolicy(*flRestartPolicy) + restartPolicy, err := ParseRestartPolicy(*flRestartPolicy) if err != nil { return nil, nil, cmd, err } @@ -289,10 +284,6 @@ func Parse(cmd *flag.FlagSet, args []string) (*Config, *HostConfig, *flag.FlagSe Tty: *flTty, NetworkDisabled: !*flNetwork, OpenStdin: *flStdin, - Memory: flMemory, // FIXME: for backward compatibility - MemorySwap: MemorySwap, // FIXME: for backward compatibility - CpuShares: *flCpuShares, // FIXME: for backward compatibility - Cpuset: *flCpusetCpus, // FIXME: for backward compatibility AttachStdin: attachStdin, AttachStdout: attachStdout, AttachStderr: attachStderr, @@ -314,6 +305,8 @@ func Parse(cmd *flag.FlagSet, args []string) (*Config, *HostConfig, *flag.FlagSe MemorySwap: MemorySwap, CpuShares: *flCpuShares, CpusetCpus: *flCpusetCpus, + CpusetMems: *flCpusetMems, + CpuQuota: *flCpuQuota, Privileged: *flPrivileged, PortBindings: portBindings, Links: flLinks.GetAll(), @@ -374,8 +367,8 @@ func convertKVStringsToMap(values []string) map[string]string { return result } -// parseRestartPolicy returns the parsed policy or an error indicating what is incorrect -func parseRestartPolicy(policy string) (RestartPolicy, error) { +// ParseRestartPolicy returns the parsed policy or an error indicating what is incorrect +func ParseRestartPolicy(policy string) (RestartPolicy, error) { p := RestartPolicy{} if policy == "" { @@ -430,14 +423,14 @@ func parseDriverOpts(opts opts.ListOpts) (map[string][]string, error) { return out, nil } -func parseKeyValueOpts(opts opts.ListOpts) ([]utils.KeyValuePair, error) { - out := make([]utils.KeyValuePair, opts.Len()) +func parseKeyValueOpts(opts opts.ListOpts) ([]KeyValuePair, error) { + out := make([]KeyValuePair, opts.Len()) for i, o := range opts.GetAll() { k, v, err := parsers.ParseKeyValueOpt(o) if err != nil { return nil, err } - out[i] = utils.KeyValuePair{Key: k, Value: v} + out[i] = KeyValuePair{Key: k, Value: v} } return out, nil } diff --git a/Godeps/_workspace/src/github.com/docker/docker/runconfig/parse_test.go b/Godeps/_workspace/src/github.com/docker/docker/runconfig/parse_test.go index cd90dc3a..6c0a1cfc 100644 --- a/Godeps/_workspace/src/github.com/docker/docker/runconfig/parse_test.go +++ b/Godeps/_workspace/src/github.com/docker/docker/runconfig/parse_test.go @@ -57,3 +57,9 @@ func TestNetHostname(t *testing.T) { t.Fatalf("Expected error ErrConflictNetworkHostname, got: %s", err) } } + +func TestConflictContainerNetworkAndLinks(t *testing.T) { + if _, _, _, err := parseRun([]string{"--net=container:other", "--link=zip:zap", "img", "cmd"}); err != ErrConflictContainerNetworkAndLinks { + t.Fatalf("Expected error ErrConflictContainerNetworkAndLinks, got: %s", err) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/docker/utils/daemon.go b/Godeps/_workspace/src/github.com/docker/docker/utils/daemon.go deleted file mode 100644 index 871122ed..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/utils/daemon.go +++ /dev/null @@ -1,36 +0,0 @@ -package utils - -import ( - "fmt" - 
"io/ioutil" - "log" - "os" - "strconv" -) - -func CreatePidFile(pidfile string) error { - if pidString, err := ioutil.ReadFile(pidfile); err == nil { - pid, err := strconv.Atoi(string(pidString)) - if err == nil { - if _, err := os.Stat(fmt.Sprintf("/proc/%d/", pid)); err == nil { - return fmt.Errorf("pid file found, ensure docker is not running or delete %s", pidfile) - } - } - } - - file, err := os.Create(pidfile) - if err != nil { - return err - } - - defer file.Close() - - _, err = fmt.Fprintf(file, "%d", os.Getpid()) - return err -} - -func RemovePidFile(pidfile string) { - if err := os.Remove(pidfile); err != nil { - log.Printf("Error removing %s: %s", pidfile, err) - } -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/utils/flags.go b/Godeps/_workspace/src/github.com/docker/docker/utils/flags.go deleted file mode 100644 index 33c72279..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/utils/flags.go +++ /dev/null @@ -1,45 +0,0 @@ -package utils - -import ( - "fmt" - "os" - - flag "github.com/docker/docker/pkg/mflag" -) - -// ParseFlags is a utility function that adds a help flag if withHelp is true, -// calls cmd.Parse(args) and prints a relevant error message if there are -// incorrect number of arguments. It returns error only if error handling is -// set to ContinueOnError and parsing fails. If error handling is set to -// ExitOnError, it's safe to ignore the return value. -// TODO: move this to a better package than utils -func ParseFlags(cmd *flag.FlagSet, args []string, withHelp bool) error { - var help *bool - if withHelp { - help = cmd.Bool([]string{"#help", "-help"}, false, "Print usage") - } - if err := cmd.Parse(args); err != nil { - return err - } - if help != nil && *help { - cmd.Usage() - // just in case Usage does not exit - os.Exit(0) - } - if str := cmd.CheckArgs(); str != "" { - ReportError(cmd, str, withHelp) - } - return nil -} - -func ReportError(cmd *flag.FlagSet, str string, withHelp bool) { - if withHelp { - if os.Args[0] == cmd.Name() { - str += ". See '" + os.Args[0] + " --help'" - } else { - str += ". See '" + os.Args[0] + " " + cmd.Name() + " --help'" - } - } - fmt.Fprintf(cmd.Out(), "docker: %s.\n", str) - os.Exit(1) -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/utils/http.go b/Godeps/_workspace/src/github.com/docker/docker/utils/http.go deleted file mode 100644 index 24eaea56..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/utils/http.go +++ /dev/null @@ -1,168 +0,0 @@ -package utils - -import ( - "io" - "net/http" - "strings" - - log "github.com/Sirupsen/logrus" -) - -// VersionInfo is used to model entities which has a version. -// It is basically a tupple with name and version. -type VersionInfo interface { - Name() string - Version() string -} - -func validVersion(version VersionInfo) bool { - const stopChars = " \t\r\n/" - name := version.Name() - vers := version.Version() - if len(name) == 0 || strings.ContainsAny(name, stopChars) { - return false - } - if len(vers) == 0 || strings.ContainsAny(vers, stopChars) { - return false - } - return true -} - -// Convert versions to a string and append the string to the string base. -// -// Each VersionInfo will be converted to a string in the format of -// "product/version", where the "product" is get from the Name() method, while -// version is get from the Version() method. Several pieces of verson information -// will be concatinated and separated by space. 
-func appendVersions(base string, versions ...VersionInfo) string { - if len(versions) == 0 { - return base - } - - verstrs := make([]string, 0, 1+len(versions)) - if len(base) > 0 { - verstrs = append(verstrs, base) - } - - for _, v := range versions { - if !validVersion(v) { - continue - } - verstrs = append(verstrs, v.Name()+"/"+v.Version()) - } - return strings.Join(verstrs, " ") -} - -// HTTPRequestDecorator is used to change an instance of -// http.Request. It could be used to add more header fields, -// change body, etc. -type HTTPRequestDecorator interface { - // ChangeRequest() changes the request accordingly. - // The changed request will be returned or err will be non-nil - // if an error occur. - ChangeRequest(req *http.Request) (newReq *http.Request, err error) -} - -// HTTPUserAgentDecorator appends the product/version to the user agent field -// of a request. -type HTTPUserAgentDecorator struct { - versions []VersionInfo -} - -func NewHTTPUserAgentDecorator(versions ...VersionInfo) HTTPRequestDecorator { - return &HTTPUserAgentDecorator{ - versions: versions, - } -} - -func (h *HTTPUserAgentDecorator) ChangeRequest(req *http.Request) (newReq *http.Request, err error) { - if req == nil { - return req, nil - } - - userAgent := appendVersions(req.UserAgent(), h.versions...) - if len(userAgent) > 0 { - req.Header.Set("User-Agent", userAgent) - } - return req, nil -} - -type HTTPMetaHeadersDecorator struct { - Headers map[string][]string -} - -func (h *HTTPMetaHeadersDecorator) ChangeRequest(req *http.Request) (newReq *http.Request, err error) { - if h.Headers == nil { - return req, nil - } - for k, v := range h.Headers { - req.Header[k] = v - } - return req, nil -} - -type HTTPAuthDecorator struct { - login string - password string -} - -func NewHTTPAuthDecorator(login, password string) HTTPRequestDecorator { - return &HTTPAuthDecorator{ - login: login, - password: password, - } -} - -func (self *HTTPAuthDecorator) ChangeRequest(req *http.Request) (*http.Request, error) { - req.SetBasicAuth(self.login, self.password) - return req, nil -} - -// HTTPRequestFactory creates an HTTP request -// and applies a list of decorators on the request. -type HTTPRequestFactory struct { - decorators []HTTPRequestDecorator -} - -func NewHTTPRequestFactory(d ...HTTPRequestDecorator) *HTTPRequestFactory { - return &HTTPRequestFactory{ - decorators: d, - } -} - -func (self *HTTPRequestFactory) AddDecorator(d ...HTTPRequestDecorator) { - self.decorators = append(self.decorators, d...) -} - -func (self *HTTPRequestFactory) GetDecorators() []HTTPRequestDecorator { - return self.decorators -} - -// NewRequest() creates a new *http.Request, -// applies all decorators in the HTTPRequestFactory on the request, -// then applies decorators provided by d on the request. -func (h *HTTPRequestFactory) NewRequest(method, urlStr string, body io.Reader, d ...HTTPRequestDecorator) (*http.Request, error) { - req, err := http.NewRequest(method, urlStr, body) - if err != nil { - return nil, err - } - - // By default, a nil factory should work. 
- if h == nil { - return req, nil - } - for _, dec := range h.decorators { - req, err = dec.ChangeRequest(req) - if err != nil { - return nil, err - } - } - for _, dec := range d { - req, err = dec.ChangeRequest(req) - if err != nil { - return nil, err - } - } - log.Debugf("%v -- HEADERS: %v", req.URL, req.Header) - return req, err -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/utils/jsonmessage.go b/Godeps/_workspace/src/github.com/docker/docker/utils/jsonmessage.go deleted file mode 100644 index 74d31127..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/utils/jsonmessage.go +++ /dev/null @@ -1,172 +0,0 @@ -package utils - -import ( - "encoding/json" - "fmt" - "io" - "strings" - "time" - - "github.com/docker/docker/pkg/term" - "github.com/docker/docker/pkg/timeutils" - "github.com/docker/docker/pkg/units" -) - -type JSONError struct { - Code int `json:"code,omitempty"` - Message string `json:"message,omitempty"` -} - -func (e *JSONError) Error() string { - return e.Message -} - -type JSONProgress struct { - terminalFd uintptr - Current int `json:"current,omitempty"` - Total int `json:"total,omitempty"` - Start int64 `json:"start,omitempty"` -} - -func (p *JSONProgress) String() string { - var ( - width = 200 - pbBox string - numbersBox string - timeLeftBox string - ) - - ws, err := term.GetWinsize(p.terminalFd) - if err == nil { - width = int(ws.Width) - } - - if p.Current <= 0 && p.Total <= 0 { - return "" - } - current := units.HumanSize(float64(p.Current)) - if p.Total <= 0 { - return fmt.Sprintf("%8v", current) - } - total := units.HumanSize(float64(p.Total)) - percentage := int(float64(p.Current)/float64(p.Total)*100) / 2 - if percentage > 50 { - percentage = 50 - } - if width > 110 { - // this number can't be negetive gh#7136 - numSpaces := 0 - if 50-percentage > 0 { - numSpaces = 50 - percentage - } - pbBox = fmt.Sprintf("[%s>%s] ", strings.Repeat("=", percentage), strings.Repeat(" ", numSpaces)) - } - numbersBox = fmt.Sprintf("%8v/%v", current, total) - - if p.Current > 0 && p.Start > 0 && percentage < 50 { - fromStart := time.Now().UTC().Sub(time.Unix(int64(p.Start), 0)) - perEntry := fromStart / time.Duration(p.Current) - left := time.Duration(p.Total-p.Current) * perEntry - left = (left / time.Second) * time.Second - - if width > 50 { - timeLeftBox = " " + left.String() - } - } - return pbBox + numbersBox + timeLeftBox -} - -type JSONMessage struct { - Stream string `json:"stream,omitempty"` - Status string `json:"status,omitempty"` - Progress *JSONProgress `json:"progressDetail,omitempty"` - ProgressMessage string `json:"progress,omitempty"` //deprecated - ID string `json:"id,omitempty"` - From string `json:"from,omitempty"` - Time int64 `json:"time,omitempty"` - Error *JSONError `json:"errorDetail,omitempty"` - ErrorMessage string `json:"error,omitempty"` //deprecated -} - -func (jm *JSONMessage) Display(out io.Writer, isTerminal bool) error { - if jm.Error != nil { - if jm.Error.Code == 401 { - return fmt.Errorf("Authentication is required.") - } - return jm.Error - } - var endl string - if isTerminal && jm.Stream == "" && jm.Progress != nil { - // [2K = erase entire current line - fmt.Fprintf(out, "%c[2K\r", 27) - endl = "\r" - } else if jm.Progress != nil && jm.Progress.String() != "" { //disable progressbar in non-terminal - return nil - } - if jm.Time != 0 { - fmt.Fprintf(out, "%s ", time.Unix(jm.Time, 0).Format(timeutils.RFC3339NanoFixed)) - } - if jm.ID != "" { - fmt.Fprintf(out, "%s: ", jm.ID) - } - if jm.From != "" { - fmt.Fprintf(out, 
"(from %s) ", jm.From) - } - if jm.Progress != nil && isTerminal { - fmt.Fprintf(out, "%s %s%s", jm.Status, jm.Progress.String(), endl) - } else if jm.ProgressMessage != "" { //deprecated - fmt.Fprintf(out, "%s %s%s", jm.Status, jm.ProgressMessage, endl) - } else if jm.Stream != "" { - fmt.Fprintf(out, "%s%s", jm.Stream, endl) - } else { - fmt.Fprintf(out, "%s%s\n", jm.Status, endl) - } - return nil -} - -func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, isTerminal bool) error { - var ( - dec = json.NewDecoder(in) - ids = make(map[string]int) - diff = 0 - ) - for { - var jm JSONMessage - if err := dec.Decode(&jm); err != nil { - if err == io.EOF { - break - } - return err - } - - if jm.Progress != nil { - jm.Progress.terminalFd = terminalFd - } - if jm.ID != "" && (jm.Progress != nil || jm.ProgressMessage != "") { - line, ok := ids[jm.ID] - if !ok { - line = len(ids) - ids[jm.ID] = line - if isTerminal { - fmt.Fprintf(out, "\n") - } - diff = 0 - } else { - diff = len(ids) - line - } - if jm.ID != "" && isTerminal { - // [{diff}A = move cursor up diff rows - fmt.Fprintf(out, "%c[%dA", 27, diff) - } - } - err := jm.Display(out, isTerminal) - if jm.ID != "" && isTerminal { - // [{diff}B = move cursor down diff rows - fmt.Fprintf(out, "%c[%dB", 27, diff) - } - if err != nil { - return err - } - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/utils/jsonmessage_test.go b/Godeps/_workspace/src/github.com/docker/docker/utils/jsonmessage_test.go deleted file mode 100644 index b9103da1..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/utils/jsonmessage_test.go +++ /dev/null @@ -1,38 +0,0 @@ -package utils - -import ( - "testing" -) - -func TestError(t *testing.T) { - je := JSONError{404, "Not found"} - if je.Error() != "Not found" { - t.Fatalf("Expected 'Not found' got '%s'", je.Error()) - } -} - -func TestProgress(t *testing.T) { - jp := JSONProgress{} - if jp.String() != "" { - t.Fatalf("Expected empty string, got '%s'", jp.String()) - } - - expected := " 1 B" - jp2 := JSONProgress{Current: 1} - if jp2.String() != expected { - t.Fatalf("Expected %q, got %q", expected, jp2.String()) - } - - expected = "[=========================> ] 50 B/100 B" - jp3 := JSONProgress{Current: 50, Total: 100} - if jp3.String() != expected { - t.Fatalf("Expected %q, got %q", expected, jp3.String()) - } - - // this number can't be negetive gh#7136 - expected = "[==================================================>] 50 B/40 B" - jp4 := JSONProgress{Current: 50, Total: 40} - if jp4.String() != expected { - t.Fatalf("Expected %q, got %q", expected, jp4.String()) - } -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/utils/streamformatter.go b/Godeps/_workspace/src/github.com/docker/docker/utils/streamformatter.go deleted file mode 100644 index e5b15f98..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/utils/streamformatter.go +++ /dev/null @@ -1,121 +0,0 @@ -package utils - -import ( - "encoding/json" - "fmt" - "github.com/docker/docker/pkg/progressreader" - "io" -) - -type StreamFormatter struct { - json bool -} - -func NewStreamFormatter(json bool) *StreamFormatter { - return &StreamFormatter{json} -} - -const streamNewline = "\r\n" - -var streamNewlineBytes = []byte(streamNewline) - -func (sf *StreamFormatter) FormatStream(str string) []byte { - if sf.json { - b, err := json.Marshal(&JSONMessage{Stream: str}) - if err != nil { - return sf.FormatError(err) - } - return append(b, streamNewlineBytes...) 
- } - return []byte(str + "\r") -} - -func (sf *StreamFormatter) FormatStatus(id, format string, a ...interface{}) []byte { - str := fmt.Sprintf(format, a...) - if sf.json { - b, err := json.Marshal(&JSONMessage{ID: id, Status: str}) - if err != nil { - return sf.FormatError(err) - } - return append(b, streamNewlineBytes...) - } - return []byte(str + streamNewline) -} - -func (sf *StreamFormatter) FormatError(err error) []byte { - if sf.json { - jsonError, ok := err.(*JSONError) - if !ok { - jsonError = &JSONError{Message: err.Error()} - } - if b, err := json.Marshal(&JSONMessage{Error: jsonError, ErrorMessage: err.Error()}); err == nil { - return append(b, streamNewlineBytes...) - } - return []byte("{\"error\":\"format error\"}" + streamNewline) - } - return []byte("Error: " + err.Error() + streamNewline) -} -func (sf *StreamFormatter) FormatProg(id, action string, p interface{}) []byte { - switch progress := p.(type) { - case *JSONProgress: - return sf.FormatProgress(id, action, progress) - case progressreader.PR_JSONProgress: - return sf.FormatProgress(id, action, &JSONProgress{Current: progress.GetCurrent(), Total: progress.GetTotal()}) - } - return nil -} -func (sf *StreamFormatter) FormatProgress(id, action string, progress *JSONProgress) []byte { - if progress == nil { - progress = &JSONProgress{} - } - if sf.json { - - b, err := json.Marshal(&JSONMessage{ - Status: action, - ProgressMessage: progress.String(), - Progress: progress, - ID: id, - }) - if err != nil { - return nil - } - return b - } - endl := "\r" - if progress.String() == "" { - endl += "\n" - } - return []byte(action + " " + progress.String() + endl) -} - -func (sf *StreamFormatter) Json() bool { - return sf.json -} - -type StdoutFormater struct { - io.Writer - *StreamFormatter -} - -func (sf *StdoutFormater) Write(buf []byte) (int, error) { - formattedBuf := sf.StreamFormatter.FormatStream(string(buf)) - n, err := sf.Writer.Write(formattedBuf) - if n != len(formattedBuf) { - return n, io.ErrShortWrite - } - return len(buf), err -} - -type StderrFormater struct { - io.Writer - *StreamFormatter -} - -func (sf *StderrFormater) Write(buf []byte) (int, error) { - formattedBuf := sf.StreamFormatter.FormatStream("\033[91m" + string(buf) + "\033[0m") - n, err := sf.Writer.Write(formattedBuf) - if n != len(formattedBuf) { - return n, io.ErrShortWrite - } - return len(buf), err -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/utils/streamformatter_test.go b/Godeps/_workspace/src/github.com/docker/docker/utils/streamformatter_test.go deleted file mode 100644 index 20610f6c..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/utils/streamformatter_test.go +++ /dev/null @@ -1,67 +0,0 @@ -package utils - -import ( - "encoding/json" - "errors" - "reflect" - "testing" -) - -func TestFormatStream(t *testing.T) { - sf := NewStreamFormatter(true) - res := sf.FormatStream("stream") - if string(res) != `{"stream":"stream"}`+"\r\n" { - t.Fatalf("%q", res) - } -} - -func TestFormatStatus(t *testing.T) { - sf := NewStreamFormatter(true) - res := sf.FormatStatus("ID", "%s%d", "a", 1) - if string(res) != `{"status":"a1","id":"ID"}`+"\r\n" { - t.Fatalf("%q", res) - } -} - -func TestFormatSimpleError(t *testing.T) { - sf := NewStreamFormatter(true) - res := sf.FormatError(errors.New("Error for formatter")) - if string(res) != `{"errorDetail":{"message":"Error for formatter"},"error":"Error for formatter"}`+"\r\n" { - t.Fatalf("%q", res) - } -} - -func TestFormatJSONError(t *testing.T) { - sf := NewStreamFormatter(true) 
- err := &JSONError{Code: 50, Message: "Json error"} - res := sf.FormatError(err) - if string(res) != `{"errorDetail":{"code":50,"message":"Json error"},"error":"Json error"}`+"\r\n" { - t.Fatalf("%q", res) - } -} - -func TestFormatProgress(t *testing.T) { - sf := NewStreamFormatter(true) - progress := &JSONProgress{ - Current: 15, - Total: 30, - Start: 1, - } - res := sf.FormatProgress("id", "action", progress) - msg := &JSONMessage{} - if err := json.Unmarshal(res, msg); err != nil { - t.Fatal(err) - } - if msg.ID != "id" { - t.Fatalf("ID must be 'id', got: %s", msg.ID) - } - if msg.Status != "action" { - t.Fatalf("Status must be 'action', got: %s", msg.Status) - } - if msg.ProgressMessage != progress.String() { - t.Fatalf("ProgressMessage must be %s, got: %s", progress.String(), msg.ProgressMessage) - } - if !reflect.DeepEqual(msg.Progress, progress) { - t.Fatal("Original progress not equals progress from FormatProgress") - } -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/utils/tmpdir.go b/Godeps/_workspace/src/github.com/docker/docker/utils/tmpdir.go deleted file mode 100644 index e200f340..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/utils/tmpdir.go +++ /dev/null @@ -1,16 +0,0 @@ -package utils - -import ( - "os" - "path/filepath" -) - -// TempDir returns the default directory to use for temporary files. -func TempDir(rootDir string) (string, error) { - var tmpDir string - if tmpDir = os.Getenv("DOCKER_TMPDIR"); tmpDir == "" { - tmpDir = filepath.Join(rootDir, "tmp") - } - err := os.MkdirAll(tmpDir, 0700) - return tmpDir, err -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/utils/utils.go b/Godeps/_workspace/src/github.com/docker/docker/utils/utils.go deleted file mode 100644 index 540ae6f5..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/utils/utils.go +++ /dev/null @@ -1,554 +0,0 @@ -package utils - -import ( - "bufio" - "bytes" - "crypto/sha1" - "crypto/sha256" - "encoding/hex" - "fmt" - "io" - "io/ioutil" - "net/http" - "os" - "os/exec" - "path/filepath" - "regexp" - "runtime" - "strings" - "sync" - - log "github.com/Sirupsen/logrus" - "github.com/docker/docker/autogen/dockerversion" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/common" - "github.com/docker/docker/pkg/fileutils" - "github.com/docker/docker/pkg/ioutils" -) - -type KeyValuePair struct { - Key string - Value string -} - -var ( - validHex = regexp.MustCompile(`^([a-f0-9]{64})$`) -) - -// Request a given URL and return an io.Reader -func Download(url string) (resp *http.Response, err error) { - if resp, err = http.Get(url); err != nil { - return nil, err - } - if resp.StatusCode >= 400 { - return nil, fmt.Errorf("Got HTTP status code >= 400: %s", resp.Status) - } - return resp, nil -} - -func Trunc(s string, maxlen int) string { - if len(s) <= maxlen { - return s - } - return s[:maxlen] -} - -// Figure out the absolute path of our own binary (if it's still around). 
-func SelfPath() string { - path, err := exec.LookPath(os.Args[0]) - if err != nil { - if os.IsNotExist(err) { - return "" - } - if execErr, ok := err.(*exec.Error); ok && os.IsNotExist(execErr.Err) { - return "" - } - panic(err) - } - path, err = filepath.Abs(path) - if err != nil { - if os.IsNotExist(err) { - return "" - } - panic(err) - } - return path -} - -func dockerInitSha1(target string) string { - f, err := os.Open(target) - if err != nil { - return "" - } - defer f.Close() - h := sha1.New() - _, err = io.Copy(h, f) - if err != nil { - return "" - } - return hex.EncodeToString(h.Sum(nil)) -} - -func isValidDockerInitPath(target string, selfPath string) bool { // target and selfPath should be absolute (InitPath and SelfPath already do this) - if target == "" { - return false - } - if dockerversion.IAMSTATIC == "true" { - if selfPath == "" { - return false - } - if target == selfPath { - return true - } - targetFileInfo, err := os.Lstat(target) - if err != nil { - return false - } - selfPathFileInfo, err := os.Lstat(selfPath) - if err != nil { - return false - } - return os.SameFile(targetFileInfo, selfPathFileInfo) - } - return dockerversion.INITSHA1 != "" && dockerInitSha1(target) == dockerversion.INITSHA1 -} - -// Figure out the path of our dockerinit (which may be SelfPath()) -func DockerInitPath(localCopy string) string { - selfPath := SelfPath() - if isValidDockerInitPath(selfPath, selfPath) { - // if we're valid, don't bother checking anything else - return selfPath - } - var possibleInits = []string{ - localCopy, - dockerversion.INITPATH, - filepath.Join(filepath.Dir(selfPath), "dockerinit"), - - // FHS 3.0 Draft: "/usr/libexec includes internal binaries that are not intended to be executed directly by users or shell scripts. Applications may use a single subdirectory under /usr/libexec." - // http://www.linuxbase.org/betaspecs/fhs/fhs.html#usrlibexec - "/usr/libexec/docker/dockerinit", - "/usr/local/libexec/docker/dockerinit", - - // FHS 2.3: "/usr/lib includes object files, libraries, and internal binaries that are not intended to be executed directly by users or shell scripts." - // http://refspecs.linuxfoundation.org/FHS_2.3/fhs-2.3.html#USRLIBLIBRARIESFORPROGRAMMINGANDPA - "/usr/lib/docker/dockerinit", - "/usr/local/lib/docker/dockerinit", - } - for _, dockerInit := range possibleInits { - if dockerInit == "" { - continue - } - path, err := exec.LookPath(dockerInit) - if err == nil { - path, err = filepath.Abs(path) - if err != nil { - // LookPath already validated that this file exists and is executable (following symlinks), so how could Abs fail? 
- panic(err) - } - if isValidDockerInitPath(path, selfPath) { - return path - } - } - } - return "" -} - -func GetTotalUsedFds() int { - if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil { - log.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err) - } else { - return len(fds) - } - return -1 -} - -func ValidateID(id string) error { - if ok := validHex.MatchString(id); !ok { - err := fmt.Errorf("image ID '%s' is invalid", id) - return err - } - return nil -} - -// Code c/c from io.Copy() modified to handle escape sequence -func CopyEscapable(dst io.Writer, src io.ReadCloser) (written int64, err error) { - buf := make([]byte, 32*1024) - for { - nr, er := src.Read(buf) - if nr > 0 { - // ---- Docker addition - // char 16 is C-p - if nr == 1 && buf[0] == 16 { - nr, er = src.Read(buf) - // char 17 is C-q - if nr == 1 && buf[0] == 17 { - if err := src.Close(); err != nil { - return 0, err - } - return 0, nil - } - } - // ---- End of docker - nw, ew := dst.Write(buf[0:nr]) - if nw > 0 { - written += int64(nw) - } - if ew != nil { - err = ew - break - } - if nr != nw { - err = io.ErrShortWrite - break - } - } - if er == io.EOF { - break - } - if er != nil { - err = er - break - } - } - return written, err -} - -func HashData(src io.Reader) (string, error) { - h := sha256.New() - if _, err := io.Copy(h, src); err != nil { - return "", err - } - return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil -} - -type WriteFlusher struct { - sync.Mutex - w io.Writer - flusher http.Flusher -} - -func (wf *WriteFlusher) Write(b []byte) (n int, err error) { - wf.Lock() - defer wf.Unlock() - n, err = wf.w.Write(b) - wf.flusher.Flush() - return n, err -} - -// Flush the stream immediately. -func (wf *WriteFlusher) Flush() { - wf.Lock() - defer wf.Unlock() - wf.flusher.Flush() -} - -func NewWriteFlusher(w io.Writer) *WriteFlusher { - var flusher http.Flusher - if f, ok := w.(http.Flusher); ok { - flusher = f - } else { - flusher = &ioutils.NopFlusher{} - } - return &WriteFlusher{w: w, flusher: flusher} -} - -func NewHTTPRequestError(msg string, res *http.Response) error { - return &JSONError{ - Message: msg, - Code: res.StatusCode, - } -} - -// An StatusError reports an unsuccessful exit by a command. -type StatusError struct { - Status string - StatusCode int -} - -func (e *StatusError) Error() string { - return fmt.Sprintf("Status: %s, Code: %d", e.Status, e.StatusCode) -} - -func quote(word string, buf *bytes.Buffer) { - // Bail out early for "simple" strings - if word != "" && !strings.ContainsAny(word, "\\'\"`${[|&;<>()~*?! \t\n") { - buf.WriteString(word) - return - } - - buf.WriteString("'") - - for i := 0; i < len(word); i++ { - b := word[i] - if b == '\'' { - // Replace literal ' with a close ', a \', and a open ' - buf.WriteString("'\\''") - } else { - buf.WriteByte(b) - } - } - - buf.WriteString("'") -} - -// Take a list of strings and escape them so they will be handled right -// when passed as arguments to an program via a shell -func ShellQuoteArguments(args []string) string { - var buf bytes.Buffer - for i, arg := range args { - if i != 0 { - buf.WriteByte(' ') - } - quote(arg, &buf) - } - return buf.String() -} - -var globalTestID string - -// TestDirectory creates a new temporary directory and returns its path. -// The contents of directory at path `templateDir` is copied into the -// new directory. 
-func TestDirectory(templateDir string) (dir string, err error) { - if globalTestID == "" { - globalTestID = common.RandomString()[:4] - } - prefix := fmt.Sprintf("docker-test%s-%s-", globalTestID, GetCallerName(2)) - if prefix == "" { - prefix = "docker-test-" - } - dir, err = ioutil.TempDir("", prefix) - if err = os.Remove(dir); err != nil { - return - } - if templateDir != "" { - if err = archive.CopyWithTar(templateDir, dir); err != nil { - return - } - } - return -} - -// GetCallerName introspects the call stack and returns the name of the -// function `depth` levels down in the stack. -func GetCallerName(depth int) string { - // Use the caller function name as a prefix. - // This helps trace temp directories back to their test. - pc, _, _, _ := runtime.Caller(depth + 1) - callerLongName := runtime.FuncForPC(pc).Name() - parts := strings.Split(callerLongName, ".") - callerShortName := parts[len(parts)-1] - return callerShortName -} - -func CopyFile(src, dst string) (int64, error) { - if src == dst { - return 0, nil - } - sf, err := os.Open(src) - if err != nil { - return 0, err - } - defer sf.Close() - if err := os.Remove(dst); err != nil && !os.IsNotExist(err) { - return 0, err - } - df, err := os.Create(dst) - if err != nil { - return 0, err - } - defer df.Close() - return io.Copy(df, sf) -} - -// ReplaceOrAppendValues returns the defaults with the overrides either -// replaced by env key or appended to the list -func ReplaceOrAppendEnvValues(defaults, overrides []string) []string { - cache := make(map[string]int, len(defaults)) - for i, e := range defaults { - parts := strings.SplitN(e, "=", 2) - cache[parts[0]] = i - } - - for _, value := range overrides { - // Values w/o = means they want this env to be removed/unset. - if !strings.Contains(value, "=") { - if i, exists := cache[value]; exists { - defaults[i] = "" // Used to indicate it should be removed - } - continue - } - - // Just do a normal set/update - parts := strings.SplitN(value, "=", 2) - if i, exists := cache[parts[0]]; exists { - defaults[i] = value - } else { - defaults = append(defaults, value) - } - } - - // Now remove all entries that we want to "unset" - for i := 0; i < len(defaults); i++ { - if defaults[i] == "" { - defaults = append(defaults[:i], defaults[i+1:]...) - i-- - } - } - - return defaults -} - -func DoesEnvExist(name string) bool { - for _, entry := range os.Environ() { - parts := strings.SplitN(entry, "=", 2) - if parts[0] == name { - return true - } - } - return false -} - -// ReadSymlinkedDirectory returns the target directory of a symlink. -// The target of the symbolic link may not be a file. 
-func ReadSymlinkedDirectory(path string) (string, error) { - var realPath string - var err error - if realPath, err = filepath.Abs(path); err != nil { - return "", fmt.Errorf("unable to get absolute path for %s: %s", path, err) - } - if realPath, err = filepath.EvalSymlinks(realPath); err != nil { - return "", fmt.Errorf("failed to canonicalise path for %s: %s", path, err) - } - realPathInfo, err := os.Stat(realPath) - if err != nil { - return "", fmt.Errorf("failed to stat target '%s' of '%s': %s", realPath, path, err) - } - if !realPathInfo.Mode().IsDir() { - return "", fmt.Errorf("canonical path points to a file '%s'", realPath) - } - return realPath, nil -} - -// ValidateContextDirectory checks if all the contents of the directory -// can be read and returns an error if some files can't be read -// symlinks which point to non-existing files don't trigger an error -func ValidateContextDirectory(srcPath string, excludes []string) error { - return filepath.Walk(filepath.Join(srcPath, "."), func(filePath string, f os.FileInfo, err error) error { - // skip this directory/file if it's not in the path, it won't get added to the context - if relFilePath, err := filepath.Rel(srcPath, filePath); err != nil { - return err - } else if skip, err := fileutils.Matches(relFilePath, excludes); err != nil { - return err - } else if skip { - if f.IsDir() { - return filepath.SkipDir - } - return nil - } - - if err != nil { - if os.IsPermission(err) { - return fmt.Errorf("can't stat '%s'", filePath) - } - if os.IsNotExist(err) { - return nil - } - return err - } - - // skip checking if symlinks point to non-existing files, such symlinks can be useful - // also skip named pipes, because they hanging on open - if f.Mode()&(os.ModeSymlink|os.ModeNamedPipe) != 0 { - return nil - } - - if !f.IsDir() { - currentFile, err := os.Open(filePath) - if err != nil && os.IsPermission(err) { - return fmt.Errorf("no permission to read from '%s'", filePath) - } - currentFile.Close() - } - return nil - }) -} - -func StringsContainsNoCase(slice []string, s string) bool { - for _, ss := range slice { - if strings.ToLower(s) == strings.ToLower(ss) { - return true - } - } - return false -} - -// Reads a .dockerignore file and returns the list of file patterns -// to ignore. Note this will trim whitespace from each line as well -// as use GO's "clean" func to get the shortest/cleanest path for each. -func ReadDockerIgnore(path string) ([]string, error) { - // Note that a missing .dockerignore file isn't treated as an error - reader, err := os.Open(path) - if err != nil { - if !os.IsNotExist(err) { - return nil, fmt.Errorf("Error reading '%s': %v", path, err) - } - return nil, nil - } - defer reader.Close() - - scanner := bufio.NewScanner(reader) - var excludes []string - - for scanner.Scan() { - pattern := strings.TrimSpace(scanner.Text()) - if pattern == "" { - continue - } - pattern = filepath.Clean(pattern) - excludes = append(excludes, pattern) - } - if err = scanner.Err(); err != nil { - return nil, fmt.Errorf("Error reading '%s': %v", path, err) - } - return excludes, nil -} - -// Wrap a concrete io.Writer and hold a count of the number -// of bytes written to the writer during a "session". 
-// This can be convenient when write return is masked -// (e.g., json.Encoder.Encode()) -type WriteCounter struct { - Count int64 - Writer io.Writer -} - -func NewWriteCounter(w io.Writer) *WriteCounter { - return &WriteCounter{ - Writer: w, - } -} - -func (wc *WriteCounter) Write(p []byte) (count int, err error) { - count, err = wc.Writer.Write(p) - wc.Count += int64(count) - return -} - -// ImageReference combines `repo` and `ref` and returns a string representing -// the combination. If `ref` is a digest (meaning it's of the form -// :, the returned string is @. Otherwise, -// ref is assumed to be a tag, and the returned string is :. -func ImageReference(repo, ref string) string { - if DigestReference(ref) { - return repo + "@" + ref - } - return repo + ":" + ref -} - -// DigestReference returns true if ref is a digest reference; i.e. if it -// is of the form :. -func DigestReference(ref string) bool { - return strings.Contains(ref, ":") -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/utils/utils_daemon.go b/Godeps/_workspace/src/github.com/docker/docker/utils/utils_daemon.go deleted file mode 100644 index 3f8f4d56..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/utils/utils_daemon.go +++ /dev/null @@ -1,18 +0,0 @@ -// +build daemon - -package utils - -import ( - "github.com/docker/docker/pkg/system" - "os" -) - -// IsFileOwner checks whether the current user is the owner of the given file. -func IsFileOwner(f string) bool { - if fileInfo, err := system.Stat(f); err == nil && fileInfo != nil { - if int(fileInfo.Uid()) == os.Getuid() { - return true - } - } - return false -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/utils/utils_daemon_test.go b/Godeps/_workspace/src/github.com/docker/docker/utils/utils_daemon_test.go deleted file mode 100644 index e8361489..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/utils/utils_daemon_test.go +++ /dev/null @@ -1,26 +0,0 @@ -package utils - -import ( - "os" - "path" - "testing" -) - -func TestIsFileOwner(t *testing.T) { - var err error - var file *os.File - - if file, err = os.Create(path.Join(os.TempDir(), "testIsFileOwner")); err != nil { - t.Fatalf("failed to create file: %s", err) - } - file.Close() - - if ok := IsFileOwner(path.Join(os.TempDir(), "testIsFileOwner")); !ok { - t.Fatalf("User should be owner of file") - } - - if err = os.Remove(path.Join(os.TempDir(), "testIsFileOwner")); err != nil { - t.Fatalf("failed to remove file: %s", err) - } - -} diff --git a/Godeps/_workspace/src/github.com/docker/docker/utils/utils_test.go b/Godeps/_workspace/src/github.com/docker/docker/utils/utils_test.go deleted file mode 100644 index 94303a0e..00000000 --- a/Godeps/_workspace/src/github.com/docker/docker/utils/utils_test.go +++ /dev/null @@ -1,154 +0,0 @@ -package utils - -import ( - "bytes" - "os" - "strings" - "testing" -) - -func TestReplaceAndAppendEnvVars(t *testing.T) { - var ( - d = []string{"HOME=/"} - o = []string{"HOME=/root", "TERM=xterm"} - ) - - env := ReplaceOrAppendEnvValues(d, o) - if len(env) != 2 { - t.Fatalf("expected len of 2 got %d", len(env)) - } - if env[0] != "HOME=/root" { - t.Fatalf("expected HOME=/root got '%s'", env[0]) - } - if env[1] != "TERM=xterm" { - t.Fatalf("expected TERM=xterm got '%s'", env[1]) - } -} - -// Reading a symlink to a directory must return the directory -func TestReadSymlinkedDirectoryExistingDirectory(t *testing.T) { - var err error - if err = os.Mkdir("/tmp/testReadSymlinkToExistingDirectory", 0777); err != nil { - t.Errorf("failed to 
create directory: %s", err) - } - - if err = os.Symlink("/tmp/testReadSymlinkToExistingDirectory", "/tmp/dirLinkTest"); err != nil { - t.Errorf("failed to create symlink: %s", err) - } - - var path string - if path, err = ReadSymlinkedDirectory("/tmp/dirLinkTest"); err != nil { - t.Fatalf("failed to read symlink to directory: %s", err) - } - - if path != "/tmp/testReadSymlinkToExistingDirectory" { - t.Fatalf("symlink returned unexpected directory: %s", path) - } - - if err = os.Remove("/tmp/testReadSymlinkToExistingDirectory"); err != nil { - t.Errorf("failed to remove temporary directory: %s", err) - } - - if err = os.Remove("/tmp/dirLinkTest"); err != nil { - t.Errorf("failed to remove symlink: %s", err) - } -} - -// Reading a non-existing symlink must fail -func TestReadSymlinkedDirectoryNonExistingSymlink(t *testing.T) { - var path string - var err error - if path, err = ReadSymlinkedDirectory("/tmp/test/foo/Non/ExistingPath"); err == nil { - t.Fatalf("error expected for non-existing symlink") - } - - if path != "" { - t.Fatalf("expected empty path, but '%s' was returned", path) - } -} - -// Reading a symlink to a file must fail -func TestReadSymlinkedDirectoryToFile(t *testing.T) { - var err error - var file *os.File - - if file, err = os.Create("/tmp/testReadSymlinkToFile"); err != nil { - t.Fatalf("failed to create file: %s", err) - } - - file.Close() - - if err = os.Symlink("/tmp/testReadSymlinkToFile", "/tmp/fileLinkTest"); err != nil { - t.Errorf("failed to create symlink: %s", err) - } - - var path string - if path, err = ReadSymlinkedDirectory("/tmp/fileLinkTest"); err == nil { - t.Fatalf("ReadSymlinkedDirectory on a symlink to a file should've failed") - } - - if path != "" { - t.Fatalf("path should've been empty: %s", path) - } - - if err = os.Remove("/tmp/testReadSymlinkToFile"); err != nil { - t.Errorf("failed to remove file: %s", err) - } - - if err = os.Remove("/tmp/fileLinkTest"); err != nil { - t.Errorf("failed to remove symlink: %s", err) - } -} - -func TestWriteCounter(t *testing.T) { - dummy1 := "This is a dummy string." - dummy2 := "This is another dummy string." - totalLength := int64(len(dummy1) + len(dummy2)) - - reader1 := strings.NewReader(dummy1) - reader2 := strings.NewReader(dummy2) - - var buffer bytes.Buffer - wc := NewWriteCounter(&buffer) - - reader1.WriteTo(wc) - reader2.WriteTo(wc) - - if wc.Count != totalLength { - t.Errorf("Wrong count: %d vs. 
%d", wc.Count, totalLength) - } - - if buffer.String() != dummy1+dummy2 { - t.Error("Wrong message written") - } -} - -func TestImageReference(t *testing.T) { - tests := []struct { - repo string - ref string - expected string - }{ - {"repo", "tag", "repo:tag"}, - {"repo", "sha256:c100b11b25d0cacd52c14e0e7bf525e1a4c0e6aec8827ae007055545909d1a64", "repo@sha256:c100b11b25d0cacd52c14e0e7bf525e1a4c0e6aec8827ae007055545909d1a64"}, - } - - for i, test := range tests { - actual := ImageReference(test.repo, test.ref) - if test.expected != actual { - t.Errorf("%d: expected %q, got %q", i, test.expected, actual) - } - } -} - -func TestDigestReference(t *testing.T) { - input := "sha256:c100b11b25d0cacd52c14e0e7bf525e1a4c0e6aec8827ae007055545909d1a64" - if !DigestReference(input) { - t.Errorf("Expected DigestReference=true for input %q", input) - } - - input = "latest" - if DigestReference(input) { - t.Errorf("Unexpected DigestReference=true for input %q", input) - } -} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/CONTRIBUTING.md b/Godeps/_workspace/src/github.com/docker/libtrust/CONTRIBUTING.md deleted file mode 100644 index 05be0f8a..00000000 --- a/Godeps/_workspace/src/github.com/docker/libtrust/CONTRIBUTING.md +++ /dev/null @@ -1,13 +0,0 @@ -# Contributing to libtrust - -Want to hack on libtrust? Awesome! Here are instructions to get you -started. - -libtrust is a part of the [Docker](https://www.docker.com) project, and follows -the same rules and principles. If you're already familiar with the way -Docker does things, you'll feel right at home. - -Otherwise, go read -[Docker's contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md). - -Happy hacking! diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/LICENSE b/Godeps/_workspace/src/github.com/docker/libtrust/LICENSE deleted file mode 100644 index 27448585..00000000 --- a/Godeps/_workspace/src/github.com/docker/libtrust/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2014 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/MAINTAINERS b/Godeps/_workspace/src/github.com/docker/libtrust/MAINTAINERS deleted file mode 100644 index 9768175f..00000000 --- a/Godeps/_workspace/src/github.com/docker/libtrust/MAINTAINERS +++ /dev/null @@ -1,3 +0,0 @@ -Solomon Hykes -Josh Hawn (github: jlhawn) -Derek McGowan (github: dmcgowan) diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/README.md b/Godeps/_workspace/src/github.com/docker/libtrust/README.md deleted file mode 100644 index 8e7db381..00000000 --- a/Godeps/_workspace/src/github.com/docker/libtrust/README.md +++ /dev/null @@ -1,18 +0,0 @@ -# libtrust - -Libtrust is library for managing authentication and authorization using public key cryptography. - -Authentication is handled using the identity attached to the public key. -Libtrust provides multiple methods to prove possession of the private key associated with an identity. - - TLS x509 certificates - - Signature verification - - Key Challenge - -Authorization and access control is managed through a distributed trust graph. -Trust servers are used as the authorities of the trust graph and allow caching portions of the graph for faster access. - -## Copyright and license - -Code and documentation copyright 2014 Docker, inc. Code released under the Apache 2.0 license. -Docs released under Creative commons. 
- diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/certificates.go b/Godeps/_workspace/src/github.com/docker/libtrust/certificates.go deleted file mode 100644 index 3dcca33c..00000000 --- a/Godeps/_workspace/src/github.com/docker/libtrust/certificates.go +++ /dev/null @@ -1,175 +0,0 @@ -package libtrust - -import ( - "crypto/rand" - "crypto/x509" - "crypto/x509/pkix" - "encoding/pem" - "fmt" - "io/ioutil" - "math/big" - "net" - "time" -) - -type certTemplateInfo struct { - commonName string - domains []string - ipAddresses []net.IP - isCA bool - clientAuth bool - serverAuth bool -} - -func generateCertTemplate(info *certTemplateInfo) *x509.Certificate { - // Generate a certificate template which is valid from the past week to - // 10 years from now. The usage of the certificate depends on the - // specified fields in the given certTempInfo object. - var ( - keyUsage x509.KeyUsage - extKeyUsage []x509.ExtKeyUsage - ) - - if info.isCA { - keyUsage = x509.KeyUsageCertSign - } - - if info.clientAuth { - extKeyUsage = append(extKeyUsage, x509.ExtKeyUsageClientAuth) - } - - if info.serverAuth { - extKeyUsage = append(extKeyUsage, x509.ExtKeyUsageServerAuth) - } - - return &x509.Certificate{ - SerialNumber: big.NewInt(0), - Subject: pkix.Name{ - CommonName: info.commonName, - }, - NotBefore: time.Now().Add(-time.Hour * 24 * 7), - NotAfter: time.Now().Add(time.Hour * 24 * 365 * 10), - DNSNames: info.domains, - IPAddresses: info.ipAddresses, - IsCA: info.isCA, - KeyUsage: keyUsage, - ExtKeyUsage: extKeyUsage, - BasicConstraintsValid: info.isCA, - } -} - -func generateCert(pub PublicKey, priv PrivateKey, subInfo, issInfo *certTemplateInfo) (cert *x509.Certificate, err error) { - pubCertTemplate := generateCertTemplate(subInfo) - privCertTemplate := generateCertTemplate(issInfo) - - certDER, err := x509.CreateCertificate( - rand.Reader, pubCertTemplate, privCertTemplate, - pub.CryptoPublicKey(), priv.CryptoPrivateKey(), - ) - if err != nil { - return nil, fmt.Errorf("failed to create certificate: %s", err) - } - - cert, err = x509.ParseCertificate(certDER) - if err != nil { - return nil, fmt.Errorf("failed to parse certificate: %s", err) - } - - return -} - -// GenerateSelfSignedServerCert creates a self-signed certificate for the -// given key which is to be used for TLS servers with the given domains and -// IP addresses. -func GenerateSelfSignedServerCert(key PrivateKey, domains []string, ipAddresses []net.IP) (*x509.Certificate, error) { - info := &certTemplateInfo{ - commonName: key.KeyID(), - domains: domains, - ipAddresses: ipAddresses, - serverAuth: true, - } - - return generateCert(key.PublicKey(), key, info, info) -} - -// GenerateSelfSignedClientCert creates a self-signed certificate for the -// given key which is to be used for TLS clients. -func GenerateSelfSignedClientCert(key PrivateKey) (*x509.Certificate, error) { - info := &certTemplateInfo{ - commonName: key.KeyID(), - clientAuth: true, - } - - return generateCert(key.PublicKey(), key, info, info) -} - -// GenerateCACert creates a certificate which can be used as a trusted -// certificate authority. -func GenerateCACert(signer PrivateKey, trustedKey PublicKey) (*x509.Certificate, error) { - subjectInfo := &certTemplateInfo{ - commonName: trustedKey.KeyID(), - isCA: true, - } - issuerInfo := &certTemplateInfo{ - commonName: signer.KeyID(), - } - - return generateCert(trustedKey, signer, subjectInfo, issuerInfo) -} - -// GenerateCACertPool creates a certificate authority pool to be used for a -// TLS configuration. 
Any self-signed certificates issued by the specified -// trusted keys will be verified during a TLS handshake -func GenerateCACertPool(signer PrivateKey, trustedKeys []PublicKey) (*x509.CertPool, error) { - certPool := x509.NewCertPool() - - for _, trustedKey := range trustedKeys { - cert, err := GenerateCACert(signer, trustedKey) - if err != nil { - return nil, fmt.Errorf("failed to generate CA certificate: %s", err) - } - - certPool.AddCert(cert) - } - - return certPool, nil -} - -// LoadCertificateBundle loads certificates from the given file. The file should be pem encoded -// containing one or more certificates. The expected pem type is "CERTIFICATE". -func LoadCertificateBundle(filename string) ([]*x509.Certificate, error) { - b, err := ioutil.ReadFile(filename) - if err != nil { - return nil, err - } - certificates := []*x509.Certificate{} - var block *pem.Block - block, b = pem.Decode(b) - for ; block != nil; block, b = pem.Decode(b) { - if block.Type == "CERTIFICATE" { - cert, err := x509.ParseCertificate(block.Bytes) - if err != nil { - return nil, err - } - certificates = append(certificates, cert) - } else { - return nil, fmt.Errorf("invalid pem block type: %s", block.Type) - } - } - - return certificates, nil -} - -// LoadCertificatePool loads a CA pool from the given file. The file should be pem encoded -// containing one or more certificates. The expected pem type is "CERTIFICATE". -func LoadCertificatePool(filename string) (*x509.CertPool, error) { - certs, err := LoadCertificateBundle(filename) - if err != nil { - return nil, err - } - pool := x509.NewCertPool() - for _, cert := range certs { - pool.AddCert(cert) - } - return pool, nil -} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/certificates_test.go b/Godeps/_workspace/src/github.com/docker/libtrust/certificates_test.go deleted file mode 100644 index c111f353..00000000 --- a/Godeps/_workspace/src/github.com/docker/libtrust/certificates_test.go +++ /dev/null @@ -1,111 +0,0 @@ -package libtrust - -import ( - "encoding/pem" - "io/ioutil" - "net" - "os" - "path" - "testing" -) - -func TestGenerateCertificates(t *testing.T) { - key, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatal(err) - } - - _, err = GenerateSelfSignedServerCert(key, []string{"localhost"}, []net.IP{net.ParseIP("127.0.0.1")}) - if err != nil { - t.Fatal(err) - } - - _, err = GenerateSelfSignedClientCert(key) - if err != nil { - t.Fatal(err) - } -} - -func TestGenerateCACertPool(t *testing.T) { - key, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatal(err) - } - - caKey1, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatal(err) - } - - caKey2, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatal(err) - } - - _, err = GenerateCACertPool(key, []PublicKey{caKey1.PublicKey(), caKey2.PublicKey()}) - if err != nil { - t.Fatal(err) - } -} - -func TestLoadCertificates(t *testing.T) { - key, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatal(err) - } - - caKey1, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatal(err) - } - caKey2, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatal(err) - } - - cert1, err := GenerateCACert(caKey1, key) - if err != nil { - t.Fatal(err) - } - cert2, err := GenerateCACert(caKey2, key) - if err != nil { - t.Fatal(err) - } - - d, err := ioutil.TempDir("/tmp", "cert-test") - if err != nil { - t.Fatal(err) - } - caFile := path.Join(d, "ca.pem") - f, err := os.OpenFile(caFile, os.O_CREATE|os.O_WRONLY, 0644) - if err != nil { - t.Fatal(err) - } - - 
err = pem.Encode(f, &pem.Block{Type: "CERTIFICATE", Bytes: cert1.Raw}) - if err != nil { - t.Fatal(err) - } - err = pem.Encode(f, &pem.Block{Type: "CERTIFICATE", Bytes: cert2.Raw}) - if err != nil { - t.Fatal(err) - } - f.Close() - - certs, err := LoadCertificateBundle(caFile) - if err != nil { - t.Fatal(err) - } - if len(certs) != 2 { - t.Fatalf("Wrong number of certs received, expected: %d, received %d", 2, len(certs)) - } - - pool, err := LoadCertificatePool(caFile) - if err != nil { - t.Fatal(err) - } - - if len(pool.Subjects()) != 2 { - t.Fatalf("Invalid certificate pool") - } -} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/doc.go b/Godeps/_workspace/src/github.com/docker/libtrust/doc.go deleted file mode 100644 index ec5d2159..00000000 --- a/Godeps/_workspace/src/github.com/docker/libtrust/doc.go +++ /dev/null @@ -1,9 +0,0 @@ -/* -Package libtrust provides an interface for managing authentication and -authorization using public key cryptography. Authentication is handled -using the identity attached to the public key and verified through TLS -x509 certificates, a key challenge, or signature. Authorization and -access control is managed through a trust graph distributed between -both remote trust servers and locally cached and managed data. -*/ -package libtrust diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/ec_key.go b/Godeps/_workspace/src/github.com/docker/libtrust/ec_key.go deleted file mode 100644 index 00bbe4b3..00000000 --- a/Godeps/_workspace/src/github.com/docker/libtrust/ec_key.go +++ /dev/null @@ -1,428 +0,0 @@ -package libtrust - -import ( - "crypto" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/x509" - "encoding/json" - "encoding/pem" - "errors" - "fmt" - "io" - "math/big" -) - -/* - * EC DSA PUBLIC KEY - */ - -// ecPublicKey implements a libtrust.PublicKey using elliptic curve digital -// signature algorithms. -type ecPublicKey struct { - *ecdsa.PublicKey - curveName string - signatureAlgorithm *signatureAlgorithm - extended map[string]interface{} -} - -func fromECPublicKey(cryptoPublicKey *ecdsa.PublicKey) (*ecPublicKey, error) { - curve := cryptoPublicKey.Curve - - switch { - case curve == elliptic.P256(): - return &ecPublicKey{cryptoPublicKey, "P-256", es256, map[string]interface{}{}}, nil - case curve == elliptic.P384(): - return &ecPublicKey{cryptoPublicKey, "P-384", es384, map[string]interface{}{}}, nil - case curve == elliptic.P521(): - return &ecPublicKey{cryptoPublicKey, "P-521", es512, map[string]interface{}{}}, nil - default: - return nil, errors.New("unsupported elliptic curve") - } -} - -// KeyType returns the key type for elliptic curve keys, i.e., "EC". -func (k *ecPublicKey) KeyType() string { - return "EC" -} - -// CurveName returns the elliptic curve identifier. -// Possible values are "P-256", "P-384", and "P-521". -func (k *ecPublicKey) CurveName() string { - return k.curveName -} - -// KeyID returns a distinct identifier which is unique to this Public Key. -func (k *ecPublicKey) KeyID() string { - return keyIDFromCryptoKey(k) -} - -func (k *ecPublicKey) String() string { - return fmt.Sprintf("EC Public Key <%s>", k.KeyID()) -} - -// Verify verifyies the signature of the data in the io.Reader using this -// PublicKey. The alg parameter should identify the digital signature -// algorithm which was used to produce the signature and should be supported -// by this public key. Returns a nil error if the signature is valid. 
-func (k *ecPublicKey) Verify(data io.Reader, alg string, signature []byte) error { - // For EC keys there is only one supported signature algorithm depending - // on the curve parameters. - if k.signatureAlgorithm.HeaderParam() != alg { - return fmt.Errorf("unable to verify signature: EC Public Key with curve %q does not support signature algorithm %q", k.curveName, alg) - } - - // signature is the concatenation of (r, s), base64Url encoded. - sigLength := len(signature) - expectedOctetLength := 2 * ((k.Params().BitSize + 7) >> 3) - if sigLength != expectedOctetLength { - return fmt.Errorf("signature length is %d octets long, should be %d", sigLength, expectedOctetLength) - } - - rBytes, sBytes := signature[:sigLength/2], signature[sigLength/2:] - r := new(big.Int).SetBytes(rBytes) - s := new(big.Int).SetBytes(sBytes) - - hasher := k.signatureAlgorithm.HashID().New() - _, err := io.Copy(hasher, data) - if err != nil { - return fmt.Errorf("error reading data to sign: %s", err) - } - hash := hasher.Sum(nil) - - if !ecdsa.Verify(k.PublicKey, hash, r, s) { - return errors.New("invalid signature") - } - - return nil -} - -// CryptoPublicKey returns the internal object which can be used as a -// crypto.PublicKey for use with other standard library operations. The type -// is either *rsa.PublicKey or *ecdsa.PublicKey -func (k *ecPublicKey) CryptoPublicKey() crypto.PublicKey { - return k.PublicKey -} - -func (k *ecPublicKey) toMap() map[string]interface{} { - jwk := make(map[string]interface{}) - for k, v := range k.extended { - jwk[k] = v - } - jwk["kty"] = k.KeyType() - jwk["kid"] = k.KeyID() - jwk["crv"] = k.CurveName() - - xBytes := k.X.Bytes() - yBytes := k.Y.Bytes() - octetLength := (k.Params().BitSize + 7) >> 3 - // MUST include leading zeros in the output so that x, y are each - // *octetLength* bytes long. - xBuf := make([]byte, octetLength-len(xBytes), octetLength) - yBuf := make([]byte, octetLength-len(yBytes), octetLength) - xBuf = append(xBuf, xBytes...) - yBuf = append(yBuf, yBytes...) - - jwk["x"] = joseBase64UrlEncode(xBuf) - jwk["y"] = joseBase64UrlEncode(yBuf) - - return jwk -} - -// MarshalJSON serializes this Public Key using the JWK JSON serialization format for -// elliptic curve keys. -func (k *ecPublicKey) MarshalJSON() (data []byte, err error) { - return json.Marshal(k.toMap()) -} - -// PEMBlock serializes this Public Key to DER-encoded PKIX format. -func (k *ecPublicKey) PEMBlock() (*pem.Block, error) { - derBytes, err := x509.MarshalPKIXPublicKey(k.PublicKey) - if err != nil { - return nil, fmt.Errorf("unable to serialize EC PublicKey to DER-encoded PKIX format: %s", err) - } - k.extended["kid"] = k.KeyID() // For display purposes. - return createPemBlock("PUBLIC KEY", derBytes, k.extended) -} - -func (k *ecPublicKey) AddExtendedField(field string, value interface{}) { - k.extended[field] = value -} - -func (k *ecPublicKey) GetExtendedField(field string) interface{} { - v, ok := k.extended[field] - if !ok { - return nil - } - return v -} - -func ecPublicKeyFromMap(jwk map[string]interface{}) (*ecPublicKey, error) { - // JWK key type (kty) has already been determined to be "EC". - // Need to extract 'crv', 'x', 'y', and 'kid' and check for - // consistency. - - // Get the curve identifier value. 
- crv, err := stringFromMap(jwk, "crv") - if err != nil { - return nil, fmt.Errorf("JWK EC Public Key curve identifier: %s", err) - } - - var ( - curve elliptic.Curve - sigAlg *signatureAlgorithm - ) - - switch { - case crv == "P-256": - curve = elliptic.P256() - sigAlg = es256 - case crv == "P-384": - curve = elliptic.P384() - sigAlg = es384 - case crv == "P-521": - curve = elliptic.P521() - sigAlg = es512 - default: - return nil, fmt.Errorf("JWK EC Public Key curve identifier not supported: %q\n", crv) - } - - // Get the X and Y coordinates for the public key point. - xB64Url, err := stringFromMap(jwk, "x") - if err != nil { - return nil, fmt.Errorf("JWK EC Public Key x-coordinate: %s", err) - } - x, err := parseECCoordinate(xB64Url, curve) - if err != nil { - return nil, fmt.Errorf("JWK EC Public Key x-coordinate: %s", err) - } - - yB64Url, err := stringFromMap(jwk, "y") - if err != nil { - return nil, fmt.Errorf("JWK EC Public Key y-coordinate: %s", err) - } - y, err := parseECCoordinate(yB64Url, curve) - if err != nil { - return nil, fmt.Errorf("JWK EC Public Key y-coordinate: %s", err) - } - - key := &ecPublicKey{ - PublicKey: &ecdsa.PublicKey{Curve: curve, X: x, Y: y}, - curveName: crv, signatureAlgorithm: sigAlg, - } - - // Key ID is optional too, but if it exists, it should match the key. - _, ok := jwk["kid"] - if ok { - kid, err := stringFromMap(jwk, "kid") - if err != nil { - return nil, fmt.Errorf("JWK EC Public Key ID: %s", err) - } - if kid != key.KeyID() { - return nil, fmt.Errorf("JWK EC Public Key ID does not match: %s", kid) - } - } - - key.extended = jwk - - return key, nil -} - -/* - * EC DSA PRIVATE KEY - */ - -// ecPrivateKey implements a JWK Private Key using elliptic curve digital signature -// algorithms. -type ecPrivateKey struct { - ecPublicKey - *ecdsa.PrivateKey -} - -func fromECPrivateKey(cryptoPrivateKey *ecdsa.PrivateKey) (*ecPrivateKey, error) { - publicKey, err := fromECPublicKey(&cryptoPrivateKey.PublicKey) - if err != nil { - return nil, err - } - - return &ecPrivateKey{*publicKey, cryptoPrivateKey}, nil -} - -// PublicKey returns the Public Key data associated with this Private Key. -func (k *ecPrivateKey) PublicKey() PublicKey { - return &k.ecPublicKey -} - -func (k *ecPrivateKey) String() string { - return fmt.Sprintf("EC Private Key <%s>", k.KeyID()) -} - -// Sign signs the data read from the io.Reader using a signature algorithm supported -// by the elliptic curve private key. If the specified hashing algorithm is -// supported by this key, that hash function is used to generate the signature -// otherwise the the default hashing algorithm for this key is used. Returns -// the signature and the name of the JWK signature algorithm used, e.g., -// "ES256", "ES384", "ES512". -func (k *ecPrivateKey) Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) { - // Generate a signature of the data using the internal alg. - // The given hashId is only a suggestion, and since EC keys only support - // on signature/hash algorithm given the curve name, we disregard it for - // the elliptic curve JWK signature implementation. 
- hasher := k.signatureAlgorithm.HashID().New() - _, err = io.Copy(hasher, data) - if err != nil { - return nil, "", fmt.Errorf("error reading data to sign: %s", err) - } - hash := hasher.Sum(nil) - - r, s, err := ecdsa.Sign(rand.Reader, k.PrivateKey, hash) - if err != nil { - return nil, "", fmt.Errorf("error producing signature: %s", err) - } - rBytes, sBytes := r.Bytes(), s.Bytes() - octetLength := (k.ecPublicKey.Params().BitSize + 7) >> 3 - // MUST include leading zeros in the output - rBuf := make([]byte, octetLength-len(rBytes), octetLength) - sBuf := make([]byte, octetLength-len(sBytes), octetLength) - - rBuf = append(rBuf, rBytes...) - sBuf = append(sBuf, sBytes...) - - signature = append(rBuf, sBuf...) - alg = k.signatureAlgorithm.HeaderParam() - - return -} - -// CryptoPrivateKey returns the internal object which can be used as a -// crypto.PublicKey for use with other standard library operations. The type -// is either *rsa.PublicKey or *ecdsa.PublicKey -func (k *ecPrivateKey) CryptoPrivateKey() crypto.PrivateKey { - return k.PrivateKey -} - -func (k *ecPrivateKey) toMap() map[string]interface{} { - jwk := k.ecPublicKey.toMap() - - dBytes := k.D.Bytes() - // The length of this octet string MUST be ceiling(log-base-2(n)/8) - // octets (where n is the order of the curve). This is because the private - // key d must be in the interval [1, n-1] so the bitlength of d should be - // no larger than the bitlength of n-1. The easiest way to find the octet - // length is to take bitlength(n-1), add 7 to force a carry, and shift this - // bit sequence right by 3, which is essentially dividing by 8 and adding - // 1 if there is any remainder. Thus, the private key value d should be - // output to (bitlength(n-1)+7)>>3 octets. - n := k.ecPublicKey.Params().N - octetLength := (new(big.Int).Sub(n, big.NewInt(1)).BitLen() + 7) >> 3 - // Create a buffer with the necessary zero-padding. - dBuf := make([]byte, octetLength-len(dBytes), octetLength) - dBuf = append(dBuf, dBytes...) - - jwk["d"] = joseBase64UrlEncode(dBuf) - - return jwk -} - -// MarshalJSON serializes this Private Key using the JWK JSON serialization format for -// elliptic curve keys. -func (k *ecPrivateKey) MarshalJSON() (data []byte, err error) { - return json.Marshal(k.toMap()) -} - -// PEMBlock serializes this Private Key to DER-encoded PKIX format. -func (k *ecPrivateKey) PEMBlock() (*pem.Block, error) { - derBytes, err := x509.MarshalECPrivateKey(k.PrivateKey) - if err != nil { - return nil, fmt.Errorf("unable to serialize EC PrivateKey to DER-encoded PKIX format: %s", err) - } - k.extended["keyID"] = k.KeyID() // For display purposes. - return createPemBlock("EC PRIVATE KEY", derBytes, k.extended) -} - -func ecPrivateKeyFromMap(jwk map[string]interface{}) (*ecPrivateKey, error) { - dB64Url, err := stringFromMap(jwk, "d") - if err != nil { - return nil, fmt.Errorf("JWK EC Private Key: %s", err) - } - - // JWK key type (kty) has already been determined to be "EC". - // Need to extract the public key information, then extract the private - // key value 'd'. - publicKey, err := ecPublicKeyFromMap(jwk) - if err != nil { - return nil, err - } - - d, err := parseECPrivateParam(dB64Url, publicKey.Curve) - if err != nil { - return nil, fmt.Errorf("JWK EC Private Key d-param: %s", err) - } - - key := &ecPrivateKey{ - ecPublicKey: *publicKey, - PrivateKey: &ecdsa.PrivateKey{ - PublicKey: *publicKey.PublicKey, - D: d, - }, - } - - return key, nil -} - -/* - * Key Generation Functions. 
- */ - -func generateECPrivateKey(curve elliptic.Curve) (k *ecPrivateKey, err error) { - k = new(ecPrivateKey) - k.PrivateKey, err = ecdsa.GenerateKey(curve, rand.Reader) - if err != nil { - return nil, err - } - - k.ecPublicKey.PublicKey = &k.PrivateKey.PublicKey - k.extended = make(map[string]interface{}) - - return -} - -// GenerateECP256PrivateKey generates a key pair using elliptic curve P-256. -func GenerateECP256PrivateKey() (PrivateKey, error) { - k, err := generateECPrivateKey(elliptic.P256()) - if err != nil { - return nil, fmt.Errorf("error generating EC P-256 key: %s", err) - } - - k.curveName = "P-256" - k.signatureAlgorithm = es256 - - return k, nil -} - -// GenerateECP384PrivateKey generates a key pair using elliptic curve P-384. -func GenerateECP384PrivateKey() (PrivateKey, error) { - k, err := generateECPrivateKey(elliptic.P384()) - if err != nil { - return nil, fmt.Errorf("error generating EC P-384 key: %s", err) - } - - k.curveName = "P-384" - k.signatureAlgorithm = es384 - - return k, nil -} - -// GenerateECP521PrivateKey generates aß key pair using elliptic curve P-521. -func GenerateECP521PrivateKey() (PrivateKey, error) { - k, err := generateECPrivateKey(elliptic.P521()) - if err != nil { - return nil, fmt.Errorf("error generating EC P-521 key: %s", err) - } - - k.curveName = "P-521" - k.signatureAlgorithm = es512 - - return k, nil -} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/ec_key_test.go b/Godeps/_workspace/src/github.com/docker/libtrust/ec_key_test.go deleted file mode 100644 index 26ac3814..00000000 --- a/Godeps/_workspace/src/github.com/docker/libtrust/ec_key_test.go +++ /dev/null @@ -1,157 +0,0 @@ -package libtrust - -import ( - "bytes" - "encoding/json" - "testing" -) - -func generateECTestKeys(t *testing.T) []PrivateKey { - p256Key, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatal(err) - } - - p384Key, err := GenerateECP384PrivateKey() - if err != nil { - t.Fatal(err) - } - - p521Key, err := GenerateECP521PrivateKey() - if err != nil { - t.Fatal(err) - } - - return []PrivateKey{p256Key, p384Key, p521Key} -} - -func TestECKeys(t *testing.T) { - ecKeys := generateECTestKeys(t) - - for _, ecKey := range ecKeys { - if ecKey.KeyType() != "EC" { - t.Fatalf("key type must be %q, instead got %q", "EC", ecKey.KeyType()) - } - } -} - -func TestECSignVerify(t *testing.T) { - ecKeys := generateECTestKeys(t) - - message := "Hello, World!" - data := bytes.NewReader([]byte(message)) - - sigAlgs := []*signatureAlgorithm{es256, es384, es512} - - for i, ecKey := range ecKeys { - sigAlg := sigAlgs[i] - - t.Logf("%s signature of %q with kid: %s\n", sigAlg.HeaderParam(), message, ecKey.KeyID()) - - data.Seek(0, 0) // Reset the byte reader - - // Sign - sig, alg, err := ecKey.Sign(data, sigAlg.HashID()) - if err != nil { - t.Fatal(err) - } - - data.Seek(0, 0) // Reset the byte reader - - // Verify - err = ecKey.Verify(data, alg, sig) - if err != nil { - t.Fatal(err) - } - } -} - -func TestMarshalUnmarshalECKeys(t *testing.T) { - ecKeys := generateECTestKeys(t) - data := bytes.NewReader([]byte("This is a test. 
I repeat: this is only a test.")) - sigAlgs := []*signatureAlgorithm{es256, es384, es512} - - for i, ecKey := range ecKeys { - sigAlg := sigAlgs[i] - privateJWKJSON, err := json.MarshalIndent(ecKey, "", " ") - if err != nil { - t.Fatal(err) - } - - publicJWKJSON, err := json.MarshalIndent(ecKey.PublicKey(), "", " ") - if err != nil { - t.Fatal(err) - } - - t.Logf("JWK Private Key: %s", string(privateJWKJSON)) - t.Logf("JWK Public Key: %s", string(publicJWKJSON)) - - privKey2, err := UnmarshalPrivateKeyJWK(privateJWKJSON) - if err != nil { - t.Fatal(err) - } - - pubKey2, err := UnmarshalPublicKeyJWK(publicJWKJSON) - if err != nil { - t.Fatal(err) - } - - // Ensure we can sign/verify a message with the unmarshalled keys. - data.Seek(0, 0) // Reset the byte reader - signature, alg, err := privKey2.Sign(data, sigAlg.HashID()) - if err != nil { - t.Fatal(err) - } - - data.Seek(0, 0) // Reset the byte reader - err = pubKey2.Verify(data, alg, signature) - if err != nil { - t.Fatal(err) - } - } -} - -func TestFromCryptoECKeys(t *testing.T) { - ecKeys := generateECTestKeys(t) - - for _, ecKey := range ecKeys { - cryptoPrivateKey := ecKey.CryptoPrivateKey() - cryptoPublicKey := ecKey.CryptoPublicKey() - - pubKey, err := FromCryptoPublicKey(cryptoPublicKey) - if err != nil { - t.Fatal(err) - } - - if pubKey.KeyID() != ecKey.KeyID() { - t.Fatal("public key key ID mismatch") - } - - privKey, err := FromCryptoPrivateKey(cryptoPrivateKey) - if err != nil { - t.Fatal(err) - } - - if privKey.KeyID() != ecKey.KeyID() { - t.Fatal("public key key ID mismatch") - } - } -} - -func TestExtendedFields(t *testing.T) { - key, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatal(err) - } - - key.AddExtendedField("test", "foobar") - val := key.GetExtendedField("test") - - gotVal, ok := val.(string) - if !ok { - t.Fatalf("value is not a string") - } else if gotVal != val { - t.Fatalf("value %q is not equal to %q", gotVal, val) - } - -} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/filter.go b/Godeps/_workspace/src/github.com/docker/libtrust/filter.go deleted file mode 100644 index 5b2b4fca..00000000 --- a/Godeps/_workspace/src/github.com/docker/libtrust/filter.go +++ /dev/null @@ -1,50 +0,0 @@ -package libtrust - -import ( - "path/filepath" -) - -// FilterByHosts filters the list of PublicKeys to only those which contain a -// 'hosts' pattern which matches the given host. If *includeEmpty* is true, -// then keys which do not specify any hosts are also returned. 
-func FilterByHosts(keys []PublicKey, host string, includeEmpty bool) ([]PublicKey, error) { - filtered := make([]PublicKey, 0, len(keys)) - - for _, pubKey := range keys { - var hosts []string - switch v := pubKey.GetExtendedField("hosts").(type) { - case []string: - hosts = v - case []interface{}: - for _, value := range v { - h, ok := value.(string) - if !ok { - continue - } - hosts = append(hosts, h) - } - } - - if len(hosts) == 0 { - if includeEmpty { - filtered = append(filtered, pubKey) - } - continue - } - - // Check if any hosts match pattern - for _, hostPattern := range hosts { - match, err := filepath.Match(hostPattern, host) - if err != nil { - return nil, err - } - - if match { - filtered = append(filtered, pubKey) - continue - } - } - } - - return filtered, nil -} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/filter_test.go b/Godeps/_workspace/src/github.com/docker/libtrust/filter_test.go deleted file mode 100644 index 997e554c..00000000 --- a/Godeps/_workspace/src/github.com/docker/libtrust/filter_test.go +++ /dev/null @@ -1,81 +0,0 @@ -package libtrust - -import ( - "testing" -) - -func compareKeySlices(t *testing.T, sliceA, sliceB []PublicKey) { - if len(sliceA) != len(sliceB) { - t.Fatalf("slice size %d, expected %d", len(sliceA), len(sliceB)) - } - - for i, itemA := range sliceA { - itemB := sliceB[i] - if itemA != itemB { - t.Fatalf("slice index %d not equal: %#v != %#v", i, itemA, itemB) - } - } -} - -func TestFilter(t *testing.T) { - keys := make([]PublicKey, 0, 8) - - // Create 8 keys and add host entries. - for i := 0; i < cap(keys); i++ { - key, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatal(err) - } - - // we use both []interface{} and []string here because jwt uses - // []interface{} format, while PEM uses []string - switch { - case i == 0: - // Don't add entries for this key, key 0. - break - case i%2 == 0: - // Should catch keys 2, 4, and 6. - key.AddExtendedField("hosts", []interface{}{"*.even.example.com"}) - case i == 7: - // Should catch only the last key, and make it match any hostname. - key.AddExtendedField("hosts", []string{"*"}) - default: - // should catch keys 1, 3, 5. - key.AddExtendedField("hosts", []string{"*.example.com"}) - } - - keys = append(keys, key) - } - - // Should match 2 keys, the empty one, and the one that matches all hosts. - matchedKeys, err := FilterByHosts(keys, "foo.bar.com", true) - if err != nil { - t.Fatal(err) - } - expectedMatch := []PublicKey{keys[0], keys[7]} - compareKeySlices(t, expectedMatch, matchedKeys) - - // Should match 1 key, the one that matches any host. - matchedKeys, err = FilterByHosts(keys, "foo.bar.com", false) - if err != nil { - t.Fatal(err) - } - expectedMatch = []PublicKey{keys[7]} - compareKeySlices(t, expectedMatch, matchedKeys) - - // Should match keys that end in "example.com", and the key that matches anything. - matchedKeys, err = FilterByHosts(keys, "foo.example.com", false) - if err != nil { - t.Fatal(err) - } - expectedMatch = []PublicKey{keys[1], keys[3], keys[5], keys[7]} - compareKeySlices(t, expectedMatch, matchedKeys) - - // Should match all of the keys except the empty key. 
- matchedKeys, err = FilterByHosts(keys, "foo.even.example.com", false) - if err != nil { - t.Fatal(err) - } - expectedMatch = keys[1:] - compareKeySlices(t, expectedMatch, matchedKeys) -} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/hash.go b/Godeps/_workspace/src/github.com/docker/libtrust/hash.go deleted file mode 100644 index a2df787d..00000000 --- a/Godeps/_workspace/src/github.com/docker/libtrust/hash.go +++ /dev/null @@ -1,56 +0,0 @@ -package libtrust - -import ( - "crypto" - _ "crypto/sha256" // Registrer SHA224 and SHA256 - _ "crypto/sha512" // Registrer SHA384 and SHA512 - "fmt" -) - -type signatureAlgorithm struct { - algHeaderParam string - hashID crypto.Hash -} - -func (h *signatureAlgorithm) HeaderParam() string { - return h.algHeaderParam -} - -func (h *signatureAlgorithm) HashID() crypto.Hash { - return h.hashID -} - -var ( - rs256 = &signatureAlgorithm{"RS256", crypto.SHA256} - rs384 = &signatureAlgorithm{"RS384", crypto.SHA384} - rs512 = &signatureAlgorithm{"RS512", crypto.SHA512} - es256 = &signatureAlgorithm{"ES256", crypto.SHA256} - es384 = &signatureAlgorithm{"ES384", crypto.SHA384} - es512 = &signatureAlgorithm{"ES512", crypto.SHA512} -) - -func rsaSignatureAlgorithmByName(alg string) (*signatureAlgorithm, error) { - switch { - case alg == "RS256": - return rs256, nil - case alg == "RS384": - return rs384, nil - case alg == "RS512": - return rs512, nil - default: - return nil, fmt.Errorf("RSA Digital Signature Algorithm %q not supported", alg) - } -} - -func rsaPKCS1v15SignatureAlgorithmForHashID(hashID crypto.Hash) *signatureAlgorithm { - switch { - case hashID == crypto.SHA512: - return rs512 - case hashID == crypto.SHA384: - return rs384 - case hashID == crypto.SHA256: - fallthrough - default: - return rs256 - } -} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/jsonsign.go b/Godeps/_workspace/src/github.com/docker/libtrust/jsonsign.go deleted file mode 100644 index cb2ca9a7..00000000 --- a/Godeps/_workspace/src/github.com/docker/libtrust/jsonsign.go +++ /dev/null @@ -1,657 +0,0 @@ -package libtrust - -import ( - "bytes" - "crypto" - "crypto/x509" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "sort" - "time" - "unicode" -) - -var ( - // ErrInvalidSignContent is used when the content to be signed is invalid. - ErrInvalidSignContent = errors.New("invalid sign content") - - // ErrInvalidJSONContent is used when invalid json is encountered. - ErrInvalidJSONContent = errors.New("invalid json content") - - // ErrMissingSignatureKey is used when the specified signature key - // does not exist in the JSON content. 
- ErrMissingSignatureKey = errors.New("missing signature key") -) - -type jsHeader struct { - JWK PublicKey `json:"jwk,omitempty"` - Algorithm string `json:"alg"` - Chain []string `json:"x5c,omitempty"` -} - -type jsSignature struct { - Header jsHeader `json:"header"` - Signature string `json:"signature"` - Protected string `json:"protected,omitempty"` -} - -type jsSignaturesSorted []jsSignature - -func (jsbkid jsSignaturesSorted) Swap(i, j int) { jsbkid[i], jsbkid[j] = jsbkid[j], jsbkid[i] } -func (jsbkid jsSignaturesSorted) Len() int { return len(jsbkid) } - -func (jsbkid jsSignaturesSorted) Less(i, j int) bool { - ki, kj := jsbkid[i].Header.JWK.KeyID(), jsbkid[j].Header.JWK.KeyID() - si, sj := jsbkid[i].Signature, jsbkid[j].Signature - - if ki == kj { - return si < sj - } - - return ki < kj -} - -type signKey struct { - PrivateKey - Chain []*x509.Certificate -} - -// JSONSignature represents a signature of a json object. -type JSONSignature struct { - payload string - signatures []jsSignature - indent string - formatLength int - formatTail []byte -} - -func newJSONSignature() *JSONSignature { - return &JSONSignature{ - signatures: make([]jsSignature, 0, 1), - } -} - -// Payload returns the encoded payload of the signature. This -// payload should not be signed directly -func (js *JSONSignature) Payload() ([]byte, error) { - return joseBase64UrlDecode(js.payload) -} - -func (js *JSONSignature) protectedHeader() (string, error) { - protected := map[string]interface{}{ - "formatLength": js.formatLength, - "formatTail": joseBase64UrlEncode(js.formatTail), - "time": time.Now().UTC().Format(time.RFC3339), - } - protectedBytes, err := json.Marshal(protected) - if err != nil { - return "", err - } - - return joseBase64UrlEncode(protectedBytes), nil -} - -func (js *JSONSignature) signBytes(protectedHeader string) ([]byte, error) { - buf := make([]byte, len(js.payload)+len(protectedHeader)+1) - copy(buf, protectedHeader) - buf[len(protectedHeader)] = '.' - copy(buf[len(protectedHeader)+1:], js.payload) - return buf, nil -} - -// Sign adds a signature using the given private key. -func (js *JSONSignature) Sign(key PrivateKey) error { - protected, err := js.protectedHeader() - if err != nil { - return err - } - signBytes, err := js.signBytes(protected) - if err != nil { - return err - } - sigBytes, algorithm, err := key.Sign(bytes.NewReader(signBytes), crypto.SHA256) - if err != nil { - return err - } - - js.signatures = append(js.signatures, jsSignature{ - Header: jsHeader{ - JWK: key.PublicKey(), - Algorithm: algorithm, - }, - Signature: joseBase64UrlEncode(sigBytes), - Protected: protected, - }) - - return nil -} - -// SignWithChain adds a signature using the given private key -// and setting the x509 chain. The public key of the first element -// in the chain must be the public key corresponding with the sign key. 
-func (js *JSONSignature) SignWithChain(key PrivateKey, chain []*x509.Certificate) error { - // Ensure key.Chain[0] is public key for key - //key.Chain.PublicKey - //key.PublicKey().CryptoPublicKey() - - // Verify chain - protected, err := js.protectedHeader() - if err != nil { - return err - } - signBytes, err := js.signBytes(protected) - if err != nil { - return err - } - sigBytes, algorithm, err := key.Sign(bytes.NewReader(signBytes), crypto.SHA256) - if err != nil { - return err - } - - header := jsHeader{ - Chain: make([]string, len(chain)), - Algorithm: algorithm, - } - - for i, cert := range chain { - header.Chain[i] = base64.StdEncoding.EncodeToString(cert.Raw) - } - - js.signatures = append(js.signatures, jsSignature{ - Header: header, - Signature: joseBase64UrlEncode(sigBytes), - Protected: protected, - }) - - return nil -} - -// Verify verifies all the signatures and returns the list of -// public keys used to sign. Any x509 chains are not checked. -func (js *JSONSignature) Verify() ([]PublicKey, error) { - keys := make([]PublicKey, len(js.signatures)) - for i, signature := range js.signatures { - signBytes, err := js.signBytes(signature.Protected) - if err != nil { - return nil, err - } - var publicKey PublicKey - if len(signature.Header.Chain) > 0 { - certBytes, err := base64.StdEncoding.DecodeString(signature.Header.Chain[0]) - if err != nil { - return nil, err - } - cert, err := x509.ParseCertificate(certBytes) - if err != nil { - return nil, err - } - publicKey, err = FromCryptoPublicKey(cert.PublicKey) - if err != nil { - return nil, err - } - } else if signature.Header.JWK != nil { - publicKey = signature.Header.JWK - } else { - return nil, errors.New("missing public key") - } - - sigBytes, err := joseBase64UrlDecode(signature.Signature) - if err != nil { - return nil, err - } - - err = publicKey.Verify(bytes.NewReader(signBytes), signature.Header.Algorithm, sigBytes) - if err != nil { - return nil, err - } - - keys[i] = publicKey - } - return keys, nil -} - -// VerifyChains verifies all the signatures and the chains associated -// with each signature and returns the list of verified chains. -// Signatures without an x509 chain are not checked. -func (js *JSONSignature) VerifyChains(ca *x509.CertPool) ([][]*x509.Certificate, error) { - chains := make([][]*x509.Certificate, 0, len(js.signatures)) - for _, signature := range js.signatures { - signBytes, err := js.signBytes(signature.Protected) - if err != nil { - return nil, err - } - var publicKey PublicKey - if len(signature.Header.Chain) > 0 { - certBytes, err := base64.StdEncoding.DecodeString(signature.Header.Chain[0]) - if err != nil { - return nil, err - } - cert, err := x509.ParseCertificate(certBytes) - if err != nil { - return nil, err - } - publicKey, err = FromCryptoPublicKey(cert.PublicKey) - if err != nil { - return nil, err - } - intermediates := x509.NewCertPool() - if len(signature.Header.Chain) > 1 { - intermediateChain := signature.Header.Chain[1:] - for i := range intermediateChain { - certBytes, err := base64.StdEncoding.DecodeString(intermediateChain[i]) - if err != nil { - return nil, err - } - intermediate, err := x509.ParseCertificate(certBytes) - if err != nil { - return nil, err - } - intermediates.AddCert(intermediate) - } - } - - verifyOptions := x509.VerifyOptions{ - Intermediates: intermediates, - Roots: ca, - } - - verifiedChains, err := cert.Verify(verifyOptions) - if err != nil { - return nil, err - } - chains = append(chains, verifiedChains...) 
- - sigBytes, err := joseBase64UrlDecode(signature.Signature) - if err != nil { - return nil, err - } - - err = publicKey.Verify(bytes.NewReader(signBytes), signature.Header.Algorithm, sigBytes) - if err != nil { - return nil, err - } - } - - } - return chains, nil -} - -// JWS returns JSON serialized JWS according to -// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-7.2 -func (js *JSONSignature) JWS() ([]byte, error) { - if len(js.signatures) == 0 { - return nil, errors.New("missing signature") - } - - sort.Sort(jsSignaturesSorted(js.signatures)) - - jsonMap := map[string]interface{}{ - "payload": js.payload, - "signatures": js.signatures, - } - - return json.MarshalIndent(jsonMap, "", " ") -} - -func notSpace(r rune) bool { - return !unicode.IsSpace(r) -} - -func detectJSONIndent(jsonContent []byte) (indent string) { - if len(jsonContent) > 2 && jsonContent[0] == '{' && jsonContent[1] == '\n' { - quoteIndex := bytes.IndexRune(jsonContent[1:], '"') - if quoteIndex > 0 { - indent = string(jsonContent[2 : quoteIndex+1]) - } - } - return -} - -type jsParsedHeader struct { - JWK json.RawMessage `json:"jwk"` - Algorithm string `json:"alg"` - Chain []string `json:"x5c"` -} - -type jsParsedSignature struct { - Header jsParsedHeader `json:"header"` - Signature string `json:"signature"` - Protected string `json:"protected"` -} - -// ParseJWS parses a JWS serialized JSON object into a Json Signature. -func ParseJWS(content []byte) (*JSONSignature, error) { - type jsParsed struct { - Payload string `json:"payload"` - Signatures []jsParsedSignature `json:"signatures"` - } - parsed := &jsParsed{} - err := json.Unmarshal(content, parsed) - if err != nil { - return nil, err - } - if len(parsed.Signatures) == 0 { - return nil, errors.New("missing signatures") - } - payload, err := joseBase64UrlDecode(parsed.Payload) - if err != nil { - return nil, err - } - - js, err := NewJSONSignature(payload) - if err != nil { - return nil, err - } - js.signatures = make([]jsSignature, len(parsed.Signatures)) - for i, signature := range parsed.Signatures { - header := jsHeader{ - Algorithm: signature.Header.Algorithm, - } - if signature.Header.Chain != nil { - header.Chain = signature.Header.Chain - } - if signature.Header.JWK != nil { - publicKey, err := UnmarshalPublicKeyJWK([]byte(signature.Header.JWK)) - if err != nil { - return nil, err - } - header.JWK = publicKey - } - js.signatures[i] = jsSignature{ - Header: header, - Signature: signature.Signature, - Protected: signature.Protected, - } - } - - return js, nil -} - -// NewJSONSignature returns a new unsigned JWS from a json byte array. -// JSONSignature will need to be signed before serializing or storing. -// Optionally, one or more signatures can be provided as byte buffers, -// containing serialized JWS signatures, to assemble a fully signed JWS -// package. It is the callers responsibility to ensure uniqueness of the -// provided signatures. 
-func NewJSONSignature(content []byte, signatures ...[]byte) (*JSONSignature, error) { - var dataMap map[string]interface{} - err := json.Unmarshal(content, &dataMap) - if err != nil { - return nil, err - } - - js := newJSONSignature() - js.indent = detectJSONIndent(content) - - js.payload = joseBase64UrlEncode(content) - - // Find trailing } and whitespace, put in protected header - closeIndex := bytes.LastIndexFunc(content, notSpace) - if content[closeIndex] != '}' { - return nil, ErrInvalidJSONContent - } - lastRuneIndex := bytes.LastIndexFunc(content[:closeIndex], notSpace) - if content[lastRuneIndex] == ',' { - return nil, ErrInvalidJSONContent - } - js.formatLength = lastRuneIndex + 1 - js.formatTail = content[js.formatLength:] - - if len(signatures) > 0 { - for _, signature := range signatures { - var parsedJSig jsParsedSignature - - if err := json.Unmarshal(signature, &parsedJSig); err != nil { - return nil, err - } - - // TODO(stevvooe): A lot of the code below is repeated in - // ParseJWS. It will require more refactoring to fix that. - jsig := jsSignature{ - Header: jsHeader{ - Algorithm: parsedJSig.Header.Algorithm, - }, - Signature: parsedJSig.Signature, - Protected: parsedJSig.Protected, - } - - if parsedJSig.Header.Chain != nil { - jsig.Header.Chain = parsedJSig.Header.Chain - } - - if parsedJSig.Header.JWK != nil { - publicKey, err := UnmarshalPublicKeyJWK([]byte(parsedJSig.Header.JWK)) - if err != nil { - return nil, err - } - jsig.Header.JWK = publicKey - } - - js.signatures = append(js.signatures, jsig) - } - } - - return js, nil -} - -// NewJSONSignatureFromMap returns a new unsigned JSONSignature from a map or -// struct. JWS will need to be signed before serializing or storing. -func NewJSONSignatureFromMap(content interface{}) (*JSONSignature, error) { - switch content.(type) { - case map[string]interface{}: - case struct{}: - default: - return nil, errors.New("invalid data type") - } - - js := newJSONSignature() - js.indent = " " - - payload, err := json.MarshalIndent(content, "", js.indent) - if err != nil { - return nil, err - } - js.payload = joseBase64UrlEncode(payload) - - // Remove '\n}' from formatted section, put in protected header - js.formatLength = len(payload) - 2 - js.formatTail = payload[js.formatLength:] - - return js, nil -} - -func readIntFromMap(key string, m map[string]interface{}) (int, bool) { - value, ok := m[key] - if !ok { - return 0, false - } - switch v := value.(type) { - case int: - return v, true - case float64: - return int(v), true - default: - return 0, false - } -} - -func readStringFromMap(key string, m map[string]interface{}) (v string, ok bool) { - value, ok := m[key] - if !ok { - return "", false - } - v, ok = value.(string) - return -} - -// ParsePrettySignature parses a formatted signature into a -// JSON signature. If the signatures are missing the format information -// an error is thrown. The formatted signature must be created by -// the same method as format signature. 
-func ParsePrettySignature(content []byte, signatureKey string) (*JSONSignature, error) { - var contentMap map[string]json.RawMessage - err := json.Unmarshal(content, &contentMap) - if err != nil { - return nil, fmt.Errorf("error unmarshalling content: %s", err) - } - sigMessage, ok := contentMap[signatureKey] - if !ok { - return nil, ErrMissingSignatureKey - } - - var signatureBlocks []jsParsedSignature - err = json.Unmarshal([]byte(sigMessage), &signatureBlocks) - if err != nil { - return nil, fmt.Errorf("error unmarshalling signatures: %s", err) - } - - js := newJSONSignature() - js.signatures = make([]jsSignature, len(signatureBlocks)) - - for i, signatureBlock := range signatureBlocks { - protectedBytes, err := joseBase64UrlDecode(signatureBlock.Protected) - if err != nil { - return nil, fmt.Errorf("base64 decode error: %s", err) - } - var protectedHeader map[string]interface{} - err = json.Unmarshal(protectedBytes, &protectedHeader) - if err != nil { - return nil, fmt.Errorf("error unmarshalling protected header: %s", err) - } - - formatLength, ok := readIntFromMap("formatLength", protectedHeader) - if !ok { - return nil, errors.New("missing formatted length") - } - encodedTail, ok := readStringFromMap("formatTail", protectedHeader) - if !ok { - return nil, errors.New("missing formatted tail") - } - formatTail, err := joseBase64UrlDecode(encodedTail) - if err != nil { - return nil, fmt.Errorf("base64 decode error on tail: %s", err) - } - if js.formatLength == 0 { - js.formatLength = formatLength - } else if js.formatLength != formatLength { - return nil, errors.New("conflicting format length") - } - if len(js.formatTail) == 0 { - js.formatTail = formatTail - } else if bytes.Compare(js.formatTail, formatTail) != 0 { - return nil, errors.New("conflicting format tail") - } - - header := jsHeader{ - Algorithm: signatureBlock.Header.Algorithm, - Chain: signatureBlock.Header.Chain, - } - if signatureBlock.Header.JWK != nil { - publicKey, err := UnmarshalPublicKeyJWK([]byte(signatureBlock.Header.JWK)) - if err != nil { - return nil, fmt.Errorf("error unmarshalling public key: %s", err) - } - header.JWK = publicKey - } - js.signatures[i] = jsSignature{ - Header: header, - Signature: signatureBlock.Signature, - Protected: signatureBlock.Protected, - } - } - if js.formatLength > len(content) { - return nil, errors.New("invalid format length") - } - formatted := make([]byte, js.formatLength+len(js.formatTail)) - copy(formatted, content[:js.formatLength]) - copy(formatted[js.formatLength:], js.formatTail) - js.indent = detectJSONIndent(formatted) - js.payload = joseBase64UrlEncode(formatted) - - return js, nil -} - -// PrettySignature formats a json signature into an easy to read -// single json serialized object. 
-func (js *JSONSignature) PrettySignature(signatureKey string) ([]byte, error) { - if len(js.signatures) == 0 { - return nil, errors.New("no signatures") - } - payload, err := joseBase64UrlDecode(js.payload) - if err != nil { - return nil, err - } - payload = payload[:js.formatLength] - - sort.Sort(jsSignaturesSorted(js.signatures)) - - var marshalled []byte - var marshallErr error - if js.indent != "" { - marshalled, marshallErr = json.MarshalIndent(js.signatures, js.indent, js.indent) - } else { - marshalled, marshallErr = json.Marshal(js.signatures) - } - if marshallErr != nil { - return nil, marshallErr - } - - buf := bytes.NewBuffer(make([]byte, 0, len(payload)+len(marshalled)+34)) - buf.Write(payload) - buf.WriteByte(',') - if js.indent != "" { - buf.WriteByte('\n') - buf.WriteString(js.indent) - buf.WriteByte('"') - buf.WriteString(signatureKey) - buf.WriteString("\": ") - buf.Write(marshalled) - buf.WriteByte('\n') - } else { - buf.WriteByte('"') - buf.WriteString(signatureKey) - buf.WriteString("\":") - buf.Write(marshalled) - } - buf.WriteByte('}') - - return buf.Bytes(), nil -} - -// Signatures provides the signatures on this JWS as opaque blobs, sorted by -// keyID. These blobs can be stored and reassembled with payloads. Internally, -// they are simply marshaled json web signatures but implementations should -// not rely on this. -func (js *JSONSignature) Signatures() ([][]byte, error) { - sort.Sort(jsSignaturesSorted(js.signatures)) - - var sb [][]byte - for _, jsig := range js.signatures { - p, err := json.Marshal(jsig) - if err != nil { - return nil, err - } - - sb = append(sb, p) - } - - return sb, nil -} - -// Merge combines the signatures from one or more other signatures into the -// method receiver. If the payloads differ for any argument, an error will be -// returned and the receiver will not be modified. -func (js *JSONSignature) Merge(others ...*JSONSignature) error { - merged := js.signatures - for _, other := range others { - if js.payload != other.payload { - return fmt.Errorf("payloads differ from merge target") - } - merged = append(merged, other.signatures...) 
- } - - js.signatures = merged - return nil -} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/jsonsign_test.go b/Godeps/_workspace/src/github.com/docker/libtrust/jsonsign_test.go deleted file mode 100644 index b4f26979..00000000 --- a/Godeps/_workspace/src/github.com/docker/libtrust/jsonsign_test.go +++ /dev/null @@ -1,380 +0,0 @@ -package libtrust - -import ( - "bytes" - "crypto/rand" - "crypto/x509" - "encoding/json" - "fmt" - "io" - "testing" - - "github.com/docker/libtrust/testutil" -) - -func createTestJSON(sigKey string, indent string) (map[string]interface{}, []byte) { - testMap := map[string]interface{}{ - "name": "dmcgowan/mycontainer", - "config": map[string]interface{}{ - "ports": []int{9101, 9102}, - "run": "/bin/echo \"Hello\"", - }, - "layers": []string{ - "2893c080-27f5-11e4-8c21-0800200c9a66", - "c54bc25b-fbb2-497b-a899-a8bc1b5b9d55", - "4d5d7e03-f908-49f3-a7f6-9ba28dfe0fb4", - "0b6da891-7f7f-4abf-9c97-7887549e696c", - "1d960389-ae4f-4011-85fd-18d0f96a67ad", - }, - } - formattedSection := `{"config":{"ports":[9101,9102],"run":"/bin/echo \"Hello\""},"layers":["2893c080-27f5-11e4-8c21-0800200c9a66","c54bc25b-fbb2-497b-a899-a8bc1b5b9d55","4d5d7e03-f908-49f3-a7f6-9ba28dfe0fb4","0b6da891-7f7f-4abf-9c97-7887549e696c","1d960389-ae4f-4011-85fd-18d0f96a67ad"],"name":"dmcgowan/mycontainer","%s":[{"header":{` - formattedSection = fmt.Sprintf(formattedSection, sigKey) - if indent != "" { - buf := bytes.NewBuffer(nil) - json.Indent(buf, []byte(formattedSection), "", indent) - return testMap, buf.Bytes() - } - return testMap, []byte(formattedSection) - -} - -func TestSignJSON(t *testing.T) { - key, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatalf("Error generating EC key: %s", err) - } - - testMap, _ := createTestJSON("buildSignatures", " ") - indented, err := json.MarshalIndent(testMap, "", " ") - if err != nil { - t.Fatalf("Marshall error: %s", err) - } - - js, err := NewJSONSignature(indented) - if err != nil { - t.Fatalf("Error creating JSON signature: %s", err) - } - err = js.Sign(key) - if err != nil { - t.Fatalf("Error signing content: %s", err) - } - - keys, err := js.Verify() - if err != nil { - t.Fatalf("Error verifying signature: %s", err) - } - if len(keys) != 1 { - t.Fatalf("Error wrong number of keys returned") - } - if keys[0].KeyID() != key.KeyID() { - t.Fatalf("Unexpected public key returned") - } - -} - -func TestSignMap(t *testing.T) { - key, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatalf("Error generating EC key: %s", err) - } - - testMap, _ := createTestJSON("buildSignatures", " ") - js, err := NewJSONSignatureFromMap(testMap) - if err != nil { - t.Fatalf("Error creating JSON signature: %s", err) - } - err = js.Sign(key) - if err != nil { - t.Fatalf("Error signing JSON signature: %s", err) - } - - keys, err := js.Verify() - if err != nil { - t.Fatalf("Error verifying signature: %s", err) - } - if len(keys) != 1 { - t.Fatalf("Error wrong number of keys returned") - } - if keys[0].KeyID() != key.KeyID() { - t.Fatalf("Unexpected public key returned") - } -} - -func TestFormattedJson(t *testing.T) { - key, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatalf("Error generating EC key: %s", err) - } - - testMap, firstSection := createTestJSON("buildSignatures", " ") - indented, err := json.MarshalIndent(testMap, "", " ") - if err != nil { - t.Fatalf("Marshall error: %s", err) - } - - js, err := NewJSONSignature(indented) - if err != nil { - t.Fatalf("Error creating JSON signature: %s", err) - } - err = js.Sign(key) - 
if err != nil { - t.Fatalf("Error signing content: %s", err) - } - - b, err := js.PrettySignature("buildSignatures") - if err != nil { - t.Fatalf("Error signing map: %s", err) - } - - if bytes.Compare(b[:len(firstSection)], firstSection) != 0 { - t.Fatalf("Wrong signed value\nExpected:\n%s\nActual:\n%s", firstSection, b[:len(firstSection)]) - } - - parsed, err := ParsePrettySignature(b, "buildSignatures") - if err != nil { - t.Fatalf("Error parsing formatted signature: %s", err) - } - - keys, err := parsed.Verify() - if err != nil { - t.Fatalf("Error verifying signature: %s", err) - } - if len(keys) != 1 { - t.Fatalf("Error wrong number of keys returned") - } - if keys[0].KeyID() != key.KeyID() { - t.Fatalf("Unexpected public key returned") - } - - var unmarshalled map[string]interface{} - err = json.Unmarshal(b, &unmarshalled) - if err != nil { - t.Fatalf("Could not unmarshall after parse: %s", err) - } - -} - -func TestFormattedFlatJson(t *testing.T) { - key, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatalf("Error generating EC key: %s", err) - } - - testMap, firstSection := createTestJSON("buildSignatures", "") - unindented, err := json.Marshal(testMap) - if err != nil { - t.Fatalf("Marshall error: %s", err) - } - - js, err := NewJSONSignature(unindented) - if err != nil { - t.Fatalf("Error creating JSON signature: %s", err) - } - err = js.Sign(key) - if err != nil { - t.Fatalf("Error signing JSON signature: %s", err) - } - - b, err := js.PrettySignature("buildSignatures") - if err != nil { - t.Fatalf("Error signing map: %s", err) - } - - if bytes.Compare(b[:len(firstSection)], firstSection) != 0 { - t.Fatalf("Wrong signed value\nExpected:\n%s\nActual:\n%s", firstSection, b[:len(firstSection)]) - } - - parsed, err := ParsePrettySignature(b, "buildSignatures") - if err != nil { - t.Fatalf("Error parsing formatted signature: %s", err) - } - - keys, err := parsed.Verify() - if err != nil { - t.Fatalf("Error verifying signature: %s", err) - } - if len(keys) != 1 { - t.Fatalf("Error wrong number of keys returned") - } - if keys[0].KeyID() != key.KeyID() { - t.Fatalf("Unexpected public key returned") - } -} - -func generateTrustChain(t *testing.T, key PrivateKey, ca *x509.Certificate) (PrivateKey, []*x509.Certificate) { - parent := ca - parentKey := key - chain := make([]*x509.Certificate, 6) - for i := 5; i > 0; i-- { - intermediatekey, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatalf("Error generate key: %s", err) - } - chain[i], err = testutil.GenerateIntermediate(intermediatekey.CryptoPublicKey(), parentKey.CryptoPrivateKey(), parent) - if err != nil { - t.Fatalf("Error generating intermdiate certificate: %s", err) - } - parent = chain[i] - parentKey = intermediatekey - } - trustKey, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatalf("Error generate key: %s", err) - } - chain[0], err = testutil.GenerateTrustCert(trustKey.CryptoPublicKey(), parentKey.CryptoPrivateKey(), parent) - if err != nil { - t.Fatalf("Error generate trust cert: %s", err) - } - - return trustKey, chain -} - -func TestChainVerify(t *testing.T) { - caKey, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatalf("Error generating key: %s", err) - } - ca, err := testutil.GenerateTrustCA(caKey.CryptoPublicKey(), caKey.CryptoPrivateKey()) - if err != nil { - t.Fatalf("Error generating ca: %s", err) - } - trustKey, chain := generateTrustChain(t, caKey, ca) - - testMap, _ := createTestJSON("verifySignatures", " ") - js, err := NewJSONSignatureFromMap(testMap) - if err != nil { - 
t.Fatalf("Error creating JSONSignature from map: %s", err) - } - - err = js.SignWithChain(trustKey, chain) - if err != nil { - t.Fatalf("Error signing with chain: %s", err) - } - - pool := x509.NewCertPool() - pool.AddCert(ca) - chains, err := js.VerifyChains(pool) - if err != nil { - t.Fatalf("Error verifying content: %s", err) - } - if len(chains) != 1 { - t.Fatalf("Unexpected chains length: %d", len(chains)) - } - if len(chains[0]) != 7 { - t.Fatalf("Unexpected chain length: %d", len(chains[0])) - } -} - -func TestInvalidChain(t *testing.T) { - caKey, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatalf("Error generating key: %s", err) - } - ca, err := testutil.GenerateTrustCA(caKey.CryptoPublicKey(), caKey.CryptoPrivateKey()) - if err != nil { - t.Fatalf("Error generating ca: %s", err) - } - trustKey, chain := generateTrustChain(t, caKey, ca) - - testMap, _ := createTestJSON("verifySignatures", " ") - js, err := NewJSONSignatureFromMap(testMap) - if err != nil { - t.Fatalf("Error creating JSONSignature from map: %s", err) - } - - err = js.SignWithChain(trustKey, chain[:5]) - if err != nil { - t.Fatalf("Error signing with chain: %s", err) - } - - pool := x509.NewCertPool() - pool.AddCert(ca) - chains, err := js.VerifyChains(pool) - if err == nil { - t.Fatalf("Expected error verifying with bad chain") - } - if len(chains) != 0 { - t.Fatalf("Unexpected chains returned from invalid verify") - } -} - -func TestMergeSignatures(t *testing.T) { - pk1, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatalf("unexpected error generating private key 1: %v", err) - } - - pk2, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatalf("unexpected error generating private key 2: %v", err) - } - - payload := make([]byte, 1<<10) - if _, err = io.ReadFull(rand.Reader, payload); err != nil { - t.Fatalf("error generating payload: %v", err) - } - - payload, _ = json.Marshal(map[string]interface{}{"data": payload}) - - sig1, err := NewJSONSignature(payload) - if err != nil { - t.Fatalf("unexpected error creating signature 1: %v", err) - } - - if err := sig1.Sign(pk1); err != nil { - t.Fatalf("unexpected error signing with pk1: %v", err) - } - - sig2, err := NewJSONSignature(payload) - if err != nil { - t.Fatalf("unexpected error creating signature 2: %v", err) - } - - if err := sig2.Sign(pk2); err != nil { - t.Fatalf("unexpected error signing with pk2: %v", err) - } - - // Now, we actually merge into sig1 - if err := sig1.Merge(sig2); err != nil { - t.Fatalf("unexpected error merging: %v", err) - } - - // Verify the new signature package - pubkeys, err := sig1.Verify() - if err != nil { - t.Fatalf("unexpected error during verify: %v", err) - } - - // Make sure the pubkeys match the two private keys from before - privkeys := map[string]PrivateKey{ - pk1.KeyID(): pk1, - pk2.KeyID(): pk2, - } - - found := map[string]struct{}{} - - for _, pubkey := range pubkeys { - if _, ok := privkeys[pubkey.KeyID()]; !ok { - t.Fatalf("unexpected public key found during verification: %v", pubkey) - } - - found[pubkey.KeyID()] = struct{}{} - } - - // Make sure we've found all the private keys from verification - for keyid, _ := range privkeys { - if _, ok := found[keyid]; !ok { - t.Fatalf("public key %v not found during verification", keyid) - } - } - - // Create another signature, with a different payload, and ensure we get an error. 
- sig3, err := NewJSONSignature([]byte("{}")) - if err != nil { - t.Fatalf("unexpected error making signature for sig3: %v", err) - } - - if err := sig1.Merge(sig3); err == nil { - t.Fatalf("error expected during invalid merge with different payload") - } -} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/key.go b/Godeps/_workspace/src/github.com/docker/libtrust/key.go deleted file mode 100644 index 73642db2..00000000 --- a/Godeps/_workspace/src/github.com/docker/libtrust/key.go +++ /dev/null @@ -1,253 +0,0 @@ -package libtrust - -import ( - "crypto" - "crypto/ecdsa" - "crypto/rsa" - "crypto/x509" - "encoding/json" - "encoding/pem" - "errors" - "fmt" - "io" -) - -// PublicKey is a generic interface for a Public Key. -type PublicKey interface { - // KeyType returns the key type for this key. For elliptic curve keys, - // this value should be "EC". For RSA keys, this value should be "RSA". - KeyType() string - // KeyID returns a distinct identifier which is unique to this Public Key. - // The format generated by this library is a base32 encoding of a 240 bit - // hash of the public key data divided into 12 groups like so: - // ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP - KeyID() string - // Verify verifyies the signature of the data in the io.Reader using this - // Public Key. The alg parameter should identify the digital signature - // algorithm which was used to produce the signature and should be - // supported by this public key. Returns a nil error if the signature - // is valid. - Verify(data io.Reader, alg string, signature []byte) error - // CryptoPublicKey returns the internal object which can be used as a - // crypto.PublicKey for use with other standard library operations. The type - // is either *rsa.PublicKey or *ecdsa.PublicKey - CryptoPublicKey() crypto.PublicKey - // These public keys can be serialized to the standard JSON encoding for - // JSON Web Keys. See section 6 of the IETF draft RFC for JOSE JSON Web - // Algorithms. - MarshalJSON() ([]byte, error) - // These keys can also be serialized to the standard PEM encoding. - PEMBlock() (*pem.Block, error) - // The string representation of a key is its key type and ID. - String() string - AddExtendedField(string, interface{}) - GetExtendedField(string) interface{} -} - -// PrivateKey is a generic interface for a Private Key. -type PrivateKey interface { - // A PrivateKey contains all fields and methods of a PublicKey of the - // same type. The MarshalJSON method also outputs the private key as a - // JSON Web Key, and the PEMBlock method outputs the private key as a - // PEM block. - PublicKey - // PublicKey returns the PublicKey associated with this PrivateKey. - PublicKey() PublicKey - // Sign signs the data read from the io.Reader using a signature algorithm - // supported by the private key. If the specified hashing algorithm is - // supported by this key, that hash function is used to generate the - // signature otherwise the the default hashing algorithm for this key is - // used. Returns the signature and identifier of the algorithm used. - Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) - // CryptoPrivateKey returns the internal object which can be used as a - // crypto.PublicKey for use with other standard library operations. The - // type is either *rsa.PublicKey or *ecdsa.PublicKey - CryptoPrivateKey() crypto.PrivateKey -} - -// FromCryptoPublicKey returns a libtrust PublicKey representation of the given -// *ecdsa.PublicKey or *rsa.PublicKey. 
Returns a non-nil error when the given -// key is of an unsupported type. -func FromCryptoPublicKey(cryptoPublicKey crypto.PublicKey) (PublicKey, error) { - switch cryptoPublicKey := cryptoPublicKey.(type) { - case *ecdsa.PublicKey: - return fromECPublicKey(cryptoPublicKey) - case *rsa.PublicKey: - return fromRSAPublicKey(cryptoPublicKey), nil - default: - return nil, fmt.Errorf("public key type %T is not supported", cryptoPublicKey) - } -} - -// FromCryptoPrivateKey returns a libtrust PrivateKey representation of the given -// *ecdsa.PrivateKey or *rsa.PrivateKey. Returns a non-nil error when the given -// key is of an unsupported type. -func FromCryptoPrivateKey(cryptoPrivateKey crypto.PrivateKey) (PrivateKey, error) { - switch cryptoPrivateKey := cryptoPrivateKey.(type) { - case *ecdsa.PrivateKey: - return fromECPrivateKey(cryptoPrivateKey) - case *rsa.PrivateKey: - return fromRSAPrivateKey(cryptoPrivateKey), nil - default: - return nil, fmt.Errorf("private key type %T is not supported", cryptoPrivateKey) - } -} - -// UnmarshalPublicKeyPEM parses the PEM encoded data and returns a libtrust -// PublicKey or an error if there is a problem with the encoding. -func UnmarshalPublicKeyPEM(data []byte) (PublicKey, error) { - pemBlock, _ := pem.Decode(data) - if pemBlock == nil { - return nil, errors.New("unable to find PEM encoded data") - } else if pemBlock.Type != "PUBLIC KEY" { - return nil, fmt.Errorf("unable to get PublicKey from PEM type: %s", pemBlock.Type) - } - - return pubKeyFromPEMBlock(pemBlock) -} - -// UnmarshalPublicKeyPEMBundle parses the PEM encoded data as a bundle of -// PEM blocks appended one after the other and returns a slice of PublicKey -// objects that it finds. -func UnmarshalPublicKeyPEMBundle(data []byte) ([]PublicKey, error) { - pubKeys := []PublicKey{} - - for { - var pemBlock *pem.Block - pemBlock, data = pem.Decode(data) - if pemBlock == nil { - break - } else if pemBlock.Type != "PUBLIC KEY" { - return nil, fmt.Errorf("unable to get PublicKey from PEM type: %s", pemBlock.Type) - } - - pubKey, err := pubKeyFromPEMBlock(pemBlock) - if err != nil { - return nil, err - } - - pubKeys = append(pubKeys, pubKey) - } - - return pubKeys, nil -} - -// UnmarshalPrivateKeyPEM parses the PEM encoded data and returns a libtrust -// PrivateKey or an error if there is a problem with the encoding. -func UnmarshalPrivateKeyPEM(data []byte) (PrivateKey, error) { - pemBlock, _ := pem.Decode(data) - if pemBlock == nil { - return nil, errors.New("unable to find PEM encoded data") - } - - var key PrivateKey - - switch { - case pemBlock.Type == "RSA PRIVATE KEY": - rsaPrivateKey, err := x509.ParsePKCS1PrivateKey(pemBlock.Bytes) - if err != nil { - return nil, fmt.Errorf("unable to decode RSA Private Key PEM data: %s", err) - } - key = fromRSAPrivateKey(rsaPrivateKey) - case pemBlock.Type == "EC PRIVATE KEY": - ecPrivateKey, err := x509.ParseECPrivateKey(pemBlock.Bytes) - if err != nil { - return nil, fmt.Errorf("unable to decode EC Private Key PEM data: %s", err) - } - key, err = fromECPrivateKey(ecPrivateKey) - if err != nil { - return nil, err - } - default: - return nil, fmt.Errorf("unable to get PrivateKey from PEM type: %s", pemBlock.Type) - } - - addPEMHeadersToKey(pemBlock, key.PublicKey()) - - return key, nil -} - -// UnmarshalPublicKeyJWK unmarshals the given JSON Web Key into a generic -// Public Key to be used with libtrust. 
-func UnmarshalPublicKeyJWK(data []byte) (PublicKey, error) { - jwk := make(map[string]interface{}) - - err := json.Unmarshal(data, &jwk) - if err != nil { - return nil, fmt.Errorf( - "decoding JWK Public Key JSON data: %s\n", err, - ) - } - - // Get the Key Type value. - kty, err := stringFromMap(jwk, "kty") - if err != nil { - return nil, fmt.Errorf("JWK Public Key type: %s", err) - } - - switch { - case kty == "EC": - // Call out to unmarshal EC public key. - return ecPublicKeyFromMap(jwk) - case kty == "RSA": - // Call out to unmarshal RSA public key. - return rsaPublicKeyFromMap(jwk) - default: - return nil, fmt.Errorf( - "JWK Public Key type not supported: %q\n", kty, - ) - } -} - -// UnmarshalPublicKeyJWKSet parses the JSON encoded data as a JSON Web Key Set -// and returns a slice of Public Key objects. -func UnmarshalPublicKeyJWKSet(data []byte) ([]PublicKey, error) { - rawKeys, err := loadJSONKeySetRaw(data) - if err != nil { - return nil, err - } - - pubKeys := make([]PublicKey, 0, len(rawKeys)) - - for _, rawKey := range rawKeys { - pubKey, err := UnmarshalPublicKeyJWK(rawKey) - if err != nil { - return nil, err - } - pubKeys = append(pubKeys, pubKey) - } - - return pubKeys, nil -} - -// UnmarshalPrivateKeyJWK unmarshals the given JSON Web Key into a generic -// Private Key to be used with libtrust. -func UnmarshalPrivateKeyJWK(data []byte) (PrivateKey, error) { - jwk := make(map[string]interface{}) - - err := json.Unmarshal(data, &jwk) - if err != nil { - return nil, fmt.Errorf( - "decoding JWK Private Key JSON data: %s\n", err, - ) - } - - // Get the Key Type value. - kty, err := stringFromMap(jwk, "kty") - if err != nil { - return nil, fmt.Errorf("JWK Private Key type: %s", err) - } - - switch { - case kty == "EC": - // Call out to unmarshal EC private key. - return ecPrivateKeyFromMap(jwk) - case kty == "RSA": - // Call out to unmarshal RSA private key. - return rsaPrivateKeyFromMap(jwk) - default: - return nil, fmt.Errorf( - "JWK Private Key type not supported: %q\n", kty, - ) - } -} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/key_files.go b/Godeps/_workspace/src/github.com/docker/libtrust/key_files.go deleted file mode 100644 index c526de54..00000000 --- a/Godeps/_workspace/src/github.com/docker/libtrust/key_files.go +++ /dev/null @@ -1,255 +0,0 @@ -package libtrust - -import ( - "encoding/json" - "encoding/pem" - "errors" - "fmt" - "io/ioutil" - "os" - "strings" -) - -var ( - // ErrKeyFileDoesNotExist indicates that the private key file does not exist. - ErrKeyFileDoesNotExist = errors.New("key file does not exist") -) - -func readKeyFileBytes(filename string) ([]byte, error) { - data, err := ioutil.ReadFile(filename) - if err != nil { - if os.IsNotExist(err) { - err = ErrKeyFileDoesNotExist - } else { - err = fmt.Errorf("unable to read key file %s: %s", filename, err) - } - - return nil, err - } - - return data, nil -} - -/* - Loading and Saving of Public and Private Keys in either PEM or JWK format. -*/ - -// LoadKeyFile opens the given filename and attempts to read a Private Key -// encoded in either PEM or JWK format (if .json or .jwk file extension). 
-func LoadKeyFile(filename string) (PrivateKey, error) { - contents, err := readKeyFileBytes(filename) - if err != nil { - return nil, err - } - - var key PrivateKey - - if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { - key, err = UnmarshalPrivateKeyJWK(contents) - if err != nil { - return nil, fmt.Errorf("unable to decode private key JWK: %s", err) - } - } else { - key, err = UnmarshalPrivateKeyPEM(contents) - if err != nil { - return nil, fmt.Errorf("unable to decode private key PEM: %s", err) - } - } - - return key, nil -} - -// LoadPublicKeyFile opens the given filename and attempts to read a Public Key -// encoded in either PEM or JWK format (if .json or .jwk file extension). -func LoadPublicKeyFile(filename string) (PublicKey, error) { - contents, err := readKeyFileBytes(filename) - if err != nil { - return nil, err - } - - var key PublicKey - - if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { - key, err = UnmarshalPublicKeyJWK(contents) - if err != nil { - return nil, fmt.Errorf("unable to decode public key JWK: %s", err) - } - } else { - key, err = UnmarshalPublicKeyPEM(contents) - if err != nil { - return nil, fmt.Errorf("unable to decode public key PEM: %s", err) - } - } - - return key, nil -} - -// SaveKey saves the given key to a file using the provided filename. -// This process will overwrite any existing file at the provided location. -func SaveKey(filename string, key PrivateKey) error { - var encodedKey []byte - var err error - - if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { - // Encode in JSON Web Key format. - encodedKey, err = json.MarshalIndent(key, "", " ") - if err != nil { - return fmt.Errorf("unable to encode private key JWK: %s", err) - } - } else { - // Encode in PEM format. - pemBlock, err := key.PEMBlock() - if err != nil { - return fmt.Errorf("unable to encode private key PEM: %s", err) - } - encodedKey = pem.EncodeToMemory(pemBlock) - } - - err = ioutil.WriteFile(filename, encodedKey, os.FileMode(0600)) - if err != nil { - return fmt.Errorf("unable to write private key file %s: %s", filename, err) - } - - return nil -} - -// SavePublicKey saves the given public key to the file. -func SavePublicKey(filename string, key PublicKey) error { - var encodedKey []byte - var err error - - if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { - // Encode in JSON Web Key format. - encodedKey, err = json.MarshalIndent(key, "", " ") - if err != nil { - return fmt.Errorf("unable to encode public key JWK: %s", err) - } - } else { - // Encode in PEM format. - pemBlock, err := key.PEMBlock() - if err != nil { - return fmt.Errorf("unable to encode public key PEM: %s", err) - } - encodedKey = pem.EncodeToMemory(pemBlock) - } - - err = ioutil.WriteFile(filename, encodedKey, os.FileMode(0644)) - if err != nil { - return fmt.Errorf("unable to write public key file %s: %s", filename, err) - } - - return nil -} - -// Public Key Set files - -type jwkSet struct { - Keys []json.RawMessage `json:"keys"` -} - -// LoadKeySetFile loads a key set -func LoadKeySetFile(filename string) ([]PublicKey, error) { - if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { - return loadJSONKeySetFile(filename) - } - - // Must be a PEM format file - return loadPEMKeySetFile(filename) -} - -func loadJSONKeySetRaw(data []byte) ([]json.RawMessage, error) { - if len(data) == 0 { - // This is okay, just return an empty slice. 
- return []json.RawMessage{}, nil - } - - keySet := jwkSet{} - - err := json.Unmarshal(data, &keySet) - if err != nil { - return nil, fmt.Errorf("unable to decode JSON Web Key Set: %s", err) - } - - return keySet.Keys, nil -} - -func loadJSONKeySetFile(filename string) ([]PublicKey, error) { - contents, err := readKeyFileBytes(filename) - if err != nil && err != ErrKeyFileDoesNotExist { - return nil, err - } - - return UnmarshalPublicKeyJWKSet(contents) -} - -func loadPEMKeySetFile(filename string) ([]PublicKey, error) { - data, err := readKeyFileBytes(filename) - if err != nil && err != ErrKeyFileDoesNotExist { - return nil, err - } - - return UnmarshalPublicKeyPEMBundle(data) -} - -// AddKeySetFile adds a key to a key set -func AddKeySetFile(filename string, key PublicKey) error { - if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { - return addKeySetJSONFile(filename, key) - } - - // Must be a PEM format file - return addKeySetPEMFile(filename, key) -} - -func addKeySetJSONFile(filename string, key PublicKey) error { - encodedKey, err := json.Marshal(key) - if err != nil { - return fmt.Errorf("unable to encode trusted client key: %s", err) - } - - contents, err := readKeyFileBytes(filename) - if err != nil && err != ErrKeyFileDoesNotExist { - return err - } - - rawEntries, err := loadJSONKeySetRaw(contents) - if err != nil { - return err - } - - rawEntries = append(rawEntries, json.RawMessage(encodedKey)) - entriesWrapper := jwkSet{Keys: rawEntries} - - encodedEntries, err := json.MarshalIndent(entriesWrapper, "", " ") - if err != nil { - return fmt.Errorf("unable to encode trusted client keys: %s", err) - } - - err = ioutil.WriteFile(filename, encodedEntries, os.FileMode(0644)) - if err != nil { - return fmt.Errorf("unable to write trusted client keys file %s: %s", filename, err) - } - - return nil -} - -func addKeySetPEMFile(filename string, key PublicKey) error { - // Encode to PEM, open file for appending, write PEM. 
- file, err := os.OpenFile(filename, os.O_CREATE|os.O_APPEND|os.O_RDWR, os.FileMode(0644)) - if err != nil { - return fmt.Errorf("unable to open trusted client keys file %s: %s", filename, err) - } - defer file.Close() - - pemBlock, err := key.PEMBlock() - if err != nil { - return fmt.Errorf("unable to encoded trusted key: %s", err) - } - - _, err = file.Write(pem.EncodeToMemory(pemBlock)) - if err != nil { - return fmt.Errorf("unable to write trusted keys file: %s", err) - } - - return nil -} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/key_files_test.go b/Godeps/_workspace/src/github.com/docker/libtrust/key_files_test.go deleted file mode 100644 index 57e691f2..00000000 --- a/Godeps/_workspace/src/github.com/docker/libtrust/key_files_test.go +++ /dev/null @@ -1,220 +0,0 @@ -package libtrust - -import ( - "errors" - "io/ioutil" - "os" - "testing" -) - -func makeTempFile(t *testing.T, prefix string) (filename string) { - file, err := ioutil.TempFile("", prefix) - if err != nil { - t.Fatal(err) - } - - filename = file.Name() - file.Close() - - return -} - -func TestKeyFiles(t *testing.T) { - key, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatal(err) - } - - testKeyFiles(t, key) - - key, err = GenerateRSA2048PrivateKey() - if err != nil { - t.Fatal(err) - } - - testKeyFiles(t, key) -} - -func testKeyFiles(t *testing.T, key PrivateKey) { - var err error - - privateKeyFilename := makeTempFile(t, "private_key") - privateKeyFilenamePEM := privateKeyFilename + ".pem" - privateKeyFilenameJWK := privateKeyFilename + ".jwk" - - publicKeyFilename := makeTempFile(t, "public_key") - publicKeyFilenamePEM := publicKeyFilename + ".pem" - publicKeyFilenameJWK := publicKeyFilename + ".jwk" - - if err = SaveKey(privateKeyFilenamePEM, key); err != nil { - t.Fatal(err) - } - - if err = SaveKey(privateKeyFilenameJWK, key); err != nil { - t.Fatal(err) - } - - if err = SavePublicKey(publicKeyFilenamePEM, key.PublicKey()); err != nil { - t.Fatal(err) - } - - if err = SavePublicKey(publicKeyFilenameJWK, key.PublicKey()); err != nil { - t.Fatal(err) - } - - loadedPEMKey, err := LoadKeyFile(privateKeyFilenamePEM) - if err != nil { - t.Fatal(err) - } - - loadedJWKKey, err := LoadKeyFile(privateKeyFilenameJWK) - if err != nil { - t.Fatal(err) - } - - loadedPEMPublicKey, err := LoadPublicKeyFile(publicKeyFilenamePEM) - if err != nil { - t.Fatal(err) - } - - loadedJWKPublicKey, err := LoadPublicKeyFile(publicKeyFilenameJWK) - if err != nil { - t.Fatal(err) - } - - if key.KeyID() != loadedPEMKey.KeyID() { - t.Fatal(errors.New("key IDs do not match")) - } - - if key.KeyID() != loadedJWKKey.KeyID() { - t.Fatal(errors.New("key IDs do not match")) - } - - if key.KeyID() != loadedPEMPublicKey.KeyID() { - t.Fatal(errors.New("key IDs do not match")) - } - - if key.KeyID() != loadedJWKPublicKey.KeyID() { - t.Fatal(errors.New("key IDs do not match")) - } - - os.Remove(privateKeyFilename) - os.Remove(privateKeyFilenamePEM) - os.Remove(privateKeyFilenameJWK) - os.Remove(publicKeyFilename) - os.Remove(publicKeyFilenamePEM) - os.Remove(publicKeyFilenameJWK) -} - -func TestTrustedHostKeysFile(t *testing.T) { - trustedHostKeysFilename := makeTempFile(t, "trusted_host_keys") - trustedHostKeysFilenamePEM := trustedHostKeysFilename + ".pem" - trustedHostKeysFilenameJWK := trustedHostKeysFilename + ".json" - - testTrustedHostKeysFile(t, trustedHostKeysFilenamePEM) - testTrustedHostKeysFile(t, trustedHostKeysFilenameJWK) - - os.Remove(trustedHostKeysFilename) - os.Remove(trustedHostKeysFilenamePEM) - 
os.Remove(trustedHostKeysFilenameJWK) -} - -func testTrustedHostKeysFile(t *testing.T, trustedHostKeysFilename string) { - hostAddress1 := "docker.example.com:2376" - hostKey1, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatal(err) - } - - hostKey1.AddExtendedField("hosts", []string{hostAddress1}) - err = AddKeySetFile(trustedHostKeysFilename, hostKey1.PublicKey()) - if err != nil { - t.Fatal(err) - } - - trustedHostKeysMapping, err := LoadKeySetFile(trustedHostKeysFilename) - if err != nil { - t.Fatal(err) - } - - for addr, hostKey := range trustedHostKeysMapping { - t.Logf("Host Address: %d\n", addr) - t.Logf("Host Key: %s\n\n", hostKey) - } - - hostAddress2 := "192.168.59.103:2376" - hostKey2, err := GenerateRSA2048PrivateKey() - if err != nil { - t.Fatal(err) - } - - hostKey2.AddExtendedField("hosts", hostAddress2) - err = AddKeySetFile(trustedHostKeysFilename, hostKey2.PublicKey()) - if err != nil { - t.Fatal(err) - } - - trustedHostKeysMapping, err = LoadKeySetFile(trustedHostKeysFilename) - if err != nil { - t.Fatal(err) - } - - for addr, hostKey := range trustedHostKeysMapping { - t.Logf("Host Address: %d\n", addr) - t.Logf("Host Key: %s\n\n", hostKey) - } - -} - -func TestTrustedClientKeysFile(t *testing.T) { - trustedClientKeysFilename := makeTempFile(t, "trusted_client_keys") - trustedClientKeysFilenamePEM := trustedClientKeysFilename + ".pem" - trustedClientKeysFilenameJWK := trustedClientKeysFilename + ".json" - - testTrustedClientKeysFile(t, trustedClientKeysFilenamePEM) - testTrustedClientKeysFile(t, trustedClientKeysFilenameJWK) - - os.Remove(trustedClientKeysFilename) - os.Remove(trustedClientKeysFilenamePEM) - os.Remove(trustedClientKeysFilenameJWK) -} - -func testTrustedClientKeysFile(t *testing.T, trustedClientKeysFilename string) { - clientKey1, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatal(err) - } - - err = AddKeySetFile(trustedClientKeysFilename, clientKey1.PublicKey()) - if err != nil { - t.Fatal(err) - } - - trustedClientKeys, err := LoadKeySetFile(trustedClientKeysFilename) - if err != nil { - t.Fatal(err) - } - - for _, clientKey := range trustedClientKeys { - t.Logf("Client Key: %s\n", clientKey) - } - - clientKey2, err := GenerateRSA2048PrivateKey() - if err != nil { - t.Fatal(err) - } - - err = AddKeySetFile(trustedClientKeysFilename, clientKey2.PublicKey()) - if err != nil { - t.Fatal(err) - } - - trustedClientKeys, err = LoadKeySetFile(trustedClientKeysFilename) - if err != nil { - t.Fatal(err) - } - - for _, clientKey := range trustedClientKeys { - t.Logf("Client Key: %s\n", clientKey) - } -} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/key_manager.go b/Godeps/_workspace/src/github.com/docker/libtrust/key_manager.go deleted file mode 100644 index 9a98ae35..00000000 --- a/Godeps/_workspace/src/github.com/docker/libtrust/key_manager.go +++ /dev/null @@ -1,175 +0,0 @@ -package libtrust - -import ( - "crypto/tls" - "crypto/x509" - "fmt" - "io/ioutil" - "net" - "os" - "path" - "sync" -) - -// ClientKeyManager manages client keys on the filesystem -type ClientKeyManager struct { - key PrivateKey - clientFile string - clientDir string - - clientLock sync.RWMutex - clients []PublicKey - - configLock sync.Mutex - configs []*tls.Config -} - -// NewClientKeyManager loads a new manager from a set of key files -// and managed by the given private key. 
-func NewClientKeyManager(trustKey PrivateKey, clientFile, clientDir string) (*ClientKeyManager, error) { - m := &ClientKeyManager{ - key: trustKey, - clientFile: clientFile, - clientDir: clientDir, - } - if err := m.loadKeys(); err != nil { - return nil, err - } - // TODO Start watching file and directory - - return m, nil -} - -func (c *ClientKeyManager) loadKeys() (err error) { - // Load authorized keys file - var clients []PublicKey - if c.clientFile != "" { - clients, err = LoadKeySetFile(c.clientFile) - if err != nil { - return fmt.Errorf("unable to load authorized keys: %s", err) - } - } - - // Add clients from authorized keys directory - files, err := ioutil.ReadDir(c.clientDir) - if err != nil && !os.IsNotExist(err) { - return fmt.Errorf("unable to open authorized keys directory: %s", err) - } - for _, f := range files { - if !f.IsDir() { - publicKey, err := LoadPublicKeyFile(path.Join(c.clientDir, f.Name())) - if err != nil { - return fmt.Errorf("unable to load authorized key file: %s", err) - } - clients = append(clients, publicKey) - } - } - - c.clientLock.Lock() - c.clients = clients - c.clientLock.Unlock() - - return nil -} - -// RegisterTLSConfig registers a tls configuration to manager -// such that any changes to the keys may be reflected in -// the tls client CA pool -func (c *ClientKeyManager) RegisterTLSConfig(tlsConfig *tls.Config) error { - c.clientLock.RLock() - certPool, err := GenerateCACertPool(c.key, c.clients) - if err != nil { - return fmt.Errorf("CA pool generation error: %s", err) - } - c.clientLock.RUnlock() - - tlsConfig.ClientCAs = certPool - - c.configLock.Lock() - c.configs = append(c.configs, tlsConfig) - c.configLock.Unlock() - - return nil -} - -// NewIdentityAuthTLSConfig creates a tls.Config for the server to use for -// libtrust identity authentication for the domain specified -func NewIdentityAuthTLSConfig(trustKey PrivateKey, clients *ClientKeyManager, addr string, domain string) (*tls.Config, error) { - tlsConfig := newTLSConfig() - - tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert - if err := clients.RegisterTLSConfig(tlsConfig); err != nil { - return nil, err - } - - // Generate cert - ips, domains, err := parseAddr(addr) - if err != nil { - return nil, err - } - // add domain that it expects clients to use - domains = append(domains, domain) - x509Cert, err := GenerateSelfSignedServerCert(trustKey, domains, ips) - if err != nil { - return nil, fmt.Errorf("certificate generation error: %s", err) - } - tlsConfig.Certificates = []tls.Certificate{{ - Certificate: [][]byte{x509Cert.Raw}, - PrivateKey: trustKey.CryptoPrivateKey(), - Leaf: x509Cert, - }} - - return tlsConfig, nil -} - -// NewCertAuthTLSConfig creates a tls.Config for the server to use for -// certificate authentication -func NewCertAuthTLSConfig(caPath, certPath, keyPath string) (*tls.Config, error) { - tlsConfig := newTLSConfig() - - cert, err := tls.LoadX509KeyPair(certPath, keyPath) - if err != nil { - return nil, fmt.Errorf("Couldn't load X509 key pair (%s, %s): %s. Key encrypted?", certPath, keyPath, err) - } - tlsConfig.Certificates = []tls.Certificate{cert} - - // Verify client certificates against a CA? 
- if caPath != "" { - certPool := x509.NewCertPool() - file, err := ioutil.ReadFile(caPath) - if err != nil { - return nil, fmt.Errorf("Couldn't read CA certificate: %s", err) - } - certPool.AppendCertsFromPEM(file) - - tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert - tlsConfig.ClientCAs = certPool - } - - return tlsConfig, nil -} - -func newTLSConfig() *tls.Config { - return &tls.Config{ - NextProtos: []string{"http/1.1"}, - // Avoid fallback on insecure SSL protocols - MinVersion: tls.VersionTLS10, - } -} - -// parseAddr parses an address into an array of IPs and domains -func parseAddr(addr string) ([]net.IP, []string, error) { - host, _, err := net.SplitHostPort(addr) - if err != nil { - return nil, nil, err - } - var domains []string - var ips []net.IP - ip := net.ParseIP(host) - if ip != nil { - ips = []net.IP{ip} - } else { - domains = []string{host} - } - return ips, domains, nil -} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/key_test.go b/Godeps/_workspace/src/github.com/docker/libtrust/key_test.go deleted file mode 100644 index f6c59cc4..00000000 --- a/Godeps/_workspace/src/github.com/docker/libtrust/key_test.go +++ /dev/null @@ -1,80 +0,0 @@ -package libtrust - -import ( - "testing" -) - -type generateFunc func() (PrivateKey, error) - -func runGenerateBench(b *testing.B, f generateFunc, name string) { - for i := 0; i < b.N; i++ { - _, err := f() - if err != nil { - b.Fatalf("Error generating %s: %s", name, err) - } - } -} - -func runFingerprintBench(b *testing.B, f generateFunc, name string) { - b.StopTimer() - // Don't count this relatively slow generation call. - key, err := f() - if err != nil { - b.Fatalf("Error generating %s: %s", name, err) - } - b.StartTimer() - - for i := 0; i < b.N; i++ { - if key.KeyID() == "" { - b.Fatalf("Error generating key ID for %s", name) - } - } -} - -func BenchmarkECP256Generate(b *testing.B) { - runGenerateBench(b, GenerateECP256PrivateKey, "P256") -} - -func BenchmarkECP384Generate(b *testing.B) { - runGenerateBench(b, GenerateECP384PrivateKey, "P384") -} - -func BenchmarkECP521Generate(b *testing.B) { - runGenerateBench(b, GenerateECP521PrivateKey, "P521") -} - -func BenchmarkRSA2048Generate(b *testing.B) { - runGenerateBench(b, GenerateRSA2048PrivateKey, "RSA2048") -} - -func BenchmarkRSA3072Generate(b *testing.B) { - runGenerateBench(b, GenerateRSA3072PrivateKey, "RSA3072") -} - -func BenchmarkRSA4096Generate(b *testing.B) { - runGenerateBench(b, GenerateRSA4096PrivateKey, "RSA4096") -} - -func BenchmarkECP256Fingerprint(b *testing.B) { - runFingerprintBench(b, GenerateECP256PrivateKey, "P256") -} - -func BenchmarkECP384Fingerprint(b *testing.B) { - runFingerprintBench(b, GenerateECP384PrivateKey, "P384") -} - -func BenchmarkECP521Fingerprint(b *testing.B) { - runFingerprintBench(b, GenerateECP521PrivateKey, "P521") -} - -func BenchmarkRSA2048Fingerprint(b *testing.B) { - runFingerprintBench(b, GenerateRSA2048PrivateKey, "RSA2048") -} - -func BenchmarkRSA3072Fingerprint(b *testing.B) { - runFingerprintBench(b, GenerateRSA3072PrivateKey, "RSA3072") -} - -func BenchmarkRSA4096Fingerprint(b *testing.B) { - runFingerprintBench(b, GenerateRSA4096PrivateKey, "RSA4096") -} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/rsa_key.go b/Godeps/_workspace/src/github.com/docker/libtrust/rsa_key.go deleted file mode 100644 index dac4cacf..00000000 --- a/Godeps/_workspace/src/github.com/docker/libtrust/rsa_key.go +++ /dev/null @@ -1,427 +0,0 @@ -package libtrust - -import ( - "crypto" - "crypto/rand" - "crypto/rsa" 
- "crypto/x509" - "encoding/json" - "encoding/pem" - "errors" - "fmt" - "io" - "math/big" -) - -/* - * RSA DSA PUBLIC KEY - */ - -// rsaPublicKey implements a JWK Public Key using RSA digital signature algorithms. -type rsaPublicKey struct { - *rsa.PublicKey - extended map[string]interface{} -} - -func fromRSAPublicKey(cryptoPublicKey *rsa.PublicKey) *rsaPublicKey { - return &rsaPublicKey{cryptoPublicKey, map[string]interface{}{}} -} - -// KeyType returns the JWK key type for RSA keys, i.e., "RSA". -func (k *rsaPublicKey) KeyType() string { - return "RSA" -} - -// KeyID returns a distinct identifier which is unique to this Public Key. -func (k *rsaPublicKey) KeyID() string { - return keyIDFromCryptoKey(k) -} - -func (k *rsaPublicKey) String() string { - return fmt.Sprintf("RSA Public Key <%s>", k.KeyID()) -} - -// Verify verifyies the signature of the data in the io.Reader using this Public Key. -// The alg parameter should be the name of the JWA digital signature algorithm -// which was used to produce the signature and should be supported by this -// public key. Returns a nil error if the signature is valid. -func (k *rsaPublicKey) Verify(data io.Reader, alg string, signature []byte) error { - // Verify the signature of the given date, return non-nil error if valid. - sigAlg, err := rsaSignatureAlgorithmByName(alg) - if err != nil { - return fmt.Errorf("unable to verify Signature: %s", err) - } - - hasher := sigAlg.HashID().New() - _, err = io.Copy(hasher, data) - if err != nil { - return fmt.Errorf("error reading data to sign: %s", err) - } - hash := hasher.Sum(nil) - - err = rsa.VerifyPKCS1v15(k.PublicKey, sigAlg.HashID(), hash, signature) - if err != nil { - return fmt.Errorf("invalid %s signature: %s", sigAlg.HeaderParam(), err) - } - - return nil -} - -// CryptoPublicKey returns the internal object which can be used as a -// crypto.PublicKey for use with other standard library operations. The type -// is either *rsa.PublicKey or *ecdsa.PublicKey -func (k *rsaPublicKey) CryptoPublicKey() crypto.PublicKey { - return k.PublicKey -} - -func (k *rsaPublicKey) toMap() map[string]interface{} { - jwk := make(map[string]interface{}) - for k, v := range k.extended { - jwk[k] = v - } - jwk["kty"] = k.KeyType() - jwk["kid"] = k.KeyID() - jwk["n"] = joseBase64UrlEncode(k.N.Bytes()) - jwk["e"] = joseBase64UrlEncode(serializeRSAPublicExponentParam(k.E)) - - return jwk -} - -// MarshalJSON serializes this Public Key using the JWK JSON serialization format for -// RSA keys. -func (k *rsaPublicKey) MarshalJSON() (data []byte, err error) { - return json.Marshal(k.toMap()) -} - -// PEMBlock serializes this Public Key to DER-encoded PKIX format. -func (k *rsaPublicKey) PEMBlock() (*pem.Block, error) { - derBytes, err := x509.MarshalPKIXPublicKey(k.PublicKey) - if err != nil { - return nil, fmt.Errorf("unable to serialize RSA PublicKey to DER-encoded PKIX format: %s", err) - } - k.extended["kid"] = k.KeyID() // For display purposes. - return createPemBlock("PUBLIC KEY", derBytes, k.extended) -} - -func (k *rsaPublicKey) AddExtendedField(field string, value interface{}) { - k.extended[field] = value -} - -func (k *rsaPublicKey) GetExtendedField(field string) interface{} { - v, ok := k.extended[field] - if !ok { - return nil - } - return v -} - -func rsaPublicKeyFromMap(jwk map[string]interface{}) (*rsaPublicKey, error) { - // JWK key type (kty) has already been determined to be "RSA". - // Need to extract 'n', 'e', and 'kid' and check for - // consistency. - - // Get the modulus parameter N. 
- nB64Url, err := stringFromMap(jwk, "n") - if err != nil { - return nil, fmt.Errorf("JWK RSA Public Key modulus: %s", err) - } - - n, err := parseRSAModulusParam(nB64Url) - if err != nil { - return nil, fmt.Errorf("JWK RSA Public Key modulus: %s", err) - } - - // Get the public exponent E. - eB64Url, err := stringFromMap(jwk, "e") - if err != nil { - return nil, fmt.Errorf("JWK RSA Public Key exponent: %s", err) - } - - e, err := parseRSAPublicExponentParam(eB64Url) - if err != nil { - return nil, fmt.Errorf("JWK RSA Public Key exponent: %s", err) - } - - key := &rsaPublicKey{ - PublicKey: &rsa.PublicKey{N: n, E: e}, - } - - // Key ID is optional, but if it exists, it should match the key. - _, ok := jwk["kid"] - if ok { - kid, err := stringFromMap(jwk, "kid") - if err != nil { - return nil, fmt.Errorf("JWK RSA Public Key ID: %s", err) - } - if kid != key.KeyID() { - return nil, fmt.Errorf("JWK RSA Public Key ID does not match: %s", kid) - } - } - - if _, ok := jwk["d"]; ok { - return nil, fmt.Errorf("JWK RSA Public Key cannot contain private exponent") - } - - key.extended = jwk - - return key, nil -} - -/* - * RSA DSA PRIVATE KEY - */ - -// rsaPrivateKey implements a JWK Private Key using RSA digital signature algorithms. -type rsaPrivateKey struct { - rsaPublicKey - *rsa.PrivateKey -} - -func fromRSAPrivateKey(cryptoPrivateKey *rsa.PrivateKey) *rsaPrivateKey { - return &rsaPrivateKey{ - *fromRSAPublicKey(&cryptoPrivateKey.PublicKey), - cryptoPrivateKey, - } -} - -// PublicKey returns the Public Key data associated with this Private Key. -func (k *rsaPrivateKey) PublicKey() PublicKey { - return &k.rsaPublicKey -} - -func (k *rsaPrivateKey) String() string { - return fmt.Sprintf("RSA Private Key <%s>", k.KeyID()) -} - -// Sign signs the data read from the io.Reader using a signature algorithm supported -// by the RSA private key. If the specified hashing algorithm is supported by -// this key, that hash function is used to generate the signature otherwise the -// the default hashing algorithm for this key is used. Returns the signature -// and the name of the JWK signature algorithm used, e.g., "RS256", "RS384", -// "RS512". -func (k *rsaPrivateKey) Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) { - // Generate a signature of the data using the internal alg. - sigAlg := rsaPKCS1v15SignatureAlgorithmForHashID(hashID) - hasher := sigAlg.HashID().New() - - _, err = io.Copy(hasher, data) - if err != nil { - return nil, "", fmt.Errorf("error reading data to sign: %s", err) - } - hash := hasher.Sum(nil) - - signature, err = rsa.SignPKCS1v15(rand.Reader, k.PrivateKey, sigAlg.HashID(), hash) - if err != nil { - return nil, "", fmt.Errorf("error producing signature: %s", err) - } - - alg = sigAlg.HeaderParam() - - return -} - -// CryptoPrivateKey returns the internal object which can be used as a -// crypto.PublicKey for use with other standard library operations. The type -// is either *rsa.PublicKey or *ecdsa.PublicKey -func (k *rsaPrivateKey) CryptoPrivateKey() crypto.PrivateKey { - return k.PrivateKey -} - -func (k *rsaPrivateKey) toMap() map[string]interface{} { - k.Precompute() // Make sure the precomputed values are stored. 
- jwk := k.rsaPublicKey.toMap() - - jwk["d"] = joseBase64UrlEncode(k.D.Bytes()) - jwk["p"] = joseBase64UrlEncode(k.Primes[0].Bytes()) - jwk["q"] = joseBase64UrlEncode(k.Primes[1].Bytes()) - jwk["dp"] = joseBase64UrlEncode(k.Precomputed.Dp.Bytes()) - jwk["dq"] = joseBase64UrlEncode(k.Precomputed.Dq.Bytes()) - jwk["qi"] = joseBase64UrlEncode(k.Precomputed.Qinv.Bytes()) - - otherPrimes := k.Primes[2:] - - if len(otherPrimes) > 0 { - otherPrimesInfo := make([]interface{}, len(otherPrimes)) - for i, r := range otherPrimes { - otherPrimeInfo := make(map[string]string, 3) - otherPrimeInfo["r"] = joseBase64UrlEncode(r.Bytes()) - crtVal := k.Precomputed.CRTValues[i] - otherPrimeInfo["d"] = joseBase64UrlEncode(crtVal.Exp.Bytes()) - otherPrimeInfo["t"] = joseBase64UrlEncode(crtVal.Coeff.Bytes()) - otherPrimesInfo[i] = otherPrimeInfo - } - jwk["oth"] = otherPrimesInfo - } - - return jwk -} - -// MarshalJSON serializes this Private Key using the JWK JSON serialization format for -// RSA keys. -func (k *rsaPrivateKey) MarshalJSON() (data []byte, err error) { - return json.Marshal(k.toMap()) -} - -// PEMBlock serializes this Private Key to DER-encoded PKIX format. -func (k *rsaPrivateKey) PEMBlock() (*pem.Block, error) { - derBytes := x509.MarshalPKCS1PrivateKey(k.PrivateKey) - k.extended["keyID"] = k.KeyID() // For display purposes. - return createPemBlock("RSA PRIVATE KEY", derBytes, k.extended) -} - -func rsaPrivateKeyFromMap(jwk map[string]interface{}) (*rsaPrivateKey, error) { - // The JWA spec for RSA Private Keys (draft rfc section 5.3.2) states that - // only the private key exponent 'd' is REQUIRED, the others are just for - // signature/decryption optimizations and SHOULD be included when the JWK - // is produced. We MAY choose to accept a JWK which only includes 'd', but - // we're going to go ahead and not choose to accept it without the extra - // fields. Only the 'oth' field will be optional (for multi-prime keys). - privateExponent, err := parseRSAPrivateKeyParamFromMap(jwk, "d") - if err != nil { - return nil, fmt.Errorf("JWK RSA Private Key exponent: %s", err) - } - firstPrimeFactor, err := parseRSAPrivateKeyParamFromMap(jwk, "p") - if err != nil { - return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err) - } - secondPrimeFactor, err := parseRSAPrivateKeyParamFromMap(jwk, "q") - if err != nil { - return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err) - } - firstFactorCRT, err := parseRSAPrivateKeyParamFromMap(jwk, "dp") - if err != nil { - return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err) - } - secondFactorCRT, err := parseRSAPrivateKeyParamFromMap(jwk, "dq") - if err != nil { - return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err) - } - crtCoeff, err := parseRSAPrivateKeyParamFromMap(jwk, "qi") - if err != nil { - return nil, fmt.Errorf("JWK RSA Private Key CRT coefficient: %s", err) - } - - var oth interface{} - if _, ok := jwk["oth"]; ok { - oth = jwk["oth"] - delete(jwk, "oth") - } - - // JWK key type (kty) has already been determined to be "RSA". - // Need to extract the public key information, then extract the private - // key values. 
- publicKey, err := rsaPublicKeyFromMap(jwk) - if err != nil { - return nil, err - } - - privateKey := &rsa.PrivateKey{ - PublicKey: *publicKey.PublicKey, - D: privateExponent, - Primes: []*big.Int{firstPrimeFactor, secondPrimeFactor}, - Precomputed: rsa.PrecomputedValues{ - Dp: firstFactorCRT, - Dq: secondFactorCRT, - Qinv: crtCoeff, - }, - } - - if oth != nil { - // Should be an array of more JSON objects. - otherPrimesInfo, ok := oth.([]interface{}) - if !ok { - return nil, errors.New("JWK RSA Private Key: Invalid other primes info: must be an array") - } - numOtherPrimeFactors := len(otherPrimesInfo) - if numOtherPrimeFactors == 0 { - return nil, errors.New("JWK RSA Privake Key: Invalid other primes info: must be absent or non-empty") - } - otherPrimeFactors := make([]*big.Int, numOtherPrimeFactors) - productOfPrimes := new(big.Int).Mul(firstPrimeFactor, secondPrimeFactor) - crtValues := make([]rsa.CRTValue, numOtherPrimeFactors) - - for i, val := range otherPrimesInfo { - otherPrimeinfo, ok := val.(map[string]interface{}) - if !ok { - return nil, errors.New("JWK RSA Private Key: Invalid other prime info: must be a JSON object") - } - - otherPrimeFactor, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "r") - if err != nil { - return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err) - } - otherFactorCRT, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "d") - if err != nil { - return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err) - } - otherCrtCoeff, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "t") - if err != nil { - return nil, fmt.Errorf("JWK RSA Private Key CRT coefficient: %s", err) - } - - crtValue := crtValues[i] - crtValue.Exp = otherFactorCRT - crtValue.Coeff = otherCrtCoeff - crtValue.R = productOfPrimes - otherPrimeFactors[i] = otherPrimeFactor - productOfPrimes = new(big.Int).Mul(productOfPrimes, otherPrimeFactor) - } - - privateKey.Primes = append(privateKey.Primes, otherPrimeFactors...) - privateKey.Precomputed.CRTValues = crtValues - } - - key := &rsaPrivateKey{ - rsaPublicKey: *publicKey, - PrivateKey: privateKey, - } - - return key, nil -} - -/* - * Key Generation Functions. - */ - -func generateRSAPrivateKey(bits int) (k *rsaPrivateKey, err error) { - k = new(rsaPrivateKey) - k.PrivateKey, err = rsa.GenerateKey(rand.Reader, bits) - if err != nil { - return nil, err - } - - k.rsaPublicKey.PublicKey = &k.PrivateKey.PublicKey - k.extended = make(map[string]interface{}) - - return -} - -// GenerateRSA2048PrivateKey generates a key pair using 2048-bit RSA. -func GenerateRSA2048PrivateKey() (PrivateKey, error) { - k, err := generateRSAPrivateKey(2048) - if err != nil { - return nil, fmt.Errorf("error generating RSA 2048-bit key: %s", err) - } - - return k, nil -} - -// GenerateRSA3072PrivateKey generates a key pair using 3072-bit RSA. -func GenerateRSA3072PrivateKey() (PrivateKey, error) { - k, err := generateRSAPrivateKey(3072) - if err != nil { - return nil, fmt.Errorf("error generating RSA 3072-bit key: %s", err) - } - - return k, nil -} - -// GenerateRSA4096PrivateKey generates a key pair using 4096-bit RSA. 
-func GenerateRSA4096PrivateKey() (PrivateKey, error) { - k, err := generateRSAPrivateKey(4096) - if err != nil { - return nil, fmt.Errorf("error generating RSA 4096-bit key: %s", err) - } - - return k, nil -} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/rsa_key_test.go b/Godeps/_workspace/src/github.com/docker/libtrust/rsa_key_test.go deleted file mode 100644 index 5ec7707a..00000000 --- a/Godeps/_workspace/src/github.com/docker/libtrust/rsa_key_test.go +++ /dev/null @@ -1,157 +0,0 @@ -package libtrust - -import ( - "bytes" - "encoding/json" - "log" - "testing" -) - -var rsaKeys []PrivateKey - -func init() { - var err error - rsaKeys, err = generateRSATestKeys() - if err != nil { - log.Fatal(err) - } -} - -func generateRSATestKeys() (keys []PrivateKey, err error) { - log.Println("Generating RSA 2048-bit Test Key") - rsa2048Key, err := GenerateRSA2048PrivateKey() - if err != nil { - return - } - - log.Println("Generating RSA 3072-bit Test Key") - rsa3072Key, err := GenerateRSA3072PrivateKey() - if err != nil { - return - } - - log.Println("Generating RSA 4096-bit Test Key") - rsa4096Key, err := GenerateRSA4096PrivateKey() - if err != nil { - return - } - - log.Println("Done generating RSA Test Keys!") - keys = []PrivateKey{rsa2048Key, rsa3072Key, rsa4096Key} - - return -} - -func TestRSAKeys(t *testing.T) { - for _, rsaKey := range rsaKeys { - if rsaKey.KeyType() != "RSA" { - t.Fatalf("key type must be %q, instead got %q", "RSA", rsaKey.KeyType()) - } - } -} - -func TestRSASignVerify(t *testing.T) { - message := "Hello, World!" - data := bytes.NewReader([]byte(message)) - - sigAlgs := []*signatureAlgorithm{rs256, rs384, rs512} - - for i, rsaKey := range rsaKeys { - sigAlg := sigAlgs[i] - - t.Logf("%s signature of %q with kid: %s\n", sigAlg.HeaderParam(), message, rsaKey.KeyID()) - - data.Seek(0, 0) // Reset the byte reader - - // Sign - sig, alg, err := rsaKey.Sign(data, sigAlg.HashID()) - if err != nil { - t.Fatal(err) - } - - data.Seek(0, 0) // Reset the byte reader - - // Verify - err = rsaKey.Verify(data, alg, sig) - if err != nil { - t.Fatal(err) - } - } -} - -func TestMarshalUnmarshalRSAKeys(t *testing.T) { - data := bytes.NewReader([]byte("This is a test. I repeat: this is only a test.")) - sigAlgs := []*signatureAlgorithm{rs256, rs384, rs512} - - for i, rsaKey := range rsaKeys { - sigAlg := sigAlgs[i] - privateJWKJSON, err := json.MarshalIndent(rsaKey, "", " ") - if err != nil { - t.Fatal(err) - } - - publicJWKJSON, err := json.MarshalIndent(rsaKey.PublicKey(), "", " ") - if err != nil { - t.Fatal(err) - } - - t.Logf("JWK Private Key: %s", string(privateJWKJSON)) - t.Logf("JWK Public Key: %s", string(publicJWKJSON)) - - privKey2, err := UnmarshalPrivateKeyJWK(privateJWKJSON) - if err != nil { - t.Fatal(err) - } - - pubKey2, err := UnmarshalPublicKeyJWK(publicJWKJSON) - if err != nil { - t.Fatal(err) - } - - // Ensure we can sign/verify a message with the unmarshalled keys. - data.Seek(0, 0) // Reset the byte reader - signature, alg, err := privKey2.Sign(data, sigAlg.HashID()) - if err != nil { - t.Fatal(err) - } - - data.Seek(0, 0) // Reset the byte reader - err = pubKey2.Verify(data, alg, signature) - if err != nil { - t.Fatal(err) - } - - // It's a good idea to validate the Private Key to make sure our - // (un)marshal process didn't corrupt the extra parameters. 
- k := privKey2.(*rsaPrivateKey) - err = k.PrivateKey.Validate() - if err != nil { - t.Fatal(err) - } - } -} - -func TestFromCryptoRSAKeys(t *testing.T) { - for _, rsaKey := range rsaKeys { - cryptoPrivateKey := rsaKey.CryptoPrivateKey() - cryptoPublicKey := rsaKey.CryptoPublicKey() - - pubKey, err := FromCryptoPublicKey(cryptoPublicKey) - if err != nil { - t.Fatal(err) - } - - if pubKey.KeyID() != rsaKey.KeyID() { - t.Fatal("public key key ID mismatch") - } - - privKey, err := FromCryptoPrivateKey(cryptoPrivateKey) - if err != nil { - t.Fatal(err) - } - - if privKey.KeyID() != rsaKey.KeyID() { - t.Fatal("public key key ID mismatch") - } - } -} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/testutil/certificates.go b/Godeps/_workspace/src/github.com/docker/libtrust/testutil/certificates.go deleted file mode 100644 index 89debf6b..00000000 --- a/Godeps/_workspace/src/github.com/docker/libtrust/testutil/certificates.go +++ /dev/null @@ -1,94 +0,0 @@ -package testutil - -import ( - "crypto" - "crypto/rand" - "crypto/x509" - "crypto/x509/pkix" - "math/big" - "time" -) - -// GenerateTrustCA generates a new certificate authority for testing. -func GenerateTrustCA(pub crypto.PublicKey, priv crypto.PrivateKey) (*x509.Certificate, error) { - cert := &x509.Certificate{ - SerialNumber: big.NewInt(0), - Subject: pkix.Name{ - CommonName: "CA Root", - }, - NotBefore: time.Now().Add(-time.Second), - NotAfter: time.Now().Add(time.Hour), - IsCA: true, - KeyUsage: x509.KeyUsageCertSign | x509.KeyUsageCRLSign, - BasicConstraintsValid: true, - } - - certDER, err := x509.CreateCertificate(rand.Reader, cert, cert, pub, priv) - if err != nil { - return nil, err - } - - cert, err = x509.ParseCertificate(certDER) - if err != nil { - return nil, err - } - - return cert, nil -} - -// GenerateIntermediate generates an intermediate certificate for testing using -// the parent certificate (likely a CA) and the provided keys. -func GenerateIntermediate(key crypto.PublicKey, parentKey crypto.PrivateKey, parent *x509.Certificate) (*x509.Certificate, error) { - cert := &x509.Certificate{ - SerialNumber: big.NewInt(0), - Subject: pkix.Name{ - CommonName: "Intermediate", - }, - NotBefore: time.Now().Add(-time.Second), - NotAfter: time.Now().Add(time.Hour), - IsCA: true, - KeyUsage: x509.KeyUsageCertSign | x509.KeyUsageCRLSign, - BasicConstraintsValid: true, - } - - certDER, err := x509.CreateCertificate(rand.Reader, cert, parent, key, parentKey) - if err != nil { - return nil, err - } - - cert, err = x509.ParseCertificate(certDER) - if err != nil { - return nil, err - } - - return cert, nil -} - -// GenerateTrustCert generates a new trust certificate for testing. Unlike the -// intermediate certificates, this certificate should be used for signature -// only, not creating certificates. 
-func GenerateTrustCert(key crypto.PublicKey, parentKey crypto.PrivateKey, parent *x509.Certificate) (*x509.Certificate, error) { - cert := &x509.Certificate{ - SerialNumber: big.NewInt(0), - Subject: pkix.Name{ - CommonName: "Trust Cert", - }, - NotBefore: time.Now().Add(-time.Second), - NotAfter: time.Now().Add(time.Hour), - IsCA: true, - KeyUsage: x509.KeyUsageDigitalSignature, - BasicConstraintsValid: true, - } - - certDER, err := x509.CreateCertificate(rand.Reader, cert, parent, key, parentKey) - if err != nil { - return nil, err - } - - cert, err = x509.ParseCertificate(certDER) - if err != nil { - return nil, err - } - - return cert, nil -} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/README.md b/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/README.md deleted file mode 100644 index 24124db2..00000000 --- a/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/README.md +++ /dev/null @@ -1,50 +0,0 @@ -## Libtrust TLS Config Demo - -This program generates key pairs and trust files for a TLS client and server. - -To generate the keys, run: - -``` -$ go run genkeys.go -``` - -The generated files are: - -``` -$ ls -l client_data/ server_data/ -client_data/: -total 24 --rw------- 1 jlhawn staff 281 Aug 8 16:21 private_key.json --rw-r--r-- 1 jlhawn staff 225 Aug 8 16:21 public_key.json --rw-r--r-- 1 jlhawn staff 275 Aug 8 16:21 trusted_hosts.json - -server_data/: -total 24 --rw-r--r-- 1 jlhawn staff 348 Aug 8 16:21 trusted_clients.json --rw------- 1 jlhawn staff 281 Aug 8 16:21 private_key.json --rw-r--r-- 1 jlhawn staff 225 Aug 8 16:21 public_key.json -``` - -The private key and public key for the client and server are stored in `private_key.json` and `public_key.json`, respectively, and in their respective directories. They are represented as JSON Web Keys: JSON objects which represent either an ECDSA or RSA private key. The host keys trusted by the client are stored in `trusted_hosts.json` and contain a mapping of an internet address, `:`, to a JSON Web Key which is a JSON object representing either an ECDSA or RSA public key of the trusted server. The client keys trusted by the server are stored in `trusted_clients.json` and contain an array of JSON objects which contain a comment field which can be used describe the key and a JSON Web Key which is a JSON object representing either an ECDSA or RSA public key of the trusted client. - -To start the server, run: - -``` -$ go run server.go -``` - -This starts an HTTPS server which listens on `localhost:8888`. The server configures itself with a certificate which is valid for both `localhost` and `127.0.0.1` and uses the key from `server_data/private_key.json`. It accepts connections from clients which present a certificate for a key that it is configured to trust from the `trusted_clients.json` file and returns a simple 'hello' message. - -To make a request using the client, run: - -``` -$ go run client.go -``` - -This command creates an HTTPS client which makes a GET request to `https://localhost:8888`. The client configures itself with a certificate using the key from `client_data/private_key.json`. It only connects to a server which presents a certificate signed by the key specified for the `localhost:8888` address from `client_data/trusted_hosts.json` and made to be used for the `localhost` hostname. If the connection succeeds, it prints the response from the server. - -The file `gencert.go` can be used to generate PEM encoded version of the client key and certificate. 
If you save them to `key.pem` and `cert.pem` respectively, you can use them with `curl` to test out the server (if it is still running). - -``` -curl --cert cert.pem --key key.pem -k https://localhost:8888 -``` diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/client.go b/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/client.go deleted file mode 100644 index 0a699a0e..00000000 --- a/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/client.go +++ /dev/null @@ -1,89 +0,0 @@ -package main - -import ( - "crypto/tls" - "fmt" - "io/ioutil" - "log" - "net" - "net/http" - - "github.com/docker/libtrust" -) - -var ( - serverAddress = "localhost:8888" - privateKeyFilename = "client_data/private_key.pem" - trustedHostsFilename = "client_data/trusted_hosts.pem" -) - -func main() { - // Load Client Key. - clientKey, err := libtrust.LoadKeyFile(privateKeyFilename) - if err != nil { - log.Fatal(err) - } - - // Generate Client Certificate. - selfSignedClientCert, err := libtrust.GenerateSelfSignedClientCert(clientKey) - if err != nil { - log.Fatal(err) - } - - // Load trusted host keys. - hostKeys, err := libtrust.LoadKeySetFile(trustedHostsFilename) - if err != nil { - log.Fatal(err) - } - - // Ensure the host we want to connect to is trusted! - host, _, err := net.SplitHostPort(serverAddress) - if err != nil { - log.Fatal(err) - } - serverKeys, err := libtrust.FilterByHosts(hostKeys, host, false) - if err != nil { - log.Fatalf("%q is not a known and trusted host", host) - } - - // Generate a CA pool with the trusted host's key. - caPool, err := libtrust.GenerateCACertPool(clientKey, serverKeys) - if err != nil { - log.Fatal(err) - } - - // Create HTTP Client. - client := &http.Client{ - Transport: &http.Transport{ - TLSClientConfig: &tls.Config{ - Certificates: []tls.Certificate{ - tls.Certificate{ - Certificate: [][]byte{selfSignedClientCert.Raw}, - PrivateKey: clientKey.CryptoPrivateKey(), - Leaf: selfSignedClientCert, - }, - }, - RootCAs: caPool, - }, - }, - } - - var makeRequest = func(url string) { - resp, err := client.Get(url) - if err != nil { - log.Fatal(err) - } - defer resp.Body.Close() - - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - log.Fatal(err) - } - - log.Println(resp.Status) - log.Println(string(body)) - } - - // Make the request to the trusted server! 
- makeRequest(fmt.Sprintf("https://%s", serverAddress)) -} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/gencert.go b/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/gencert.go deleted file mode 100644 index c65f3b6b..00000000 --- a/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/gencert.go +++ /dev/null @@ -1,62 +0,0 @@ -package main - -import ( - "encoding/pem" - "fmt" - "log" - "net" - - "github.com/docker/libtrust" -) - -var ( - serverAddress = "localhost:8888" - clientPrivateKeyFilename = "client_data/private_key.pem" - trustedHostsFilename = "client_data/trusted_hosts.pem" -) - -func main() { - key, err := libtrust.LoadKeyFile(clientPrivateKeyFilename) - if err != nil { - log.Fatal(err) - } - - keyPEMBlock, err := key.PEMBlock() - if err != nil { - log.Fatal(err) - } - - encodedPrivKey := pem.EncodeToMemory(keyPEMBlock) - fmt.Printf("Client Key:\n\n%s\n", string(encodedPrivKey)) - - cert, err := libtrust.GenerateSelfSignedClientCert(key) - if err != nil { - log.Fatal(err) - } - - encodedCert := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw}) - fmt.Printf("Client Cert:\n\n%s\n", string(encodedCert)) - - trustedServerKeys, err := libtrust.LoadKeySetFile(trustedHostsFilename) - if err != nil { - log.Fatal(err) - } - - hostname, _, err := net.SplitHostPort(serverAddress) - if err != nil { - log.Fatal(err) - } - - trustedServerKeys, err = libtrust.FilterByHosts(trustedServerKeys, hostname, false) - if err != nil { - log.Fatal(err) - } - - caCert, err := libtrust.GenerateCACert(key, trustedServerKeys[0]) - if err != nil { - log.Fatal(err) - } - - encodedCert = pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: caCert.Raw}) - fmt.Printf("CA Cert:\n\n%s\n", string(encodedCert)) -} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/genkeys.go b/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/genkeys.go deleted file mode 100644 index 9dc8842a..00000000 --- a/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/genkeys.go +++ /dev/null @@ -1,61 +0,0 @@ -package main - -import ( - "log" - - "github.com/docker/libtrust" -) - -func main() { - // Generate client key. - clientKey, err := libtrust.GenerateECP256PrivateKey() - if err != nil { - log.Fatal(err) - } - - // Add a comment for the client key. - clientKey.AddExtendedField("comment", "TLS Demo Client") - - // Save the client key, public and private versions. - err = libtrust.SaveKey("client_data/private_key.pem", clientKey) - if err != nil { - log.Fatal(err) - } - - err = libtrust.SavePublicKey("client_data/public_key.pem", clientKey.PublicKey()) - if err != nil { - log.Fatal(err) - } - - // Generate server key. - serverKey, err := libtrust.GenerateECP256PrivateKey() - if err != nil { - log.Fatal(err) - } - - // Set the list of addresses to use for the server. - serverKey.AddExtendedField("hosts", []string{"localhost", "docker.example.com"}) - - // Save the server key, public and private versions. - err = libtrust.SaveKey("server_data/private_key.pem", serverKey) - if err != nil { - log.Fatal(err) - } - - err = libtrust.SavePublicKey("server_data/public_key.pem", serverKey.PublicKey()) - if err != nil { - log.Fatal(err) - } - - // Generate Authorized Keys file for server. - err = libtrust.AddKeySetFile("server_data/trusted_clients.pem", clientKey.PublicKey()) - if err != nil { - log.Fatal(err) - } - - // Generate Known Host Keys file for client. 
- err = libtrust.AddKeySetFile("client_data/trusted_hosts.pem", serverKey.PublicKey()) - if err != nil { - log.Fatal(err) - } -} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/server.go b/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/server.go deleted file mode 100644 index d3cb2ea9..00000000 --- a/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/server.go +++ /dev/null @@ -1,80 +0,0 @@ -package main - -import ( - "crypto/tls" - "fmt" - "html" - "log" - "net" - "net/http" - - "github.com/docker/libtrust" -) - -var ( - serverAddress = "localhost:8888" - privateKeyFilename = "server_data/private_key.pem" - authorizedClientsFilename = "server_data/trusted_clients.pem" -) - -func requestHandler(w http.ResponseWriter, r *http.Request) { - clientCert := r.TLS.PeerCertificates[0] - keyID := clientCert.Subject.CommonName - log.Printf("Request from keyID: %s\n", keyID) - fmt.Fprintf(w, "Hello, client! I'm a server! And you are %T: %s.\n", clientCert.PublicKey, html.EscapeString(keyID)) -} - -func main() { - // Load server key. - serverKey, err := libtrust.LoadKeyFile(privateKeyFilename) - if err != nil { - log.Fatal(err) - } - - // Generate server certificate. - selfSignedServerCert, err := libtrust.GenerateSelfSignedServerCert( - serverKey, []string{"localhost"}, []net.IP{net.ParseIP("127.0.0.1")}, - ) - if err != nil { - log.Fatal(err) - } - - // Load authorized client keys. - authorizedClients, err := libtrust.LoadKeySetFile(authorizedClientsFilename) - if err != nil { - log.Fatal(err) - } - - // Create CA pool using trusted client keys. - caPool, err := libtrust.GenerateCACertPool(serverKey, authorizedClients) - if err != nil { - log.Fatal(err) - } - - // Create TLS config, requiring client certificates. - tlsConfig := &tls.Config{ - Certificates: []tls.Certificate{ - tls.Certificate{ - Certificate: [][]byte{selfSignedServerCert.Raw}, - PrivateKey: serverKey.CryptoPrivateKey(), - Leaf: selfSignedServerCert, - }, - }, - ClientAuth: tls.RequireAndVerifyClientCert, - ClientCAs: caPool, - } - - // Create HTTP server with simple request handler. - server := &http.Server{ - Addr: serverAddress, - Handler: http.HandlerFunc(requestHandler), - } - - // Listen and server HTTPS using the libtrust TLS config. - listener, err := net.Listen("tcp", server.Addr) - if err != nil { - log.Fatal(err) - } - tlsListener := tls.NewListener(listener, tlsConfig) - server.Serve(tlsListener) -} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/graph.go b/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/graph.go deleted file mode 100644 index 72b0fc36..00000000 --- a/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/graph.go +++ /dev/null @@ -1,50 +0,0 @@ -package trustgraph - -import "github.com/docker/libtrust" - -// TrustGraph represents a graph of authorization mapping -// public keys to nodes and grants between nodes. -type TrustGraph interface { - // Verifies that the given public key is allowed to perform - // the given action on the given node according to the trust - // graph. - Verify(libtrust.PublicKey, string, uint16) (bool, error) - - // GetGrants returns an array of all grant chains which are used to - // allow the requested permission. - GetGrants(libtrust.PublicKey, string, uint16) ([][]*Grant, error) -} - -// Grant represents a transfer of permission from one part of the -// trust graph to another. This is the only way to delegate -// permission between two different sub trees in the graph. 
-type Grant struct { - // Subject is the namespace being granted - Subject string - - // Permissions is a bit map of permissions - Permission uint16 - - // Grantee represents the node being granted - // a permission scope. The grantee can be - // either a namespace item or a key id where namespace - // items will always start with a '/'. - Grantee string - - // statement represents the statement used to create - // this object. - statement *Statement -} - -// Permissions -// Read node 0x01 (can read node, no sub nodes) -// Write node 0x02 (can write to node object, cannot create subnodes) -// Read subtree 0x04 (delegates read to each sub node) -// Write subtree 0x08 (delegates write to each sub node, included create on the subject) -// -// Permission shortcuts -// ReadItem = 0x01 -// WriteItem = 0x03 -// ReadAccess = 0x07 -// WriteAccess = 0x0F -// Delegate = 0x0F diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/memory_graph.go b/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/memory_graph.go deleted file mode 100644 index 247bfa7a..00000000 --- a/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/memory_graph.go +++ /dev/null @@ -1,133 +0,0 @@ -package trustgraph - -import ( - "strings" - - "github.com/docker/libtrust" -) - -type grantNode struct { - grants []*Grant - children map[string]*grantNode -} - -type memoryGraph struct { - roots map[string]*grantNode -} - -func newGrantNode() *grantNode { - return &grantNode{ - grants: []*Grant{}, - children: map[string]*grantNode{}, - } -} - -// NewMemoryGraph returns a new in memory trust graph created from -// a static list of grants. This graph is immutable after creation -// and any alterations should create a new instance. -func NewMemoryGraph(grants []*Grant) TrustGraph { - roots := map[string]*grantNode{} - for _, grant := range grants { - parts := strings.Split(grant.Grantee, "/") - nodes := roots - var node *grantNode - var nodeOk bool - for _, part := range parts { - node, nodeOk = nodes[part] - if !nodeOk { - node = newGrantNode() - nodes[part] = node - } - if part != "" { - node.grants = append(node.grants, grant) - } - nodes = node.children - } - } - return &memoryGraph{roots} -} - -func (g *memoryGraph) getGrants(name string) []*Grant { - nameParts := strings.Split(name, "/") - nodes := g.roots - var node *grantNode - var nodeOk bool - for _, part := range nameParts { - node, nodeOk = nodes[part] - if !nodeOk { - return nil - } - nodes = node.children - } - return node.grants -} - -func isSubName(name, sub string) bool { - if strings.HasPrefix(name, sub) { - if len(name) == len(sub) || name[len(sub)] == '/' { - return true - } - } - return false -} - -type walkFunc func(*Grant, []*Grant) bool - -func foundWalkFunc(*Grant, []*Grant) bool { - return true -} - -func (g *memoryGraph) walkGrants(start, target string, permission uint16, f walkFunc, chain []*Grant, visited map[*Grant]bool, collect bool) bool { - if visited == nil { - visited = map[*Grant]bool{} - } - grants := g.getGrants(start) - subGrants := make([]*Grant, 0, len(grants)) - for _, grant := range grants { - if visited[grant] { - continue - } - visited[grant] = true - if grant.Permission&permission == permission { - if isSubName(target, grant.Subject) { - if f(grant, chain) { - return true - } - } else { - subGrants = append(subGrants, grant) - } - } - } - for _, grant := range subGrants { - var chainCopy []*Grant - if collect { - chainCopy = make([]*Grant, len(chain)+1) - copy(chainCopy, chain) - chainCopy[len(chainCopy)-1] = 
grant - } else { - chainCopy = nil - } - - if g.walkGrants(grant.Subject, target, permission, f, chainCopy, visited, collect) { - return true - } - } - return false -} - -func (g *memoryGraph) Verify(key libtrust.PublicKey, node string, permission uint16) (bool, error) { - return g.walkGrants(key.KeyID(), node, permission, foundWalkFunc, nil, nil, false), nil -} - -func (g *memoryGraph) GetGrants(key libtrust.PublicKey, node string, permission uint16) ([][]*Grant, error) { - grants := [][]*Grant{} - collect := func(grant *Grant, chain []*Grant) bool { - grantChain := make([]*Grant, len(chain)+1) - copy(grantChain, chain) - grantChain[len(grantChain)-1] = grant - grants = append(grants, grantChain) - return false - } - g.walkGrants(key.KeyID(), node, permission, collect, nil, nil, true) - return grants, nil -} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/memory_graph_test.go b/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/memory_graph_test.go deleted file mode 100644 index 49fd0f3b..00000000 --- a/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/memory_graph_test.go +++ /dev/null @@ -1,174 +0,0 @@ -package trustgraph - -import ( - "fmt" - "testing" - - "github.com/docker/libtrust" -) - -func createTestKeysAndGrants(count int) ([]*Grant, []libtrust.PrivateKey) { - grants := make([]*Grant, count) - keys := make([]libtrust.PrivateKey, count) - for i := 0; i < count; i++ { - pk, err := libtrust.GenerateECP256PrivateKey() - if err != nil { - panic(err) - } - grant := &Grant{ - Subject: fmt.Sprintf("/user-%d", i+1), - Permission: 0x0f, - Grantee: pk.KeyID(), - } - keys[i] = pk - grants[i] = grant - } - return grants, keys -} - -func testVerified(t *testing.T, g TrustGraph, k libtrust.PublicKey, keyName, target string, permission uint16) { - if ok, err := g.Verify(k, target, permission); err != nil { - t.Fatalf("Unexpected error during verification: %s", err) - } else if !ok { - t.Errorf("key failed verification\n\tKey: %s(%s)\n\tNamespace: %s", keyName, k.KeyID(), target) - } -} - -func testNotVerified(t *testing.T, g TrustGraph, k libtrust.PublicKey, keyName, target string, permission uint16) { - if ok, err := g.Verify(k, target, permission); err != nil { - t.Fatalf("Unexpected error during verification: %s", err) - } else if ok { - t.Errorf("key should have failed verification\n\tKey: %s(%s)\n\tNamespace: %s", keyName, k.KeyID(), target) - } -} - -func TestVerify(t *testing.T) { - grants, keys := createTestKeysAndGrants(4) - extraGrants := make([]*Grant, 3) - extraGrants[0] = &Grant{ - Subject: "/user-3", - Permission: 0x0f, - Grantee: "/user-2", - } - extraGrants[1] = &Grant{ - Subject: "/user-3/sub-project", - Permission: 0x0f, - Grantee: "/user-4", - } - extraGrants[2] = &Grant{ - Subject: "/user-4", - Permission: 0x07, - Grantee: "/user-1", - } - grants = append(grants, extraGrants...) 
- - g := NewMemoryGraph(grants) - - testVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-1", 0x0f) - testVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-1/some-project/sub-value", 0x0f) - testVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-4", 0x07) - testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-2/", 0x0f) - testVerified(t, g, keys[2].PublicKey(), "user-key-3", "/user-3/sub-value", 0x0f) - testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-3/sub-value", 0x0f) - testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-3", 0x0f) - testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-3/", 0x0f) - testVerified(t, g, keys[3].PublicKey(), "user-key-4", "/user-3/sub-project", 0x0f) - testVerified(t, g, keys[3].PublicKey(), "user-key-4", "/user-3/sub-project/app", 0x0f) - testVerified(t, g, keys[3].PublicKey(), "user-key-4", "/user-4", 0x0f) - - testNotVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-2", 0x0f) - testNotVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-3/sub-value", 0x0f) - testNotVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-4", 0x0f) - testNotVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-1/", 0x0f) - testNotVerified(t, g, keys[2].PublicKey(), "user-key-3", "/user-2", 0x0f) - testNotVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-4", 0x0f) - testNotVerified(t, g, keys[3].PublicKey(), "user-key-4", "/user-3", 0x0f) -} - -func TestCircularWalk(t *testing.T) { - grants, keys := createTestKeysAndGrants(3) - user1Grant := &Grant{ - Subject: "/user-2", - Permission: 0x0f, - Grantee: "/user-1", - } - user2Grant := &Grant{ - Subject: "/user-1", - Permission: 0x0f, - Grantee: "/user-2", - } - grants = append(grants, user1Grant, user2Grant) - - g := NewMemoryGraph(grants) - - testVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-1", 0x0f) - testVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-2", 0x0f) - testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-2", 0x0f) - testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-1", 0x0f) - testVerified(t, g, keys[2].PublicKey(), "user-key-3", "/user-3", 0x0f) - - testNotVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-3", 0x0f) - testNotVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-3", 0x0f) -} - -func assertGrantSame(t *testing.T, actual, expected *Grant) { - if actual != expected { - t.Fatalf("Unexpected grant retrieved\n\tExpected: %v\n\tActual: %v", expected, actual) - } -} - -func TestGetGrants(t *testing.T) { - grants, keys := createTestKeysAndGrants(5) - extraGrants := make([]*Grant, 4) - extraGrants[0] = &Grant{ - Subject: "/user-3/friend-project", - Permission: 0x0f, - Grantee: "/user-2/friends", - } - extraGrants[1] = &Grant{ - Subject: "/user-3/sub-project", - Permission: 0x0f, - Grantee: "/user-4", - } - extraGrants[2] = &Grant{ - Subject: "/user-2/friends", - Permission: 0x0f, - Grantee: "/user-5/fun-project", - } - extraGrants[3] = &Grant{ - Subject: "/user-5/fun-project", - Permission: 0x0f, - Grantee: "/user-1", - } - grants = append(grants, extraGrants...) 
- - g := NewMemoryGraph(grants) - - grantChains, err := g.GetGrants(keys[3], "/user-3/sub-project/specific-app", 0x0f) - if err != nil { - t.Fatalf("Error getting grants: %s", err) - } - if len(grantChains) != 1 { - t.Fatalf("Expected number of grant chains returned, expected %d, received %d", 1, len(grantChains)) - } - if len(grantChains[0]) != 2 { - t.Fatalf("Unexpected number of grants retrieved\n\tExpected: %d\n\tActual: %d", 2, len(grantChains[0])) - } - assertGrantSame(t, grantChains[0][0], grants[3]) - assertGrantSame(t, grantChains[0][1], extraGrants[1]) - - grantChains, err = g.GetGrants(keys[0], "/user-3/friend-project/fun-app", 0x0f) - if err != nil { - t.Fatalf("Error getting grants: %s", err) - } - if len(grantChains) != 1 { - t.Fatalf("Expected number of grant chains returned, expected %d, received %d", 1, len(grantChains)) - } - if len(grantChains[0]) != 4 { - t.Fatalf("Unexpected number of grants retrieved\n\tExpected: %d\n\tActual: %d", 2, len(grantChains[0])) - } - assertGrantSame(t, grantChains[0][0], grants[0]) - assertGrantSame(t, grantChains[0][1], extraGrants[3]) - assertGrantSame(t, grantChains[0][2], extraGrants[2]) - assertGrantSame(t, grantChains[0][3], extraGrants[0]) -} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/statement.go b/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/statement.go deleted file mode 100644 index 7a74b553..00000000 --- a/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/statement.go +++ /dev/null @@ -1,227 +0,0 @@ -package trustgraph - -import ( - "crypto/x509" - "encoding/json" - "io" - "io/ioutil" - "sort" - "strings" - "time" - - "github.com/docker/libtrust" -) - -type jsonGrant struct { - Subject string `json:"subject"` - Permission uint16 `json:"permission"` - Grantee string `json:"grantee"` -} - -type jsonRevocation struct { - Subject string `json:"subject"` - Revocation uint16 `json:"revocation"` - Grantee string `json:"grantee"` -} - -type jsonStatement struct { - Revocations []*jsonRevocation `json:"revocations"` - Grants []*jsonGrant `json:"grants"` - Expiration time.Time `json:"expiration"` - IssuedAt time.Time `json:"issuedAt"` -} - -func (g *jsonGrant) Grant(statement *Statement) *Grant { - return &Grant{ - Subject: g.Subject, - Permission: g.Permission, - Grantee: g.Grantee, - statement: statement, - } -} - -// Statement represents a set of grants made from a verifiable -// authority. A statement has an expiration associated with it -// set by the authority. -type Statement struct { - jsonStatement - - signature *libtrust.JSONSignature -} - -// IsExpired returns whether the statement has expired -func (s *Statement) IsExpired() bool { - return s.Expiration.Before(time.Now().Add(-10 * time.Second)) -} - -// Bytes returns an indented json representation of the statement -// in a byte array. This value can be written to a file or stream -// without alteration. -func (s *Statement) Bytes() ([]byte, error) { - return s.signature.PrettySignature("signatures") -} - -// LoadStatement loads and verifies a statement from an input stream. 
-func LoadStatement(r io.Reader, authority *x509.CertPool) (*Statement, error) { - b, err := ioutil.ReadAll(r) - if err != nil { - return nil, err - } - js, err := libtrust.ParsePrettySignature(b, "signatures") - if err != nil { - return nil, err - } - payload, err := js.Payload() - if err != nil { - return nil, err - } - var statement Statement - err = json.Unmarshal(payload, &statement.jsonStatement) - if err != nil { - return nil, err - } - - if authority == nil { - _, err = js.Verify() - if err != nil { - return nil, err - } - } else { - _, err = js.VerifyChains(authority) - if err != nil { - return nil, err - } - } - statement.signature = js - - return &statement, nil -} - -// CreateStatements creates and signs a statement from a stream of grants -// and revocations in a JSON array. -func CreateStatement(grants, revocations io.Reader, expiration time.Duration, key libtrust.PrivateKey, chain []*x509.Certificate) (*Statement, error) { - var statement Statement - err := json.NewDecoder(grants).Decode(&statement.jsonStatement.Grants) - if err != nil { - return nil, err - } - err = json.NewDecoder(revocations).Decode(&statement.jsonStatement.Revocations) - if err != nil { - return nil, err - } - statement.jsonStatement.Expiration = time.Now().UTC().Add(expiration) - statement.jsonStatement.IssuedAt = time.Now().UTC() - - b, err := json.MarshalIndent(&statement.jsonStatement, "", " ") - if err != nil { - return nil, err - } - - statement.signature, err = libtrust.NewJSONSignature(b) - if err != nil { - return nil, err - } - err = statement.signature.SignWithChain(key, chain) - if err != nil { - return nil, err - } - - return &statement, nil -} - -type statementList []*Statement - -func (s statementList) Len() int { - return len(s) -} - -func (s statementList) Less(i, j int) bool { - return s[i].IssuedAt.Before(s[j].IssuedAt) -} - -func (s statementList) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -// CollapseStatements returns a single list of the valid statements as well as the -// time when the next grant will expire. -func CollapseStatements(statements []*Statement, useExpired bool) ([]*Grant, time.Time, error) { - sorted := make(statementList, 0, len(statements)) - for _, statement := range statements { - if useExpired || !statement.IsExpired() { - sorted = append(sorted, statement) - } - } - sort.Sort(sorted) - - var minExpired time.Time - var grantCount int - roots := map[string]*grantNode{} - for i, statement := range sorted { - if statement.Expiration.Before(minExpired) || i == 0 { - minExpired = statement.Expiration - } - for _, grant := range statement.Grants { - parts := strings.Split(grant.Grantee, "/") - nodes := roots - g := grant.Grant(statement) - grantCount = grantCount + 1 - - for _, part := range parts { - node, nodeOk := nodes[part] - if !nodeOk { - node = newGrantNode() - nodes[part] = node - } - node.grants = append(node.grants, g) - nodes = node.children - } - } - - for _, revocation := range statement.Revocations { - parts := strings.Split(revocation.Grantee, "/") - nodes := roots - - var node *grantNode - var nodeOk bool - for _, part := range parts { - node, nodeOk = nodes[part] - if !nodeOk { - break - } - nodes = node.children - } - if node != nil { - for _, grant := range node.grants { - if isSubName(grant.Subject, revocation.Subject) { - grant.Permission = grant.Permission &^ revocation.Revocation - } - } - } - } - } - - retGrants := make([]*Grant, 0, grantCount) - for _, rootNodes := range roots { - retGrants = append(retGrants, rootNodes.grants...) 
- } - - return retGrants, minExpired, nil -} - -// FilterStatements filters the statements to statements including the given grants. -func FilterStatements(grants []*Grant) ([]*Statement, error) { - statements := map[*Statement]bool{} - for _, grant := range grants { - if grant.statement != nil { - statements[grant.statement] = true - } - } - retStatements := make([]*Statement, len(statements)) - var i int - for statement := range statements { - retStatements[i] = statement - i++ - } - return retStatements, nil -} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/statement_test.go b/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/statement_test.go deleted file mode 100644 index e5094686..00000000 --- a/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/statement_test.go +++ /dev/null @@ -1,417 +0,0 @@ -package trustgraph - -import ( - "bytes" - "crypto/x509" - "encoding/json" - "testing" - "time" - - "github.com/docker/libtrust" - "github.com/docker/libtrust/testutil" -) - -const testStatementExpiration = time.Hour * 5 - -func generateStatement(grants []*Grant, key libtrust.PrivateKey, chain []*x509.Certificate) (*Statement, error) { - var statement Statement - - statement.Grants = make([]*jsonGrant, len(grants)) - for i, grant := range grants { - statement.Grants[i] = &jsonGrant{ - Subject: grant.Subject, - Permission: grant.Permission, - Grantee: grant.Grantee, - } - } - statement.IssuedAt = time.Now() - statement.Expiration = time.Now().Add(testStatementExpiration) - statement.Revocations = make([]*jsonRevocation, 0) - - marshalled, err := json.MarshalIndent(statement.jsonStatement, "", " ") - if err != nil { - return nil, err - } - - sig, err := libtrust.NewJSONSignature(marshalled) - if err != nil { - return nil, err - } - err = sig.SignWithChain(key, chain) - if err != nil { - return nil, err - } - statement.signature = sig - - return &statement, nil -} - -func generateTrustChain(t *testing.T, chainLen int) (libtrust.PrivateKey, *x509.CertPool, []*x509.Certificate) { - caKey, err := libtrust.GenerateECP256PrivateKey() - if err != nil { - t.Fatalf("Error generating key: %s", err) - } - ca, err := testutil.GenerateTrustCA(caKey.CryptoPublicKey(), caKey.CryptoPrivateKey()) - if err != nil { - t.Fatalf("Error generating ca: %s", err) - } - - parent := ca - parentKey := caKey - chain := make([]*x509.Certificate, chainLen) - for i := chainLen - 1; i > 0; i-- { - intermediatekey, err := libtrust.GenerateECP256PrivateKey() - if err != nil { - t.Fatalf("Error generate key: %s", err) - } - chain[i], err = testutil.GenerateIntermediate(intermediatekey.CryptoPublicKey(), parentKey.CryptoPrivateKey(), parent) - if err != nil { - t.Fatalf("Error generating intermdiate certificate: %s", err) - } - parent = chain[i] - parentKey = intermediatekey - } - trustKey, err := libtrust.GenerateECP256PrivateKey() - if err != nil { - t.Fatalf("Error generate key: %s", err) - } - chain[0], err = testutil.GenerateTrustCert(trustKey.CryptoPublicKey(), parentKey.CryptoPrivateKey(), parent) - if err != nil { - t.Fatalf("Error generate trust cert: %s", err) - } - - caPool := x509.NewCertPool() - caPool.AddCert(ca) - - return trustKey, caPool, chain -} - -func TestLoadStatement(t *testing.T) { - grantCount := 4 - grants, _ := createTestKeysAndGrants(grantCount) - - trustKey, caPool, chain := generateTrustChain(t, 6) - - statement, err := generateStatement(grants, trustKey, chain) - if err != nil { - t.Fatalf("Error generating statement: %s", err) - } - - statementBytes, err 
:= statement.Bytes() - if err != nil { - t.Fatalf("Error getting statement bytes: %s", err) - } - - s2, err := LoadStatement(bytes.NewReader(statementBytes), caPool) - if err != nil { - t.Fatalf("Error loading statement: %s", err) - } - if len(s2.Grants) != grantCount { - t.Fatalf("Unexpected grant length\n\tExpected: %d\n\tActual: %d", grantCount, len(s2.Grants)) - } - - pool := x509.NewCertPool() - _, err = LoadStatement(bytes.NewReader(statementBytes), pool) - if err == nil { - t.Fatalf("No error thrown verifying without an authority") - } else if _, ok := err.(x509.UnknownAuthorityError); !ok { - t.Fatalf("Unexpected error verifying without authority: %s", err) - } - - s2, err = LoadStatement(bytes.NewReader(statementBytes), nil) - if err != nil { - t.Fatalf("Error loading statement: %s", err) - } - if len(s2.Grants) != grantCount { - t.Fatalf("Unexpected grant length\n\tExpected: %d\n\tActual: %d", grantCount, len(s2.Grants)) - } - - badData := make([]byte, len(statementBytes)) - copy(badData, statementBytes) - badData[0] = '[' - _, err = LoadStatement(bytes.NewReader(badData), nil) - if err == nil { - t.Fatalf("No error thrown parsing bad json") - } - - alteredData := make([]byte, len(statementBytes)) - copy(alteredData, statementBytes) - alteredData[30] = '0' - _, err = LoadStatement(bytes.NewReader(alteredData), nil) - if err == nil { - t.Fatalf("No error thrown from bad data") - } -} - -func TestCollapseGrants(t *testing.T) { - grantCount := 8 - grants, keys := createTestKeysAndGrants(grantCount) - linkGrants := make([]*Grant, 4) - linkGrants[0] = &Grant{ - Subject: "/user-3", - Permission: 0x0f, - Grantee: "/user-2", - } - linkGrants[1] = &Grant{ - Subject: "/user-3/sub-project", - Permission: 0x0f, - Grantee: "/user-4", - } - linkGrants[2] = &Grant{ - Subject: "/user-6", - Permission: 0x0f, - Grantee: "/user-7", - } - linkGrants[3] = &Grant{ - Subject: "/user-6/sub-project/specific-app", - Permission: 0x0f, - Grantee: "/user-5", - } - trustKey, pool, chain := generateTrustChain(t, 3) - - statements := make([]*Statement, 3) - var err error - statements[0], err = generateStatement(grants[0:4], trustKey, chain) - if err != nil { - t.Fatalf("Error generating statement: %s", err) - } - statements[1], err = generateStatement(grants[4:], trustKey, chain) - if err != nil { - t.Fatalf("Error generating statement: %s", err) - } - statements[2], err = generateStatement(linkGrants, trustKey, chain) - if err != nil { - t.Fatalf("Error generating statement: %s", err) - } - - statementsCopy := make([]*Statement, len(statements)) - for i, statement := range statements { - b, err := statement.Bytes() - if err != nil { - t.Fatalf("Error getting statement bytes: %s", err) - } - verifiedStatement, err := LoadStatement(bytes.NewReader(b), pool) - if err != nil { - t.Fatalf("Error loading statement: %s", err) - } - // Force sort by reversing order - statementsCopy[len(statementsCopy)-i-1] = verifiedStatement - } - statements = statementsCopy - - collapsedGrants, expiration, err := CollapseStatements(statements, false) - if len(collapsedGrants) != 12 { - t.Fatalf("Unexpected number of grants\n\tExpected: %d\n\tActual: %d", 12, len(collapsedGrants)) - } - if expiration.After(time.Now().Add(time.Hour*5)) || expiration.Before(time.Now()) { - t.Fatalf("Unexpected expiration time: %s", expiration.String()) - } - g := NewMemoryGraph(collapsedGrants) - - testVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-1", 0x0f) - testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-2", 0x0f) - 
testVerified(t, g, keys[2].PublicKey(), "user-key-3", "/user-3", 0x0f) - testVerified(t, g, keys[3].PublicKey(), "user-key-4", "/user-4", 0x0f) - testVerified(t, g, keys[4].PublicKey(), "user-key-5", "/user-5", 0x0f) - testVerified(t, g, keys[5].PublicKey(), "user-key-6", "/user-6", 0x0f) - testVerified(t, g, keys[6].PublicKey(), "user-key-7", "/user-7", 0x0f) - testVerified(t, g, keys[7].PublicKey(), "user-key-8", "/user-8", 0x0f) - testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-3", 0x0f) - testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-3/sub-project/specific-app", 0x0f) - testVerified(t, g, keys[3].PublicKey(), "user-key-4", "/user-3/sub-project", 0x0f) - testVerified(t, g, keys[6].PublicKey(), "user-key-7", "/user-6", 0x0f) - testVerified(t, g, keys[6].PublicKey(), "user-key-7", "/user-6/sub-project/specific-app", 0x0f) - testVerified(t, g, keys[4].PublicKey(), "user-key-5", "/user-6/sub-project/specific-app", 0x0f) - - testNotVerified(t, g, keys[3].PublicKey(), "user-key-4", "/user-3", 0x0f) - testNotVerified(t, g, keys[3].PublicKey(), "user-key-4", "/user-6/sub-project", 0x0f) - testNotVerified(t, g, keys[4].PublicKey(), "user-key-5", "/user-6/sub-project", 0x0f) - - // Add revocation grant - statements = append(statements, &Statement{ - jsonStatement{ - IssuedAt: time.Now(), - Expiration: time.Now().Add(testStatementExpiration), - Grants: []*jsonGrant{}, - Revocations: []*jsonRevocation{ - &jsonRevocation{ - Subject: "/user-1", - Revocation: 0x0f, - Grantee: keys[0].KeyID(), - }, - &jsonRevocation{ - Subject: "/user-2", - Revocation: 0x08, - Grantee: keys[1].KeyID(), - }, - &jsonRevocation{ - Subject: "/user-6", - Revocation: 0x0f, - Grantee: "/user-7", - }, - &jsonRevocation{ - Subject: "/user-9", - Revocation: 0x0f, - Grantee: "/user-10", - }, - }, - }, - nil, - }) - - collapsedGrants, expiration, err = CollapseStatements(statements, false) - if len(collapsedGrants) != 12 { - t.Fatalf("Unexpected number of grants\n\tExpected: %d\n\tActual: %d", 12, len(collapsedGrants)) - } - if expiration.After(time.Now().Add(time.Hour*5)) || expiration.Before(time.Now()) { - t.Fatalf("Unexpected expiration time: %s", expiration.String()) - } - g = NewMemoryGraph(collapsedGrants) - - testNotVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-1", 0x0f) - testNotVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-2", 0x0f) - testNotVerified(t, g, keys[6].PublicKey(), "user-key-7", "/user-6/sub-project/specific-app", 0x0f) - - testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-2", 0x07) -} - -func TestFilterStatements(t *testing.T) { - grantCount := 8 - grants, keys := createTestKeysAndGrants(grantCount) - linkGrants := make([]*Grant, 3) - linkGrants[0] = &Grant{ - Subject: "/user-3", - Permission: 0x0f, - Grantee: "/user-2", - } - linkGrants[1] = &Grant{ - Subject: "/user-5", - Permission: 0x0f, - Grantee: "/user-4", - } - linkGrants[2] = &Grant{ - Subject: "/user-7", - Permission: 0x0f, - Grantee: "/user-6", - } - - trustKey, _, chain := generateTrustChain(t, 3) - - statements := make([]*Statement, 5) - var err error - statements[0], err = generateStatement(grants[0:2], trustKey, chain) - if err != nil { - t.Fatalf("Error generating statement: %s", err) - } - statements[1], err = generateStatement(grants[2:4], trustKey, chain) - if err != nil { - t.Fatalf("Error generating statement: %s", err) - } - statements[2], err = generateStatement(grants[4:6], trustKey, chain) - if err != nil { - t.Fatalf("Error generating statement: %s", err) - } - 
statements[3], err = generateStatement(grants[6:], trustKey, chain) - if err != nil { - t.Fatalf("Error generating statement: %s", err) - } - statements[4], err = generateStatement(linkGrants, trustKey, chain) - if err != nil { - t.Fatalf("Error generating statement: %s", err) - } - collapsed, _, err := CollapseStatements(statements, false) - if err != nil { - t.Fatalf("Error collapsing grants: %s", err) - } - - // Filter 1, all 5 statements - filter1, err := FilterStatements(collapsed) - if err != nil { - t.Fatalf("Error filtering statements: %s", err) - } - if len(filter1) != 5 { - t.Fatalf("Wrong number of statements, expected %d, received %d", 5, len(filter1)) - } - - // Filter 2, one statement - filter2, err := FilterStatements([]*Grant{collapsed[0]}) - if err != nil { - t.Fatalf("Error filtering statements: %s", err) - } - if len(filter2) != 1 { - t.Fatalf("Wrong number of statements, expected %d, received %d", 1, len(filter2)) - } - - // Filter 3, 2 statements, from graph lookup - g := NewMemoryGraph(collapsed) - lookupGrants, err := g.GetGrants(keys[1], "/user-3", 0x0f) - if err != nil { - t.Fatalf("Error looking up grants: %s", err) - } - if len(lookupGrants) != 1 { - t.Fatalf("Wrong numberof grant chains returned from lookup, expected %d, received %d", 1, len(lookupGrants)) - } - if len(lookupGrants[0]) != 2 { - t.Fatalf("Wrong number of grants looked up, expected %d, received %d", 2, len(lookupGrants)) - } - filter3, err := FilterStatements(lookupGrants[0]) - if err != nil { - t.Fatalf("Error filtering statements: %s", err) - } - if len(filter3) != 2 { - t.Fatalf("Wrong number of statements, expected %d, received %d", 2, len(filter3)) - } - -} - -func TestCreateStatement(t *testing.T) { - grantJSON := bytes.NewReader([]byte(`[ - { - "subject": "/user-2", - "permission": 15, - "grantee": "/user-1" - }, - { - "subject": "/user-7", - "permission": 1, - "grantee": "/user-9" - }, - { - "subject": "/user-3", - "permission": 15, - "grantee": "/user-2" - } -]`)) - revocationJSON := bytes.NewReader([]byte(`[ - { - "subject": "user-8", - "revocation": 12, - "grantee": "user-9" - } -]`)) - - trustKey, pool, chain := generateTrustChain(t, 3) - - statement, err := CreateStatement(grantJSON, revocationJSON, testStatementExpiration, trustKey, chain) - if err != nil { - t.Fatalf("Error creating statement: %s", err) - } - - b, err := statement.Bytes() - if err != nil { - t.Fatalf("Error retrieving bytes: %s", err) - } - - verified, err := LoadStatement(bytes.NewReader(b), pool) - if err != nil { - t.Fatalf("Error loading statement: %s", err) - } - - if len(verified.Grants) != 3 { - t.Errorf("Unexpected number of grants, expected %d, received %d", 3, len(verified.Grants)) - } - - if len(verified.Revocations) != 1 { - t.Errorf("Unexpected number of revocations, expected %d, received %d", 1, len(verified.Revocations)) - } -} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/util.go b/Godeps/_workspace/src/github.com/docker/libtrust/util.go deleted file mode 100644 index 45dc3e18..00000000 --- a/Godeps/_workspace/src/github.com/docker/libtrust/util.go +++ /dev/null @@ -1,361 +0,0 @@ -package libtrust - -import ( - "bytes" - "crypto" - "crypto/elliptic" - "crypto/tls" - "crypto/x509" - "encoding/base32" - "encoding/base64" - "encoding/binary" - "encoding/pem" - "errors" - "fmt" - "math/big" - "net/url" - "os" - "path/filepath" - "strings" - "time" -) - -// LoadOrCreateTrustKey will load a PrivateKey from the specified path -func LoadOrCreateTrustKey(trustKeyPath string) (PrivateKey, 
error) { - if err := os.MkdirAll(filepath.Dir(trustKeyPath), 0700); err != nil { - return nil, err - } - - trustKey, err := LoadKeyFile(trustKeyPath) - if err == ErrKeyFileDoesNotExist { - trustKey, err = GenerateECP256PrivateKey() - if err != nil { - return nil, fmt.Errorf("error generating key: %s", err) - } - - if err := SaveKey(trustKeyPath, trustKey); err != nil { - return nil, fmt.Errorf("error saving key file: %s", err) - } - - dir, file := filepath.Split(trustKeyPath) - if err := SavePublicKey(filepath.Join(dir, "public-"+file), trustKey.PublicKey()); err != nil { - return nil, fmt.Errorf("error saving public key file: %s", err) - } - } else if err != nil { - return nil, fmt.Errorf("error loading key file: %s", err) - } - return trustKey, nil -} - -// NewIdentityAuthTLSClientConfig returns a tls.Config configured to use identity -// based authentication from the specified dockerUrl, the rootConfigPath and -// the server name to which it is connecting. -// If trustUnknownHosts is true it will automatically add the host to the -// known-hosts.json in rootConfigPath. -func NewIdentityAuthTLSClientConfig(dockerUrl string, trustUnknownHosts bool, rootConfigPath string, serverName string) (*tls.Config, error) { - tlsConfig := newTLSConfig() - - trustKeyPath := filepath.Join(rootConfigPath, "key.json") - knownHostsPath := filepath.Join(rootConfigPath, "known-hosts.json") - - u, err := url.Parse(dockerUrl) - if err != nil { - return nil, fmt.Errorf("unable to parse machine url") - } - - if u.Scheme == "unix" { - return nil, nil - } - - addr := u.Host - proto := "tcp" - - trustKey, err := LoadOrCreateTrustKey(trustKeyPath) - if err != nil { - return nil, fmt.Errorf("unable to load trust key: %s", err) - } - - knownHosts, err := LoadKeySetFile(knownHostsPath) - if err != nil { - return nil, fmt.Errorf("could not load trusted hosts file: %s", err) - } - - allowedHosts, err := FilterByHosts(knownHosts, addr, false) - if err != nil { - return nil, fmt.Errorf("error filtering hosts: %s", err) - } - - certPool, err := GenerateCACertPool(trustKey, allowedHosts) - if err != nil { - return nil, fmt.Errorf("Could not create CA pool: %s", err) - } - - tlsConfig.ServerName = serverName - tlsConfig.RootCAs = certPool - - x509Cert, err := GenerateSelfSignedClientCert(trustKey) - if err != nil { - return nil, fmt.Errorf("certificate generation error: %s", err) - } - - tlsConfig.Certificates = []tls.Certificate{{ - Certificate: [][]byte{x509Cert.Raw}, - PrivateKey: trustKey.CryptoPrivateKey(), - Leaf: x509Cert, - }} - - tlsConfig.InsecureSkipVerify = true - - testConn, err := tls.Dial(proto, addr, tlsConfig) - if err != nil { - return nil, fmt.Errorf("tls Handshake error: %s", err) - } - - opts := x509.VerifyOptions{ - Roots: tlsConfig.RootCAs, - CurrentTime: time.Now(), - DNSName: tlsConfig.ServerName, - Intermediates: x509.NewCertPool(), - } - - certs := testConn.ConnectionState().PeerCertificates - for i, cert := range certs { - if i == 0 { - continue - } - opts.Intermediates.AddCert(cert) - } - - if _, err := certs[0].Verify(opts); err != nil { - if _, ok := err.(x509.UnknownAuthorityError); ok { - if trustUnknownHosts { - pubKey, err := FromCryptoPublicKey(certs[0].PublicKey) - if err != nil { - return nil, fmt.Errorf("error extracting public key from cert: %s", err) - } - - pubKey.AddExtendedField("hosts", []string{addr}) - - if err := AddKeySetFile(knownHostsPath, pubKey); err != nil { - return nil, fmt.Errorf("error adding machine to known hosts: %s", err) - } - } else { - return nil, 
fmt.Errorf("unable to connect. unknown host: %s", addr) - } - } - } - - testConn.Close() - tlsConfig.InsecureSkipVerify = false - - return tlsConfig, nil -} - -// joseBase64UrlEncode encodes the given data using the standard base64 url -// encoding format but with all trailing '=' characters ommitted in accordance -// with the jose specification. -// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 -func joseBase64UrlEncode(b []byte) string { - return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=") -} - -// joseBase64UrlDecode decodes the given string using the standard base64 url -// decoder but first adds the appropriate number of trailing '=' characters in -// accordance with the jose specification. -// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 -func joseBase64UrlDecode(s string) ([]byte, error) { - switch len(s) % 4 { - case 0: - case 2: - s += "==" - case 3: - s += "=" - default: - return nil, errors.New("illegal base64url string") - } - return base64.URLEncoding.DecodeString(s) -} - -func keyIDEncode(b []byte) string { - s := strings.TrimRight(base32.StdEncoding.EncodeToString(b), "=") - var buf bytes.Buffer - var i int - for i = 0; i < len(s)/4-1; i++ { - start := i * 4 - end := start + 4 - buf.WriteString(s[start:end] + ":") - } - buf.WriteString(s[i*4:]) - return buf.String() -} - -func keyIDFromCryptoKey(pubKey PublicKey) string { - // Generate and return a 'libtrust' fingerprint of the public key. - // For an RSA key this should be: - // SHA256(DER encoded ASN1) - // Then truncated to 240 bits and encoded into 12 base32 groups like so: - // ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP - derBytes, err := x509.MarshalPKIXPublicKey(pubKey.CryptoPublicKey()) - if err != nil { - return "" - } - hasher := crypto.SHA256.New() - hasher.Write(derBytes) - return keyIDEncode(hasher.Sum(nil)[:30]) -} - -func stringFromMap(m map[string]interface{}, key string) (string, error) { - val, ok := m[key] - if !ok { - return "", fmt.Errorf("%q value not specified", key) - } - - str, ok := val.(string) - if !ok { - return "", fmt.Errorf("%q value must be a string", key) - } - delete(m, key) - - return str, nil -} - -func parseECCoordinate(cB64Url string, curve elliptic.Curve) (*big.Int, error) { - curveByteLen := (curve.Params().BitSize + 7) >> 3 - - cBytes, err := joseBase64UrlDecode(cB64Url) - if err != nil { - return nil, fmt.Errorf("invalid base64 URL encoding: %s", err) - } - cByteLength := len(cBytes) - if cByteLength != curveByteLen { - return nil, fmt.Errorf("invalid number of octets: got %d, should be %d", cByteLength, curveByteLen) - } - return new(big.Int).SetBytes(cBytes), nil -} - -func parseECPrivateParam(dB64Url string, curve elliptic.Curve) (*big.Int, error) { - dBytes, err := joseBase64UrlDecode(dB64Url) - if err != nil { - return nil, fmt.Errorf("invalid base64 URL encoding: %s", err) - } - - // The length of this octet string MUST be ceiling(log-base-2(n)/8) - // octets (where n is the order of the curve). This is because the private - // key d must be in the interval [1, n-1] so the bitlength of d should be - // no larger than the bitlength of n-1. The easiest way to find the octet - // length is to take bitlength(n-1), add 7 to force a carry, and shift this - // bit sequence right by 3, which is essentially dividing by 8 and adding - // 1 if there is any remainder. Thus, the private key value d should be - // output to (bitlength(n-1)+7)>>3 octets. 
- n := curve.Params().N - octetLength := (new(big.Int).Sub(n, big.NewInt(1)).BitLen() + 7) >> 3 - dByteLength := len(dBytes) - - if dByteLength != octetLength { - return nil, fmt.Errorf("invalid number of octets: got %d, should be %d", dByteLength, octetLength) - } - - return new(big.Int).SetBytes(dBytes), nil -} - -func parseRSAModulusParam(nB64Url string) (*big.Int, error) { - nBytes, err := joseBase64UrlDecode(nB64Url) - if err != nil { - return nil, fmt.Errorf("invalid base64 URL encoding: %s", err) - } - - return new(big.Int).SetBytes(nBytes), nil -} - -func serializeRSAPublicExponentParam(e int) []byte { - // We MUST use the minimum number of octets to represent E. - // E is supposed to be 65537 for performance and security reasons - // and is what golang's rsa package generates, but it might be - // different if imported from some other generator. - buf := make([]byte, 4) - binary.BigEndian.PutUint32(buf, uint32(e)) - var i int - for i = 0; i < 8; i++ { - if buf[i] != 0 { - break - } - } - return buf[i:] -} - -func parseRSAPublicExponentParam(eB64Url string) (int, error) { - eBytes, err := joseBase64UrlDecode(eB64Url) - if err != nil { - return 0, fmt.Errorf("invalid base64 URL encoding: %s", err) - } - // Only the minimum number of bytes were used to represent E, but - // binary.BigEndian.Uint32 expects at least 4 bytes, so we need - // to add zero padding if necassary. - byteLen := len(eBytes) - buf := make([]byte, 4-byteLen, 4) - eBytes = append(buf, eBytes...) - - return int(binary.BigEndian.Uint32(eBytes)), nil -} - -func parseRSAPrivateKeyParamFromMap(m map[string]interface{}, key string) (*big.Int, error) { - b64Url, err := stringFromMap(m, key) - if err != nil { - return nil, err - } - - paramBytes, err := joseBase64UrlDecode(b64Url) - if err != nil { - return nil, fmt.Errorf("invaled base64 URL encoding: %s", err) - } - - return new(big.Int).SetBytes(paramBytes), nil -} - -func createPemBlock(name string, derBytes []byte, headers map[string]interface{}) (*pem.Block, error) { - pemBlock := &pem.Block{Type: name, Bytes: derBytes, Headers: map[string]string{}} - for k, v := range headers { - switch val := v.(type) { - case string: - pemBlock.Headers[k] = val - case []string: - if k == "hosts" { - pemBlock.Headers[k] = strings.Join(val, ",") - } else { - // Return error, non-encodable type - } - default: - // Return error, non-encodable type - } - } - - return pemBlock, nil -} - -func pubKeyFromPEMBlock(pemBlock *pem.Block) (PublicKey, error) { - cryptoPublicKey, err := x509.ParsePKIXPublicKey(pemBlock.Bytes) - if err != nil { - return nil, fmt.Errorf("unable to decode Public Key PEM data: %s", err) - } - - pubKey, err := FromCryptoPublicKey(cryptoPublicKey) - if err != nil { - return nil, err - } - - addPEMHeadersToKey(pemBlock, pubKey) - - return pubKey, nil -} - -func addPEMHeadersToKey(pemBlock *pem.Block, pubKey PublicKey) { - for key, value := range pemBlock.Headers { - var safeVal interface{} - if key == "hosts" { - safeVal = strings.Split(value, ",") - } else { - safeVal = value - } - pubKey.AddExtendedField(key, safeVal) - } -} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/util_test.go b/Godeps/_workspace/src/github.com/docker/libtrust/util_test.go deleted file mode 100644 index ee54f5b8..00000000 --- a/Godeps/_workspace/src/github.com/docker/libtrust/util_test.go +++ /dev/null @@ -1,23 +0,0 @@ -package libtrust - -import ( - "encoding/pem" - "reflect" - "testing" -) - -func TestAddPEMHeadersToKey(t *testing.T) { - pk := &rsaPublicKey{nil, 
map[string]interface{}{}} - blk := &pem.Block{Headers: map[string]string{"hosts": "localhost,127.0.0.1"}} - addPEMHeadersToKey(blk, pk) - - val := pk.GetExtendedField("hosts") - hosts, ok := val.([]string) - if !ok { - t.Fatalf("hosts type(%v), expected []string", reflect.TypeOf(val)) - } - expected := []string{"localhost", "127.0.0.1"} - if !reflect.DeepEqual(hosts, expected) { - t.Errorf("hosts(%v), expected %v", hosts, expected) - } -} diff --git a/Godeps/_workspace/src/github.com/rancherio/rancher-compose/docker/factory.go b/Godeps/_workspace/src/github.com/rancherio/rancher-compose/docker/factory.go index 3e254552..cdcae72c 100644 --- a/Godeps/_workspace/src/github.com/rancherio/rancher-compose/docker/factory.go +++ b/Godeps/_workspace/src/github.com/rancherio/rancher-compose/docker/factory.go @@ -19,31 +19,37 @@ func Convert(c *project.ServiceConfig) (*runconfig.Config, *runconfig.HostConfig cmd, _ := shlex.Split(c.Command) entrypoint, _ := shlex.Split(c.Entrypoint) ports, binding, err := nat.ParsePortSpecs(c.Ports) + restart, err := runconfig.ParseRestartPolicy(c.Restart) + dns := c.Dns.Slice() + labels := c.Labels.MapParts() if err != nil { return nil, nil, err } config := &runconfig.Config{ - Entrypoint: entrypoint, + Entrypoint: runconfig.NewEntrypoint(entrypoint...), Hostname: c.Hostname, Domainname: c.DomainName, User: c.User, - Memory: c.MemLimit, - CpuShares: c.CpuShares, Env: c.Environment, - Cmd: cmd, + Cmd: runconfig.NewCommand(cmd...), Image: c.Image, - Labels: kvListToMap(c.Labels), + Labels: labels, ExposedPorts: ports, + Tty: c.Tty, + OpenStdin: c.StdinOpen, + WorkingDir: c.WorkingDir, } host_config := &runconfig.HostConfig{ + Memory: c.MemLimit, + CpuShares: c.CpuShares, VolumesFrom: c.VolumesFrom, CapAdd: c.CapAdd, CapDrop: c.CapDrop, Privileged: c.Privileged, Binds: c.Volumes, - Dns: c.Dns, + Dns: dns, LogConfig: runconfig.LogConfig{ Type: c.LogDriver, }, @@ -52,6 +58,7 @@ func Convert(c *project.ServiceConfig) (*runconfig.Config, *runconfig.HostConfig PidMode: runconfig.PidMode(c.Pid), IpcMode: runconfig.IpcMode(c.Ipc), PortBindings: binding, + RestartPolicy: restart, } return config, host_config, nil diff --git a/Godeps/_workspace/src/github.com/rancherio/rancher-compose/project/types.go b/Godeps/_workspace/src/github.com/rancherio/rancher-compose/project/types.go index 0b469c16..cb83495b 100644 --- a/Godeps/_workspace/src/github.com/rancherio/rancher-compose/project/types.go +++ b/Godeps/_workspace/src/github.com/rancherio/rancher-compose/project/types.go @@ -1,6 +1,10 @@ package project -import "github.com/rancherio/go-rancher/client" +import ( + "github.com/rancherio/go-rancher/client" + "gopkg.in/yaml.v2" + "strings" +) type Event string @@ -20,13 +24,110 @@ const ( PROJECT_RELOAD_TRIGGER = Event("Triggering project reload") ) +type Stringorslice struct { + parts []string +} + +func (s *Stringorslice) MarshalYAML() (interface{}, error) { + if s == nil { + return nil, nil + } + return yaml.Marshal(s.Slice()) +} + +func (s *Stringorslice) UnmarshalYAML(unmarshal func(interface{}) error) error { + var sliceType []string + err := unmarshal(&sliceType) + if err == nil { + s.parts = sliceType + return nil + } + + var stringType string + err = unmarshal(&stringType) + if err == nil { + sliceType = make([]string, 0, 1) + s.parts = append(sliceType, string(stringType)) + return nil + } + return err +} + +func (s *Stringorslice) Len() int { + if s == nil { + return 0 + } + return len(s.parts) +} + +func (s *Stringorslice) Slice() []string { + if s == nil { + return nil 
+ } + return s.parts +} + +func NewStringorslice(parts ...string) *Stringorslice { + return &Stringorslice{parts} +} + +type SliceorMap struct { + parts map[string]string +} + +func (s *SliceorMap) MarshalYAML() (interface{}, error) { + if s == nil { + return nil, nil + } + return yaml.Marshal(s.MapParts()) +} + +func (s *SliceorMap) UnmarshalYAML(unmarshal func(interface{}) error) error { + mapType := make(map[string]string) + err := unmarshal(&mapType) + if err == nil { + s.parts = mapType + return nil + } + + var sliceType []string + var keyValueSlice []string + var key string + var value string + + err = unmarshal(&sliceType) + if err == nil { + mapType = make(map[string]string) + for _, slice := range sliceType { + keyValueSlice = strings.Split(slice, "=") //split up key and value into []string + key = keyValueSlice[0] + value = keyValueSlice[1] + mapType[key] = value + } + s.parts = mapType + return nil + } + return err +} + +func (s *SliceorMap) MapParts() map[string]string { + if s == nil { + return nil + } + return s.parts +} + +func NewSliceorMap(parts map[string]string) *SliceorMap { + return &SliceorMap{parts} +} + type ServiceConfig struct { CapAdd []string `yaml:"cap_add,omitempty"` CapDrop []string `yaml:"cap_drop,omitempty"` CpuShares int64 `yaml:"cpu_shares,omitempty"` Command string `yaml:"command,omitempty"` Detach string `yaml:"detach,omitempty"` - Dns []string `yaml:"dns,omitempty"` + Dns *Stringorslice DnsSearch string `yaml:"dns_search,omitempty"` DomainName string `yaml:"domainname,omitempty"` Entrypoint string `yaml:"entrypoint,omitempty"` @@ -34,7 +135,8 @@ type ServiceConfig struct { Environment []string `yaml:"environment,omitempty"` Hostname string `yaml:"hostname,omitempty"` Image string `yaml:"image,omitempty"` - Labels []string `yaml:"labels,omitempty"` + //Labels map[string]string `yaml:"labels,omitempty"` + Labels *SliceorMap Links []string `yaml:"links,omitempty"` LogDriver string `yaml:"log_driver,omitempty"` MemLimit int64 `yaml:"mem_limit,omitempty"` From afa3c3e94aa30318cbe3d0605bb6b99ce74db2a3 Mon Sep 17 00:00:00 2001 From: Darren Shepherd Date: Thu, 30 Apr 2015 22:16:27 -0700 Subject: [PATCH 3/3] Switch to new labels syntax. Also launch acpid earlier --- config/default.go | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/config/default.go b/config/default.go index f35340bf..a7d9f0cd 100644 --- a/config/default.go +++ b/config/default.go @@ -330,12 +330,9 @@ func NewConfig() *Config { "acpid": { Image: "acpid", Privileged: true, - Links: []string{ - "console", - }, - Labels: []string{ - SCOPE + "=" + SYSTEM, - }, + Labels: project.NewSliceorMap(map[string]string{ + SCOPE: SYSTEM, + }), VolumesFrom: []string{ "command-volumes", "system-volumes",