From caeacfa6ed6b3d7071cca9d5d9c9ec9707198e97 Mon Sep 17 00:00:00 2001
From: Ivan Mikushin
Date: Thu, 4 Feb 2016 22:40:30 -0800
Subject: [PATCH] Bump a few libs to latest tagged versions

---
 trash.yml | 8 +-
 vendor/github.com/Sirupsen/logrus/.travis.yml | 3 +-
 .../github.com/Sirupsen/logrus/CHANGELOG.md | 12 +
 vendor/github.com/Sirupsen/logrus/README.md | 41 +-
 vendor/github.com/Sirupsen/logrus/logger.go | 6 +
 .../github.com/Sirupsen/logrus/logrus_test.go | 15 +
 .../Sirupsen/logrus/terminal_notwindows.go | 4 +-
 .../Sirupsen/logrus/terminal_solaris.go | 15 +
 .../Sirupsen/logrus/terminal_windows.go | 4 +-
 .../Sirupsen/logrus/text_formatter.go | 4 +-
 vendor/github.com/codegangsta/cli/.travis.yml | 15 +-
 vendor/github.com/codegangsta/cli/README.md | 119 +--
 vendor/github.com/codegangsta/cli/app.go | 145 +---
 vendor/github.com/codegangsta/cli/app_test.go | 715 ++----------------
 vendor/github.com/codegangsta/cli/cli.go | 21 -
 vendor/github.com/codegangsta/cli/cli_test.go | 100 +++
 vendor/github.com/codegangsta/cli/command.go | 127 +---
 .../codegangsta/cli/command_test.go | 73 +-
 vendor/github.com/codegangsta/cli/context.go | 136 +---
 .../codegangsta/cli/context_test.go | 65 +-
 vendor/github.com/codegangsta/cli/flag.go | 264 ++-----
 .../github.com/codegangsta/cli/flag_test.go | 506 +++----------
 vendor/github.com/codegangsta/cli/help.go | 123 ++-
 .../github.com/codegangsta/cli/help_test.go | 94 ---
 .../codegangsta/cli/helpers_test.go | 6 +-
 .../github.com/docker/distribution/.mailmap | 8 +
 vendor/github.com/docker/distribution/AUTHORS | 36 +-
 .../github.com/docker/distribution/Dockerfile | 2 +-
 .../docker/distribution/MAINTAINERS | 67 +-
 .../github.com/docker/distribution/Makefile | 2 +-
 .../github.com/docker/distribution/README.md | 15 +-
 .../github.com/docker/distribution/blobs.go | 33 +-
 .../github.com/docker/distribution/circle.yml | 57 +-
 .../docker/distribution/coverpkg.sh | 7 +
 .../docker/distribution/digest/digest.go | 60 +-
 .../docker/distribution/digest/digest_test.go | 49 +-
 .../docker/distribution/digest/digester.go | 46 +-
 .../docker/distribution/digest/doc.go | 14 +-
 .../docker/distribution/digest/set_test.go | 65 +-
 .../docker/distribution/digest/tarsum.go | 70 --
 .../docker/distribution/digest/tarsum_test.go | 79 --
 .../docker/distribution/digest/verifiers.go | 86 +--
 .../distribution/digest/verifiers_test.go | 119 +--
 .../github.com/docker/distribution/errors.go | 20 +
 .../docker/distribution/manifest/doc.go | 1 -
 .../distribution/manifest/schema1/manifest.go | 130 ----
 .../manifest/schema1/manifest_test.go | 108 ---
 .../distribution/manifest/schema1/sign.go | 66 --
 .../distribution/manifest/schema1/verify.go | 32 -
 .../docker/distribution/manifest/versioned.go | 9 -
 .../docker/distribution/manifests.go | 117 +++
 .../distribution/reference/reference.go | 25 +-
 .../distribution/reference/reference_test.go | 12 +-
 .../docker/distribution/reference/regexp.go | 121 ++-
 .../distribution/reference/regexp_test.go | 10 +
 .../docker/distribution/registry.go | 64 +-
 .../registry/api/errcode/errors.go | 16 +-
 .../registry/api/errcode/errors_test.go | 52 +-
 .../registry/api/v2/descriptors.go | 66 +-
 .../registry/api/v2/routes_test.go | 8 -
 .../distribution/registry/api/v2/urls.go | 34 +-
 .../distribution/registry/api/v2/urls_test.go | 81 +-
 .../registry/client/auth/session.go | 31 +-
 .../registry/client/blob_writer.go | 2 +-
 .../distribution/registry/client/errors.go | 25 +-
 .../registry/client/errors_test.go | 89 +++
 .../registry/client/repository.go | 469 +++++++++---
 .../registry/client/repository_test.go | 342 ++++++---
 .../registry/client/transport/http_reader.go | 84 +-
 .../registry/storage/blob_test.go | 183 ++++-
 .../registry/storage/blobserver.go | 67 +-
 .../registry/storage/blobstore.go | 7 +-
 .../registry/storage/blobwriter.go | 5 +-
 .../registry/storage/blobwriter_resumable.go | 5 +-
 .../distribution/registry/storage/catalog.go | 25 +-
 .../registry/storage/linkedblobstore.go | 96 ++-
 .../registry/storage/manifestlisthandler.go | 96 +++
 .../registry/storage/manifeststore.go | 186 +++--
 .../registry/storage/manifeststore_test.go | 126 ++-
 .../distribution/registry/storage/paths.go | 23 +-
 .../registry/storage/paths_test.go | 47 +-
 .../distribution/registry/storage/registry.go | 83 +-
 .../registry/storage/revisionstore.go | 111 ---
 .../storage/schema2manifesthandler.go | 99 +++
 .../registry/storage/signaturestore.go | 13 +-
 .../registry/storage/signedmanifesthandler.go | 150 ++++
 .../distribution/registry/storage/tagstore.go | 112 ++-
 .../registry/storage/tagstore_test.go | 208 +++++
 .../distribution/registry/storage/walk.go | 10 +-
 .../registry/storage/walk_test.go | 51 +-
 vendor/github.com/docker/distribution/tags.go | 27 +
 .../docker/libnetwork/.dockerignore | 1 +
 .../github.com/docker/libnetwork/.gitignore | 5 +-
 .../github.com/docker/libnetwork/CHANGELOG.md | 55 ++
 .../docker/libnetwork/Dockerfile.build | 13 +
 .../github.com/docker/libnetwork/MAINTAINERS | 57 +-
 vendor/github.com/docker/libnetwork/Makefile | 84 +-
 vendor/github.com/docker/libnetwork/README.md | 38 +-
 .../github.com/docker/libnetwork/circle.yml | 18 +-
 .../docker/libnetwork/controller.go | 57 +-
 .../docker/libnetwork/default_gateway.go | 13 +-
 .../github.com/docker/libnetwork/endpoint.go | 202 +++--
 .../docker/libnetwork/endpoint_info.go | 35 +-
 .../libnetwork/libnetwork_internal_test.go | 42 +-
 .../docker/libnetwork/libnetwork_test.go | 151 +++-
 vendor/github.com/docker/libnetwork/machines | 111 +++
 .../github.com/docker/libnetwork/network.go | 242 ++++--
 .../libnetwork/resolvconf/dns/resolvconf.go | 2 +-
 .../github.com/docker/libnetwork/resolver.go | 208 +++++
 .../github.com/docker/libnetwork/sandbox.go | 233 +++++-
 .../docker/libnetwork/sandbox_externalkey.go | 175 +----
 .../libnetwork/sandbox_externalkey_unix.go | 177 +++++
 .../libnetwork/sandbox_externalkey_windows.go | 45 ++
 vendor/github.com/docker/libnetwork/store.go | 3 +-
 .../docker/libtrust/CONTRIBUTING.md | 13 -
 vendor/github.com/docker/libtrust/LICENSE | 191 -----
 vendor/github.com/docker/libtrust/MAINTAINERS | 3 -
 vendor/github.com/docker/libtrust/README.md | 18 -
 .../docker/libtrust/certificates.go | 175 -----
 .../docker/libtrust/certificates_test.go | 111 ---
 vendor/github.com/docker/libtrust/doc.go | 9 -
 vendor/github.com/docker/libtrust/ec_key.go | 428 -----------
 .../github.com/docker/libtrust/ec_key_test.go | 157 ----
 vendor/github.com/docker/libtrust/filter.go | 50 --
 .../github.com/docker/libtrust/filter_test.go | 81 --
 vendor/github.com/docker/libtrust/hash.go | 56 --
 vendor/github.com/docker/libtrust/jsonsign.go | 657 ----------------
 .../docker/libtrust/jsonsign_test.go | 380 ----------
 vendor/github.com/docker/libtrust/key.go | 253 -------
 .../github.com/docker/libtrust/key_files.go | 255 -------
 .../docker/libtrust/key_files_test.go | 220 ------
 .../github.com/docker/libtrust/key_manager.go | 175 -----
 vendor/github.com/docker/libtrust/key_test.go | 80 --
 vendor/github.com/docker/libtrust/rsa_key.go | 427 -----------
 .../docker/libtrust/rsa_key_test.go | 157 ----
 vendor/github.com/docker/libtrust/util.go | 363 ---------
 .../github.com/docker/libtrust/util_test.go | 45 --
 137 files changed, 4898 insertions(+), 8553 deletions(-)
 create mode 100644 vendor/github.com/Sirupsen/logrus/terminal_solaris.go
 create mode 100644 vendor/github.com/codegangsta/cli/cli_test.go
 delete mode 100644 vendor/github.com/codegangsta/cli/help_test.go
 create mode 100755 vendor/github.com/docker/distribution/coverpkg.sh
 delete mode 100644 vendor/github.com/docker/distribution/digest/tarsum.go
 delete mode 100644 vendor/github.com/docker/distribution/digest/tarsum_test.go
 delete mode 100644 vendor/github.com/docker/distribution/manifest/doc.go
 delete mode 100644 vendor/github.com/docker/distribution/manifest/schema1/manifest.go
 delete mode 100644 vendor/github.com/docker/distribution/manifest/schema1/manifest_test.go
 delete mode 100644 vendor/github.com/docker/distribution/manifest/schema1/sign.go
 delete mode 100644 vendor/github.com/docker/distribution/manifest/schema1/verify.go
 delete mode 100644 vendor/github.com/docker/distribution/manifest/versioned.go
 create mode 100644 vendor/github.com/docker/distribution/manifests.go
 create mode 100644 vendor/github.com/docker/distribution/registry/client/errors_test.go
 create mode 100644 vendor/github.com/docker/distribution/registry/storage/manifestlisthandler.go
 delete mode 100644 vendor/github.com/docker/distribution/registry/storage/revisionstore.go
 create mode 100644 vendor/github.com/docker/distribution/registry/storage/schema2manifesthandler.go
 create mode 100644 vendor/github.com/docker/distribution/registry/storage/signedmanifesthandler.go
 create mode 100644 vendor/github.com/docker/distribution/registry/storage/tagstore_test.go
 create mode 100644 vendor/github.com/docker/distribution/tags.go
 create mode 100644 vendor/github.com/docker/libnetwork/.dockerignore
 create mode 100644 vendor/github.com/docker/libnetwork/CHANGELOG.md
 create mode 100644 vendor/github.com/docker/libnetwork/Dockerfile.build
 create mode 100755 vendor/github.com/docker/libnetwork/machines
 create mode 100644 vendor/github.com/docker/libnetwork/resolver.go
 create mode 100644 vendor/github.com/docker/libnetwork/sandbox_externalkey_unix.go
 create mode 100644 vendor/github.com/docker/libnetwork/sandbox_externalkey_windows.go
 delete mode 100644 vendor/github.com/docker/libtrust/CONTRIBUTING.md
 delete mode 100644 vendor/github.com/docker/libtrust/LICENSE
 delete mode 100644 vendor/github.com/docker/libtrust/MAINTAINERS
 delete mode 100644 vendor/github.com/docker/libtrust/README.md
 delete mode 100644 vendor/github.com/docker/libtrust/certificates.go
 delete mode 100644 vendor/github.com/docker/libtrust/certificates_test.go
 delete mode 100644 vendor/github.com/docker/libtrust/doc.go
 delete mode 100644 vendor/github.com/docker/libtrust/ec_key.go
 delete mode 100644 vendor/github.com/docker/libtrust/ec_key_test.go
 delete mode 100644 vendor/github.com/docker/libtrust/filter.go
 delete mode 100644 vendor/github.com/docker/libtrust/filter_test.go
 delete mode 100644 vendor/github.com/docker/libtrust/hash.go
 delete mode 100644 vendor/github.com/docker/libtrust/jsonsign.go
 delete mode 100644 vendor/github.com/docker/libtrust/jsonsign_test.go
 delete mode 100644 vendor/github.com/docker/libtrust/key.go
 delete mode 100644 vendor/github.com/docker/libtrust/key_files.go
 delete mode 100644 vendor/github.com/docker/libtrust/key_files_test.go
 delete mode 100644 vendor/github.com/docker/libtrust/key_manager.go
 delete mode 100644 vendor/github.com/docker/libtrust/key_test.go
 delete
mode 100644 vendor/github.com/docker/libtrust/rsa_key.go delete mode 100644 vendor/github.com/docker/libtrust/rsa_key_test.go delete mode 100644 vendor/github.com/docker/libtrust/util.go delete mode 100644 vendor/github.com/docker/libtrust/util_test.go diff --git a/trash.yml b/trash.yml index 25365f01..6da52a0f 100644 --- a/trash.yml +++ b/trash.yml @@ -2,13 +2,13 @@ package: github.com/rancher/os import: - package: github.com/Sirupsen/logrus - version: v0.8.7 + version: v0.9.0 - package: github.com/cloudfoundry-incubator/candiedyaml version: 55a459c2d9da2b078f0725e5fb324823b2c71702 - package: github.com/codegangsta/cli - version: 0302d3914d2a6ad61404584cdae6e6dbc9c03599 + version: v1.2.0 - package: github.com/coreos/coreos-cloudinit version: 65031e1ab2d3574544d26f5b5d7ddddd0032fd00 @@ -21,7 +21,7 @@ import: version: 6b16a5714269b2f70720a45406b1babd947a17ef - package: github.com/docker/distribution - version: c6c9194e9c6097f84b0ff468a741086ff7704aa3 + version: v2.3.0 - package: github.com/docker/docker version: 58b270c338e831ac6668a29788c72d202f9fc251 @@ -33,7 +33,7 @@ import: version: 83a102cc68a09d890cce3b6c2e5c14c49e6373a0 - package: github.com/docker/libnetwork - version: 5305ea570b85d61dd0fd261cd7e1680da1884678 + version: v0.5.6 - package: github.com/docker/libtrust version: 9cbd2a1374f46905c68a4eb3694a130610adc62a diff --git a/vendor/github.com/Sirupsen/logrus/.travis.yml b/vendor/github.com/Sirupsen/logrus/.travis.yml index 2d8c0866..ff23150d 100644 --- a/vendor/github.com/Sirupsen/logrus/.travis.yml +++ b/vendor/github.com/Sirupsen/logrus/.travis.yml @@ -1,8 +1,9 @@ language: go go: - - 1.2 - 1.3 - 1.4 + - 1.5 - tip install: - go get -t ./... +script: GOMAXPROCS=4 GORACE="halt_on_error=1" go test -race -v ./... diff --git a/vendor/github.com/Sirupsen/logrus/CHANGELOG.md b/vendor/github.com/Sirupsen/logrus/CHANGELOG.md index 78f98959..9e9e6009 100644 --- a/vendor/github.com/Sirupsen/logrus/CHANGELOG.md +++ b/vendor/github.com/Sirupsen/logrus/CHANGELOG.md @@ -1,3 +1,15 @@ +# 0.9.0 + +* logrus/text_formatter: don't emit empty msg +* logrus/hooks/airbrake: move out of main repository +* logrus/hooks/sentry: move out of main repository +* logrus/hooks/papertrail: move out of main repository +* logrus/hooks/bugsnag: move out of main repository +* logrus/core: run tests with `-race` +* logrus/core: detect TTY based on `stderr` +* logrus/core: support `WithError` on logger +* logrus/core: Solaris support + # 0.8.7 * logrus/core: fix possible race (#216) diff --git a/vendor/github.com/Sirupsen/logrus/README.md b/vendor/github.com/Sirupsen/logrus/README.md index 6fa6e206..f8720c9a 100644 --- a/vendor/github.com/Sirupsen/logrus/README.md +++ b/vendor/github.com/Sirupsen/logrus/README.md @@ -75,17 +75,12 @@ package main import ( "os" log "github.com/Sirupsen/logrus" - "github.com/Sirupsen/logrus/hooks/airbrake" ) func init() { // Log as JSON instead of the default ASCII formatter. log.SetFormatter(&log.JSONFormatter{}) - // Use the Airbrake hook to report errors that have Error severity or above to - // an exception tracker. You can create custom hooks, see the Hooks section. - log.AddHook(airbrake.NewHook("https://example.com", "xyz", "development")) - // Output to stderr instead of stdout, could also be a file. log.SetOutput(os.Stderr) @@ -182,13 +177,16 @@ Logrus comes with [built-in hooks](hooks/). 
Add those, or your custom hook, in ```go import ( log "github.com/Sirupsen/logrus" - "github.com/Sirupsen/logrus/hooks/airbrake" + "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "aibrake" logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog" "log/syslog" ) func init() { - log.AddHook(airbrake.NewHook("https://example.com", "xyz", "development")) + + // Use the Airbrake hook to report errors that have Error severity or above to + // an exception tracker. You can create custom hooks, see the Hooks section. + log.AddHook(airbrake.NewHook(123, "xyz", "production")) hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") if err != nil { @@ -198,20 +196,21 @@ func init() { } } ``` - +Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). For the detail, please check the [syslog hook README](hooks/syslog/README.md). | Hook | Description | | ----- | ----------- | -| [Airbrake](https://github.com/Sirupsen/logrus/blob/master/hooks/airbrake/airbrake.go) | Send errors to an exception tracking service compatible with the Airbrake API. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. | -| [Papertrail](https://github.com/Sirupsen/logrus/blob/master/hooks/papertrail/papertrail.go) | Send errors to the Papertrail hosted logging service via UDP. | +| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. | +| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. | +| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. | | [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. | -| [BugSnag](https://github.com/Sirupsen/logrus/blob/master/hooks/bugsnag/bugsnag.go) | Send errors to the Bugsnag exception tracking service. | -| [Sentry](https://github.com/Sirupsen/logrus/blob/master/hooks/sentry/sentry.go) | Send errors to the Sentry error logging and aggregation service. | +| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. | +| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. | | [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. | | [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) | | [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. 
| | [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` | -| [Graylog](https://github.com/gemnasium/logrus-hooks/tree/master/graylog) | Hook for logging to [Graylog](http://graylog2.org/) | +| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) | | [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) | | [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem | | [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger | @@ -219,6 +218,10 @@ func init() { | [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar | | [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd | | [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb | +| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb | +| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit | +| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic | +| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to a ELK stack (through Redis) | #### Level logging @@ -296,15 +299,16 @@ The built-in logging formatters are: field to `true`. To force no colored output even if there is a TTY set the `DisableColors` field to `true` * `logrus.JSONFormatter`. Logs fields as JSON. -* `logrus_logstash.LogstashFormatter`. Logs fields as Logstash Events (http://logstash.net). +* `logrus/formatters/logstash.LogstashFormatter`. Logs fields as [Logstash](http://logstash.net) Events. ```go - logrus.SetFormatter(&logrus_logstash.LogstashFormatter{Type: “application_name"}) + logrus.SetFormatter(&logstash.LogstashFormatter{Type: "application_name"}) ``` Third party logging formatters: -* [`zalgo`](https://github.com/aybabtme/logzalgo): invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦. +* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout. +* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦. You can define your formatter by implementing the `Formatter` interface, requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a @@ -353,5 +357,10 @@ Log rotation is not provided with Logrus. Log rotation should be done by an external program (like `logrotate(8)`) that can compress and delete old log entries. It should not be a feature of the application-level logger. 
+#### Tools + +| Tool | Description | +| ---- | ----------- | +|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus mate is a tool for Logrus to manage loggers, you can initial logger's level, hook and formatter by config file, the logger will generated with different config at different environment.| [godoc]: https://godoc.org/github.com/Sirupsen/logrus diff --git a/vendor/github.com/Sirupsen/logrus/logger.go b/vendor/github.com/Sirupsen/logrus/logger.go index fd9804c6..2fdb2317 100644 --- a/vendor/github.com/Sirupsen/logrus/logger.go +++ b/vendor/github.com/Sirupsen/logrus/logger.go @@ -64,6 +64,12 @@ func (logger *Logger) WithFields(fields Fields) *Entry { return NewEntry(logger).WithFields(fields) } +// Add an error as single field to the log entry. All it does is call +// `WithError` for the given `error`. +func (logger *Logger) WithError(err error) *Entry { + return NewEntry(logger).WithError(err) +} + func (logger *Logger) Debugf(format string, args ...interface{}) { if logger.Level >= DebugLevel { NewEntry(logger).Debugf(format, args...) diff --git a/vendor/github.com/Sirupsen/logrus/logrus_test.go b/vendor/github.com/Sirupsen/logrus/logrus_test.go index efaacea2..b7d9302d 100644 --- a/vendor/github.com/Sirupsen/logrus/logrus_test.go +++ b/vendor/github.com/Sirupsen/logrus/logrus_test.go @@ -299,3 +299,18 @@ func TestGetSetLevelRace(t *testing.T) { } wg.Wait() } + +func TestLoggingRace(t *testing.T) { + logger := New() + + var wg sync.WaitGroup + wg.Add(100) + + for i := 0; i < 100; i++ { + go func() { + logger.Info("info") + wg.Done() + }() + } + wg.Wait() +} diff --git a/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go b/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go index 4bb53760..b343b3a3 100644 --- a/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go +++ b/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go @@ -12,9 +12,9 @@ import ( "unsafe" ) -// IsTerminal returns true if the given file descriptor is a terminal. +// IsTerminal returns true if stderr's file descriptor is a terminal. func IsTerminal() bool { - fd := syscall.Stdout + fd := syscall.Stderr var termios Termios _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) return err == 0 diff --git a/vendor/github.com/Sirupsen/logrus/terminal_solaris.go b/vendor/github.com/Sirupsen/logrus/terminal_solaris.go new file mode 100644 index 00000000..3e70bf7b --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/terminal_solaris.go @@ -0,0 +1,15 @@ +// +build solaris + +package logrus + +import ( + "os" + + "golang.org/x/sys/unix" +) + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal() bool { + _, err := unix.IoctlGetTermios(int(os.Stdout.Fd()), unix.TCGETA) + return err == nil +} diff --git a/vendor/github.com/Sirupsen/logrus/terminal_windows.go b/vendor/github.com/Sirupsen/logrus/terminal_windows.go index 2e09f6f7..0146845d 100644 --- a/vendor/github.com/Sirupsen/logrus/terminal_windows.go +++ b/vendor/github.com/Sirupsen/logrus/terminal_windows.go @@ -18,9 +18,9 @@ var ( procGetConsoleMode = kernel32.NewProc("GetConsoleMode") ) -// IsTerminal returns true if the given file descriptor is a terminal. +// IsTerminal returns true if stderr's file descriptor is a terminal. 
func IsTerminal() bool { - fd := syscall.Stdout + fd := syscall.Stderr var st uint32 r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) return r != 0 && e == 0 diff --git a/vendor/github.com/Sirupsen/logrus/text_formatter.go b/vendor/github.com/Sirupsen/logrus/text_formatter.go index 17cc2984..06ef2023 100644 --- a/vendor/github.com/Sirupsen/logrus/text_formatter.go +++ b/vendor/github.com/Sirupsen/logrus/text_formatter.go @@ -84,7 +84,9 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat)) } f.appendKeyValue(b, "level", entry.Level.String()) - f.appendKeyValue(b, "msg", entry.Message) + if entry.Message != "" { + f.appendKeyValue(b, "msg", entry.Message) + } for _, key := range keys { f.appendKeyValue(b, key, entry.Data[key]) } diff --git a/vendor/github.com/codegangsta/cli/.travis.yml b/vendor/github.com/codegangsta/cli/.travis.yml index 87ba52f9..baf46abc 100644 --- a/vendor/github.com/codegangsta/cli/.travis.yml +++ b/vendor/github.com/codegangsta/cli/.travis.yml @@ -1,18 +1,5 @@ language: go -sudo: false - -go: -- 1.0.3 -- 1.1.2 -- 1.2.2 -- 1.3.3 -- 1.4.2 -- 1.5.1 -- tip - -matrix: - allow_failures: - - go: tip +go: 1.1 script: - go vet ./... diff --git a/vendor/github.com/codegangsta/cli/README.md b/vendor/github.com/codegangsta/cli/README.md index 26a18386..2453c1af 100644 --- a/vendor/github.com/codegangsta/cli/README.md +++ b/vendor/github.com/codegangsta/cli/README.md @@ -1,30 +1,31 @@ -[![Coverage](http://gocover.io/_badge/github.com/codegangsta/cli?0)](http://gocover.io/github.com/codegangsta/cli) [![Build Status](https://travis-ci.org/codegangsta/cli.png?branch=master)](https://travis-ci.org/codegangsta/cli) -[![GoDoc](https://godoc.org/github.com/codegangsta/cli?status.svg)](https://godoc.org/github.com/codegangsta/cli) # cli.go -`cli.go` is simple, fast, and fun package for building command line apps in Go. The goal is to enable developers to write fast and distributable command line applications in an expressive way. +cli.go is simple, fast, and fun package for building command line apps in Go. The goal is to enable developers to write fast and distributable command line applications in an expressive way. + +You can view the API docs here: +http://godoc.org/github.com/codegangsta/cli ## Overview Command line apps are usually so tiny that there is absolutely no reason why your code should *not* be self-documenting. Things like generating help text and parsing command flags/options should not hinder productivity when writing a command line app. -**This is where `cli.go` comes into play.** `cli.go` makes command line programming fun, organized, and expressive! +This is where cli.go comes into play. cli.go makes command line programming fun, organized, and expressive! ## Installation -Make sure you have a working Go environment (go 1.1+ is *required*). [See the install instructions](http://golang.org/doc/install.html). +Make sure you have a working Go environment (go 1.1 is *required*). [See the install instructions](http://golang.org/doc/install.html). 
-To install `cli.go`, simply run: +To install cli.go, simply run: ``` $ go get github.com/codegangsta/cli ``` -Make sure your `PATH` includes to the `$GOPATH/bin` directory so your commands can be easily used: +Make sure your PATH includes to the `$GOPATH/bin` directory so your commands can be easily used: ``` export PATH=$PATH:$GOPATH/bin ``` ## Getting Started -One of the philosophies behind `cli.go` is that an API should be playful and full of discovery. So a `cli.go` app can be as little as one line of code in `main()`. +One of the philosophies behind cli.go is that an API should be playful and full of discovery. So a cli.go app can be as little as one line of code in `main()`. ``` go package main @@ -67,9 +68,8 @@ Running this already gives you a ton of functionality, plus support for things l Being a programmer can be a lonely job. Thankfully by the power of automation that is not the case! Let's create a greeter app to fend off our demons of loneliness! -Start by creating a directory named `greet`, and within it, add a file, `greet.go` with the following code in it: - ``` go +/* greet.go */ package main import ( @@ -84,7 +84,7 @@ func main() { app.Action = func(c *cli.Context) { println("Hello friend!") } - + app.Run(os.Args) } ``` @@ -102,8 +102,7 @@ $ greet Hello friend! ``` -`cli.go` also generates neat help text: - +cli.go also generates some bitchass help text: ``` $ greet help NAME: @@ -123,7 +122,7 @@ GLOBAL OPTIONS ``` ### Arguments -You can lookup arguments by calling the `Args` function on `cli.Context`. +You can lookup arguments by calling the `Args` function on cli.Context. ``` go ... @@ -158,37 +157,9 @@ app.Action = func(c *cli.Context) { ... ``` -You can also set a destination variable for a flag, to which the content will be scanned. -``` go -... -var language string -app.Flags = []cli.Flag { - cli.StringFlag{ - Name: "lang", - Value: "english", - Usage: "language for the greeting", - Destination: &language, - }, -} -app.Action = func(c *cli.Context) { - name := "someone" - if len(c.Args()) > 0 { - name = c.Args()[0] - } - if language == "spanish" { - println("Hola", name) - } else { - println("Hello", name) - } -} -... -``` - -See full list of flags at http://godoc.org/github.com/codegangsta/cli - #### Alternate Names -You can set alternate (or short) names for flags by providing a comma-delimited list for the `Name`. e.g. +You can set alternate (or short) names for flags by providing a comma-delimited list for the Name. e.g. ``` go app.Flags = []cli.Flag { @@ -200,11 +171,9 @@ app.Flags = []cli.Flag { } ``` -That flag can then be set with `--lang spanish` or `-l spanish`. Note that giving two different forms of the same flag in the same command invocation is an error. - #### Values from the Environment -You can also have the default value set from the environment via `EnvVar`. e.g. +You can also have the default value set from the environment via EnvVar. e.g. ``` go app.Flags = []cli.Flag { @@ -217,18 +186,7 @@ app.Flags = []cli.Flag { } ``` -The `EnvVar` may also be given as a comma-delimited "cascade", where the first environment variable that resolves is used as the default. - -``` go -app.Flags = []cli.Flag { - cli.StringFlag{ - Name: "lang, l", - Value: "english", - Usage: "language for the greeting", - EnvVar: "LEGACY_COMPAT_LANG,APP_LANG,LANG", - }, -} -``` +That flag can then be set with `--lang spanish` or `-l spanish`. Note that giving two different forms of the same flag in the same command invocation is an error. 
### Subcommands @@ -238,7 +196,7 @@ Subcommands can be defined for a more git-like command line app. app.Commands = []cli.Command{ { Name: "add", - Aliases: []string{"a"}, + ShortName: "a", Usage: "add a task to the list", Action: func(c *cli.Context) { println("added task: ", c.Args().First()) @@ -246,7 +204,7 @@ app.Commands = []cli.Command{ }, { Name: "complete", - Aliases: []string{"c"}, + ShortName: "c", Usage: "complete a task on the list", Action: func(c *cli.Context) { println("completed task: ", c.Args().First()) @@ -254,7 +212,7 @@ app.Commands = []cli.Command{ }, { Name: "template", - Aliases: []string{"r"}, + ShortName: "r", Usage: "options for task templates", Subcommands: []cli.Command{ { @@ -272,15 +230,15 @@ app.Commands = []cli.Command{ }, }, }, - }, + }, } ... ``` ### Bash Completion -You can enable completion commands by setting the `EnableBashCompletion` -flag on the `App` object. By default, this setting will only auto-complete to +You can enable completion commands by setting the EnableBashCompletion +flag on the App object. By default, this setting will only auto-complete to show an app's subcommands, but you can write your own completion methods for the App or its subcommands. ```go @@ -290,8 +248,8 @@ app := cli.NewApp() app.EnableBashCompletion = true app.Commands = []cli.Command{ { - Name: "complete", - Aliases: []string{"c"}, + Name: "complete", + ShortName: "c", Usage: "complete a task on the list", Action: func(c *cli.Context) { println("completed task: ", c.Args().First()) @@ -302,7 +260,7 @@ app.Commands = []cli.Command{ return } for _, t := range tasks { - fmt.Println(t) + println(t) } }, } @@ -312,30 +270,11 @@ app.Commands = []cli.Command{ #### To Enable -Source the `autocomplete/bash_autocomplete` file in your `.bashrc` file while -setting the `PROG` variable to the name of your program: +Source the autocomplete/bash_autocomplete file in your .bashrc file while +setting the PROG variable to the name of your program: `PROG=myprogram source /.../cli/autocomplete/bash_autocomplete` -#### To Distribute -Copy `autocomplete/bash_autocomplete` into `/etc/bash_completion.d/` and rename -it to the name of the program you wish to add autocomplete support for (or -automatically install it there if you are distributing a package). Don't forget -to source the file to make it active in the current shell. - -``` - sudo cp src/bash_autocomplete /etc/bash_completion.d/ - source /etc/bash_completion.d/ -``` - -Alternatively, you can just document that users should source the generic -`autocomplete/bash_autocomplete` in their bash configuration with `$PROG` set -to the name of their program (as above). - -## Contribution Guidelines -Feel free to put up a pull request to fix a bug or maybe add a feature. I will give it a code review and make sure that it does not break backwards compatibility. If I or any other collaborators agree that it is in line with the vision of the project, we will work with you to get the code into a mergeable state and merge it into the master branch. - -If you have contributed something significant to the project, I will most likely add you as a collaborator. As a collaborator you are given the ability to merge others pull requests. It is very important that new code does not break existing code, so be careful about what code you do choose to merge. If you have any questions feel free to link @codegangsta to the issue in question and we can review it together. 
- -If you feel like you have contributed to the project but have not yet been added as a collaborator, I probably forgot to add you. Hit @codegangsta up over email and we will get it figured out. +## About +cli.go is written by none other than the [Code Gangsta](http://codegangsta.io) diff --git a/vendor/github.com/codegangsta/cli/app.go b/vendor/github.com/codegangsta/cli/app.go index 9a15c0c0..e193b828 100644 --- a/vendor/github.com/codegangsta/cli/app.go +++ b/vendor/github.com/codegangsta/cli/app.go @@ -2,23 +2,18 @@ package cli import ( "fmt" - "io" "io/ioutil" "os" "time" ) // App is the main structure of a cli application. It is recomended that -// an app be created with the cli.NewApp() function +// and app be created with the cli.NewApp() function type App struct { // The name of the program. Defaults to os.Args[0] Name string - // Full name of command for help, defaults to Name - HelpName string // Description of the program. Usage string - // Description of the program argument format. - ArgsUsage string // Version of the program Version string // List of commands to execute @@ -29,32 +24,21 @@ type App struct { EnableBashCompletion bool // Boolean to hide built-in help command HideHelp bool - // Boolean to hide built-in version flag - HideVersion bool // An action to execute when the bash-completion flag is set BashComplete func(context *Context) // An action to execute before any subcommands are run, but after the context is ready // If a non-nil error is returned, no subcommands are run Before func(context *Context) error - // An action to execute after any subcommands are run, but after the subcommand has finished - // It is run even if Action() panics - After func(context *Context) error // The action to execute when no subcommands are specified Action func(context *Context) // Execute this function if the proper command cannot be found CommandNotFound func(context *Context, command string) // Compilation date Compiled time.Time - // List of all authors who contributed - Authors []Author - // Copyright of the binary if any - Copyright string - // Name of Author (Note: Use App.Authors, this is deprecated) + // Author Author string - // Email of Author (Note: Use App.Authors, this is deprecated) + // Author e-mail Email string - // Writer writer to write output to - Writer io.Writer } // Tries to find out when this binary was compiled. @@ -71,65 +55,48 @@ func compileTime() time.Time { func NewApp() *App { return &App{ Name: os.Args[0], - HelpName: os.Args[0], Usage: "A new cli application", Version: "0.0.0", BashComplete: DefaultAppComplete, Action: helpCommand.Action, Compiled: compileTime(), - Writer: os.Stdout, + Author: "Author", + Email: "unknown@email", } } // Entry point to the cli app. 
Parses the arguments slice and routes to the proper flag/args combination -func (a *App) Run(arguments []string) (err error) { - if a.Author != "" || a.Email != "" { - a.Authors = append(a.Authors, Author{Name: a.Author, Email: a.Email}) - } - - newCmds := []Command{} - for _, c := range a.Commands { - if c.HelpName == "" { - c.HelpName = fmt.Sprintf("%s %s", a.HelpName, c.Name) - } - newCmds = append(newCmds, c) - } - a.Commands = newCmds - +func (a *App) Run(arguments []string) error { // append help to commands if a.Command(helpCommand.Name) == nil && !a.HideHelp { a.Commands = append(a.Commands, helpCommand) - if (HelpFlag != BoolFlag{}) { - a.appendFlag(HelpFlag) - } + a.appendFlag(HelpFlag) } //append version/help flags if a.EnableBashCompletion { a.appendFlag(BashCompletionFlag) } - - if !a.HideVersion { - a.appendFlag(VersionFlag) - } + a.appendFlag(VersionFlag) // parse flags set := flagSet(a.Name, a.Flags) set.SetOutput(ioutil.Discard) - err = set.Parse(arguments[1:]) + err := set.Parse(arguments[1:]) nerr := normalizeFlags(a.Flags, set) if nerr != nil { - fmt.Fprintln(a.Writer, nerr) - context := NewContext(a, set, nil) + fmt.Println(nerr) + context := NewContext(a, set, set) ShowAppHelp(context) + fmt.Println("") return nerr } - context := NewContext(a, set, nil) + context := NewContext(a, set, set) if err != nil { - fmt.Fprintln(a.Writer, "Incorrect Usage.") - fmt.Fprintln(a.Writer) + fmt.Printf("Incorrect Usage.\n\n") ShowAppHelp(context) + fmt.Println("") return err } @@ -137,29 +104,14 @@ func (a *App) Run(arguments []string) (err error) { return nil } - if !a.HideHelp && checkHelp(context) { - ShowAppHelp(context) + if checkHelp(context) { return nil } - if !a.HideVersion && checkVersion(context) { - ShowVersion(context) + if checkVersion(context) { return nil } - if a.After != nil { - defer func() { - afterErr := a.After(context) - if afterErr != nil { - if err != nil { - err = NewMultiError(err, afterErr) - } else { - err = afterErr - } - } - }() - } - if a.Before != nil { err := a.Before(context) if err != nil { @@ -184,32 +136,21 @@ func (a *App) Run(arguments []string) (err error) { // Another entry point to the cli app, takes care of passing arguments and error handling func (a *App) RunAndExitOnError() { if err := a.Run(os.Args); err != nil { - fmt.Fprintln(os.Stderr, err) + os.Stderr.WriteString(fmt.Sprintln(err)) os.Exit(1) } } // Invokes the subcommand given the context, parses ctx.Args() to generate command-specific flags -func (a *App) RunAsSubcommand(ctx *Context) (err error) { +func (a *App) RunAsSubcommand(ctx *Context) error { // append help to commands if len(a.Commands) > 0 { if a.Command(helpCommand.Name) == nil && !a.HideHelp { a.Commands = append(a.Commands, helpCommand) - if (HelpFlag != BoolFlag{}) { - a.appendFlag(HelpFlag) - } + a.appendFlag(HelpFlag) } } - newCmds := []Command{} - for _, c := range a.Commands { - if c.HelpName == "" { - c.HelpName = fmt.Sprintf("%s %s", a.HelpName, c.Name) - } - newCmds = append(newCmds, c) - } - a.Commands = newCmds - // append flags if a.EnableBashCompletion { a.appendFlag(BashCompletionFlag) @@ -218,24 +159,23 @@ func (a *App) RunAsSubcommand(ctx *Context) (err error) { // parse flags set := flagSet(a.Name, a.Flags) set.SetOutput(ioutil.Discard) - err = set.Parse(ctx.Args().Tail()) + err := set.Parse(ctx.Args().Tail()) nerr := normalizeFlags(a.Flags, set) - context := NewContext(a, set, ctx) + context := NewContext(a, set, ctx.globalSet) if nerr != nil { - fmt.Fprintln(a.Writer, nerr) - fmt.Fprintln(a.Writer) + 
fmt.Println(nerr) if len(a.Commands) > 0 { ShowSubcommandHelp(context) } else { ShowCommandHelp(ctx, context.Args().First()) } + fmt.Println("") return nerr } if err != nil { - fmt.Fprintln(a.Writer, "Incorrect Usage.") - fmt.Fprintln(a.Writer) + fmt.Printf("Incorrect Usage.\n\n") ShowSubcommandHelp(context) return err } @@ -254,19 +194,6 @@ func (a *App) RunAsSubcommand(ctx *Context) (err error) { } } - if a.After != nil { - defer func() { - afterErr := a.After(context) - if afterErr != nil { - if err != nil { - err = NewMultiError(err, afterErr) - } else { - err = afterErr - } - } - }() - } - if a.Before != nil { err := a.Before(context) if err != nil { @@ -284,7 +211,11 @@ func (a *App) RunAsSubcommand(ctx *Context) (err error) { } // Run default Action - a.Action(context) + if len(a.Commands) > 0 { + a.Action(context) + } else { + a.Action(ctx) + } return nil } @@ -315,19 +246,3 @@ func (a *App) appendFlag(flag Flag) { a.Flags = append(a.Flags, flag) } } - -// Author represents someone who has contributed to a cli project. -type Author struct { - Name string // The Authors name - Email string // The Authors email -} - -// String makes Author comply to the Stringer interface, to allow an easy print in the templating process -func (a Author) String() string { - e := "" - if a.Email != "" { - e = "<" + a.Email + "> " - } - - return fmt.Sprintf("%v %v", a.Name, e) -} diff --git a/vendor/github.com/codegangsta/cli/app_test.go b/vendor/github.com/codegangsta/cli/app_test.go index 28d8e0f1..d4216fee 100644 --- a/vendor/github.com/codegangsta/cli/app_test.go +++ b/vendor/github.com/codegangsta/cli/app_test.go @@ -1,60 +1,55 @@ -package cli +package cli_test import ( - "bytes" - "flag" "fmt" - "io" "os" - "strings" "testing" + + "github.com/codegangsta/cli" ) -func ExampleApp_Run() { +func ExampleApp() { // set args for examples sake os.Args = []string{"greet", "--name", "Jeremy"} - app := NewApp() + app := cli.NewApp() app.Name = "greet" - app.Flags = []Flag{ - StringFlag{Name: "name", Value: "bob", Usage: "a name to say"}, + app.Flags = []cli.Flag{ + cli.StringFlag{Name: "name", Value: "bob", Usage: "a name to say"}, } - app.Action = func(c *Context) { + app.Action = func(c *cli.Context) { fmt.Printf("Hello %v\n", c.String("name")) } - app.Author = "Harrison" - app.Email = "harrison@lolwut.com" - app.Authors = []Author{Author{Name: "Oliver Allen", Email: "oliver@toyshop.com"}} app.Run(os.Args) // Output: // Hello Jeremy } -func ExampleApp_Run_subcommand() { +func ExampleAppSubcommand() { // set args for examples sake os.Args = []string{"say", "hi", "english", "--name", "Jeremy"} - app := NewApp() + app := cli.NewApp() app.Name = "say" - app.Commands = []Command{ + app.Commands = []cli.Command{ { Name: "hello", - Aliases: []string{"hi"}, + ShortName: "hi", Usage: "use it to see a description", Description: "This is how we describe hello the function", - Subcommands: []Command{ + Subcommands: []cli.Command{ { Name: "english", - Aliases: []string{"en"}, + ShortName: "en", Usage: "sends a greeting in english", Description: "greets someone in english", - Flags: []Flag{ - StringFlag{ + Flags: []cli.Flag{ + cli.StringFlag{ Name: "name", Value: "Bob", Usage: "Name of the person to greet", }, }, - Action: func(c *Context) { + Action: func(c *cli.Context) { fmt.Println("Hello,", c.String("name")) }, }, @@ -67,22 +62,22 @@ func ExampleApp_Run_subcommand() { // Hello, Jeremy } -func ExampleApp_Run_help() { +func ExampleAppHelp() { // set args for examples sake os.Args = []string{"greet", "h", 
"describeit"} - app := NewApp() + app := cli.NewApp() app.Name = "greet" - app.Flags = []Flag{ - StringFlag{Name: "name", Value: "bob", Usage: "a name to say"}, + app.Flags = []cli.Flag{ + cli.StringFlag{Name: "name", Value: "bob", Usage: "a name to say"}, } - app.Commands = []Command{ + app.Commands = []cli.Command{ { Name: "describeit", - Aliases: []string{"d"}, + ShortName: "d", Usage: "use it to see a description", Description: "This is how we describe describeit the function", - Action: func(c *Context) { + Action: func(c *cli.Context) { fmt.Printf("i like to describe things") }, }, @@ -90,36 +85,36 @@ func ExampleApp_Run_help() { app.Run(os.Args) // Output: // NAME: - // greet describeit - use it to see a description + // describeit - use it to see a description // // USAGE: - // greet describeit [arguments...] + // command describeit [arguments...] // // DESCRIPTION: // This is how we describe describeit the function } -func ExampleApp_Run_bashComplete() { +func ExampleAppBashComplete() { // set args for examples sake os.Args = []string{"greet", "--generate-bash-completion"} - app := NewApp() + app := cli.NewApp() app.Name = "greet" app.EnableBashCompletion = true - app.Commands = []Command{ + app.Commands = []cli.Command{ { Name: "describeit", - Aliases: []string{"d"}, + ShortName: "d", Usage: "use it to see a description", Description: "This is how we describe describeit the function", - Action: func(c *Context) { + Action: func(c *cli.Context) { fmt.Printf("i like to describe things") }, }, { Name: "next", Usage: "next example", Description: "more stuff to see when generating bash completion", - Action: func(c *Context) { + Action: func(c *cli.Context) { fmt.Printf("the next example") }, }, @@ -137,8 +132,8 @@ func ExampleApp_Run_bashComplete() { func TestApp_Run(t *testing.T) { s := "" - app := NewApp() - app.Action = func(c *Context) { + app := cli.NewApp() + app.Action = func(c *cli.Context) { s = s + c.Args().First() } @@ -162,10 +157,10 @@ var commandAppTests = []struct { } func TestApp_Command(t *testing.T) { - app := NewApp() - fooCommand := Command{Name: "foobar", Aliases: []string{"f"}} - batCommand := Command{Name: "batbaz", Aliases: []string{"b"}} - app.Commands = []Command{ + app := cli.NewApp() + fooCommand := cli.Command{Name: "foobar", ShortName: "f"} + batCommand := cli.Command{Name: "batbaz", ShortName: "b"} + app.Commands = []cli.Command{ fooCommand, batCommand, } @@ -178,18 +173,18 @@ func TestApp_Command(t *testing.T) { func TestApp_CommandWithArgBeforeFlags(t *testing.T) { var parsedOption, firstArg string - app := NewApp() - command := Command{ + app := cli.NewApp() + command := cli.Command{ Name: "cmd", - Flags: []Flag{ - StringFlag{Name: "option", Value: "", Usage: "some option"}, + Flags: []cli.Flag{ + cli.StringFlag{Name: "option", Value: "", Usage: "some option"}, }, - Action: func(c *Context) { + Action: func(c *cli.Context) { parsedOption = c.String("option") firstArg = c.Args().First() }, } - app.Commands = []Command{command} + app.Commands = []cli.Command{command} app.Run([]string{"", "cmd", "my-arg", "--option", "my-option"}) @@ -197,84 +192,14 @@ func TestApp_CommandWithArgBeforeFlags(t *testing.T) { expect(t, firstArg, "my-arg") } -func TestApp_RunAsSubcommandParseFlags(t *testing.T) { - var context *Context - - a := NewApp() - a.Commands = []Command{ - { - Name: "foo", - Action: func(c *Context) { - context = c - }, - Flags: []Flag{ - StringFlag{ - Name: "lang", - Value: "english", - Usage: "language for the greeting", - }, - }, - Before: func(_ 
*Context) error { return nil }, - }, - } - a.Run([]string{"", "foo", "--lang", "spanish", "abcd"}) - - expect(t, context.Args().Get(0), "abcd") - expect(t, context.String("lang"), "spanish") -} - -func TestApp_CommandWithFlagBeforeTerminator(t *testing.T) { - var parsedOption string - var args []string - - app := NewApp() - command := Command{ - Name: "cmd", - Flags: []Flag{ - StringFlag{Name: "option", Value: "", Usage: "some option"}, - }, - Action: func(c *Context) { - parsedOption = c.String("option") - args = c.Args() - }, - } - app.Commands = []Command{command} - - app.Run([]string{"", "cmd", "my-arg", "--option", "my-option", "--", "--notARealFlag"}) - - expect(t, parsedOption, "my-option") - expect(t, args[0], "my-arg") - expect(t, args[1], "--") - expect(t, args[2], "--notARealFlag") -} - -func TestApp_CommandWithNoFlagBeforeTerminator(t *testing.T) { - var args []string - - app := NewApp() - command := Command{ - Name: "cmd", - Action: func(c *Context) { - args = c.Args() - }, - } - app.Commands = []Command{command} - - app.Run([]string{"", "cmd", "my-arg", "--", "notAFlagAtAll"}) - - expect(t, args[0], "my-arg") - expect(t, args[1], "--") - expect(t, args[2], "notAFlagAtAll") -} - func TestApp_Float64Flag(t *testing.T) { var meters float64 - app := NewApp() - app.Flags = []Flag{ - Float64Flag{Name: "height", Value: 1.5, Usage: "Set the height, in meters"}, + app := cli.NewApp() + app.Flags = []cli.Flag{ + cli.Float64Flag{Name: "height", Value: 1.5, Usage: "Set the height, in meters"}, } - app.Action = func(c *Context) { + app.Action = func(c *cli.Context) { meters = c.Float64("height") } @@ -287,21 +212,21 @@ func TestApp_ParseSliceFlags(t *testing.T) { var parsedIntSlice []int var parsedStringSlice []string - app := NewApp() - command := Command{ + app := cli.NewApp() + command := cli.Command{ Name: "cmd", - Flags: []Flag{ - IntSliceFlag{Name: "p", Value: &IntSlice{}, Usage: "set one or more ip addr"}, - StringSliceFlag{Name: "ip", Value: &StringSlice{}, Usage: "set one or more ports to open"}, + Flags: []cli.Flag{ + cli.IntSliceFlag{Name: "p", Value: &cli.IntSlice{}, Usage: "set one or more ip addr"}, + cli.StringSliceFlag{Name: "ip", Value: &cli.StringSlice{}, Usage: "set one or more ports to open"}, }, - Action: func(c *Context) { + Action: func(c *cli.Context) { parsedIntSlice = c.IntSlice("p") parsedStringSlice = c.StringSlice("ip") parsedOption = c.String("option") firstArg = c.Args().First() }, } - app.Commands = []Command{command} + app.Commands = []cli.Command{command} app.Run([]string{"", "cmd", "my-arg", "-p", "22", "-p", "80", "-ip", "8.8.8.8", "-ip", "8.8.4.4"}) @@ -340,90 +265,14 @@ func TestApp_ParseSliceFlags(t *testing.T) { } } -func TestApp_ParseSliceFlagsWithMissingValue(t *testing.T) { - var parsedIntSlice []int - var parsedStringSlice []string - - app := NewApp() - command := Command{ - Name: "cmd", - Flags: []Flag{ - IntSliceFlag{Name: "a", Usage: "set numbers"}, - StringSliceFlag{Name: "str", Usage: "set strings"}, - }, - Action: func(c *Context) { - parsedIntSlice = c.IntSlice("a") - parsedStringSlice = c.StringSlice("str") - }, - } - app.Commands = []Command{command} - - app.Run([]string{"", "cmd", "my-arg", "-a", "2", "-str", "A"}) - - var expectedIntSlice = []int{2} - var expectedStringSlice = []string{"A"} - - if parsedIntSlice[0] != expectedIntSlice[0] { - t.Errorf("%v does not match %v", parsedIntSlice[0], expectedIntSlice[0]) - } - - if parsedStringSlice[0] != expectedStringSlice[0] { - t.Errorf("%v does not match %v", parsedIntSlice[0], 
expectedIntSlice[0]) - } -} - -func TestApp_DefaultStdout(t *testing.T) { - app := NewApp() - - if app.Writer != os.Stdout { - t.Error("Default output writer not set.") - } -} - -type mockWriter struct { - written []byte -} - -func (fw *mockWriter) Write(p []byte) (n int, err error) { - if fw.written == nil { - fw.written = p - } else { - fw.written = append(fw.written, p...) - } - - return len(p), nil -} - -func (fw *mockWriter) GetWritten() (b []byte) { - return fw.written -} - -func TestApp_SetStdout(t *testing.T) { - w := &mockWriter{} - - app := NewApp() - app.Name = "test" - app.Writer = w - - err := app.Run([]string{"help"}) - - if err != nil { - t.Fatalf("Run error: %s", err) - } - - if len(w.written) == 0 { - t.Error("App did not write output to desired writer.") - } -} - func TestApp_BeforeFunc(t *testing.T) { beforeRun, subcommandRun := false, false beforeError := fmt.Errorf("fail") var err error - app := NewApp() + app := cli.NewApp() - app.Before = func(c *Context) error { + app.Before = func(c *cli.Context) error { beforeRun = true s := c.String("opt") if s == "fail" { @@ -433,17 +282,17 @@ func TestApp_BeforeFunc(t *testing.T) { return nil } - app.Commands = []Command{ - Command{ + app.Commands = []cli.Command{ + cli.Command{ Name: "sub", - Action: func(c *Context) { + Action: func(c *cli.Context) { subcommandRun = true }, }, } - app.Flags = []Flag{ - StringFlag{Name: "opt"}, + app.Flags = []cli.Flag{ + cli.StringFlag{Name: "opt"}, } // run with the Before() func succeeding @@ -482,99 +331,18 @@ func TestApp_BeforeFunc(t *testing.T) { } -func TestApp_AfterFunc(t *testing.T) { - afterRun, subcommandRun := false, false - afterError := fmt.Errorf("fail") - var err error - - app := NewApp() - - app.After = func(c *Context) error { - afterRun = true - s := c.String("opt") - if s == "fail" { - return afterError - } - - return nil - } - - app.Commands = []Command{ - Command{ - Name: "sub", - Action: func(c *Context) { - subcommandRun = true - }, - }, - } - - app.Flags = []Flag{ - StringFlag{Name: "opt"}, - } - - // run with the After() func succeeding - err = app.Run([]string{"command", "--opt", "succeed", "sub"}) - - if err != nil { - t.Fatalf("Run error: %s", err) - } - - if afterRun == false { - t.Errorf("After() not executed when expected") - } - - if subcommandRun == false { - t.Errorf("Subcommand not executed when expected") - } - - // reset - afterRun, subcommandRun = false, false - - // run with the Before() func failing - err = app.Run([]string{"command", "--opt", "fail", "sub"}) - - // should be the same error produced by the Before func - if err != afterError { - t.Errorf("Run error expected, but not received") - } - - if afterRun == false { - t.Errorf("After() not executed when expected") - } - - if subcommandRun == false { - t.Errorf("Subcommand not executed when expected") - } -} - -func TestAppNoHelpFlag(t *testing.T) { - oldFlag := HelpFlag - defer func() { - HelpFlag = oldFlag - }() - - HelpFlag = BoolFlag{} - - app := NewApp() - err := app.Run([]string{"test", "-h"}) - - if err != flag.ErrHelp { - t.Errorf("expected error about missing help flag, but got: %s (%T)", err, err) - } -} - func TestAppHelpPrinter(t *testing.T) { - oldPrinter := HelpPrinter + oldPrinter := cli.HelpPrinter defer func() { - HelpPrinter = oldPrinter + cli.HelpPrinter = oldPrinter }() var wasCalled = false - HelpPrinter = func(w io.Writer, template string, data interface{}) { + cli.HelpPrinter = func(template string, data interface{}) { wasCalled = true } - app := NewApp() + app := 
cli.NewApp() app.Run([]string{"-h"}) if wasCalled == false { @@ -582,38 +350,18 @@ func TestAppHelpPrinter(t *testing.T) { } } -func TestAppVersionPrinter(t *testing.T) { - oldPrinter := VersionPrinter - defer func() { - VersionPrinter = oldPrinter - }() - - var wasCalled = false - VersionPrinter = func(c *Context) { - wasCalled = true - } - - app := NewApp() - ctx := NewContext(app, nil, nil) - ShowVersion(ctx) - - if wasCalled == false { - t.Errorf("Version printer expected to be called, but was not") - } -} - func TestAppCommandNotFound(t *testing.T) { beforeRun, subcommandRun := false, false - app := NewApp() + app := cli.NewApp() - app.CommandNotFound = func(c *Context, command string) { + app.CommandNotFound = func(c *cli.Context, command string) { beforeRun = true } - app.Commands = []Command{ - Command{ + app.Commands = []cli.Command{ + cli.Command{ Name: "bar", - Action: func(c *Context) { + Action: func(c *cli.Context) { subcommandRun = true }, }, @@ -625,336 +373,31 @@ func TestAppCommandNotFound(t *testing.T) { expect(t, subcommandRun, false) } -func TestGlobalFlag(t *testing.T) { - var globalFlag string - var globalFlagSet bool - app := NewApp() - app.Flags = []Flag{ - StringFlag{Name: "global, g", Usage: "global"}, - } - app.Action = func(c *Context) { - globalFlag = c.GlobalString("global") - globalFlagSet = c.GlobalIsSet("global") - } - app.Run([]string{"command", "-g", "foo"}) - expect(t, globalFlag, "foo") - expect(t, globalFlagSet, true) - -} - func TestGlobalFlagsInSubcommands(t *testing.T) { subcommandRun := false - parentFlag := false - app := NewApp() + app := cli.NewApp() - app.Flags = []Flag{ - BoolFlag{Name: "debug, d", Usage: "Enable debugging"}, + app.Flags = []cli.Flag{ + cli.BoolFlag{Name: "debug, d", Usage: "Enable debugging"}, } - app.Commands = []Command{ - Command{ + app.Commands = []cli.Command{ + cli.Command{ Name: "foo", - Flags: []Flag{ - BoolFlag{Name: "parent, p", Usage: "Parent flag"}, - }, - Subcommands: []Command{ + Subcommands: []cli.Command{ { Name: "bar", - Action: func(c *Context) { + Action: func(c *cli.Context) { if c.GlobalBool("debug") { subcommandRun = true } - if c.GlobalBool("parent") { - parentFlag = true - } }, }, }, }, } - app.Run([]string{"command", "-d", "foo", "-p", "bar"}) + app.Run([]string{"command", "-d", "foo", "bar"}) expect(t, subcommandRun, true) - expect(t, parentFlag, true) -} - -func TestApp_Run_CommandWithSubcommandHasHelpTopic(t *testing.T) { - var subcommandHelpTopics = [][]string{ - {"command", "foo", "--help"}, - {"command", "foo", "-h"}, - {"command", "foo", "help"}, - } - - for _, flagSet := range subcommandHelpTopics { - t.Logf("==> checking with flags %v", flagSet) - - app := NewApp() - buf := new(bytes.Buffer) - app.Writer = buf - - subCmdBar := Command{ - Name: "bar", - Usage: "does bar things", - } - subCmdBaz := Command{ - Name: "baz", - Usage: "does baz things", - } - cmd := Command{ - Name: "foo", - Description: "descriptive wall of text about how it does foo things", - Subcommands: []Command{subCmdBar, subCmdBaz}, - } - - app.Commands = []Command{cmd} - err := app.Run(flagSet) - - if err != nil { - t.Error(err) - } - - output := buf.String() - t.Logf("output: %q\n", buf.Bytes()) - - if strings.Contains(output, "No help topic for") { - t.Errorf("expect a help topic, got none: \n%q", output) - } - - for _, shouldContain := range []string{ - cmd.Name, cmd.Description, - subCmdBar.Name, subCmdBar.Usage, - subCmdBaz.Name, subCmdBaz.Usage, - } { - if !strings.Contains(output, shouldContain) { - t.Errorf("want 
help to contain %q, did not: \n%q", shouldContain, output) - } - } - } -} - -func TestApp_Run_SubcommandFullPath(t *testing.T) { - app := NewApp() - buf := new(bytes.Buffer) - app.Writer = buf - app.Name = "command" - subCmd := Command{ - Name: "bar", - Usage: "does bar things", - } - cmd := Command{ - Name: "foo", - Description: "foo commands", - Subcommands: []Command{subCmd}, - } - app.Commands = []Command{cmd} - - err := app.Run([]string{"command", "foo", "bar", "--help"}) - if err != nil { - t.Error(err) - } - - output := buf.String() - if !strings.Contains(output, "command foo bar - does bar things") { - t.Errorf("expected full path to subcommand: %s", output) - } - if !strings.Contains(output, "command foo bar [arguments...]") { - t.Errorf("expected full path to subcommand: %s", output) - } -} - -func TestApp_Run_SubcommandHelpName(t *testing.T) { - app := NewApp() - buf := new(bytes.Buffer) - app.Writer = buf - app.Name = "command" - subCmd := Command{ - Name: "bar", - HelpName: "custom", - Usage: "does bar things", - } - cmd := Command{ - Name: "foo", - Description: "foo commands", - Subcommands: []Command{subCmd}, - } - app.Commands = []Command{cmd} - - err := app.Run([]string{"command", "foo", "bar", "--help"}) - if err != nil { - t.Error(err) - } - - output := buf.String() - if !strings.Contains(output, "custom - does bar things") { - t.Errorf("expected HelpName for subcommand: %s", output) - } - if !strings.Contains(output, "custom [arguments...]") { - t.Errorf("expected HelpName to subcommand: %s", output) - } -} - -func TestApp_Run_CommandHelpName(t *testing.T) { - app := NewApp() - buf := new(bytes.Buffer) - app.Writer = buf - app.Name = "command" - subCmd := Command{ - Name: "bar", - Usage: "does bar things", - } - cmd := Command{ - Name: "foo", - HelpName: "custom", - Description: "foo commands", - Subcommands: []Command{subCmd}, - } - app.Commands = []Command{cmd} - - err := app.Run([]string{"command", "foo", "bar", "--help"}) - if err != nil { - t.Error(err) - } - - output := buf.String() - if !strings.Contains(output, "command foo bar - does bar things") { - t.Errorf("expected full path to subcommand: %s", output) - } - if !strings.Contains(output, "command foo bar [arguments...]") { - t.Errorf("expected full path to subcommand: %s", output) - } -} - -func TestApp_Run_CommandSubcommandHelpName(t *testing.T) { - app := NewApp() - buf := new(bytes.Buffer) - app.Writer = buf - app.Name = "base" - subCmd := Command{ - Name: "bar", - HelpName: "custom", - Usage: "does bar things", - } - cmd := Command{ - Name: "foo", - Description: "foo commands", - Subcommands: []Command{subCmd}, - } - app.Commands = []Command{cmd} - - err := app.Run([]string{"command", "foo", "--help"}) - if err != nil { - t.Error(err) - } - - output := buf.String() - if !strings.Contains(output, "base foo - foo commands") { - t.Errorf("expected full path to subcommand: %s", output) - } - if !strings.Contains(output, "base foo command [command options] [arguments...]") { - t.Errorf("expected full path to subcommand: %s", output) - } -} - -func TestApp_Run_Help(t *testing.T) { - var helpArguments = [][]string{{"boom", "--help"}, {"boom", "-h"}, {"boom", "help"}} - - for _, args := range helpArguments { - buf := new(bytes.Buffer) - - t.Logf("==> checking with arguments %v", args) - - app := NewApp() - app.Name = "boom" - app.Usage = "make an explosive entrance" - app.Writer = buf - app.Action = func(c *Context) { - buf.WriteString("boom I say!") - } - - err := app.Run(args) - if err != nil { - t.Error(err) 
- } - - output := buf.String() - t.Logf("output: %q\n", buf.Bytes()) - - if !strings.Contains(output, "boom - make an explosive entrance") { - t.Errorf("want help to contain %q, did not: \n%q", "boom - make an explosive entrance", output) - } - } -} - -func TestApp_Run_Version(t *testing.T) { - var versionArguments = [][]string{{"boom", "--version"}, {"boom", "-v"}} - - for _, args := range versionArguments { - buf := new(bytes.Buffer) - - t.Logf("==> checking with arguments %v", args) - - app := NewApp() - app.Name = "boom" - app.Usage = "make an explosive entrance" - app.Version = "0.1.0" - app.Writer = buf - app.Action = func(c *Context) { - buf.WriteString("boom I say!") - } - - err := app.Run(args) - if err != nil { - t.Error(err) - } - - output := buf.String() - t.Logf("output: %q\n", buf.Bytes()) - - if !strings.Contains(output, "0.1.0") { - t.Errorf("want version to contain %q, did not: \n%q", "0.1.0", output) - } - } -} - -func TestApp_Run_DoesNotOverwriteErrorFromBefore(t *testing.T) { - app := NewApp() - app.Action = func(c *Context) {} - app.Before = func(c *Context) error { return fmt.Errorf("before error") } - app.After = func(c *Context) error { return fmt.Errorf("after error") } - - err := app.Run([]string{"foo"}) - if err == nil { - t.Fatalf("expected to recieve error from Run, got none") - } - - if !strings.Contains(err.Error(), "before error") { - t.Errorf("expected text of error from Before method, but got none in \"%v\"", err) - } - if !strings.Contains(err.Error(), "after error") { - t.Errorf("expected text of error from After method, but got none in \"%v\"", err) - } -} - -func TestApp_Run_SubcommandDoesNotOverwriteErrorFromBefore(t *testing.T) { - app := NewApp() - app.Commands = []Command{ - Command{ - Name: "bar", - Before: func(c *Context) error { return fmt.Errorf("before error") }, - After: func(c *Context) error { return fmt.Errorf("after error") }, - }, - } - - err := app.Run([]string{"foo", "bar"}) - if err == nil { - t.Fatalf("expected to recieve error from Run, got none") - } - - if !strings.Contains(err.Error(), "before error") { - t.Errorf("expected text of error from Before method, but got none in \"%v\"", err) - } - if !strings.Contains(err.Error(), "after error") { - t.Errorf("expected text of error from After method, but got none in \"%v\"", err) - } } diff --git a/vendor/github.com/codegangsta/cli/cli.go b/vendor/github.com/codegangsta/cli/cli.go index 31dc9124..b7425458 100644 --- a/vendor/github.com/codegangsta/cli/cli.go +++ b/vendor/github.com/codegangsta/cli/cli.go @@ -17,24 +17,3 @@ // app.Run(os.Args) // } package cli - -import ( - "strings" -) - -type MultiError struct { - Errors []error -} - -func NewMultiError(err ...error) MultiError { - return MultiError{Errors: err} -} - -func (m MultiError) Error() string { - errs := make([]string, len(m.Errors)) - for i, err := range m.Errors { - errs[i] = err.Error() - } - - return strings.Join(errs, "\n") -} diff --git a/vendor/github.com/codegangsta/cli/cli_test.go b/vendor/github.com/codegangsta/cli/cli_test.go new file mode 100644 index 00000000..879a793d --- /dev/null +++ b/vendor/github.com/codegangsta/cli/cli_test.go @@ -0,0 +1,100 @@ +package cli_test + +import ( + "os" + + "github.com/codegangsta/cli" +) + +func Example() { + app := cli.NewApp() + app.Name = "todo" + app.Usage = "task list on the command line" + app.Commands = []cli.Command{ + { + Name: "add", + ShortName: "a", + Usage: "add a task to the list", + Action: func(c *cli.Context) { + println("added task: ", c.Args().First()) + 
}, + }, + { + Name: "complete", + ShortName: "c", + Usage: "complete a task on the list", + Action: func(c *cli.Context) { + println("completed task: ", c.Args().First()) + }, + }, + } + + app.Run(os.Args) +} + +func ExampleSubcommand() { + app := cli.NewApp() + app.Name = "say" + app.Commands = []cli.Command{ + { + Name: "hello", + ShortName: "hi", + Usage: "use it to see a description", + Description: "This is how we describe hello the function", + Subcommands: []cli.Command{ + { + Name: "english", + ShortName: "en", + Usage: "sends a greeting in english", + Description: "greets someone in english", + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "name", + Value: "Bob", + Usage: "Name of the person to greet", + }, + }, + Action: func(c *cli.Context) { + println("Hello, ", c.String("name")) + }, + }, { + Name: "spanish", + ShortName: "sp", + Usage: "sends a greeting in spanish", + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "surname", + Value: "Jones", + Usage: "Surname of the person to greet", + }, + }, + Action: func(c *cli.Context) { + println("Hola, ", c.String("surname")) + }, + }, { + Name: "french", + ShortName: "fr", + Usage: "sends a greeting in french", + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "nickname", + Value: "Stevie", + Usage: "Nickname of the person to greet", + }, + }, + Action: func(c *cli.Context) { + println("Bonjour, ", c.String("nickname")) + }, + }, + }, + }, { + Name: "bye", + Usage: "says goodbye", + Action: func(c *cli.Context) { + println("bye") + }, + }, + } + + app.Run(os.Args) +} diff --git a/vendor/github.com/codegangsta/cli/command.go b/vendor/github.com/codegangsta/cli/command.go index 824e77ba..dcc8de5c 100644 --- a/vendor/github.com/codegangsta/cli/command.go +++ b/vendor/github.com/codegangsta/cli/command.go @@ -10,24 +10,17 @@ import ( type Command struct { // The name of the command Name string - // short name of the command. Typically one character (deprecated, use `Aliases`) + // short name of the command. Typically one character ShortName string - // A list of aliases for the command - Aliases []string // A short description of the usage of this command Usage string // A longer explanation of how the command works Description string - // A short description of the arguments of this command - ArgsUsage string // The function to call when checking for bash command completions BashComplete func(context *Context) // An action to execute before any sub-subcommands are run, but after the context is ready // If a non-nil error is returned, no sub-subcommands are run Before func(context *Context) error - // An action to execute after any subcommands are run, but after the subcommand has finished - // It is run even if Action() panics - After func(context *Context) error // The function to call when this command is invoked Action func(context *Context) // List of child commands @@ -38,28 +31,16 @@ type Command struct { SkipFlagParsing bool // Boolean to hide built-in help command HideHelp bool - - // Full name of command for help, defaults to full command name, including parent commands. - HelpName string - commandNamePath []string -} - -// Returns the full name of the command. 
-// For subcommands this ensures that parent commands are part of the command path -func (c Command) FullName() string { - if c.commandNamePath == nil { - return c.Name - } - return strings.Join(c.commandNamePath, " ") } // Invokes the command given the context, parses ctx.Args() to generate command-specific flags func (c Command) Run(ctx *Context) error { - if len(c.Subcommands) > 0 || c.Before != nil || c.After != nil { + + if len(c.Subcommands) > 0 || c.Before != nil { return c.startApp(ctx) } - if !c.HideHelp && (HelpFlag != BoolFlag{}) { + if !c.HideHelp { // append help to flags c.Flags = append( c.Flags, @@ -74,57 +55,40 @@ func (c Command) Run(ctx *Context) error { set := flagSet(c.Name, c.Flags) set.SetOutput(ioutil.Discard) - var err error - if !c.SkipFlagParsing { - firstFlagIndex := -1 - terminatorIndex := -1 - for index, arg := range ctx.Args() { - if arg == "--" { - terminatorIndex = index - break - } else if strings.HasPrefix(arg, "-") && firstFlagIndex == -1 { - firstFlagIndex = index - } - } - - if firstFlagIndex > -1 { - args := ctx.Args() - regularArgs := make([]string, len(args[1:firstFlagIndex])) - copy(regularArgs, args[1:firstFlagIndex]) - - var flagArgs []string - if terminatorIndex > -1 { - flagArgs = args[firstFlagIndex:terminatorIndex] - regularArgs = append(regularArgs, args[terminatorIndex:]...) - } else { - flagArgs = args[firstFlagIndex:] - } - - err = set.Parse(append(flagArgs, regularArgs...)) - } else { - err = set.Parse(ctx.Args().Tail()) - } - } else { - if c.SkipFlagParsing { - err = set.Parse(append([]string{"--"}, ctx.Args().Tail()...)) + firstFlagIndex := -1 + for index, arg := range ctx.Args() { + if strings.HasPrefix(arg, "-") { + firstFlagIndex = index + break } } + var err error + if firstFlagIndex > -1 && !c.SkipFlagParsing { + args := ctx.Args() + regularArgs := args[1:firstFlagIndex] + flagArgs := args[firstFlagIndex:] + err = set.Parse(append(flagArgs, regularArgs...)) + } else { + err = set.Parse(ctx.Args().Tail()) + } + if err != nil { - fmt.Fprintln(ctx.App.Writer, "Incorrect Usage.") - fmt.Fprintln(ctx.App.Writer) + fmt.Printf("Incorrect Usage.\n\n") ShowCommandHelp(ctx, c.Name) + fmt.Println("") return err } nerr := normalizeFlags(c.Flags, set) if nerr != nil { - fmt.Fprintln(ctx.App.Writer, nerr) - fmt.Fprintln(ctx.App.Writer) + fmt.Println(nerr) + fmt.Println("") ShowCommandHelp(ctx, c.Name) + fmt.Println("") return nerr } - context := NewContext(ctx.App, set, ctx) + context := NewContext(ctx.App, set, ctx.globalSet) if checkCommandCompletions(context, c.Name) { return nil @@ -138,24 +102,9 @@ func (c Command) Run(ctx *Context) error { return nil } -func (c Command) Names() []string { - names := []string{c.Name} - - if c.ShortName != "" { - names = append(names, c.ShortName) - } - - return append(names, c.Aliases...) 
-} - // Returns true if Command.Name or Command.ShortName matches given name func (c Command) HasName(name string) bool { - for _, n := range c.Names() { - if n == name { - return true - } - } - return false + return c.Name == name || c.ShortName == name } func (c Command) startApp(ctx *Context) error { @@ -163,33 +112,17 @@ func (c Command) startApp(ctx *Context) error { // set the name and usage app.Name = fmt.Sprintf("%s %s", ctx.App.Name, c.Name) - if c.HelpName == "" { - app.HelpName = c.HelpName - } else { - app.HelpName = fmt.Sprintf("%s %s", ctx.App.Name, c.Name) - } - if c.Description != "" { app.Usage = c.Description } else { app.Usage = c.Usage } - // set CommandNotFound - app.CommandNotFound = ctx.App.CommandNotFound - // set the flags and commands app.Commands = c.Subcommands app.Flags = c.Flags app.HideHelp = c.HideHelp - app.Version = ctx.App.Version - app.HideVersion = ctx.App.HideVersion - app.Compiled = ctx.App.Compiled - app.Author = ctx.App.Author - app.Email = ctx.App.Email - app.Writer = ctx.App.Writer - // bash completion app.EnableBashCompletion = ctx.App.EnableBashCompletion if c.BashComplete != nil { @@ -198,19 +131,11 @@ func (c Command) startApp(ctx *Context) error { // set the actions app.Before = c.Before - app.After = c.After if c.Action != nil { app.Action = c.Action } else { app.Action = helpSubcommand.Action } - var newCmds []Command - for _, cc := range app.Commands { - cc.commandNamePath = []string{c.Name, cc.Name} - newCmds = append(newCmds, cc) - } - app.Commands = newCmds - return app.RunAsSubcommand(ctx) } diff --git a/vendor/github.com/codegangsta/cli/command_test.go b/vendor/github.com/codegangsta/cli/command_test.go index dd9fc87f..3afd83e7 100644 --- a/vendor/github.com/codegangsta/cli/command_test.go +++ b/vendor/github.com/codegangsta/cli/command_test.go @@ -1,43 +1,48 @@ -package cli +package cli_test import ( - "errors" "flag" + "github.com/codegangsta/cli" "testing" ) -func TestCommandFlagParsing(t *testing.T) { - cases := []struct { - testArgs []string - skipFlagParsing bool - expectedErr error - }{ - {[]string{"blah", "blah", "-break"}, false, errors.New("flag provided but not defined: -break")}, // Test normal "not ignoring flags" flow - {[]string{"blah", "blah"}, true, nil}, // Test SkipFlagParsing without any args that look like flags - {[]string{"blah", "-break"}, true, nil}, // Test SkipFlagParsing with random flag arg - {[]string{"blah", "-help"}, true, nil}, // Test SkipFlagParsing with "special" help flag arg +func TestCommandDoNotIgnoreFlags(t *testing.T) { + app := cli.NewApp() + set := flag.NewFlagSet("test", 0) + test := []string{"blah", "blah", "-break"} + set.Parse(test) + + c := cli.NewContext(app, set, set) + + command := cli.Command { + Name: "test-cmd", + ShortName: "tc", + Usage: "this is for testing", + Description: "testing", + Action: func(_ *cli.Context) { }, } + err := command.Run(c) - for _, c := range cases { - app := NewApp() - set := flag.NewFlagSet("test", 0) - set.Parse(c.testArgs) - - context := NewContext(app, set, nil) - - command := Command{ - Name: "test-cmd", - Aliases: []string{"tc"}, - Usage: "this is for testing", - Description: "testing", - Action: func(_ *Context) {}, - } - - command.SkipFlagParsing = c.skipFlagParsing - - err := command.Run(context) - - expect(t, err, c.expectedErr) - expect(t, []string(context.Args()), c.testArgs) - } + expect(t, err.Error(), "flag provided but not defined: -break") +} + +func TestCommandIgnoreFlags(t *testing.T) { + app := cli.NewApp() + set := 
flag.NewFlagSet("test", 0) + test := []string{"blah", "blah"} + set.Parse(test) + + c := cli.NewContext(app, set, set) + + command := cli.Command { + Name: "test-cmd", + ShortName: "tc", + Usage: "this is for testing", + Description: "testing", + Action: func(_ *cli.Context) { }, + SkipFlagParsing: true, + } + err := command.Run(c) + + expect(t, err, nil) } diff --git a/vendor/github.com/codegangsta/cli/context.go b/vendor/github.com/codegangsta/cli/context.go index f541f41c..1e023cef 100644 --- a/vendor/github.com/codegangsta/cli/context.go +++ b/vendor/github.com/codegangsta/cli/context.go @@ -5,7 +5,6 @@ import ( "flag" "strconv" "strings" - "time" ) // Context is a type that is passed through to @@ -13,17 +12,16 @@ import ( // can be used to retrieve context-specific Args and // parsed command-line options. type Context struct { - App *App - Command Command - flagSet *flag.FlagSet - setFlags map[string]bool - globalSetFlags map[string]bool - parentContext *Context + App *App + Command Command + flagSet *flag.FlagSet + globalSet *flag.FlagSet + setFlags map[string]bool } // Creates a new context. For use in when invoking an App or Command action. -func NewContext(app *App, set *flag.FlagSet, parentCtx *Context) *Context { - return &Context{App: app, flagSet: set, parentContext: parentCtx} +func NewContext(app *App, set *flag.FlagSet, globalSet *flag.FlagSet) *Context { + return &Context{App: app, flagSet: set, globalSet: globalSet} } // Looks up the value of a local int flag, returns 0 if no int flag exists @@ -31,11 +29,6 @@ func (c *Context) Int(name string) int { return lookupInt(name, c.flagSet) } -// Looks up the value of a local time.Duration flag, returns 0 if no time.Duration flag exists -func (c *Context) Duration(name string) time.Duration { - return lookupDuration(name, c.flagSet) -} - // Looks up the value of a local float64 flag, returns 0 if no float64 flag exists func (c *Context) Float64(name string) float64 { return lookupFloat64(name, c.flagSet) @@ -73,66 +66,35 @@ func (c *Context) Generic(name string) interface{} { // Looks up the value of a global int flag, returns 0 if no int flag exists func (c *Context) GlobalInt(name string) int { - if fs := lookupGlobalFlagSet(name, c); fs != nil { - return lookupInt(name, fs) - } - return 0 -} - -// Looks up the value of a global time.Duration flag, returns 0 if no time.Duration flag exists -func (c *Context) GlobalDuration(name string) time.Duration { - if fs := lookupGlobalFlagSet(name, c); fs != nil { - return lookupDuration(name, fs) - } - return 0 + return lookupInt(name, c.globalSet) } // Looks up the value of a global bool flag, returns false if no bool flag exists func (c *Context) GlobalBool(name string) bool { - if fs := lookupGlobalFlagSet(name, c); fs != nil { - return lookupBool(name, fs) - } - return false + return lookupBool(name, c.globalSet) } // Looks up the value of a global string flag, returns "" if no string flag exists func (c *Context) GlobalString(name string) string { - if fs := lookupGlobalFlagSet(name, c); fs != nil { - return lookupString(name, fs) - } - return "" + return lookupString(name, c.globalSet) } // Looks up the value of a global string slice flag, returns nil if no string slice flag exists func (c *Context) GlobalStringSlice(name string) []string { - if fs := lookupGlobalFlagSet(name, c); fs != nil { - return lookupStringSlice(name, fs) - } - return nil + return lookupStringSlice(name, c.globalSet) } // Looks up the value of a global int slice flag, returns nil if no int slice flag 
exists func (c *Context) GlobalIntSlice(name string) []int { - if fs := lookupGlobalFlagSet(name, c); fs != nil { - return lookupIntSlice(name, fs) - } - return nil + return lookupIntSlice(name, c.globalSet) } // Looks up the value of a global generic flag, returns nil if no generic flag exists func (c *Context) GlobalGeneric(name string) interface{} { - if fs := lookupGlobalFlagSet(name, c); fs != nil { - return lookupGeneric(name, fs) - } - return nil + return lookupGeneric(name, c.globalSet) } -// Returns the number of flags set -func (c *Context) NumFlags() int { - return c.flagSet.NFlag() -} - -// Determines if the flag was actually set +// Determines if the flag was actually set exists func (c *Context) IsSet(name string) bool { if c.setFlags == nil { c.setFlags = make(map[string]bool) @@ -143,52 +105,6 @@ func (c *Context) IsSet(name string) bool { return c.setFlags[name] == true } -// Determines if the global flag was actually set -func (c *Context) GlobalIsSet(name string) bool { - if c.globalSetFlags == nil { - c.globalSetFlags = make(map[string]bool) - ctx := c - if ctx.parentContext != nil { - ctx = ctx.parentContext - } - for ; ctx != nil && c.globalSetFlags[name] == false; ctx = ctx.parentContext { - ctx.flagSet.Visit(func(f *flag.Flag) { - c.globalSetFlags[f.Name] = true - }) - } - } - return c.globalSetFlags[name] -} - -// Returns a slice of flag names used in this context. -func (c *Context) FlagNames() (names []string) { - for _, flag := range c.Command.Flags { - name := strings.Split(flag.getName(), ",")[0] - if name == "help" { - continue - } - names = append(names, name) - } - return -} - -// Returns a slice of global flag names used by the app. -func (c *Context) GlobalFlagNames() (names []string) { - for _, flag := range c.App.Flags { - name := strings.Split(flag.getName(), ",")[0] - if name == "help" || name == "version" { - continue - } - names = append(names, name) - } - return -} - -// Returns the parent context, if any -func (c *Context) Parent() *Context { - return c.parentContext -} - type Args []string // Returns the command line arguments associated with the context. 
@@ -233,18 +149,6 @@ func (a Args) Swap(from, to int) error { return nil } -func lookupGlobalFlagSet(name string, ctx *Context) *flag.FlagSet { - if ctx.parentContext != nil { - ctx = ctx.parentContext - } - for ; ctx != nil; ctx = ctx.parentContext { - if f := ctx.flagSet.Lookup(name); f != nil { - return ctx.flagSet - } - } - return nil -} - func lookupInt(name string, set *flag.FlagSet) int { f := set.Lookup(name) if f != nil { @@ -258,18 +162,6 @@ func lookupInt(name string, set *flag.FlagSet) int { return 0 } -func lookupDuration(name string, set *flag.FlagSet) time.Duration { - f := set.Lookup(name) - if f != nil { - val, err := time.ParseDuration(f.Value.String()) - if err == nil { - return val - } - } - - return 0 -} - func lookupFloat64(name string, set *flag.FlagSet) float64 { f := set.Lookup(name) if f != nil { diff --git a/vendor/github.com/codegangsta/cli/context_test.go b/vendor/github.com/codegangsta/cli/context_test.go index 7f8e9289..89041b99 100644 --- a/vendor/github.com/codegangsta/cli/context_test.go +++ b/vendor/github.com/codegangsta/cli/context_test.go @@ -1,9 +1,9 @@ -package cli +package cli_test import ( "flag" + "github.com/codegangsta/cli" "testing" - "time" ) func TestNewContext(t *testing.T) { @@ -11,9 +11,8 @@ func TestNewContext(t *testing.T) { set.Int("myflag", 12, "doc") globalSet := flag.NewFlagSet("test", 0) globalSet.Int("myflag", 42, "doc") - globalCtx := NewContext(nil, globalSet, nil) - command := Command{Name: "mycommand"} - c := NewContext(nil, set, globalCtx) + command := cli.Command{Name: "mycommand"} + c := cli.NewContext(nil, set, globalSet) c.Command = command expect(t, c.Int("myflag"), 12) expect(t, c.GlobalInt("myflag"), 42) @@ -23,42 +22,35 @@ func TestNewContext(t *testing.T) { func TestContext_Int(t *testing.T) { set := flag.NewFlagSet("test", 0) set.Int("myflag", 12, "doc") - c := NewContext(nil, set, nil) + c := cli.NewContext(nil, set, set) expect(t, c.Int("myflag"), 12) } -func TestContext_Duration(t *testing.T) { - set := flag.NewFlagSet("test", 0) - set.Duration("myflag", time.Duration(12*time.Second), "doc") - c := NewContext(nil, set, nil) - expect(t, c.Duration("myflag"), time.Duration(12*time.Second)) -} - func TestContext_String(t *testing.T) { set := flag.NewFlagSet("test", 0) set.String("myflag", "hello world", "doc") - c := NewContext(nil, set, nil) + c := cli.NewContext(nil, set, set) expect(t, c.String("myflag"), "hello world") } func TestContext_Bool(t *testing.T) { set := flag.NewFlagSet("test", 0) set.Bool("myflag", false, "doc") - c := NewContext(nil, set, nil) + c := cli.NewContext(nil, set, set) expect(t, c.Bool("myflag"), false) } func TestContext_BoolT(t *testing.T) { set := flag.NewFlagSet("test", 0) set.Bool("myflag", true, "doc") - c := NewContext(nil, set, nil) + c := cli.NewContext(nil, set, set) expect(t, c.BoolT("myflag"), true) } func TestContext_Args(t *testing.T) { set := flag.NewFlagSet("test", 0) set.Bool("myflag", false, "doc") - c := NewContext(nil, set, nil) + c := cli.NewContext(nil, set, set) set.Parse([]string{"--myflag", "bat", "baz"}) expect(t, len(c.Args()), 2) expect(t, c.Bool("myflag"), true) @@ -68,46 +60,9 @@ func TestContext_IsSet(t *testing.T) { set := flag.NewFlagSet("test", 0) set.Bool("myflag", false, "doc") set.String("otherflag", "hello world", "doc") - globalSet := flag.NewFlagSet("test", 0) - globalSet.Bool("myflagGlobal", true, "doc") - globalCtx := NewContext(nil, globalSet, nil) - c := NewContext(nil, set, globalCtx) + c := cli.NewContext(nil, set, set) 
set.Parse([]string{"--myflag", "bat", "baz"}) - globalSet.Parse([]string{"--myflagGlobal", "bat", "baz"}) expect(t, c.IsSet("myflag"), true) expect(t, c.IsSet("otherflag"), false) expect(t, c.IsSet("bogusflag"), false) - expect(t, c.IsSet("myflagGlobal"), false) -} - -func TestContext_GlobalIsSet(t *testing.T) { - set := flag.NewFlagSet("test", 0) - set.Bool("myflag", false, "doc") - set.String("otherflag", "hello world", "doc") - globalSet := flag.NewFlagSet("test", 0) - globalSet.Bool("myflagGlobal", true, "doc") - globalSet.Bool("myflagGlobalUnset", true, "doc") - globalCtx := NewContext(nil, globalSet, nil) - c := NewContext(nil, set, globalCtx) - set.Parse([]string{"--myflag", "bat", "baz"}) - globalSet.Parse([]string{"--myflagGlobal", "bat", "baz"}) - expect(t, c.GlobalIsSet("myflag"), false) - expect(t, c.GlobalIsSet("otherflag"), false) - expect(t, c.GlobalIsSet("bogusflag"), false) - expect(t, c.GlobalIsSet("myflagGlobal"), true) - expect(t, c.GlobalIsSet("myflagGlobalUnset"), false) - expect(t, c.GlobalIsSet("bogusGlobal"), false) -} - -func TestContext_NumFlags(t *testing.T) { - set := flag.NewFlagSet("test", 0) - set.Bool("myflag", false, "doc") - set.String("otherflag", "hello world", "doc") - globalSet := flag.NewFlagSet("test", 0) - globalSet.Bool("myflagGlobal", true, "doc") - globalCtx := NewContext(nil, globalSet, nil) - c := NewContext(nil, set, globalCtx) - set.Parse([]string{"--myflag", "--otherflag=foo"}) - globalSet.Parse([]string{"--myflagGlobal"}) - expect(t, c.NumFlags(), 2) } diff --git a/vendor/github.com/codegangsta/cli/flag.go b/vendor/github.com/codegangsta/cli/flag.go index 9b22d7f1..60353e22 100644 --- a/vendor/github.com/codegangsta/cli/flag.go +++ b/vendor/github.com/codegangsta/cli/flag.go @@ -6,7 +6,6 @@ import ( "os" "strconv" "strings" - "time" ) // This flag enables bash-completion for all commands and subcommands @@ -21,8 +20,6 @@ var VersionFlag = BoolFlag{ } // This flag prints the help for all commands and subcommands -// Set to the zero value (BoolFlag{}) to disable flag -- keeps subcommand -// unless HideHelp is set to true) var HelpFlag = BoolFlag{ Name: "help, h", Usage: "show help", @@ -69,24 +66,15 @@ type GenericFlag struct { EnvVar string } -// String returns the string representation of the generic flag to display the -// help text to the user (uses the String() method of the generic flag to show -// the value) func (f GenericFlag) String() string { - return withEnvHint(f.EnvVar, fmt.Sprintf("%s%s \"%v\"\t%v", prefixFor(f.Name), f.Name, f.Value, f.Usage)) + return withEnvHint(f.EnvVar, fmt.Sprintf("%s%s %v\t`%v` %s", prefixFor(f.Name), f.Name, f.Value, "-"+f.Name+" option -"+f.Name+" option", f.Usage)) } -// Apply takes the flagset and calls Set on the generic flag with the value -// provided by the user for parsing by the flag func (f GenericFlag) Apply(set *flag.FlagSet) { val := f.Value if f.EnvVar != "" { - for _, envVar := range strings.Split(f.EnvVar, ",") { - envVar = strings.TrimSpace(envVar) - if envVal := os.Getenv(envVar); envVal != "" { - val.Set(envVal) - break - } + if envVal := os.Getenv(f.EnvVar); envVal != "" { + val.Set(envVal) } } @@ -99,27 +87,21 @@ func (f GenericFlag) getName() string { return f.Name } -// StringSlice is an opaque type for []string to satisfy flag.Value type StringSlice []string -// Set appends the string value to the list of values func (f *StringSlice) Set(value string) error { *f = append(*f, value) return nil } -// String returns a readable representation of this value (for usage defaults) func 
(f *StringSlice) String() string { return fmt.Sprintf("%s", *f) } -// Value returns the slice of strings set by this flag func (f *StringSlice) Value() []string { return *f } -// StringSlice is a string flag that can be specified multiple times on the -// command-line type StringSliceFlag struct { Name string Value *StringSlice @@ -127,34 +109,24 @@ type StringSliceFlag struct { EnvVar string } -// String returns the usage func (f StringSliceFlag) String() string { firstName := strings.Trim(strings.Split(f.Name, ",")[0], " ") pref := prefixFor(firstName) - return withEnvHint(f.EnvVar, fmt.Sprintf("%s [%v]\t%v", prefixedNames(f.Name), pref+firstName+" option "+pref+firstName+" option", f.Usage)) + return withEnvHint(f.EnvVar, fmt.Sprintf("%s '%v'\t%v", prefixedNames(f.Name), pref+firstName+" option "+pref+firstName+" option", f.Usage)) } -// Apply populates the flag given the flag set and environment func (f StringSliceFlag) Apply(set *flag.FlagSet) { if f.EnvVar != "" { - for _, envVar := range strings.Split(f.EnvVar, ",") { - envVar = strings.TrimSpace(envVar) - if envVal := os.Getenv(envVar); envVal != "" { - newVal := &StringSlice{} - for _, s := range strings.Split(envVal, ",") { - s = strings.TrimSpace(s) - newVal.Set(s) - } - f.Value = newVal - break + if envVal := os.Getenv(f.EnvVar); envVal != "" { + newVal := &StringSlice{} + for _, s := range strings.Split(envVal, ",") { + newVal.Set(s) } + f.Value = newVal } } eachName(f.Name, func(name string) { - if f.Value == nil { - f.Value = &StringSlice{} - } set.Var(f.Value, name, f.Usage) }) } @@ -163,11 +135,10 @@ func (f StringSliceFlag) getName() string { return f.Name } -// StringSlice is an opaque type for []int to satisfy flag.Value type IntSlice []int -// Set parses the value into an integer and appends it to the list of values func (f *IntSlice) Set(value string) error { + tmp, err := strconv.Atoi(value) if err != nil { return err @@ -177,18 +148,14 @@ func (f *IntSlice) Set(value string) error { return nil } -// String returns a readable representation of this value (for usage defaults) func (f *IntSlice) String() string { return fmt.Sprintf("%d", *f) } -// Value returns the slice of ints set by this flag func (f *IntSlice) Value() []int { return *f } -// IntSliceFlag is an int flag that can be specified multiple times on the -// command-line type IntSliceFlag struct { Name string Value *IntSlice @@ -196,37 +163,27 @@ type IntSliceFlag struct { EnvVar string } -// String returns the usage func (f IntSliceFlag) String() string { firstName := strings.Trim(strings.Split(f.Name, ",")[0], " ") pref := prefixFor(firstName) - return withEnvHint(f.EnvVar, fmt.Sprintf("%s [%v]\t%v", prefixedNames(f.Name), pref+firstName+" option "+pref+firstName+" option", f.Usage)) + return withEnvHint(f.EnvVar, fmt.Sprintf("%s '%v'\t%v", prefixedNames(f.Name), pref+firstName+" option "+pref+firstName+" option", f.Usage)) } -// Apply populates the flag given the flag set and environment func (f IntSliceFlag) Apply(set *flag.FlagSet) { if f.EnvVar != "" { - for _, envVar := range strings.Split(f.EnvVar, ",") { - envVar = strings.TrimSpace(envVar) - if envVal := os.Getenv(envVar); envVal != "" { - newVal := &IntSlice{} - for _, s := range strings.Split(envVal, ",") { - s = strings.TrimSpace(s) - err := newVal.Set(s) - if err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - } + if envVal := os.Getenv(f.EnvVar); envVal != "" { + newVal := &IntSlice{} + for _, s := range strings.Split(envVal, ",") { + err := newVal.Set(s) + if err != nil { + 
fmt.Fprintf(os.Stderr, err.Error()) } - f.Value = newVal - break } + f.Value = newVal } } eachName(f.Name, func(name string) { - if f.Value == nil { - f.Value = &IntSlice{} - } set.Var(f.Value, name, f.Usage) }) } @@ -235,40 +192,28 @@ func (f IntSliceFlag) getName() string { return f.Name } -// BoolFlag is a switch that defaults to false type BoolFlag struct { - Name string - Usage string - EnvVar string - Destination *bool + Name string + Usage string + EnvVar string } -// String returns a readable representation of this value (for usage defaults) func (f BoolFlag) String() string { return withEnvHint(f.EnvVar, fmt.Sprintf("%s\t%v", prefixedNames(f.Name), f.Usage)) } -// Apply populates the flag given the flag set and environment func (f BoolFlag) Apply(set *flag.FlagSet) { val := false if f.EnvVar != "" { - for _, envVar := range strings.Split(f.EnvVar, ",") { - envVar = strings.TrimSpace(envVar) - if envVal := os.Getenv(envVar); envVal != "" { - envValBool, err := strconv.ParseBool(envVal) - if err == nil { - val = envValBool - } - break + if envVal := os.Getenv(f.EnvVar); envVal != "" { + envValBool, err := strconv.ParseBool(envVal) + if err == nil { + val = envValBool } } } eachName(f.Name, func(name string) { - if f.Destination != nil { - set.BoolVar(f.Destination, name, val, f.Usage) - return - } set.Bool(name, val, f.Usage) }) } @@ -277,41 +222,28 @@ func (f BoolFlag) getName() string { return f.Name } -// BoolTFlag this represents a boolean flag that is true by default, but can -// still be set to false by --some-flag=false type BoolTFlag struct { - Name string - Usage string - EnvVar string - Destination *bool + Name string + Usage string + EnvVar string } -// String returns a readable representation of this value (for usage defaults) func (f BoolTFlag) String() string { return withEnvHint(f.EnvVar, fmt.Sprintf("%s\t%v", prefixedNames(f.Name), f.Usage)) } -// Apply populates the flag given the flag set and environment func (f BoolTFlag) Apply(set *flag.FlagSet) { val := true if f.EnvVar != "" { - for _, envVar := range strings.Split(f.EnvVar, ",") { - envVar = strings.TrimSpace(envVar) - if envVal := os.Getenv(envVar); envVal != "" { - envValBool, err := strconv.ParseBool(envVal) - if err == nil { - val = envValBool - break - } + if envVal := os.Getenv(f.EnvVar); envVal != "" { + envValBool, err := strconv.ParseBool(envVal) + if err == nil { + val = envValBool } } } eachName(f.Name, func(name string) { - if f.Destination != nil { - set.BoolVar(f.Destination, name, val, f.Usage) - return - } set.Bool(name, val, f.Usage) }) } @@ -320,22 +252,19 @@ func (f BoolTFlag) getName() string { return f.Name } -// StringFlag represents a flag that takes as string value type StringFlag struct { - Name string - Value string - Usage string - EnvVar string - Destination *string + Name string + Value string + Usage string + EnvVar string } -// String returns the usage func (f StringFlag) String() string { var fmtString string fmtString = "%s %v\t%v" if len(f.Value) > 0 { - fmtString = "%s \"%v\"\t%v" + fmtString = "%s '%v'\t%v" } else { fmtString = "%s %v\t%v" } @@ -343,23 +272,14 @@ func (f StringFlag) String() string { return withEnvHint(f.EnvVar, fmt.Sprintf(fmtString, prefixedNames(f.Name), f.Value, f.Usage)) } -// Apply populates the flag given the flag set and environment func (f StringFlag) Apply(set *flag.FlagSet) { if f.EnvVar != "" { - for _, envVar := range strings.Split(f.EnvVar, ",") { - envVar = strings.TrimSpace(envVar) - if envVal := os.Getenv(envVar); envVal != "" { - f.Value = 
envVal - break - } + if envVal := os.Getenv(f.EnvVar); envVal != "" { + f.Value = envVal } } eachName(f.Name, func(name string) { - if f.Destination != nil { - set.StringVar(f.Destination, name, f.Value, f.Usage) - return - } set.String(name, f.Value, f.Usage) }) } @@ -368,41 +288,28 @@ func (f StringFlag) getName() string { return f.Name } -// IntFlag is a flag that takes an integer -// Errors if the value provided cannot be parsed type IntFlag struct { - Name string - Value int - Usage string - EnvVar string - Destination *int + Name string + Value int + Usage string + EnvVar string } -// String returns the usage func (f IntFlag) String() string { - return withEnvHint(f.EnvVar, fmt.Sprintf("%s \"%v\"\t%v", prefixedNames(f.Name), f.Value, f.Usage)) + return withEnvHint(f.EnvVar, fmt.Sprintf("%s '%v'\t%v", prefixedNames(f.Name), f.Value, f.Usage)) } -// Apply populates the flag given the flag set and environment func (f IntFlag) Apply(set *flag.FlagSet) { if f.EnvVar != "" { - for _, envVar := range strings.Split(f.EnvVar, ",") { - envVar = strings.TrimSpace(envVar) - if envVal := os.Getenv(envVar); envVal != "" { - envValInt, err := strconv.ParseInt(envVal, 0, 64) - if err == nil { - f.Value = int(envValInt) - break - } + if envVal := os.Getenv(f.EnvVar); envVal != "" { + envValInt, err := strconv.ParseUint(envVal, 10, 64) + if err == nil { + f.Value = int(envValInt) } } } eachName(f.Name, func(name string) { - if f.Destination != nil { - set.IntVar(f.Destination, name, f.Value, f.Usage) - return - } set.Int(name, f.Value, f.Usage) }) } @@ -411,83 +318,28 @@ func (f IntFlag) getName() string { return f.Name } -// DurationFlag is a flag that takes a duration specified in Go's duration -// format: https://golang.org/pkg/time/#ParseDuration -type DurationFlag struct { - Name string - Value time.Duration - Usage string - EnvVar string - Destination *time.Duration -} - -// String returns a readable representation of this value (for usage defaults) -func (f DurationFlag) String() string { - return withEnvHint(f.EnvVar, fmt.Sprintf("%s \"%v\"\t%v", prefixedNames(f.Name), f.Value, f.Usage)) -} - -// Apply populates the flag given the flag set and environment -func (f DurationFlag) Apply(set *flag.FlagSet) { - if f.EnvVar != "" { - for _, envVar := range strings.Split(f.EnvVar, ",") { - envVar = strings.TrimSpace(envVar) - if envVal := os.Getenv(envVar); envVal != "" { - envValDuration, err := time.ParseDuration(envVal) - if err == nil { - f.Value = envValDuration - break - } - } - } - } - - eachName(f.Name, func(name string) { - if f.Destination != nil { - set.DurationVar(f.Destination, name, f.Value, f.Usage) - return - } - set.Duration(name, f.Value, f.Usage) - }) -} - -func (f DurationFlag) getName() string { - return f.Name -} - -// Float64Flag is a flag that takes an float value -// Errors if the value provided cannot be parsed type Float64Flag struct { - Name string - Value float64 - Usage string - EnvVar string - Destination *float64 + Name string + Value float64 + Usage string + EnvVar string } -// String returns the usage func (f Float64Flag) String() string { - return withEnvHint(f.EnvVar, fmt.Sprintf("%s \"%v\"\t%v", prefixedNames(f.Name), f.Value, f.Usage)) + return withEnvHint(f.EnvVar, fmt.Sprintf("%s '%v'\t%v", prefixedNames(f.Name), f.Value, f.Usage)) } -// Apply populates the flag given the flag set and environment func (f Float64Flag) Apply(set *flag.FlagSet) { if f.EnvVar != "" { - for _, envVar := range strings.Split(f.EnvVar, ",") { - envVar = strings.TrimSpace(envVar) - if 
envVal := os.Getenv(envVar); envVal != "" { - envValFloat, err := strconv.ParseFloat(envVal, 10) - if err == nil { - f.Value = float64(envValFloat) - } + if envVal := os.Getenv(f.EnvVar); envVal != "" { + envValFloat, err := strconv.ParseFloat(envVal, 10) + if err == nil { + f.Value = float64(envValFloat) } } } eachName(f.Name, func(name string) { - if f.Destination != nil { - set.Float64Var(f.Destination, name, f.Value, f.Usage) - return - } set.Float64(name, f.Value, f.Usage) }) } @@ -521,7 +373,7 @@ func prefixedNames(fullName string) (prefixed string) { func withEnvHint(envVar, str string) string { envText := "" if envVar != "" { - envText = fmt.Sprintf(" [$%s]", strings.Join(strings.Split(envVar, ","), ", $")) + envText = fmt.Sprintf(" [$%s]", envVar) } return str + envText } diff --git a/vendor/github.com/codegangsta/cli/flag_test.go b/vendor/github.com/codegangsta/cli/flag_test.go index 4462d3fe..41032361 100644 --- a/vendor/github.com/codegangsta/cli/flag_test.go +++ b/vendor/github.com/codegangsta/cli/flag_test.go @@ -1,6 +1,8 @@ -package cli +package cli_test import ( + "github.com/codegangsta/cli" + "fmt" "os" "reflect" @@ -19,7 +21,7 @@ var boolFlagTests = []struct { func TestBoolFlagHelpOutput(t *testing.T) { for _, test := range boolFlagTests { - flag := BoolFlag{Name: test.name} + flag := cli.BoolFlag{Name: test.name} output := flag.String() if output != test.expected { @@ -36,13 +38,13 @@ var stringFlagTests = []struct { {"help", "", "--help \t"}, {"h", "", "-h \t"}, {"h", "", "-h \t"}, - {"test", "Something", "--test \"Something\"\t"}, + {"test", "Something", "--test 'Something'\t"}, } func TestStringFlagHelpOutput(t *testing.T) { for _, test := range stringFlagTests { - flag := StringFlag{Name: test.name, Value: test.value} + flag := cli.StringFlag{Name: test.name, Value: test.value} output := flag.String() if output != test.expected { @@ -52,10 +54,10 @@ func TestStringFlagHelpOutput(t *testing.T) { } func TestStringFlagWithEnvVarHelpOutput(t *testing.T) { - os.Clearenv() + os.Setenv("APP_FOO", "derp") for _, test := range stringFlagTests { - flag := StringFlag{Name: test.name, Value: test.value, EnvVar: "APP_FOO"} + flag := cli.StringFlag{Name: test.name, Value: test.value, EnvVar: "APP_FOO"} output := flag.String() if !strings.HasSuffix(output, " [$APP_FOO]") { @@ -66,35 +68,35 @@ func TestStringFlagWithEnvVarHelpOutput(t *testing.T) { var stringSliceFlagTests = []struct { name string - value *StringSlice + value *cli.StringSlice expected string }{ - {"help", func() *StringSlice { - s := &StringSlice{} + {"help", func() *cli.StringSlice { + s := &cli.StringSlice{} s.Set("") return s - }(), "--help [--help option --help option]\t"}, - {"h", func() *StringSlice { - s := &StringSlice{} + }(), "--help '--help option --help option'\t"}, + {"h", func() *cli.StringSlice { + s := &cli.StringSlice{} s.Set("") return s - }(), "-h [-h option -h option]\t"}, - {"h", func() *StringSlice { - s := &StringSlice{} + }(), "-h '-h option -h option'\t"}, + {"h", func() *cli.StringSlice { + s := &cli.StringSlice{} s.Set("") return s - }(), "-h [-h option -h option]\t"}, - {"test", func() *StringSlice { - s := &StringSlice{} + }(), "-h '-h option -h option'\t"}, + {"test", func() *cli.StringSlice { + s := &cli.StringSlice{} s.Set("Something") return s - }(), "--test [--test option --test option]\t"}, + }(), "--test '--test option --test option'\t"}, } func TestStringSliceFlagHelpOutput(t *testing.T) { for _, test := range stringSliceFlagTests { - flag := StringSliceFlag{Name: test.name, 
Value: test.value} + flag := cli.StringSliceFlag{Name: test.name, Value: test.value} output := flag.String() if output != test.expected { @@ -104,10 +106,10 @@ func TestStringSliceFlagHelpOutput(t *testing.T) { } func TestStringSliceFlagWithEnvVarHelpOutput(t *testing.T) { - os.Clearenv() + os.Setenv("APP_QWWX", "11,4") for _, test := range stringSliceFlagTests { - flag := StringSliceFlag{Name: test.name, Value: test.value, EnvVar: "APP_QWWX"} + flag := cli.StringSliceFlag{Name: test.name, Value: test.value, EnvVar: "APP_QWWX"} output := flag.String() if !strings.HasSuffix(output, " [$APP_QWWX]") { @@ -120,14 +122,14 @@ var intFlagTests = []struct { name string expected string }{ - {"help", "--help \"0\"\t"}, - {"h", "-h \"0\"\t"}, + {"help", "--help '0'\t"}, + {"h", "-h '0'\t"}, } func TestIntFlagHelpOutput(t *testing.T) { for _, test := range intFlagTests { - flag := IntFlag{Name: test.name} + flag := cli.IntFlag{Name: test.name} output := flag.String() if output != test.expected { @@ -137,43 +139,10 @@ func TestIntFlagHelpOutput(t *testing.T) { } func TestIntFlagWithEnvVarHelpOutput(t *testing.T) { - os.Clearenv() + os.Setenv("APP_BAR", "2") for _, test := range intFlagTests { - flag := IntFlag{Name: test.name, EnvVar: "APP_BAR"} - output := flag.String() - - if !strings.HasSuffix(output, " [$APP_BAR]") { - t.Errorf("%s does not end with [$APP_BAR]", output) - } - } -} - -var durationFlagTests = []struct { - name string - expected string -}{ - {"help", "--help \"0\"\t"}, - {"h", "-h \"0\"\t"}, -} - -func TestDurationFlagHelpOutput(t *testing.T) { - - for _, test := range durationFlagTests { - flag := DurationFlag{Name: test.name} - output := flag.String() - - if output != test.expected { - t.Errorf("%s does not match %s", output, test.expected) - } - } -} - -func TestDurationFlagWithEnvVarHelpOutput(t *testing.T) { - os.Clearenv() - os.Setenv("APP_BAR", "2h3m6s") - for _, test := range durationFlagTests { - flag := DurationFlag{Name: test.name, EnvVar: "APP_BAR"} + flag := cli.IntFlag{Name: test.name, EnvVar: "APP_BAR"} output := flag.String() if !strings.HasSuffix(output, " [$APP_BAR]") { @@ -184,23 +153,23 @@ func TestDurationFlagWithEnvVarHelpOutput(t *testing.T) { var intSliceFlagTests = []struct { name string - value *IntSlice + value *cli.IntSlice expected string }{ - {"help", &IntSlice{}, "--help [--help option --help option]\t"}, - {"h", &IntSlice{}, "-h [-h option -h option]\t"}, - {"h", &IntSlice{}, "-h [-h option -h option]\t"}, - {"test", func() *IntSlice { - i := &IntSlice{} + {"help", &cli.IntSlice{}, "--help '--help option --help option'\t"}, + {"h", &cli.IntSlice{}, "-h '-h option -h option'\t"}, + {"h", &cli.IntSlice{}, "-h '-h option -h option'\t"}, + {"test", func() *cli.IntSlice { + i := &cli.IntSlice{} i.Set("9") return i - }(), "--test [--test option --test option]\t"}, + }(), "--test '--test option --test option'\t"}, } func TestIntSliceFlagHelpOutput(t *testing.T) { for _, test := range intSliceFlagTests { - flag := IntSliceFlag{Name: test.name, Value: test.value} + flag := cli.IntSliceFlag{Name: test.name, Value: test.value} output := flag.String() if output != test.expected { @@ -210,10 +179,10 @@ func TestIntSliceFlagHelpOutput(t *testing.T) { } func TestIntSliceFlagWithEnvVarHelpOutput(t *testing.T) { - os.Clearenv() + os.Setenv("APP_SMURF", "42,3") for _, test := range intSliceFlagTests { - flag := IntSliceFlag{Name: test.name, Value: test.value, EnvVar: "APP_SMURF"} + flag := cli.IntSliceFlag{Name: test.name, Value: test.value, EnvVar: "APP_SMURF"} output 
:= flag.String() if !strings.HasSuffix(output, " [$APP_SMURF]") { @@ -226,14 +195,14 @@ var float64FlagTests = []struct { name string expected string }{ - {"help", "--help \"0\"\t"}, - {"h", "-h \"0\"\t"}, + {"help", "--help '0'\t"}, + {"h", "-h '0'\t"}, } func TestFloat64FlagHelpOutput(t *testing.T) { for _, test := range float64FlagTests { - flag := Float64Flag{Name: test.name} + flag := cli.Float64Flag{Name: test.name} output := flag.String() if output != test.expected { @@ -243,10 +212,10 @@ func TestFloat64FlagHelpOutput(t *testing.T) { } func TestFloat64FlagWithEnvVarHelpOutput(t *testing.T) { - os.Clearenv() + os.Setenv("APP_BAZ", "99.4") for _, test := range float64FlagTests { - flag := Float64Flag{Name: test.name, EnvVar: "APP_BAZ"} + flag := cli.Float64Flag{Name: test.name, EnvVar: "APP_BAZ"} output := flag.String() if !strings.HasSuffix(output, " [$APP_BAZ]") { @@ -257,17 +226,18 @@ func TestFloat64FlagWithEnvVarHelpOutput(t *testing.T) { var genericFlagTests = []struct { name string - value Generic + value cli.Generic expected string }{ - {"test", &Parser{"abc", "def"}, "--test \"abc,def\"\ttest flag"}, - {"t", &Parser{"abc", "def"}, "-t \"abc,def\"\ttest flag"}, + {"help", &Parser{}, "--help \t`-help option -help option` "}, + {"h", &Parser{}, "-h \t`-h option -h option` "}, + {"test", &Parser{}, "--test \t`-test option -test option` "}, } func TestGenericFlagHelpOutput(t *testing.T) { for _, test := range genericFlagTests { - flag := GenericFlag{Name: test.name, Value: test.value, Usage: "test flag"} + flag := cli.GenericFlag{Name: test.name} output := flag.String() if output != test.expected { @@ -277,10 +247,10 @@ func TestGenericFlagHelpOutput(t *testing.T) { } func TestGenericFlagWithEnvVarHelpOutput(t *testing.T) { - os.Clearenv() + os.Setenv("APP_ZAP", "3") for _, test := range genericFlagTests { - flag := GenericFlag{Name: test.name, EnvVar: "APP_ZAP"} + flag := cli.GenericFlag{Name: test.name, EnvVar: "APP_ZAP"} output := flag.String() if !strings.HasSuffix(output, " [$APP_ZAP]") { @@ -290,11 +260,11 @@ func TestGenericFlagWithEnvVarHelpOutput(t *testing.T) { } func TestParseMultiString(t *testing.T) { - (&App{ - Flags: []Flag{ - StringFlag{Name: "serve, s"}, + (&cli.App{ + Flags: []cli.Flag{ + cli.StringFlag{Name: "serve, s"}, }, - Action: func(ctx *Context) { + Action: func(ctx *cli.Context) { if ctx.String("serve") != "10" { t.Errorf("main name not set") } @@ -305,50 +275,13 @@ func TestParseMultiString(t *testing.T) { }).Run([]string{"run", "-s", "10"}) } -func TestParseDestinationString(t *testing.T) { - var dest string - a := App{ - Flags: []Flag{ - StringFlag{ - Name: "dest", - Destination: &dest, - }, - }, - Action: func(ctx *Context) { - if dest != "10" { - t.Errorf("expected destination String 10") - } - }, - } - a.Run([]string{"run", "--dest", "10"}) -} - func TestParseMultiStringFromEnv(t *testing.T) { - os.Clearenv() os.Setenv("APP_COUNT", "20") - (&App{ - Flags: []Flag{ - StringFlag{Name: "count, c", EnvVar: "APP_COUNT"}, + (&cli.App{ + Flags: []cli.Flag{ + cli.StringFlag{Name: "count, c", EnvVar: "APP_COUNT"}, }, - Action: func(ctx *Context) { - if ctx.String("count") != "20" { - t.Errorf("main name not set") - } - if ctx.String("c") != "20" { - t.Errorf("short name not set") - } - }, - }).Run([]string{"run"}) -} - -func TestParseMultiStringFromEnvCascade(t *testing.T) { - os.Clearenv() - os.Setenv("APP_COUNT", "20") - (&App{ - Flags: []Flag{ - StringFlag{Name: "count, c", EnvVar: "COMPAT_COUNT,APP_COUNT"}, - }, - Action: func(ctx *Context) { + Action: 
func(ctx *cli.Context) { if ctx.String("count") != "20" { t.Errorf("main name not set") } @@ -360,11 +293,11 @@ func TestParseMultiStringFromEnvCascade(t *testing.T) { } func TestParseMultiStringSlice(t *testing.T) { - (&App{ - Flags: []Flag{ - StringSliceFlag{Name: "serve, s", Value: &StringSlice{}}, + (&cli.App{ + Flags: []cli.Flag{ + cli.StringSliceFlag{Name: "serve, s", Value: &cli.StringSlice{}}, }, - Action: func(ctx *Context) { + Action: func(ctx *cli.Context) { if !reflect.DeepEqual(ctx.StringSlice("serve"), []string{"10", "20"}) { t.Errorf("main name not set") } @@ -376,33 +309,13 @@ func TestParseMultiStringSlice(t *testing.T) { } func TestParseMultiStringSliceFromEnv(t *testing.T) { - os.Clearenv() os.Setenv("APP_INTERVALS", "20,30,40") - (&App{ - Flags: []Flag{ - StringSliceFlag{Name: "intervals, i", Value: &StringSlice{}, EnvVar: "APP_INTERVALS"}, + (&cli.App{ + Flags: []cli.Flag{ + cli.StringSliceFlag{Name: "intervals, i", Value: &cli.StringSlice{}, EnvVar: "APP_INTERVALS"}, }, - Action: func(ctx *Context) { - if !reflect.DeepEqual(ctx.StringSlice("intervals"), []string{"20", "30", "40"}) { - t.Errorf("main name not set from env") - } - if !reflect.DeepEqual(ctx.StringSlice("i"), []string{"20", "30", "40"}) { - t.Errorf("short name not set from env") - } - }, - }).Run([]string{"run"}) -} - -func TestParseMultiStringSliceFromEnvCascade(t *testing.T) { - os.Clearenv() - os.Setenv("APP_INTERVALS", "20,30,40") - - (&App{ - Flags: []Flag{ - StringSliceFlag{Name: "intervals, i", Value: &StringSlice{}, EnvVar: "COMPAT_INTERVALS,APP_INTERVALS"}, - }, - Action: func(ctx *Context) { + Action: func(ctx *cli.Context) { if !reflect.DeepEqual(ctx.StringSlice("intervals"), []string{"20", "30", "40"}) { t.Errorf("main name not set from env") } @@ -414,11 +327,11 @@ func TestParseMultiStringSliceFromEnvCascade(t *testing.T) { } func TestParseMultiInt(t *testing.T) { - a := App{ - Flags: []Flag{ - IntFlag{Name: "serve, s"}, + a := cli.App{ + Flags: []cli.Flag{ + cli.IntFlag{Name: "serve, s"}, }, - Action: func(ctx *Context) { + Action: func(ctx *cli.Context) { if ctx.Int("serve") != 10 { t.Errorf("main name not set") } @@ -430,51 +343,13 @@ func TestParseMultiInt(t *testing.T) { a.Run([]string{"run", "-s", "10"}) } -func TestParseDestinationInt(t *testing.T) { - var dest int - a := App{ - Flags: []Flag{ - IntFlag{ - Name: "dest", - Destination: &dest, - }, - }, - Action: func(ctx *Context) { - if dest != 10 { - t.Errorf("expected destination Int 10") - } - }, - } - a.Run([]string{"run", "--dest", "10"}) -} - func TestParseMultiIntFromEnv(t *testing.T) { - os.Clearenv() os.Setenv("APP_TIMEOUT_SECONDS", "10") - a := App{ - Flags: []Flag{ - IntFlag{Name: "timeout, t", EnvVar: "APP_TIMEOUT_SECONDS"}, + a := cli.App{ + Flags: []cli.Flag{ + cli.IntFlag{Name: "timeout, t", EnvVar: "APP_TIMEOUT_SECONDS"}, }, - Action: func(ctx *Context) { - if ctx.Int("timeout") != 10 { - t.Errorf("main name not set") - } - if ctx.Int("t") != 10 { - t.Errorf("short name not set") - } - }, - } - a.Run([]string{"run"}) -} - -func TestParseMultiIntFromEnvCascade(t *testing.T) { - os.Clearenv() - os.Setenv("APP_TIMEOUT_SECONDS", "10") - a := App{ - Flags: []Flag{ - IntFlag{Name: "timeout, t", EnvVar: "COMPAT_TIMEOUT_SECONDS,APP_TIMEOUT_SECONDS"}, - }, - Action: func(ctx *Context) { + Action: func(ctx *cli.Context) { if ctx.Int("timeout") != 10 { t.Errorf("main name not set") } @@ -487,11 +362,11 @@ func TestParseMultiIntFromEnvCascade(t *testing.T) { } func TestParseMultiIntSlice(t *testing.T) { - (&App{ - Flags: []Flag{ 
- IntSliceFlag{Name: "serve, s", Value: &IntSlice{}}, + (&cli.App{ + Flags: []cli.Flag{ + cli.IntSliceFlag{Name: "serve, s", Value: &cli.IntSlice{}}, }, - Action: func(ctx *Context) { + Action: func(ctx *cli.Context) { if !reflect.DeepEqual(ctx.IntSlice("serve"), []int{10, 20}) { t.Errorf("main name not set") } @@ -503,33 +378,13 @@ func TestParseMultiIntSlice(t *testing.T) { } func TestParseMultiIntSliceFromEnv(t *testing.T) { - os.Clearenv() os.Setenv("APP_INTERVALS", "20,30,40") - (&App{ - Flags: []Flag{ - IntSliceFlag{Name: "intervals, i", Value: &IntSlice{}, EnvVar: "APP_INTERVALS"}, + (&cli.App{ + Flags: []cli.Flag{ + cli.IntSliceFlag{Name: "intervals, i", Value: &cli.IntSlice{}, EnvVar: "APP_INTERVALS"}, }, - Action: func(ctx *Context) { - if !reflect.DeepEqual(ctx.IntSlice("intervals"), []int{20, 30, 40}) { - t.Errorf("main name not set from env") - } - if !reflect.DeepEqual(ctx.IntSlice("i"), []int{20, 30, 40}) { - t.Errorf("short name not set from env") - } - }, - }).Run([]string{"run"}) -} - -func TestParseMultiIntSliceFromEnvCascade(t *testing.T) { - os.Clearenv() - os.Setenv("APP_INTERVALS", "20,30,40") - - (&App{ - Flags: []Flag{ - IntSliceFlag{Name: "intervals, i", Value: &IntSlice{}, EnvVar: "COMPAT_INTERVALS,APP_INTERVALS"}, - }, - Action: func(ctx *Context) { + Action: func(ctx *cli.Context) { if !reflect.DeepEqual(ctx.IntSlice("intervals"), []int{20, 30, 40}) { t.Errorf("main name not set from env") } @@ -541,11 +396,11 @@ func TestParseMultiIntSliceFromEnvCascade(t *testing.T) { } func TestParseMultiFloat64(t *testing.T) { - a := App{ - Flags: []Flag{ - Float64Flag{Name: "serve, s"}, + a := cli.App{ + Flags: []cli.Flag{ + cli.Float64Flag{Name: "serve, s"}, }, - Action: func(ctx *Context) { + Action: func(ctx *cli.Context) { if ctx.Float64("serve") != 10.2 { t.Errorf("main name not set") } @@ -557,51 +412,13 @@ func TestParseMultiFloat64(t *testing.T) { a.Run([]string{"run", "-s", "10.2"}) } -func TestParseDestinationFloat64(t *testing.T) { - var dest float64 - a := App{ - Flags: []Flag{ - Float64Flag{ - Name: "dest", - Destination: &dest, - }, - }, - Action: func(ctx *Context) { - if dest != 10.2 { - t.Errorf("expected destination Float64 10.2") - } - }, - } - a.Run([]string{"run", "--dest", "10.2"}) -} - func TestParseMultiFloat64FromEnv(t *testing.T) { - os.Clearenv() os.Setenv("APP_TIMEOUT_SECONDS", "15.5") - a := App{ - Flags: []Flag{ - Float64Flag{Name: "timeout, t", EnvVar: "APP_TIMEOUT_SECONDS"}, + a := cli.App{ + Flags: []cli.Flag{ + cli.Float64Flag{Name: "timeout, t", EnvVar: "APP_TIMEOUT_SECONDS"}, }, - Action: func(ctx *Context) { - if ctx.Float64("timeout") != 15.5 { - t.Errorf("main name not set") - } - if ctx.Float64("t") != 15.5 { - t.Errorf("short name not set") - } - }, - } - a.Run([]string{"run"}) -} - -func TestParseMultiFloat64FromEnvCascade(t *testing.T) { - os.Clearenv() - os.Setenv("APP_TIMEOUT_SECONDS", "15.5") - a := App{ - Flags: []Flag{ - Float64Flag{Name: "timeout, t", EnvVar: "COMPAT_TIMEOUT_SECONDS,APP_TIMEOUT_SECONDS"}, - }, - Action: func(ctx *Context) { + Action: func(ctx *cli.Context) { if ctx.Float64("timeout") != 15.5 { t.Errorf("main name not set") } @@ -614,11 +431,11 @@ func TestParseMultiFloat64FromEnvCascade(t *testing.T) { } func TestParseMultiBool(t *testing.T) { - a := App{ - Flags: []Flag{ - BoolFlag{Name: "serve, s"}, + a := cli.App{ + Flags: []cli.Flag{ + cli.BoolFlag{Name: "serve, s"}, }, - Action: func(ctx *Context) { + Action: func(ctx *cli.Context) { if ctx.Bool("serve") != true { t.Errorf("main name not set") } @@ 
-630,51 +447,13 @@ func TestParseMultiBool(t *testing.T) { a.Run([]string{"run", "--serve"}) } -func TestParseDestinationBool(t *testing.T) { - var dest bool - a := App{ - Flags: []Flag{ - BoolFlag{ - Name: "dest", - Destination: &dest, - }, - }, - Action: func(ctx *Context) { - if dest != true { - t.Errorf("expected destination Bool true") - } - }, - } - a.Run([]string{"run", "--dest"}) -} - func TestParseMultiBoolFromEnv(t *testing.T) { - os.Clearenv() os.Setenv("APP_DEBUG", "1") - a := App{ - Flags: []Flag{ - BoolFlag{Name: "debug, d", EnvVar: "APP_DEBUG"}, + a := cli.App{ + Flags: []cli.Flag{ + cli.BoolFlag{Name: "debug, d", EnvVar: "APP_DEBUG"}, }, - Action: func(ctx *Context) { - if ctx.Bool("debug") != true { - t.Errorf("main name not set from env") - } - if ctx.Bool("d") != true { - t.Errorf("short name not set from env") - } - }, - } - a.Run([]string{"run"}) -} - -func TestParseMultiBoolFromEnvCascade(t *testing.T) { - os.Clearenv() - os.Setenv("APP_DEBUG", "1") - a := App{ - Flags: []Flag{ - BoolFlag{Name: "debug, d", EnvVar: "COMPAT_DEBUG,APP_DEBUG"}, - }, - Action: func(ctx *Context) { + Action: func(ctx *cli.Context) { if ctx.Bool("debug") != true { t.Errorf("main name not set from env") } @@ -687,11 +466,11 @@ func TestParseMultiBoolFromEnvCascade(t *testing.T) { } func TestParseMultiBoolT(t *testing.T) { - a := App{ - Flags: []Flag{ - BoolTFlag{Name: "serve, s"}, + a := cli.App{ + Flags: []cli.Flag{ + cli.BoolTFlag{Name: "serve, s"}, }, - Action: func(ctx *Context) { + Action: func(ctx *cli.Context) { if ctx.BoolT("serve") != true { t.Errorf("main name not set") } @@ -703,51 +482,13 @@ func TestParseMultiBoolT(t *testing.T) { a.Run([]string{"run", "--serve"}) } -func TestParseDestinationBoolT(t *testing.T) { - var dest bool - a := App{ - Flags: []Flag{ - BoolTFlag{ - Name: "dest", - Destination: &dest, - }, - }, - Action: func(ctx *Context) { - if dest != true { - t.Errorf("expected destination BoolT true") - } - }, - } - a.Run([]string{"run", "--dest"}) -} - func TestParseMultiBoolTFromEnv(t *testing.T) { - os.Clearenv() os.Setenv("APP_DEBUG", "0") - a := App{ - Flags: []Flag{ - BoolTFlag{Name: "debug, d", EnvVar: "APP_DEBUG"}, + a := cli.App{ + Flags: []cli.Flag{ + cli.BoolTFlag{Name: "debug, d", EnvVar: "APP_DEBUG"}, }, - Action: func(ctx *Context) { - if ctx.BoolT("debug") != false { - t.Errorf("main name not set from env") - } - if ctx.BoolT("d") != false { - t.Errorf("short name not set from env") - } - }, - } - a.Run([]string{"run"}) -} - -func TestParseMultiBoolTFromEnvCascade(t *testing.T) { - os.Clearenv() - os.Setenv("APP_DEBUG", "0") - a := App{ - Flags: []Flag{ - BoolTFlag{Name: "debug, d", EnvVar: "COMPAT_DEBUG,APP_DEBUG"}, - }, - Action: func(ctx *Context) { + Action: func(ctx *cli.Context) { if ctx.BoolT("debug") != false { t.Errorf("main name not set from env") } @@ -778,11 +519,11 @@ func (p *Parser) String() string { } func TestParseGeneric(t *testing.T) { - a := App{ - Flags: []Flag{ - GenericFlag{Name: "serve, s", Value: &Parser{}}, + a := cli.App{ + Flags: []cli.Flag{ + cli.GenericFlag{Name: "serve, s", Value: &Parser{}}, }, - Action: func(ctx *Context) { + Action: func(ctx *cli.Context) { if !reflect.DeepEqual(ctx.Generic("serve"), &Parser{"10", "20"}) { t.Errorf("main name not set") } @@ -795,13 +536,12 @@ func TestParseGeneric(t *testing.T) { } func TestParseGenericFromEnv(t *testing.T) { - os.Clearenv() os.Setenv("APP_SERVE", "20,30") - a := App{ - Flags: []Flag{ - GenericFlag{Name: "serve, s", Value: &Parser{}, EnvVar: "APP_SERVE"}, + a := cli.App{ 
+ Flags: []cli.Flag{ + cli.GenericFlag{Name: "serve, s", Value: &Parser{}, EnvVar: "APP_SERVE"}, }, - Action: func(ctx *Context) { + Action: func(ctx *cli.Context) { if !reflect.DeepEqual(ctx.Generic("serve"), &Parser{"20", "30"}) { t.Errorf("main name not set from env") } @@ -812,19 +552,3 @@ func TestParseGenericFromEnv(t *testing.T) { } a.Run([]string{"run"}) } - -func TestParseGenericFromEnvCascade(t *testing.T) { - os.Clearenv() - os.Setenv("APP_FOO", "99,2000") - a := App{ - Flags: []Flag{ - GenericFlag{Name: "foos", Value: &Parser{}, EnvVar: "COMPAT_FOO,APP_FOO"}, - }, - Action: func(ctx *Context) { - if !reflect.DeepEqual(ctx.Generic("foos"), &Parser{"99", "2000"}) { - t.Errorf("value not set from env") - } - }, - } - a.Run([]string{"run"}) -} diff --git a/vendor/github.com/codegangsta/cli/help.go b/vendor/github.com/codegangsta/cli/help.go index a246f63a..ccca0362 100644 --- a/vendor/github.com/codegangsta/cli/help.go +++ b/vendor/github.com/codegangsta/cli/help.go @@ -2,8 +2,7 @@ package cli import ( "fmt" - "io" - "strings" + "os" "text/tabwriter" "text/template" ) @@ -15,36 +14,30 @@ var AppHelpTemplate = `NAME: {{.Name}} - {{.Usage}} USAGE: - {{.HelpName}} {{if .Flags}}[global options]{{end}}{{if .Commands}} command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}} - {{if .Version}} + {{.Name}} {{ if .Flags }}[global options] {{ end }}command{{ if .Flags }} [command options]{{ end }} [arguments...] + VERSION: {{.Version}} - {{end}}{{if len .Authors}} -AUTHOR(S): - {{range .Authors}}{{ . }}{{end}} - {{end}}{{if .Commands}} + COMMANDS: - {{range .Commands}}{{join .Names ", "}}{{ "\t" }}{{.Usage}} - {{end}}{{end}}{{if .Flags}} + {{range .Commands}}{{.Name}}{{with .ShortName}}, {{.}}{{end}}{{ "\t" }}{{.Usage}} + {{end}}{{ if .Flags }} GLOBAL OPTIONS: {{range .Flags}}{{.}} - {{end}}{{end}}{{if .Copyright }} -COPYRIGHT: - {{.Copyright}} - {{end}} + {{end}}{{ end }} ` // The text template for the command help topic. // cli.go uses text/template to render templates. You can // render custom help text by setting this variable. var CommandHelpTemplate = `NAME: - {{.HelpName}} - {{.Usage}} + {{.Name}} - {{.Usage}} USAGE: - {{.HelpName}}{{if .Flags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{if .Description}} + command {{.Name}}{{ if .Flags }} [command options]{{ end }} [arguments...] DESCRIPTION: - {{.Description}}{{end}}{{if .Flags}} + {{.Description}}{{ if .Flags }} OPTIONS: {{range .Flags}}{{.}} @@ -55,24 +48,23 @@ OPTIONS: // cli.go uses text/template to render templates. You can // render custom help text by setting this variable. var SubcommandHelpTemplate = `NAME: - {{.HelpName}} - {{.Usage}} + {{.Name}} - {{.Usage}} USAGE: - {{.HelpName}} command{{if .Flags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}} + {{.Name}} command{{ if .Flags }} [command options]{{ end }} [arguments...] 
COMMANDS: - {{range .Commands}}{{join .Names ", "}}{{ "\t" }}{{.Usage}} - {{end}}{{if .Flags}} + {{range .Commands}}{{.Name}}{{with .ShortName}}, {{.}}{{end}}{{ "\t" }}{{.Usage}} + {{end}}{{ if .Flags }} OPTIONS: {{range .Flags}}{{.}} - {{end}}{{end}} + {{end}}{{ end }} ` var helpCommand = Command{ Name: "help", - Aliases: []string{"h"}, + ShortName: "h", Usage: "Shows a list of commands or help for one command", - ArgsUsage: "[command]", Action: func(c *Context) { args := c.Args() if args.Present() { @@ -85,9 +77,8 @@ var helpCommand = Command{ var helpSubcommand = Command{ Name: "help", - Aliases: []string{"h"}, + ShortName: "h", Usage: "Shows a list of commands or help for one command", - ArgsUsage: "[command]", Action: func(c *Context) { args := c.Args() if args.Present() { @@ -98,61 +89,47 @@ var helpSubcommand = Command{ }, } -// Prints help for the App or Command -type helpPrinter func(w io.Writer, templ string, data interface{}) - -var HelpPrinter helpPrinter = printHelp - -// Prints version for the App -var VersionPrinter = printVersion +// Prints help for the App +var HelpPrinter = printHelp func ShowAppHelp(c *Context) { - HelpPrinter(c.App.Writer, AppHelpTemplate, c.App) + HelpPrinter(AppHelpTemplate, c.App) } // Prints the list of subcommands as the default app completion method func DefaultAppComplete(c *Context) { for _, command := range c.App.Commands { - for _, name := range command.Names() { - fmt.Fprintln(c.App.Writer, name) + fmt.Println(command.Name) + if command.ShortName != "" { + fmt.Println(command.ShortName) } } } // Prints help for the given command -func ShowCommandHelp(ctx *Context, command string) { - // show the subcommand help for a command with subcommands - if command == "" { - HelpPrinter(ctx.App.Writer, SubcommandHelpTemplate, ctx.App) - return - } - - for _, c := range ctx.App.Commands { +func ShowCommandHelp(c *Context, command string) { + for _, c := range c.App.Commands { if c.HasName(command) { - HelpPrinter(ctx.App.Writer, CommandHelpTemplate, c) + HelpPrinter(CommandHelpTemplate, c) return } } - if ctx.App.CommandNotFound != nil { - ctx.App.CommandNotFound(ctx, command) + if c.App.CommandNotFound != nil { + c.App.CommandNotFound(c, command) } else { - fmt.Fprintf(ctx.App.Writer, "No help topic for '%v'\n", command) + fmt.Printf("No help topic for '%v'\n", command) } } // Prints help for the given subcommand func ShowSubcommandHelp(c *Context) { - ShowCommandHelp(c, c.Command.Name) + HelpPrinter(SubcommandHelpTemplate, c.App) } // Prints the version number of the App func ShowVersion(c *Context) { - VersionPrinter(c) -} - -func printVersion(c *Context) { - fmt.Fprintf(c.App.Writer, "%v version %v\n", c.App.Name, c.App.Version) + fmt.Printf("%v version %v\n", c.App.Name, c.App.Version) } // Prints the lists of commands within a given context @@ -171,13 +148,9 @@ func ShowCommandCompletions(ctx *Context, command string) { } } -func printHelp(out io.Writer, templ string, data interface{}) { - funcMap := template.FuncMap{ - "join": strings.Join, - } - - w := tabwriter.NewWriter(out, 0, 8, 1, '\t', 0) - t := template.Must(template.New("help").Funcs(funcMap).Parse(templ)) +func printHelp(templ string, data interface{}) { + w := tabwriter.NewWriter(os.Stdout, 0, 8, 1, '\t', 0) + t := template.Must(template.New("help").Parse(templ)) err := t.Execute(w, data) if err != nil { panic(err) @@ -186,27 +159,21 @@ func printHelp(out io.Writer, templ string, data interface{}) { } func checkVersion(c *Context) bool { - found := false - if VersionFlag.Name != "" { - 
eachName(VersionFlag.Name, func(name string) { - if c.GlobalBool(name) || c.Bool(name) { - found = true - } - }) + if c.GlobalBool("version") { + ShowVersion(c) + return true } - return found + + return false } func checkHelp(c *Context) bool { - found := false - if HelpFlag.Name != "" { - eachName(HelpFlag.Name, func(name string) { - if c.GlobalBool(name) || c.Bool(name) { - found = true - } - }) + if c.GlobalBool("h") || c.GlobalBool("help") { + ShowAppHelp(c) + return true } - return found + + return false } func checkCommandHelp(c *Context, name string) bool { @@ -228,7 +195,7 @@ func checkSubcommandHelp(c *Context) bool { } func checkCompletions(c *Context) bool { - if (c.GlobalBool(BashCompletionFlag.Name) || c.Bool(BashCompletionFlag.Name)) && c.App.EnableBashCompletion { + if c.GlobalBool(BashCompletionFlag.Name) && c.App.EnableBashCompletion { ShowCompletions(c) return true } diff --git a/vendor/github.com/codegangsta/cli/help_test.go b/vendor/github.com/codegangsta/cli/help_test.go deleted file mode 100644 index 350e2633..00000000 --- a/vendor/github.com/codegangsta/cli/help_test.go +++ /dev/null @@ -1,94 +0,0 @@ -package cli - -import ( - "bytes" - "testing" -) - -func Test_ShowAppHelp_NoAuthor(t *testing.T) { - output := new(bytes.Buffer) - app := NewApp() - app.Writer = output - - c := NewContext(app, nil, nil) - - ShowAppHelp(c) - - if bytes.Index(output.Bytes(), []byte("AUTHOR(S):")) != -1 { - t.Errorf("expected\n%snot to include %s", output.String(), "AUTHOR(S):") - } -} - -func Test_ShowAppHelp_NoVersion(t *testing.T) { - output := new(bytes.Buffer) - app := NewApp() - app.Writer = output - - app.Version = "" - - c := NewContext(app, nil, nil) - - ShowAppHelp(c) - - if bytes.Index(output.Bytes(), []byte("VERSION:")) != -1 { - t.Errorf("expected\n%snot to include %s", output.String(), "VERSION:") - } -} - -func Test_Help_Custom_Flags(t *testing.T) { - oldFlag := HelpFlag - defer func() { - HelpFlag = oldFlag - }() - - HelpFlag = BoolFlag{ - Name: "help, x", - Usage: "show help", - } - - app := App{ - Flags: []Flag{ - BoolFlag{Name: "foo, h"}, - }, - Action: func(ctx *Context) { - if ctx.Bool("h") != true { - t.Errorf("custom help flag not set") - } - }, - } - output := new(bytes.Buffer) - app.Writer = output - app.Run([]string{"test", "-h"}) - if output.Len() > 0 { - t.Errorf("unexpected output: %s", output.String()) - } -} - -func Test_Version_Custom_Flags(t *testing.T) { - oldFlag := VersionFlag - defer func() { - VersionFlag = oldFlag - }() - - VersionFlag = BoolFlag{ - Name: "version, V", - Usage: "show version", - } - - app := App{ - Flags: []Flag{ - BoolFlag{Name: "foo, v"}, - }, - Action: func(ctx *Context) { - if ctx.Bool("v") != true { - t.Errorf("custom version flag not set") - } - }, - } - output := new(bytes.Buffer) - app.Writer = output - app.Run([]string{"test", "-v"}) - if output.Len() > 0 { - t.Errorf("unexpected output: %s", output.String()) - } -} diff --git a/vendor/github.com/codegangsta/cli/helpers_test.go b/vendor/github.com/codegangsta/cli/helpers_test.go index b1b7339f..cdc4feb2 100644 --- a/vendor/github.com/codegangsta/cli/helpers_test.go +++ b/vendor/github.com/codegangsta/cli/helpers_test.go @@ -1,4 +1,4 @@ -package cli +package cli_test import ( "reflect" @@ -7,13 +7,13 @@ import ( /* Test Helpers */ func expect(t *testing.T, a interface{}, b interface{}) { - if !reflect.DeepEqual(a, b) { + if a != b { t.Errorf("Expected %v (type %v) - Got %v (type %v)", b, reflect.TypeOf(b), a, reflect.TypeOf(a)) } } func refute(t *testing.T, a interface{}, b 
interface{}) { - if reflect.DeepEqual(a, b) { + if a == b { t.Errorf("Did not expect %v (type %v) - Got %v (type %v)", b, reflect.TypeOf(b), a, reflect.TypeOf(a)) } } diff --git a/vendor/github.com/docker/distribution/.mailmap b/vendor/github.com/docker/distribution/.mailmap index 2c0af060..e4e50ee7 100644 --- a/vendor/github.com/docker/distribution/.mailmap +++ b/vendor/github.com/docker/distribution/.mailmap @@ -2,6 +2,14 @@ Stephen J Day Stephen Day Stephen Day Olivier Gambier Olivier Gambier Brian Bland Brian Bland +Brian Bland Brian Bland Josh Hawn Josh Hawn Richard Scothern Richard Richard Scothern Richard Scothern +Andrew Meredith Andrew Meredith +harche harche +Jessie Frazelle +Sharif Nassar Sharif Nassar +Sven Dowideit Sven Dowideit +Vincent Giersch Vincent Giersch +davidli davidli diff --git a/vendor/github.com/docker/distribution/AUTHORS b/vendor/github.com/docker/distribution/AUTHORS index 5da5d1c6..a44266b0 100644 --- a/vendor/github.com/docker/distribution/AUTHORS +++ b/vendor/github.com/docker/distribution/AUTHORS @@ -1,21 +1,27 @@ Aaron Lehmann +Aaron Vinson Adam Enger Adrian Mouat Ahmet Alp Balkan Alex Chan Alex Elman +amitshukla Amy Lindburg +Andrew Meredith Andrey Kostov Andy Goldstein Anton Tiurin Antonio Mercado Arnaud Porterie +Arthur Baars +Avi Miller Ayose Cazorla BadZen Ben Firshman bin liu Brian Bland burnettk +Carson A Chris Dillon Daisuke Fujita Darren Shepherd @@ -25,55 +31,83 @@ David Lawrence David Verhasselt David Xia davidli +Dejan Golja Derek McGowan Diogo Mónica +DJ Enriquez Donald Huang Doug Davis +farmerworking Florentin Raud Frederick F. Kautz IV +gabriell nascimento +harche Henri Gomez Hu Keping +Hua Wang Ian Babrou +Jack Griffin +Jason Freidman Jeff Nickoloff -Jessie Frazelle +Jessie Frazelle Jianqing Wang Jon Poler +Jonathan Boulle Jordan Liggitt Josh Hawn Julien Fernandez Kelsey Hightower Kenneth Lim +Kenny Leung Li Yi +Liu Hua +Louis Kottmann Luke Carpenter Mary Anthony Matt Bentley +Matt Moore Matt Robenolt Michael Prokop +Miquel Sabaté +Morgan Bauer moxiegirl Nathan Sullivan nevermosby Nghia Tran +Nuutti Kotivuori Oilbeater Olivier Gambier Olivier Jacques Patrick Devine Philip Misiowiec Richard Scothern +Rodolfo Carvalho +Rusty Conover +Sean Boran Sebastiaan van Stijn +Sharif Nassar Shawn Falkner-Horine Shreyas Karnik Simon Thulbourn Spencer Rinehart Stephen J Day +Sungho Moon +Sven Dowideit Sylvain Baubeau +Ted Reed tgic Thomas Sjögren Tianon Gravi Tibor Vass +Tonis Tiigi +Trevor Pounds +Troels Thomsen Vincent Batts Vincent Demeester Vincent Giersch W. Trevor King +weiyuan.yl +xg.song xiekeyang Yann ROBERT yuzou diff --git a/vendor/github.com/docker/distribution/Dockerfile b/vendor/github.com/docker/distribution/Dockerfile index 7a4b3e9e..5329cee7 100644 --- a/vendor/github.com/docker/distribution/Dockerfile +++ b/vendor/github.com/docker/distribution/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.4 +FROM golang:1.5.3 RUN apt-get update && \ apt-get install -y librados-dev apache2-utils && \ diff --git a/vendor/github.com/docker/distribution/MAINTAINERS b/vendor/github.com/docker/distribution/MAINTAINERS index 0abd7d4c..97f415db 100644 --- a/vendor/github.com/docker/distribution/MAINTAINERS +++ b/vendor/github.com/docker/distribution/MAINTAINERS @@ -1,4 +1,63 @@ -Solomon Hykes (@shykes) -Olivier Gambier (@dmp42) -Sam Alba (@samalba) -Stephen Day (@stevvooe) +# Distribution maintainers file +# +# This file describes who runs the docker/distribution project and how. +# This is a living document - if you see something out of date or missing, speak up! 
+# +# It is structured to be consumable by both humans and programs. +# To extract its contents programmatically, use any TOML-compliant parser. +# +# This file is compiled into the MAINTAINERS file in docker/opensource. +# +[Org] + [Org."Core maintainers"] + people = [ + "aaronlehmann", + "dmcgowan", + "dmp42", + "richardscothern", + "shykes", + "stevvooe", + ] + +[people] + +# A reference list of all people associated with the project. +# All other sections should refer to people by their canonical key +# in the people section. + + # ADD YOURSELF HERE IN ALPHABETICAL ORDER + + [people.aaronlehmann] + Name = "Aaron Lehmann" + Email = "aaron.lehmann@docker.com" + GitHub = "aaronlehmann" + + [people.brianbland] + Name = "Brian Bland" + Email = "brian.bland@docker.com" + GitHub = "BrianBland" + + [people.dmcgowan] + Name = "Derek McGowan" + Email = "derek@mcgstyle.net" + GitHub = "dmcgowan" + + [people.dmp42] + Name = "Olivier Gambier" + Email = "olivier@docker.com" + GitHub = "dmp42" + + [people.richardscothern] + Name = "Richard Scothern" + Email = "richard.scothern@gmail.com" + GitHub = "richardscothern" + + [people.shykes] + Name = "Solomon Hykes" + Email = "solomon@docker.com" + GitHub = "shykes" + + [people.stevvooe] + Name = "Stephen Day" + Email = "stephen.day@docker.com" + GitHub = "stevvooe" diff --git a/vendor/github.com/docker/distribution/Makefile b/vendor/github.com/docker/distribution/Makefile index 8b2d8fde..4604a39a 100644 --- a/vendor/github.com/docker/distribution/Makefile +++ b/vendor/github.com/docker/distribution/Makefile @@ -11,7 +11,7 @@ ifeq (${DISABLE_OPTIMIZATION},true) VERSION:="$(VERSION)-noopt" endif -GO_LDFLAGS=-ldflags "-X `go list ./version`.Version $(VERSION)" +GO_LDFLAGS=-ldflags "-X `go list ./version`.Version=$(VERSION)" .PHONY: clean all fmt vet lint build test binaries .DEFAULT: default diff --git a/vendor/github.com/docker/distribution/README.md b/vendor/github.com/docker/distribution/README.md index f7521669..e8262133 100644 --- a/vendor/github.com/docker/distribution/README.md +++ b/vendor/github.com/docker/distribution/README.md @@ -3,20 +3,23 @@ The Docker toolset to pack, ship, store, and deliver content. This repository's main product is the Docker Registry 2.0 implementation -for storing and distributing Docker images. It supersedes the [docker/docker- -registry](https://github.com/docker/docker-registry) project with a new API -design, focused around security and performance. +for storing and distributing Docker images. It supersedes the +[docker/docker-registry](https://github.com/docker/docker-registry) +project with a new API design, focused around security and performance. +[![Circle CI](https://circleci.com/gh/docker/distribution/tree/master.svg?style=svg)](https://circleci.com/gh/docker/distribution/tree/master) +[![GoDoc](https://godoc.org/github.com/docker/distribution?status.svg)](https://godoc.org/github.com/docker/distribution) + This repository contains the following components: |**Component** |Description | |--------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | **registry** | An implementation of the [Docker Registry HTTP API V2](docs/spec/api.md) for use with docker 1.6+. | -| **libraries** | A rich set of libraries for interacting with,distribution components. Please see [godoc](http://godoc.org/github.com/docker/distribution) for details. 
**Note**: These libraries are **unstable**. | +| **libraries** | A rich set of libraries for interacting with,distribution components. Please see [godoc](https://godoc.org/github.com/docker/distribution) for details. **Note**: These libraries are **unstable**. | | **specifications** | _Distribution_ related specifications are available in [docs/spec](docs/spec) | -| **documentation** | Docker's full documentation set is available at [docs.docker.com](http://docs.docker.com). This repository [contains the subset](docs/index.md) related just to the registry. | +| **documentation** | Docker's full documentation set is available at [docs.docker.com](https://docs.docker.com). This repository [contains the subset](docs/index.md) related just to the registry. | ### How does this integrate with Docker engine? @@ -55,7 +58,7 @@ For information on upcoming functionality, please see [ROADMAP.md](ROADMAP.md). ### Who needs to deploy a registry? By default, Docker users pull images from Docker's public registry instance. -[Installing Docker](http://docs.docker.com/installation) gives users this +[Installing Docker](https://docs.docker.com/engine/installation/) gives users this ability. Users can also push images to a repository on Docker's public registry, if they have a [Docker Hub](https://hub.docker.com/) account. diff --git a/vendor/github.com/docker/distribution/blobs.go b/vendor/github.com/docker/distribution/blobs.go index 2087d0f9..ce43ea2e 100644 --- a/vendor/github.com/docker/distribution/blobs.go +++ b/vendor/github.com/docker/distribution/blobs.go @@ -9,6 +9,7 @@ import ( "github.com/docker/distribution/context" "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" ) var ( @@ -40,6 +41,18 @@ func (err ErrBlobInvalidDigest) Error() string { err.Digest, err.Reason) } +// ErrBlobMounted returned when a blob is mounted from another repository +// instead of initiating an upload session. +type ErrBlobMounted struct { + From reference.Canonical + Descriptor Descriptor +} + +func (err ErrBlobMounted) Error() string { + return fmt.Sprintf("blob mounted from: %v to: %v", + err.From, err.Descriptor) +} + // Descriptor describes targeted content. Used in conjunction with a blob // store, a descriptor can be used to fetch, store and target any kind of // blob. The struct also describes the wire protocol format. Fields should @@ -61,6 +74,15 @@ type Descriptor struct { // depend on the simplicity of this type. } +// Descriptor returns the descriptor, to make it satisfy the Describable +// interface. Note that implementations of Describable are generally objects +// which can be described, not simply descriptors; this exception is in place +// to make it more convenient to pass actual descriptors to functions that +// expect Describable objects. +func (d Descriptor) Descriptor() Descriptor { + return d +} + // BlobStatter makes blob descriptors available by digest. The service may // provide a descriptor of a different digest if the provided digest is not // canonical. @@ -142,12 +164,21 @@ type BlobIngester interface { // returned handle can be written to and later resumed using an opaque // identifier. With this approach, one can Close and Resume a BlobWriter // multiple times until the BlobWriter is committed or cancelled. - Create(ctx context.Context) (BlobWriter, error) + Create(ctx context.Context, options ...BlobCreateOption) (BlobWriter, error) // Resume attempts to resume a write to a blob, identified by an id. 
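Aside for reviewers: the Create signature above now accepts variadic options through the BlobCreateOption interface declared a few lines further down. The sketch below is illustrative only (the noopCreateOption type and its behaviour are invented, not part of the vendored code); it shows the shape an option implementation takes and how a caller would hand it to Create.

package main

import "fmt"

// BlobCreateOption mirrors the interface added in blobs.go: a single Apply
// method that a BlobIngester may call with an implementation-specific value.
type BlobCreateOption interface {
	Apply(interface{}) error
}

// noopCreateOption is a hypothetical option that accepts whatever it is
// given; a real option would type-assert v to the ingester's internal
// create-options struct and mutate it.
type noopCreateOption struct{}

func (noopCreateOption) Apply(v interface{}) error {
	fmt.Printf("applying option to %T\n", v)
	return nil
}

func main() {
	// A caller would pass the option straight to Create, e.g.
	//   w, err := blobs.Create(ctx, noopCreateOption{})
	// Here we only exercise Apply directly.
	var opt BlobCreateOption = noopCreateOption{}
	_ = opt.Apply(struct{}{})
}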
Resume(ctx context.Context, id string) (BlobWriter, error) } +// BlobCreateOption is a general extensible function argument for blob creation +// methods. A BlobIngester may choose to honor any or none of the given +// BlobCreateOptions, which can be specific to the implementation of the +// BlobIngester receiving them. +// TODO (brianbland): unify this with ManifestServiceOption in the future +type BlobCreateOption interface { + Apply(interface{}) error +} + // BlobWriter provides a handle for inserting data into a blob store. // Instances should be obtained from BlobWriteService.Writer and // BlobWriteService.Resume. If supported by the store, a writer can be diff --git a/vendor/github.com/docker/distribution/circle.yml b/vendor/github.com/docker/distribution/circle.yml index 4716eafa..e1995d4b 100644 --- a/vendor/github.com/docker/distribution/circle.yml +++ b/vendor/github.com/docker/distribution/circle.yml @@ -6,21 +6,19 @@ machine: # Install ceph to test rados driver & create pool - sudo -i ~/distribution/contrib/ceph/ci-setup.sh - ceph osd pool create docker-distribution 1 + # Install codecov for coverage + - pip install --user codecov post: - # Install many go versions - # - gvm install go1.3.3 -B --name=old - - gvm install go1.4.2 -B --name=stable - # - gvm install tip --name=bleed + # go + - gvm install go1.5.3 --prefer-binary --name=stable environment: # Convenient shortcuts to "common" locations CHECKOUT: /home/ubuntu/$CIRCLE_PROJECT_REPONAME BASE_DIR: src/github.com/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME # Trick circle brainflat "no absolute path" behavior - BASE_OLD: ../../../$HOME/.gvm/pkgsets/old/global/$BASE_DIR BASE_STABLE: ../../../$HOME/.gvm/pkgsets/stable/global/$BASE_DIR - # BASE_BLEED: ../../../$HOME/.gvm/pkgsets/bleed/global/$BASE_DIR DOCKER_BUILDTAGS: "include_rados include_oss include_gcs" # Workaround Circle parsing dumb bugs and/or YAML wonkyness CIRCLE_PAIN: "mode: set" @@ -34,46 +32,27 @@ machine: dependencies: pre: # Copy the code to the gopath of all go versions - # - > - # gvm use old && - # mkdir -p "$(dirname $BASE_OLD)" && - # cp -R "$CHECKOUT" "$BASE_OLD" - - > gvm use stable && mkdir -p "$(dirname $BASE_STABLE)" && cp -R "$CHECKOUT" "$BASE_STABLE" - # - > - # gvm use bleed && - # mkdir -p "$(dirname $BASE_BLEED)" && - # cp -R "$CHECKOUT" "$BASE_BLEED" - override: # Install dependencies for every copied clone/go version - # - gvm use old && go get github.com/tools/godep: - # pwd: $BASE_OLD - - gvm use stable && go get github.com/tools/godep: pwd: $BASE_STABLE - # - gvm use bleed && go get github.com/tools/godep: - # pwd: $BASE_BLEED - post: # For the stable go version, additionally install linting tools - > gvm use stable && go get github.com/axw/gocov/gocov github.com/golang/lint/golint - # Disabling goveralls for now - # go get github.com/axw/gocov/gocov github.com/mattn/goveralls github.com/golang/lint/golint test: pre: # Output the go versions we are going to test # - gvm use old && go version - gvm use stable && go version - # - gvm use bleed && go version # First thing: build everything. This will catch compile errors, and it's # also necessary for go vet to work properly (see #807). @@ -84,7 +63,7 @@ test: - gvm use stable && test -z "$(gofmt -s -l . 
| grep -v Godeps/_workspace/src/ | tee /dev/stderr)": pwd: $BASE_STABLE - # VET + # VET - gvm use stable && go vet ./...: pwd: $BASE_STABLE @@ -93,33 +72,17 @@ test: pwd: $BASE_STABLE override: - # Test every version we have (but stable) - # - gvm use old; godep go test -test.v -test.short ./...: - # timeout: 600 - # pwd: $BASE_OLD - - # - gvm use bleed; go test -test.v -test.short ./...: - # timeout: 600 - # pwd: $BASE_BLEED - # Test stable, and report - # Preset the goverall report file - - echo "$CIRCLE_PAIN" > ~/goverage.report - - gvm use stable; go list ./... | xargs -L 1 -I{} rm -f $GOPATH/src/{}/coverage.out: - pwd: $BASE_STABLE - - gvm use stable; go list -tags "$DOCKER_BUILDTAGS" ./... | xargs -L 1 -I{} godep go test -tags "$DOCKER_BUILDTAGS" -test.short -coverprofile=$GOPATH/src/{}/coverage.out {}: - timeout: 600 - pwd: $BASE_STABLE + - gvm use stable; export ROOT_PACKAGE=$(go list .); go list -tags "$DOCKER_BUILDTAGS" ./... | xargs -L 1 -I{} bash -c 'export PACKAGE={}; godep go test -tags "$DOCKER_BUILDTAGS" -test.short -coverprofile=$GOPATH/src/$PACKAGE/coverage.out -coverpkg=$(./coverpkg.sh $PACKAGE $ROOT_PACKAGE) $PACKAGE': + timeout: 600 + pwd: $BASE_STABLE post: - # Aggregate and report to coveralls - - gvm use stable; go list ./... | xargs -L 1 -I{} cat "$GOPATH/src/{}/coverage.out" | grep -v "$CIRCLE_PAIN" >> ~/goverage.report: + # Report to codecov + - bash <(curl -s https://codecov.io/bash): pwd: $BASE_STABLE -# - gvm use stable; goveralls -service circleci -coverprofile=/home/ubuntu/goverage.report -repotoken $COVERALLS_TOKEN: -# pwd: $BASE_STABLE ## Notes - # Disabled coveralls reporting: build breaking sending coverage data to coveralls # Disabled the -race detector due to massive memory usage. # Do we want these as well? # - go get code.google.com/p/go.tools/cmd/goimports diff --git a/vendor/github.com/docker/distribution/coverpkg.sh b/vendor/github.com/docker/distribution/coverpkg.sh new file mode 100755 index 00000000..7ee751ab --- /dev/null +++ b/vendor/github.com/docker/distribution/coverpkg.sh @@ -0,0 +1,7 @@ +#!/usr/bin/env bash +# Given a subpackage and the containing package, figures out which packages +# need to be passed to `go test -coverpkg`: this includes all of the +# subpackage's dependencies within the containing package, as well as the +# subpackage itself. +DEPENDENCIES="$(go list -f $'{{range $f := .Deps}}{{$f}}\n{{end}}' ${1} | grep ${2})" +echo "${1} ${DEPENDENCIES}" | xargs echo -n | tr ' ' ',' diff --git a/vendor/github.com/docker/distribution/digest/digest.go b/vendor/github.com/docker/distribution/digest/digest.go index a0221216..31d821bb 100644 --- a/vendor/github.com/docker/distribution/digest/digest.go +++ b/vendor/github.com/docker/distribution/digest/digest.go @@ -1,21 +1,14 @@ package digest import ( - "bytes" "fmt" "hash" "io" - "io/ioutil" "regexp" "strings" - - "github.com/docker/docker/pkg/tarsum" ) const ( - // DigestTarSumV1EmptyTar is the digest for the empty tar file. - DigestTarSumV1EmptyTar = "tarsum.v1+sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" - // DigestSha256EmptyTar is the canonical sha256 digest of empty data DigestSha256EmptyTar = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" ) @@ -29,18 +22,21 @@ const ( // // sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc // -// More important for this code base, this type is compatible with tarsum -// digests. 
For example, the following would be a valid Digest: -// -// tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b -// // This allows to abstract the digest behind this type and work only in those // terms. type Digest string // NewDigest returns a Digest from alg and a hash.Hash object. func NewDigest(alg Algorithm, h hash.Hash) Digest { - return Digest(fmt.Sprintf("%s:%x", alg, h.Sum(nil))) + return NewDigestFromBytes(alg, h.Sum(nil)) +} + +// NewDigestFromBytes returns a new digest from the byte contents of p. +// Typically, this can come from hash.Hash.Sum(...) or xxx.SumXXX(...) +// functions. This is also useful for rebuilding digests from binary +// serializations. +func NewDigestFromBytes(alg Algorithm, p []byte) Digest { + return Digest(fmt.Sprintf("%s:%x", alg, p)) } // NewDigestFromHex returns a Digest from alg and a the hex encoded digest. @@ -58,6 +54,9 @@ var ( // ErrDigestInvalidFormat returned when digest format invalid. ErrDigestInvalidFormat = fmt.Errorf("invalid checksum digest format") + // ErrDigestInvalidLength returned when digest has invalid length. + ErrDigestInvalidLength = fmt.Errorf("invalid checksum digest length") + // ErrDigestUnsupported returned when the digest algorithm is unsupported. ErrDigestUnsupported = fmt.Errorf("unsupported digest algorithm") ) @@ -76,41 +75,15 @@ func FromReader(rd io.Reader) (Digest, error) { return Canonical.FromReader(rd) } -// FromTarArchive produces a tarsum digest from reader rd. -func FromTarArchive(rd io.Reader) (Digest, error) { - ts, err := tarsum.NewTarSum(rd, true, tarsum.Version1) - if err != nil { - return "", err - } - - if _, err := io.Copy(ioutil.Discard, ts); err != nil { - return "", err - } - - d, err := ParseDigest(ts.Sum(nil)) - if err != nil { - return "", err - } - - return d, nil -} - // FromBytes digests the input and returns a Digest. -func FromBytes(p []byte) (Digest, error) { - return FromReader(bytes.NewReader(p)) +func FromBytes(p []byte) Digest { + return Canonical.FromBytes(p) } // Validate checks that the contents of d is a valid digest, returning an // error if not. 
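Aside for reviewers: with this change FromBytes returns a Digest directly instead of a (Digest, error) pair, and NewDigestFromBytes builds a digest from an already-computed sum. The standard-library sketch below deliberately avoids importing the vendored package (the payload is made up); it shows the canonical format of algorithm name, colon, and hex-encoded sum, with the call-site simplification noted in a comment.

package main

import (
	"crypto/sha256"
	"fmt"
)

func main() {
	p := []byte("hello world")

	// The canonical form the digest package produces: the algorithm name,
	// a colon, then the hex-encoded sha256 sum of the content.
	d := fmt.Sprintf("sha256:%x", sha256.Sum256(p))
	fmt.Println(d)

	// Call sites of the vendored package drop the error with this bump:
	//   d := digest.FromBytes(p) // previously: d, err := digest.FromBytes(p)
}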
func (d Digest) Validate() error { s := string(d) - // Common case will be tarsum - _, err := ParseTarSum(s) - if err == nil { - return nil - } - - // Continue on for general parser if !DigestRegexpAnchored.MatchString(s) { return ErrDigestInvalidFormat @@ -126,8 +99,11 @@ func (d Digest) Validate() error { return ErrDigestInvalidFormat } - switch Algorithm(s[:i]) { + switch algorithm := Algorithm(s[:i]); algorithm { case SHA256, SHA384, SHA512: + if algorithm.Size()*2 != len(s[i+1:]) { + return ErrDigestInvalidLength + } break default: return ErrDigestUnsupported diff --git a/vendor/github.com/docker/distribution/digest/digest_test.go b/vendor/github.com/docker/distribution/digest/digest_test.go index 41c8bee8..afb4ebf6 100644 --- a/vendor/github.com/docker/distribution/digest/digest_test.go +++ b/vendor/github.com/docker/distribution/digest/digest_test.go @@ -1,8 +1,6 @@ package digest import ( - "bytes" - "io" "testing" ) @@ -13,21 +11,6 @@ func TestParseDigest(t *testing.T) { algorithm Algorithm hex string }{ - { - input: "tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", - algorithm: "tarsum+sha256", - hex: "e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", - }, - { - input: "tarsum.dev+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", - algorithm: "tarsum.dev+sha256", - hex: "e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", - }, - { - input: "tarsum.v1+sha256:220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e", - algorithm: "tarsum.v1+sha256", - hex: "220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e", - }, { input: "sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", algorithm: "sha256", @@ -53,6 +36,16 @@ func TestParseDigest(t *testing.T) { input: "sha256:d41d8cd98f00b204e9800m98ecf8427e", err: ErrDigestInvalidFormat, }, + { + // too short + input: "sha256:abcdef0123456789", + err: ErrDigestInvalidLength, + }, + { + // too short (from different algorithm) + input: "sha512:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789", + err: ErrDigestInvalidLength, + }, { input: "foo:d41d8cd98f00b204e9800998ecf8427e", err: ErrDigestUnsupported, @@ -87,25 +80,3 @@ func TestParseDigest(t *testing.T) { } } } - -// A few test cases used to fix behavior we expect in storage backend. - -func TestFromTarArchiveZeroLength(t *testing.T) { - checkTarsumDigest(t, "zero-length archive", bytes.NewReader([]byte{}), DigestTarSumV1EmptyTar) -} - -func TestFromTarArchiveEmptyTar(t *testing.T) { - // String of 1024 zeros is a valid, empty tar file. 
- checkTarsumDigest(t, "1024 zero bytes", bytes.NewReader(bytes.Repeat([]byte("\x00"), 1024)), DigestTarSumV1EmptyTar) -} - -func checkTarsumDigest(t *testing.T, msg string, rd io.Reader, expected Digest) { - dgst, err := FromTarArchive(rd) - if err != nil { - t.Fatalf("unexpected error digesting %s: %v", msg, err) - } - - if dgst != expected { - t.Fatalf("unexpected digest for %s: %q != %q", msg, dgst, expected) - } -} diff --git a/vendor/github.com/docker/distribution/digest/digester.go b/vendor/github.com/docker/distribution/digest/digester.go index 4f03e189..f3105a45 100644 --- a/vendor/github.com/docker/distribution/digest/digester.go +++ b/vendor/github.com/docker/distribution/digest/digester.go @@ -2,6 +2,7 @@ package digest import ( "crypto" + "fmt" "hash" "io" ) @@ -13,10 +14,9 @@ type Algorithm string // supported digest types const ( - SHA256 Algorithm = "sha256" // sha256 with hex encoding - SHA384 Algorithm = "sha384" // sha384 with hex encoding - SHA512 Algorithm = "sha512" // sha512 with hex encoding - TarsumV1SHA256 Algorithm = "tarsum+v1+sha256" // supported tarsum version, verification only + SHA256 Algorithm = "sha256" // sha256 with hex encoding + SHA384 Algorithm = "sha384" // sha384 with hex encoding + SHA512 Algorithm = "sha512" // sha512 with hex encoding // Canonical is the primary digest algorithm used with the distribution // project. Other digests may be used but this one is the primary storage @@ -54,6 +54,15 @@ func (a Algorithm) String() string { return string(a) } +// Size returns number of bytes returned by the hash. +func (a Algorithm) Size() int { + h, ok := algorithms[a] + if !ok { + return 0 + } + return h.Size() +} + // Set implemented to allow use of Algorithm as a command line flag. func (a *Algorithm) Set(value string) error { if value == "" { @@ -76,11 +85,18 @@ func (a Algorithm) New() Digester { } } -// Hash returns a new hash as used by the algorithm. If not available, nil is -// returned. Make sure to check Available before calling. +// Hash returns a new hash as used by the algorithm. If not available, the +// method will panic. Check Algorithm.Available() before calling. func (a Algorithm) Hash() hash.Hash { if !a.Available() { - return nil + // NOTE(stevvooe): A missing hash is usually a programming error that + // must be resolved at compile time. We don't import in the digest + // package to allow users to choose their hash implementation (such as + // when using stevvooe/resumable or a hardware accelerated package). + // + // Applications that may want to resolve the hash at runtime should + // call Algorithm.Available before call Algorithm.Hash(). + panic(fmt.Sprintf("%v not available (make sure it is imported)", a)) } return algorithms[a].New() @@ -97,6 +113,22 @@ func (a Algorithm) FromReader(rd io.Reader) (Digest, error) { return digester.Digest(), nil } +// FromBytes digests the input and returns a Digest. +func (a Algorithm) FromBytes(p []byte) Digest { + digester := a.New() + + if _, err := digester.Hash().Write(p); err != nil { + // Writes to a Hash should never fail. None of the existing + // hash implementations in the stdlib or hashes vendored + // here can return errors from Write. Having a panic in this + // condition instead of having FromBytes return an error value + // avoids unnecessary error handling paths in all callers. 
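Aside for reviewers: Algorithm.Hash now panics instead of returning nil when an algorithm's implementation was never linked into the binary, and FromBytes panics if a hash write fails (which the standard hashes never do). The sketch below mirrors the guard callers are expected to use, but against the standard crypto package rather than the vendored digest package; the input string is arbitrary.

package main

import (
	"crypto"
	_ "crypto/sha256" // registers SHA-256; without this import Available() reports false
	"fmt"
)

func main() {
	alg := crypto.SHA256

	// Guard before asking for a hash: New (like Algorithm.Hash here) panics
	// when the implementation was never imported into the binary.
	if !alg.Available() {
		fmt.Println("sha256 implementation not linked in")
		return
	}

	h := alg.New()
	// Writes to a standard hash never return an error, which is why the
	// panic branch in FromBytes above is effectively unreachable.
	h.Write([]byte("some content"))
	fmt.Printf("%x (digest size: %d bytes)\n", h.Sum(nil), alg.Size())
}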
+ panic("write to hash function returned error: " + err.Error()) + } + + return digester.Digest() +} + // TODO(stevvooe): Allow resolution of verifiers using the digest type and // this registration system. diff --git a/vendor/github.com/docker/distribution/digest/doc.go b/vendor/github.com/docker/distribution/digest/doc.go index 278c50e0..f64b0db3 100644 --- a/vendor/github.com/docker/distribution/digest/doc.go +++ b/vendor/github.com/docker/distribution/digest/doc.go @@ -1,7 +1,7 @@ // Package digest provides a generalized type to opaquely represent message // digests and their operations within the registry. The Digest type is // designed to serve as a flexible identifier in a content-addressable system. -// More importantly, it provides tools and wrappers to work with tarsums and +// More importantly, it provides tools and wrappers to work with // hash.Hash-based digests with little effort. // // Basics @@ -16,17 +16,7 @@ // sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc // // In this case, the string "sha256" is the algorithm and the hex bytes are -// the "digest". A tarsum example will be more illustrative of the use case -// involved in the registry: -// -// tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b -// -// For this, we consider the algorithm to be "tarsum+sha256". Prudent -// applications will favor the ParseDigest function to verify the format over -// using simple type casts. However, a normal string can be cast as a digest -// with a simple type conversion: -// -// Digest("tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b") +// the "digest". // // Because the Digest type is simply a string, once a valid Digest is // obtained, comparisons are cheap, quick and simple to express with the diff --git a/vendor/github.com/docker/distribution/digest/set_test.go b/vendor/github.com/docker/distribution/digest/set_test.go index 0c0f650d..e9dab879 100644 --- a/vendor/github.com/docker/distribution/digest/set_test.go +++ b/vendor/github.com/docker/distribution/digest/set_test.go @@ -15,14 +15,14 @@ func assertEqualDigests(t *testing.T, d1, d2 Digest) { func TestLookup(t *testing.T) { digests := []Digest{ - "sha256:12345", - "sha256:1234", - "sha256:12346", - "sha256:54321", - "sha256:65431", - "sha256:64321", - "sha256:65421", - "sha256:65321", + "sha256:1234511111111111111111111111111111111111111111111111111111111111", + "sha256:1234111111111111111111111111111111111111111111111111111111111111", + "sha256:1234611111111111111111111111111111111111111111111111111111111111", + "sha256:5432111111111111111111111111111111111111111111111111111111111111", + "sha256:6543111111111111111111111111111111111111111111111111111111111111", + "sha256:6432111111111111111111111111111111111111111111111111111111111111", + "sha256:6542111111111111111111111111111111111111111111111111111111111111", + "sha256:6532111111111111111111111111111111111111111111111111111111111111", } dset := NewSet() @@ -55,10 +55,12 @@ func TestLookup(t *testing.T) { } dgst, err = dset.Lookup("sha256:1234") - if err != nil { + if err == nil { + t.Fatal("Expected ambiguous error looking up: sha256:1234") + } + if err != ErrDigestAmbiguous { t.Fatal(err) } - assertEqualDigests(t, dgst, digests[1]) dgst, err = dset.Lookup("sha256:12345") if err != nil { @@ -87,14 +89,14 @@ func TestLookup(t *testing.T) { func TestAddDuplication(t *testing.T) { digests := []Digest{ - "sha256:1234", - "sha256:12345", - "sha256:12346", - "sha256:54321", - "sha256:65431", - 
"sha512:65431", - "sha512:65421", - "sha512:65321", + "sha256:1234111111111111111111111111111111111111111111111111111111111111", + "sha256:1234511111111111111111111111111111111111111111111111111111111111", + "sha256:1234611111111111111111111111111111111111111111111111111111111111", + "sha256:5432111111111111111111111111111111111111111111111111111111111111", + "sha256:6543111111111111111111111111111111111111111111111111111111111111", + "sha512:65431111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111", + "sha512:65421111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111", + "sha512:65321111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111", } dset := NewSet() @@ -108,7 +110,7 @@ func TestAddDuplication(t *testing.T) { t.Fatal("Invalid dset size") } - if err := dset.Add(Digest("sha256:12345")); err != nil { + if err := dset.Add(Digest("sha256:1234511111111111111111111111111111111111111111111111111111111111")); err != nil { t.Fatal(err) } @@ -116,7 +118,7 @@ func TestAddDuplication(t *testing.T) { t.Fatal("Duplicate digest insert allowed") } - if err := dset.Add(Digest("sha384:12345")); err != nil { + if err := dset.Add(Digest("sha384:123451111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111")); err != nil { t.Fatal(err) } @@ -193,14 +195,14 @@ func assertEqualShort(t *testing.T, actual, expected string) { func TestShortCodeTable(t *testing.T) { digests := []Digest{ - "sha256:1234", - "sha256:12345", - "sha256:12346", - "sha256:54321", - "sha256:65431", - "sha256:64321", - "sha256:65421", - "sha256:65321", + "sha256:1234111111111111111111111111111111111111111111111111111111111111", + "sha256:1234511111111111111111111111111111111111111111111111111111111111", + "sha256:1234611111111111111111111111111111111111111111111111111111111111", + "sha256:5432111111111111111111111111111111111111111111111111111111111111", + "sha256:6543111111111111111111111111111111111111111111111111111111111111", + "sha256:6432111111111111111111111111111111111111111111111111111111111111", + "sha256:6542111111111111111111111111111111111111111111111111111111111111", + "sha256:6532111111111111111111111111111111111111111111111111111111111111", } dset := NewSet() @@ -215,10 +217,9 @@ func TestShortCodeTable(t *testing.T) { if len(dump) < len(digests) { t.Fatalf("Error unexpected size: %d, expecting %d", len(dump), len(digests)) } - - assertEqualShort(t, dump[digests[0]], "sha256:1234") - assertEqualShort(t, dump[digests[1]], "sha256:12345") - assertEqualShort(t, dump[digests[2]], "sha256:12346") + assertEqualShort(t, dump[digests[0]], "12341") + assertEqualShort(t, dump[digests[1]], "12345") + assertEqualShort(t, dump[digests[2]], "12346") assertEqualShort(t, dump[digests[3]], "54") assertEqualShort(t, dump[digests[4]], "6543") assertEqualShort(t, dump[digests[5]], "64") diff --git a/vendor/github.com/docker/distribution/digest/tarsum.go b/vendor/github.com/docker/distribution/digest/tarsum.go deleted file mode 100644 index 9effeb2e..00000000 --- a/vendor/github.com/docker/distribution/digest/tarsum.go +++ /dev/null @@ -1,70 +0,0 @@ -package digest - -import ( - "fmt" - - "regexp" -) - -// TarsumRegexp defines a regular expression to match tarsum identifiers. 
-var TarsumRegexp = regexp.MustCompile("tarsum(?:.[a-z0-9]+)?\\+[a-zA-Z0-9]+:[A-Fa-f0-9]+") - -// TarsumRegexpCapturing defines a regular expression to match tarsum identifiers with -// capture groups corresponding to each component. -var TarsumRegexpCapturing = regexp.MustCompile("(tarsum)(.([a-z0-9]+))?\\+([a-zA-Z0-9]+):([A-Fa-f0-9]+)") - -// TarSumInfo contains information about a parsed tarsum. -type TarSumInfo struct { - // Version contains the version of the tarsum. - Version string - - // Algorithm contains the algorithm for the final digest - Algorithm string - - // Digest contains the hex-encoded digest. - Digest string -} - -// InvalidTarSumError provides informations about a TarSum that cannot be parsed -// by ParseTarSum. -type InvalidTarSumError string - -func (e InvalidTarSumError) Error() string { - return fmt.Sprintf("invalid tarsum: %q", string(e)) -} - -// ParseTarSum parses a tarsum string into its components of interest. For -// example, this method may receive the tarsum in the following format: -// -// tarsum.v1+sha256:220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e -// -// The function will return the following: -// -// TarSumInfo{ -// Version: "v1", -// Algorithm: "sha256", -// Digest: "220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e", -// } -// -func ParseTarSum(tarSum string) (tsi TarSumInfo, err error) { - components := TarsumRegexpCapturing.FindStringSubmatch(tarSum) - - if len(components) != 1+TarsumRegexpCapturing.NumSubexp() { - return TarSumInfo{}, InvalidTarSumError(tarSum) - } - - return TarSumInfo{ - Version: components[3], - Algorithm: components[4], - Digest: components[5], - }, nil -} - -// String returns the valid, string representation of the tarsum info. -func (tsi TarSumInfo) String() string { - if tsi.Version == "" { - return fmt.Sprintf("tarsum+%s:%s", tsi.Algorithm, tsi.Digest) - } - - return fmt.Sprintf("tarsum.%s+%s:%s", tsi.Version, tsi.Algorithm, tsi.Digest) -} diff --git a/vendor/github.com/docker/distribution/digest/tarsum_test.go b/vendor/github.com/docker/distribution/digest/tarsum_test.go deleted file mode 100644 index 894c25ab..00000000 --- a/vendor/github.com/docker/distribution/digest/tarsum_test.go +++ /dev/null @@ -1,79 +0,0 @@ -package digest - -import ( - "reflect" - "testing" -) - -func TestParseTarSumComponents(t *testing.T) { - for _, testcase := range []struct { - input string - expected TarSumInfo - err error - }{ - { - input: "tarsum.v1+sha256:220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e", - expected: TarSumInfo{ - Version: "v1", - Algorithm: "sha256", - Digest: "220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e", - }, - }, - { - input: "", - err: InvalidTarSumError(""), - }, - { - input: "purejunk", - err: InvalidTarSumError("purejunk"), - }, - { - input: "tarsum.v23+test:12341234123412341effefefe", - expected: TarSumInfo{ - Version: "v23", - Algorithm: "test", - Digest: "12341234123412341effefefe", - }, - }, - - // The following test cases are ported from docker core - { - // Version 0 tarsum - input: "tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", - expected: TarSumInfo{ - Algorithm: "sha256", - Digest: "e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", - }, - }, - { - // Dev version tarsum - input: "tarsum.dev+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", - expected: TarSumInfo{ - Version: "dev", - Algorithm: "sha256", - Digest: 
"e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", - }, - }, - } { - tsi, err := ParseTarSum(testcase.input) - if err != nil { - if testcase.err != nil && err == testcase.err { - continue // passes - } - - t.Fatalf("unexpected error parsing tarsum: %v", err) - } - - if testcase.err != nil { - t.Fatalf("expected error not encountered on %q: %v", testcase.input, testcase.err) - } - - if !reflect.DeepEqual(tsi, testcase.expected) { - t.Fatalf("expected tarsum info: %v != %v", tsi, testcase.expected) - } - - if testcase.input != tsi.String() { - t.Fatalf("input should equal output: %q != %q", tsi.String(), testcase.input) - } - } -} diff --git a/vendor/github.com/docker/distribution/digest/verifiers.go b/vendor/github.com/docker/distribution/digest/verifiers.go index f8c75b53..9af3be13 100644 --- a/vendor/github.com/docker/distribution/digest/verifiers.go +++ b/vendor/github.com/docker/distribution/digest/verifiers.go @@ -3,9 +3,6 @@ package digest import ( "hash" "io" - "io/ioutil" - - "github.com/docker/docker/pkg/tarsum" ) // Verifier presents a general verification interface to be used with message @@ -27,70 +24,10 @@ func NewDigestVerifier(d Digest) (Verifier, error) { return nil, err } - alg := d.Algorithm() - switch alg { - case "sha256", "sha384", "sha512": - return hashVerifier{ - hash: alg.Hash(), - digest: d, - }, nil - default: - // Assume we have a tarsum. - version, err := tarsum.GetVersionFromTarsum(string(d)) - if err != nil { - return nil, err - } - - pr, pw := io.Pipe() - - // TODO(stevvooe): We may actually want to ban the earlier versions of - // tarsum. That decision may not be the place of the verifier. - - ts, err := tarsum.NewTarSum(pr, true, version) - if err != nil { - return nil, err - } - - // TODO(sday): Ick! A goroutine per digest verification? We'll have to - // get the tarsum library to export an io.Writer variant. - go func() { - if _, err := io.Copy(ioutil.Discard, ts); err != nil { - pr.CloseWithError(err) - } else { - pr.Close() - } - }() - - return &tarsumVerifier{ - digest: d, - ts: ts, - pr: pr, - pw: pw, - }, nil - } -} - -// NewLengthVerifier returns a verifier that returns true when the number of -// read bytes equals the expected parameter. 
-func NewLengthVerifier(expected int64) Verifier { - return &lengthVerifier{ - expected: expected, - } -} - -type lengthVerifier struct { - expected int64 // expected bytes read - len int64 // bytes read -} - -func (lv *lengthVerifier) Write(p []byte) (n int, err error) { - n = len(p) - lv.len += int64(n) - return n, err -} - -func (lv *lengthVerifier) Verified() bool { - return lv.expected == lv.len + return hashVerifier{ + hash: d.Algorithm().Hash(), + digest: d, + }, nil } type hashVerifier struct { @@ -105,18 +42,3 @@ func (hv hashVerifier) Write(p []byte) (n int, err error) { func (hv hashVerifier) Verified() bool { return hv.digest == NewDigest(hv.digest.Algorithm(), hv.hash) } - -type tarsumVerifier struct { - digest Digest - ts tarsum.TarSum - pr *io.PipeReader - pw *io.PipeWriter -} - -func (tv *tarsumVerifier) Write(p []byte) (n int, err error) { - return tv.pw.Write(p) -} - -func (tv *tarsumVerifier) Verified() bool { - return tv.digest == Digest(tv.ts.Sum(nil)) -} diff --git a/vendor/github.com/docker/distribution/digest/verifiers_test.go b/vendor/github.com/docker/distribution/digest/verifiers_test.go index 5ee79f34..c342d6e7 100644 --- a/vendor/github.com/docker/distribution/digest/verifiers_test.go +++ b/vendor/github.com/docker/distribution/digest/verifiers_test.go @@ -3,22 +3,14 @@ package digest import ( "bytes" "crypto/rand" - "encoding/base64" "io" - "os" - "strings" "testing" - - "github.com/docker/distribution/testutil" ) func TestDigestVerifier(t *testing.T) { p := make([]byte, 1<<20) rand.Read(p) - digest, err := FromBytes(p) - if err != nil { - t.Fatalf("unexpected error digesting bytes: %#v", err) - } + digest := FromBytes(p) verifier, err := NewDigestVerifier(digest) if err != nil { @@ -30,43 +22,6 @@ func TestDigestVerifier(t *testing.T) { if !verifier.Verified() { t.Fatalf("bytes not verified") } - - tf, tarSum, err := testutil.CreateRandomTarFile() - if err != nil { - t.Fatalf("error creating tarfile: %v", err) - } - - digest, err = FromTarArchive(tf) - if err != nil { - t.Fatalf("error digesting tarsum: %v", err) - } - - if digest.String() != tarSum { - t.Fatalf("unexpected digest: %q != %q", digest.String(), tarSum) - } - - expectedSize, _ := tf.Seek(0, os.SEEK_END) // Get tar file size - tf.Seek(0, os.SEEK_SET) // seek back - - // This is the most relevant example for the registry application. It's - // effectively a read through pipeline, where the final sink is the digest - // verifier. - verifier, err = NewDigestVerifier(digest) - if err != nil { - t.Fatalf("unexpected error getting digest verifier: %s", err) - } - - lengthVerifier := NewLengthVerifier(expectedSize) - rd := io.TeeReader(tf, lengthVerifier) - io.Copy(verifier, rd) - - if !lengthVerifier.Verified() { - t.Fatalf("verifier detected incorrect length") - } - - if !verifier.Verified() { - t.Fatalf("bytes not verified") - } } // TestVerifierUnsupportedDigest ensures that unsupported digest validation is @@ -84,79 +39,11 @@ func TestVerifierUnsupportedDigest(t *testing.T) { } } -// TestJunkNoDeadlock ensures that junk input into a digest verifier properly -// returns errors from the tarsum library. Specifically, we pass in a file -// with a "bad header" and should see the error from the io.Copy to verifier. -// This has been seen with gzipped tarfiles, mishandled by the tarsum package, -// but also on junk input, such as html. 
-func TestJunkNoDeadlock(t *testing.T) { - expected := Digest("tarsum.dev+sha256:62e15750aae345f6303469a94892e66365cc5e3abdf8d7cb8b329f8fb912e473") - junk := bytes.Repeat([]byte{'a'}, 1024) - - verifier, err := NewDigestVerifier(expected) - if err != nil { - t.Fatalf("unexpected error creating verifier: %v", err) - } - - rd := bytes.NewReader(junk) - if _, err := io.Copy(verifier, rd); err == nil { - t.Fatalf("unexpected error verifying input data: %v", err) - } -} - -// TestBadTarNoDeadlock runs a tar with a "bad" tar header through digest -// verifier, ensuring that the verifier returns an error properly. -func TestBadTarNoDeadlock(t *testing.T) { - // TODO(stevvooe): This test is exposing a bug in tarsum where if we pass - // a gzipped tar file into tarsum, the library returns an error. This - // should actually work. When the tarsum package is fixed, this test will - // fail and we can remove this test or invert it. - - // This tarfile was causing deadlocks in verifiers due mishandled copy error. - // This is a gzipped tar, which we typically don't see but should handle. - // - // From https://registry-1.docker.io/v2/library/ubuntu/blobs/tarsum.dev+sha256:62e15750aae345f6303469a94892e66365cc5e3abdf8d7cb8b329f8fb912e473 - const badTar = ` -H4sIAAAJbogA/0otSdZnoDEwMDAxMDc1BdJggE6D2YZGJobGBmbGRsZAdYYGBkZGDAqmtHYYCJQW -lyQWAZ1CqTnonhsiAAAAAP//AsV/YkEJTdMAGfFvZmA2Gv/0AAAAAAD//4LFf3F+aVFyarFeTmZx -CbXtAOVnMxMTXPFvbGpmjhb/xobmwPinSyCO8PgHAAAA///EVU9v2z4MvedTEMihl9a5/26/YTkU -yNKiTTDsKMt0rE0WDYmK628/ym7+bFmH2DksQACbIB/5+J7kObwiQsXc/LdYVGibLObRccw01Qv5 -19EZ7hbbZudVgWtiDFCSh4paYII4xOVxNgeHLXrYow+GXAAqgSuEQhzlTR5ZgtlsVmB+aKe8rswe -zzsOjwtoPGoTEGplHHhMCJqxSNUPwesbEGbzOXxR34VCHndQmjfhUKhEq/FURI0FqJKFR5q9NE5Z -qbaoBGoglAB+5TSK0sOh3c3UPkRKE25dEg8dDzzIWmqN2wG3BNY4qRL1VFFAoJJb5SXHU90n34nk -SUS8S0AeGwqGyXdZel1nn7KLGhPO0kDeluvN48ty9Q2269ft8/PTy2b5GfKuh9/2LBIWo6oz+N8G -uodmWLETg0mW4lMP4XYYCL4+rlawftpIO40SA+W6Yci9wRZE1MNOjmyGdhBQRy9OHpqOdOGh/wT7 -nZdOkHZ650uIK+WrVZdkgErJfnNEJysLnI5FSAj4xuiCQNpOIoNWmhyLByVHxEpLf3dkr+k9KMsV -xV0FhiVB21hgD3V5XwSqRdOmsUYr7oNtZXTVzyTHc2/kqokBy2ihRMVRTN+78goP5Ur/aMhz+KOJ -3h2UsK43kdwDo0Q9jfD7ie2RRur7MdpIrx1Z3X4j/Q1qCswN9r/EGCvXiUy0fI4xeSknnH/92T/+ -fgIAAP//GkWjYBSMXAAIAAD//2zZtzAAEgAA` - expected := Digest("tarsum.dev+sha256:62e15750aae345f6303469a94892e66365cc5e3abdf8d7cb8b329f8fb912e473") - - verifier, err := NewDigestVerifier(expected) - if err != nil { - t.Fatalf("unexpected error creating verifier: %v", err) - } - - rd := base64.NewDecoder(base64.StdEncoding, strings.NewReader(badTar)) - - if _, err := io.Copy(verifier, rd); err == nil { - t.Fatalf("unexpected error verifying input data: %v", err) - } - - if verifier.Verified() { - // For now, we expect an error, since tarsum library cannot handle - // compressed tars (!!!). - t.Fatalf("no error received after invalid tar") - } -} - // TODO(stevvooe): Add benchmarks to measure bytes/second throughput for -// DigestVerifier. We should be tarsum/gzip limited for common cases but we -// want to verify this. +// DigestVerifier. // -// The relevant benchmarks for comparison can be run with the following +// The relevant benchmark for comparison can be run with the following // commands: // // go test -bench . crypto/sha1 -// go test -bench . 
github.com/docker/docker/pkg/tarsum // diff --git a/vendor/github.com/docker/distribution/errors.go b/vendor/github.com/docker/distribution/errors.go index eb332d1b..77bd096e 100644 --- a/vendor/github.com/docker/distribution/errors.go +++ b/vendor/github.com/docker/distribution/errors.go @@ -16,6 +16,15 @@ var ErrManifestNotModified = errors.New("manifest not modified") // performed var ErrUnsupported = errors.New("operation unsupported") +// ErrTagUnknown is returned if the given tag is not known by the tag service +type ErrTagUnknown struct { + Tag string +} + +func (err ErrTagUnknown) Error() string { + return fmt.Sprintf("unknown tag=%s", err.Tag) +} + // ErrRepositoryUnknown is returned if the named repository is not known by // the registry. type ErrRepositoryUnknown struct { @@ -89,3 +98,14 @@ type ErrManifestBlobUnknown struct { func (err ErrManifestBlobUnknown) Error() string { return fmt.Sprintf("unknown blob %v on manifest", err.Digest) } + +// ErrManifestNameInvalid should be used to denote an invalid manifest +// name. Reason may set, indicating the cause of invalidity. +type ErrManifestNameInvalid struct { + Name string + Reason error +} + +func (err ErrManifestNameInvalid) Error() string { + return fmt.Sprintf("manifest name %q invalid: %v", err.Name, err.Reason) +} diff --git a/vendor/github.com/docker/distribution/manifest/doc.go b/vendor/github.com/docker/distribution/manifest/doc.go deleted file mode 100644 index 88367b0a..00000000 --- a/vendor/github.com/docker/distribution/manifest/doc.go +++ /dev/null @@ -1 +0,0 @@ -package manifest diff --git a/vendor/github.com/docker/distribution/manifest/schema1/manifest.go b/vendor/github.com/docker/distribution/manifest/schema1/manifest.go deleted file mode 100644 index e7cbf958..00000000 --- a/vendor/github.com/docker/distribution/manifest/schema1/manifest.go +++ /dev/null @@ -1,130 +0,0 @@ -package schema1 - -import ( - "encoding/json" - - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest" - "github.com/docker/libtrust" -) - -// TODO(stevvooe): When we rev the manifest format, the contents of this -// package should be moved to manifest/v1. - -const ( - // ManifestMediaType specifies the mediaType for the current version. Note - // that for schema version 1, the the media is optionally - // "application/json". - ManifestMediaType = "application/vnd.docker.distribution.manifest.v1+json" -) - -var ( - // SchemaVersion provides a pre-initialized version structure for this - // packages version of the manifest. - SchemaVersion = manifest.Versioned{ - SchemaVersion: 1, - } -) - -// Manifest provides the base accessible fields for working with V2 image -// format in the registry. -type Manifest struct { - manifest.Versioned - - // Name is the name of the image's repository - Name string `json:"name"` - - // Tag is the tag of the image specified by this manifest - Tag string `json:"tag"` - - // Architecture is the host architecture on which this image is intended to - // run - Architecture string `json:"architecture"` - - // FSLayers is a list of filesystem layer blobSums contained in this image - FSLayers []FSLayer `json:"fsLayers"` - - // History is a list of unstructured historical data for v1 compatibility - History []History `json:"history"` -} - -// SignedManifest provides an envelope for a signed image manifest, including -// the format sensitive raw bytes. 
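The two error types added to errors.go above, ErrTagUnknown and ErrManifestNameInvalid, are plain structs with value receivers, so callers can branch on them with a type switch. A hedged sketch; handleRegistryErr is an illustrative helper, not part of the package:

    package example

    import (
    	"fmt"

    	"github.com/docker/distribution"
    )

    func handleRegistryErr(err error) {
    	switch e := err.(type) {
    	case distribution.ErrTagUnknown:
    		fmt.Printf("tag %q is not known to the tag service\n", e.Tag)
    	case distribution.ErrManifestNameInvalid:
    		fmt.Printf("manifest name %q rejected: %v\n", e.Name, e.Reason)
    	default:
    		fmt.Println("unhandled error:", err)
    	}
    }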
It contains fields to -type SignedManifest struct { - Manifest - - // Raw is the byte representation of the ImageManifest, used for signature - // verification. The value of Raw must be used directly during - // serialization, or the signature check will fail. The manifest byte - // representation cannot change or it will have to be re-signed. - Raw []byte `json:"-"` -} - -// UnmarshalJSON populates a new ImageManifest struct from JSON data. -func (sm *SignedManifest) UnmarshalJSON(b []byte) error { - sm.Raw = make([]byte, len(b), len(b)) - copy(sm.Raw, b) - - p, err := sm.Payload() - if err != nil { - return err - } - - var manifest Manifest - if err := json.Unmarshal(p, &manifest); err != nil { - return err - } - - sm.Manifest = manifest - return nil -} - -// Payload returns the raw, signed content of the signed manifest. The -// contents can be used to calculate the content identifier. -func (sm *SignedManifest) Payload() ([]byte, error) { - jsig, err := libtrust.ParsePrettySignature(sm.Raw, "signatures") - if err != nil { - return nil, err - } - - // Resolve the payload in the manifest. - return jsig.Payload() -} - -// Signatures returns the signatures as provided by -// (*libtrust.JSONSignature).Signatures. The byte slices are opaque jws -// signatures. -func (sm *SignedManifest) Signatures() ([][]byte, error) { - jsig, err := libtrust.ParsePrettySignature(sm.Raw, "signatures") - if err != nil { - return nil, err - } - - // Resolve the payload in the manifest. - return jsig.Signatures() -} - -// MarshalJSON returns the contents of raw. If Raw is nil, marshals the inner -// contents. Applications requiring a marshaled signed manifest should simply -// use Raw directly, since the the content produced by json.Marshal will be -// compacted and will fail signature checks. -func (sm *SignedManifest) MarshalJSON() ([]byte, error) { - if len(sm.Raw) > 0 { - return sm.Raw, nil - } - - // If the raw data is not available, just dump the inner content. - return json.Marshal(&sm.Manifest) -} - -// FSLayer is a container struct for BlobSums defined in an image manifest -type FSLayer struct { - // BlobSum is the tarsum of the referenced filesystem image layer - BlobSum digest.Digest `json:"blobSum"` -} - -// History stores unstructured v1 compatibility information -type History struct { - // V1Compatibility is the raw v1 compatibility information - V1Compatibility string `json:"v1Compatibility"` -} diff --git a/vendor/github.com/docker/distribution/manifest/schema1/manifest_test.go b/vendor/github.com/docker/distribution/manifest/schema1/manifest_test.go deleted file mode 100644 index 16cedae3..00000000 --- a/vendor/github.com/docker/distribution/manifest/schema1/manifest_test.go +++ /dev/null @@ -1,108 +0,0 @@ -package schema1 - -import ( - "bytes" - "encoding/json" - "reflect" - "testing" - - "github.com/docker/libtrust" -) - -type testEnv struct { - name, tag string - manifest *Manifest - signed *SignedManifest - pk libtrust.PrivateKey -} - -func TestManifestMarshaling(t *testing.T) { - env := genEnv(t) - - // Check that the Raw field is the same as json.MarshalIndent with these - // parameters. 
- p, err := json.MarshalIndent(env.signed, "", " ") - if err != nil { - t.Fatalf("error marshaling manifest: %v", err) - } - - if !bytes.Equal(p, env.signed.Raw) { - t.Fatalf("manifest bytes not equal: %q != %q", string(env.signed.Raw), string(p)) - } -} - -func TestManifestUnmarshaling(t *testing.T) { - env := genEnv(t) - - var signed SignedManifest - if err := json.Unmarshal(env.signed.Raw, &signed); err != nil { - t.Fatalf("error unmarshaling signed manifest: %v", err) - } - - if !reflect.DeepEqual(&signed, env.signed) { - t.Fatalf("manifests are different after unmarshaling: %v != %v", signed, env.signed) - } -} - -func TestManifestVerification(t *testing.T) { - env := genEnv(t) - - publicKeys, err := Verify(env.signed) - if err != nil { - t.Fatalf("error verifying manifest: %v", err) - } - - if len(publicKeys) == 0 { - t.Fatalf("no public keys found in signature") - } - - var found bool - publicKey := env.pk.PublicKey() - // ensure that one of the extracted public keys matches the private key. - for _, candidate := range publicKeys { - if candidate.KeyID() == publicKey.KeyID() { - found = true - break - } - } - - if !found { - t.Fatalf("expected public key, %v, not found in verified keys: %v", publicKey, publicKeys) - } -} - -func genEnv(t *testing.T) *testEnv { - pk, err := libtrust.GenerateECP256PrivateKey() - if err != nil { - t.Fatalf("error generating test key: %v", err) - } - - name, tag := "foo/bar", "test" - - m := Manifest{ - Versioned: SchemaVersion, - Name: name, - Tag: tag, - FSLayers: []FSLayer{ - { - BlobSum: "asdf", - }, - { - BlobSum: "qwer", - }, - }, - } - - sm, err := Sign(&m, pk) - if err != nil { - t.Fatalf("error signing manifest: %v", err) - } - - return &testEnv{ - name: name, - tag: tag, - manifest: &m, - signed: sm, - pk: pk, - } -} diff --git a/vendor/github.com/docker/distribution/manifest/schema1/sign.go b/vendor/github.com/docker/distribution/manifest/schema1/sign.go deleted file mode 100644 index 1b7b674a..00000000 --- a/vendor/github.com/docker/distribution/manifest/schema1/sign.go +++ /dev/null @@ -1,66 +0,0 @@ -package schema1 - -import ( - "crypto/x509" - "encoding/json" - - "github.com/docker/libtrust" -) - -// Sign signs the manifest with the provided private key, returning a -// SignedManifest. This typically won't be used within the registry, except -// for testing. -func Sign(m *Manifest, pk libtrust.PrivateKey) (*SignedManifest, error) { - p, err := json.MarshalIndent(m, "", " ") - if err != nil { - return nil, err - } - - js, err := libtrust.NewJSONSignature(p) - if err != nil { - return nil, err - } - - if err := js.Sign(pk); err != nil { - return nil, err - } - - pretty, err := js.PrettySignature("signatures") - if err != nil { - return nil, err - } - - return &SignedManifest{ - Manifest: *m, - Raw: pretty, - }, nil -} - -// SignWithChain signs the manifest with the given private key and x509 chain. -// The public key of the first element in the chain must be the public key -// corresponding with the sign key. 
-func SignWithChain(m *Manifest, key libtrust.PrivateKey, chain []*x509.Certificate) (*SignedManifest, error) { - p, err := json.MarshalIndent(m, "", " ") - if err != nil { - return nil, err - } - - js, err := libtrust.NewJSONSignature(p) - if err != nil { - return nil, err - } - - if err := js.SignWithChain(key, chain); err != nil { - return nil, err - } - - pretty, err := js.PrettySignature("signatures") - if err != nil { - return nil, err - } - - return &SignedManifest{ - Manifest: *m, - Raw: pretty, - }, nil -} diff --git a/vendor/github.com/docker/distribution/manifest/schema1/verify.go b/vendor/github.com/docker/distribution/manifest/schema1/verify.go deleted file mode 100644 index 60f8cda0..00000000 --- a/vendor/github.com/docker/distribution/manifest/schema1/verify.go +++ /dev/null @@ -1,32 +0,0 @@ -package schema1 - -import ( - "crypto/x509" - - "github.com/Sirupsen/logrus" - "github.com/docker/libtrust" -) - -// Verify verifies the signature of the signed manifest returning the public -// keys used during signing. -func Verify(sm *SignedManifest) ([]libtrust.PublicKey, error) { - js, err := libtrust.ParsePrettySignature(sm.Raw, "signatures") - if err != nil { - logrus.WithField("err", err).Debugf("(*SignedManifest).Verify") - return nil, err - } - - return js.Verify() -} - -// VerifyChains verifies the signature of the signed manifest against the -// certificate pool returning the list of verified chains. Signatures without -// an x509 chain are not checked. -func VerifyChains(sm *SignedManifest, ca *x509.CertPool) ([][]*x509.Certificate, error) { - js, err := libtrust.ParsePrettySignature(sm.Raw, "signatures") - if err != nil { - return nil, err - } - - return js.VerifyChains(ca) -} diff --git a/vendor/github.com/docker/distribution/manifest/versioned.go b/vendor/github.com/docker/distribution/manifest/versioned.go deleted file mode 100644 index bef38292..00000000 --- a/vendor/github.com/docker/distribution/manifest/versioned.go +++ /dev/null @@ -1,9 +0,0 @@ -package manifest - -// Versioned provides a struct with just the manifest schemaVersion. Incoming -// content with unknown schema version can be decoded against this struct to -// check the version. -type Versioned struct { - // SchemaVersion is the image manifest schema that this image follows - SchemaVersion int `json:"schemaVersion"` -} diff --git a/vendor/github.com/docker/distribution/manifests.go b/vendor/github.com/docker/distribution/manifests.go new file mode 100644 index 00000000..1acb0500 --- /dev/null +++ b/vendor/github.com/docker/distribution/manifests.go @@ -0,0 +1,117 @@ +package distribution + +import ( + "fmt" + "mime" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" +) + +// Manifest represents a registry object specifying a set of +// references and an optional target +type Manifest interface { + // References returns a list of objects which make up this manifest. + // The references are strictly ordered from base to head. A reference + // is anything which can be represented by a distribution.Descriptor + References() []Descriptor + + // Payload provides the serialized format of the manifest, in addition to + // the mediatype. + Payload() (mediatype string, payload []byte, err error) +} + +// ManifestBuilder creates a manifest allowing one to include dependencies. +// Instances can be obtained from a version-specific manifest package. Manifest +// specific data is passed into the function which creates the builder. 
+type ManifestBuilder interface { + // Build creates the manifest from his builder. + Build(ctx context.Context) (Manifest, error) + + // References returns a list of objects which have been added to this + // builder. The dependencies are returned in the order they were added, + // which should be from base to head. + References() []Descriptor + + // AppendReference includes the given object in the manifest after any + // existing dependencies. If the add fails, such as when adding an + // unsupported dependency, an error may be returned. + AppendReference(dependency Describable) error +} + +// ManifestService describes operations on image manifests. +type ManifestService interface { + // Exists returns true if the manifest exists. + Exists(ctx context.Context, dgst digest.Digest) (bool, error) + + // Get retrieves the manifest specified by the given digest + Get(ctx context.Context, dgst digest.Digest, options ...ManifestServiceOption) (Manifest, error) + + // Put creates or updates the given manifest returning the manifest digest + Put(ctx context.Context, manifest Manifest, options ...ManifestServiceOption) (digest.Digest, error) + + // Delete removes the manifest specified by the given digest. Deleting + // a manifest that doesn't exist will return ErrManifestNotFound + Delete(ctx context.Context, dgst digest.Digest) error + + // Enumerate fills 'manifests' with the manifests in this service up + // to the size of 'manifests' and returns 'n' for the number of entries + // which were filled. 'last' contains an offset in the manifest set + // and can be used to resume iteration. + //Enumerate(ctx context.Context, manifests []Manifest, last Manifest) (n int, err error) +} + +// Describable is an interface for descriptors +type Describable interface { + Descriptor() Descriptor +} + +// ManifestMediaTypes returns the supported media types for manifests. +func ManifestMediaTypes() (mediaTypes []string) { + for t := range mappings { + if t != "" { + mediaTypes = append(mediaTypes, t) + } + } + return +} + +// UnmarshalFunc implements manifest unmarshalling a given MediaType +type UnmarshalFunc func([]byte) (Manifest, Descriptor, error) + +var mappings = make(map[string]UnmarshalFunc, 0) + +// UnmarshalManifest looks up manifest unmarshall functions based on +// MediaType +func UnmarshalManifest(ctHeader string, p []byte) (Manifest, Descriptor, error) { + // Need to look up by the actual media type, not the raw contents of + // the header. Strip semicolons and anything following them. + var mediatype string + if ctHeader != "" { + var err error + mediatype, _, err = mime.ParseMediaType(ctHeader) + if err != nil { + return nil, Descriptor{}, err + } + } + + unmarshalFunc, ok := mappings[mediatype] + if !ok { + unmarshalFunc, ok = mappings[""] + if !ok { + return nil, Descriptor{}, fmt.Errorf("unsupported manifest mediatype and no default available: %s", mediatype) + } + } + + return unmarshalFunc(p) +} + +// RegisterManifestSchema registers an UnmarshalFunc for a given schema type. 
This +// should be called from specific +func RegisterManifestSchema(mediatype string, u UnmarshalFunc) error { + if _, ok := mappings[mediatype]; ok { + return fmt.Errorf("manifest mediatype registration would overwrite existing: %s", mediatype) + } + mappings[mediatype] = u + return nil +} diff --git a/vendor/github.com/docker/distribution/reference/reference.go b/vendor/github.com/docker/distribution/reference/reference.go index 3a5d36c2..c188472a 100644 --- a/vendor/github.com/docker/distribution/reference/reference.go +++ b/vendor/github.com/docker/distribution/reference/reference.go @@ -4,27 +4,21 @@ // Grammar // // reference := repository [ ":" tag ] [ "@" digest ] +// name := [hostname '/'] component ['/' component]* +// hostname := hostcomponent ['.' hostcomponent]* [':' port-number] +// hostcomponent := /([a-z0-9]|[a-z0-9][a-z0-9-]*[a-z0-9])/ +// port-number := /[0-9]+/ +// component := alpha-numeric [separator alpha-numeric]* +// alpha-numeric := /[a-z0-9]+/ +// separator := /[_.]|__|[-]*/ // -// // repository.go -// repository := hostname ['/' component]+ -// hostname := hostcomponent [':' port-number] -// component := subcomponent [separator subcomponent]* -// subcomponent := alpha-numeric ['-'* alpha-numeric]* -// hostcomponent := [hostpart '.']* hostpart -// alpha-numeric := /[a-z0-9]+/ -// separator := /([_.]|__)/ -// port-number := /[0-9]+/ -// hostpart := /([a-z0-9]|[a-z0-9][a-z0-9-]*[a-z0-9])/ -// -// // tag.go // tag := /[\w][\w.-]{0,127}/ // -// // from the digest package // digest := digest-algorithm ":" digest-hex // digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ] // digest-algorithm-separator := /[+.-_]/ // digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/ -// digest-hex := /[0-9a-fA-F]{32,}/ ; Atleast 128 bit digest value +// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value package reference import ( @@ -52,8 +46,7 @@ var ( // ErrNameEmpty is returned for empty, invalid repository names. ErrNameEmpty = errors.New("repository name must have at least one component") - // ErrNameTooLong is returned when a repository name is longer than - // RepositoryNameTotalLengthMax + // ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax. 
ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax) ) diff --git a/vendor/github.com/docker/distribution/reference/reference_test.go b/vendor/github.com/docker/distribution/reference/reference_test.go index 8e1ac1f3..cde1a7a2 100644 --- a/vendor/github.com/docker/distribution/reference/reference_test.go +++ b/vendor/github.com/docker/distribution/reference/reference_test.go @@ -87,6 +87,10 @@ func TestReferenceParse(t *testing.T) { input: "@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", err: ErrReferenceInvalidFormat, }, + { + input: "repo@sha256:ffffffffffffffffffffffffffffffffff", + err: digest.ErrDigestInvalidLength, + }, { input: "validname@invaliddigest:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", err: digest.ErrDigestUnsupported, @@ -129,11 +133,11 @@ func TestReferenceParse(t *testing.T) { tag: "xn--n3h.com", }, { - input: "xn--7o8h.com/myimage:xn--7o8h.com@sha512:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", // 🐳.com in punycode + input: "xn--7o8h.com/myimage:xn--7o8h.com@sha512:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", // 🐳.com in punycode hostname: "xn--7o8h.com", repository: "xn--7o8h.com/myimage", tag: "xn--7o8h.com", - digest: "sha512:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + digest: "sha512:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", }, { input: "foo_bar.com:8080", @@ -343,9 +347,9 @@ func TestSerialization(t *testing.T) { }, { description: "name with digest", - input: "other.com/named@sha256:1234567890098765432112345667890098765", + input: "other.com/named@sha256:1234567890098765432112345667890098765432112345667890098765432112", name: "other.com/named", - digest: "sha256:1234567890098765432112345667890098765", + digest: "sha256:1234567890098765432112345667890098765432112345667890098765432112", }, } for _, testcase := range testcases { diff --git a/vendor/github.com/docker/distribution/reference/regexp.go b/vendor/github.com/docker/distribution/reference/regexp.go index 06ca8db3..a4ffe5b6 100644 --- a/vendor/github.com/docker/distribution/reference/regexp.go +++ b/vendor/github.com/docker/distribution/reference/regexp.go @@ -3,47 +3,122 @@ package reference import "regexp" var ( - // nameSubComponentRegexp defines the part of the name which must be - // begin and end with an alphanumeric character. These characters can - // be separated by any number of dashes. - nameSubComponentRegexp = regexp.MustCompile(`[a-z0-9]+(?:[-]+[a-z0-9]+)*`) + // alphaNumericRegexp defines the alpha numeric atom, typically a + // component of names. This only allows lower case characters and digits. + alphaNumericRegexp = match(`[a-z0-9]+`) - // nameComponentRegexp restricts registry path component names to - // start with at least one letter or number, with following parts able to - // be separated by one period, underscore or double underscore. - nameComponentRegexp = regexp.MustCompile(nameSubComponentRegexp.String() + `(?:(?:[._]|__)` + nameSubComponentRegexp.String() + `)*`) + // separatorRegexp defines the separators allowed to be embedded in name + // components. This allow one period, one or two underscore and multiple + // dashes. 
+ separatorRegexp = match(`(?:[._]|__|[-]*)`) - nameRegexp = regexp.MustCompile(`(?:` + nameComponentRegexp.String() + `/)*` + nameComponentRegexp.String()) + // nameComponentRegexp restricts registry path component names to start + // with at least one letter or number, with following parts able to be + // separated by one period, one or two underscore and multiple dashes. + nameComponentRegexp = expression( + alphaNumericRegexp, + optional(repeated(separatorRegexp, alphaNumericRegexp))) - hostnameComponentRegexp = regexp.MustCompile(`(?:[a-z0-9]|[a-z0-9][a-z0-9-]*[a-z0-9])`) + // hostnameComponentRegexp restricts the registry hostname component of a + // repository name to start with a component as defined by hostnameRegexp + // and followed by an optional port. + hostnameComponentRegexp = match(`(?:[a-z0-9]|[a-z0-9][a-z0-9-]*[a-z0-9])`) - // hostnameComponentRegexp restricts the registry hostname component of a repository name to - // start with a component as defined by hostnameRegexp and followed by an optional port. - hostnameRegexp = regexp.MustCompile(`(?:` + hostnameComponentRegexp.String() + `\.)*` + hostnameComponentRegexp.String() + `(?::[0-9]+)?`) + // hostnameRegexp defines the structure of potential hostname components + // that may be part of image names. This is purposely a subset of what is + // allowed by DNS to ensure backwards compatibility with Docker image + // names. + hostnameRegexp = expression( + hostnameComponentRegexp, + optional(repeated(literal(`.`), hostnameComponentRegexp)), + optional(literal(`:`), match(`[0-9]+`))) // TagRegexp matches valid tag names. From docker/docker:graph/tags.go. - TagRegexp = regexp.MustCompile(`[\w][\w.-]{0,127}`) + TagRegexp = match(`[\w][\w.-]{0,127}`) // anchoredTagRegexp matches valid tag names, anchored at the start and // end of the matched string. - anchoredTagRegexp = regexp.MustCompile(`^` + TagRegexp.String() + `$`) + anchoredTagRegexp = anchored(TagRegexp) // DigestRegexp matches valid digests. - DigestRegexp = regexp.MustCompile(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`) + DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`) // anchoredDigestRegexp matches valid digests, anchored at the start and // end of the matched string. - anchoredDigestRegexp = regexp.MustCompile(`^` + DigestRegexp.String() + `$`) + anchoredDigestRegexp = anchored(DigestRegexp) // NameRegexp is the format for the name component of references. The // regexp has capturing groups for the hostname and name part omitting // the seperating forward slash from either. - NameRegexp = regexp.MustCompile(`(?:` + hostnameRegexp.String() + `/)?` + nameRegexp.String()) + NameRegexp = expression( + optional(hostnameRegexp, literal(`/`)), + nameComponentRegexp, + optional(repeated(literal(`/`), nameComponentRegexp))) - // ReferenceRegexp is the full supported format of a reference. The - // regexp has capturing groups for name, tag, and digest components. - ReferenceRegexp = regexp.MustCompile(`^((?:` + hostnameRegexp.String() + `/)?` + nameRegexp.String() + `)(?:[:](` + TagRegexp.String() + `))?(?:[@](` + DigestRegexp.String() + `))?$`) + // anchoredNameRegexp is used to parse a name value, capturing the + // hostname and trailing components. 
+ anchoredNameRegexp = anchored( + optional(capture(hostnameRegexp), literal(`/`)), + capture(nameComponentRegexp, + optional(repeated(literal(`/`), nameComponentRegexp)))) - // anchoredNameRegexp is used to parse a name value, capturing hostname - anchoredNameRegexp = regexp.MustCompile(`^(?:(` + hostnameRegexp.String() + `)/)?(` + nameRegexp.String() + `)$`) + // ReferenceRegexp is the full supported format of a reference. The regexp + // is anchored and has capturing groups for name, tag, and digest + // components. + ReferenceRegexp = anchored(capture(NameRegexp), + optional(literal(":"), capture(TagRegexp)), + optional(literal("@"), capture(DigestRegexp))) ) + +// match compiles the string to a regular expression. +var match = regexp.MustCompile + +// literal compiles s into a literal regular expression, escaping any regexp +// reserved characters. +func literal(s string) *regexp.Regexp { + re := match(regexp.QuoteMeta(s)) + + if _, complete := re.LiteralPrefix(); !complete { + panic("must be a literal") + } + + return re +} + +// expression defines a full expression, where each regular expression must +// follow the previous. +func expression(res ...*regexp.Regexp) *regexp.Regexp { + var s string + for _, re := range res { + s += re.String() + } + + return match(s) +} + +// optional wraps the expression in a non-capturing group and makes the +// production optional. +func optional(res ...*regexp.Regexp) *regexp.Regexp { + return match(group(expression(res...)).String() + `?`) +} + +// repeated wraps the regexp in a non-capturing group to get one or more +// matches. +func repeated(res ...*regexp.Regexp) *regexp.Regexp { + return match(group(expression(res...)).String() + `+`) +} + +// group wraps the regexp in a non-capturing group. +func group(res ...*regexp.Regexp) *regexp.Regexp { + return match(`(?:` + expression(res...).String() + `)`) +} + +// capture wraps the expression in a capturing group. +func capture(res ...*regexp.Regexp) *regexp.Regexp { + return match(`(` + expression(res...).String() + `)`) +} + +// anchored anchors the regular expression by adding start and end delimiters. 
+func anchored(res ...*regexp.Regexp) *regexp.Regexp { + return match(`^` + expression(res...).String() + `$`) +} diff --git a/vendor/github.com/docker/distribution/reference/regexp_test.go b/vendor/github.com/docker/distribution/reference/regexp_test.go index 530a6eb6..33944918 100644 --- a/vendor/github.com/docker/distribution/reference/regexp_test.go +++ b/vendor/github.com/docker/distribution/reference/regexp_test.go @@ -119,6 +119,11 @@ func TestHostRegexp(t *testing.T) { } func TestFullNameRegexp(t *testing.T) { + if anchoredNameRegexp.NumSubexp() != 2 { + t.Fatalf("anchored name regexp should have two submatches: %v, %v != 2", + anchoredNameRegexp, anchoredNameRegexp.NumSubexp()) + } + testcases := []regexpMatch{ { input: "", @@ -401,6 +406,11 @@ func TestFullNameRegexp(t *testing.T) { } func TestReferenceRegexp(t *testing.T) { + if ReferenceRegexp.NumSubexp() != 3 { + t.Fatalf("anchored name regexp should have three submatches: %v, %v != 3", + ReferenceRegexp, ReferenceRegexp.NumSubexp()) + } + testcases := []regexpMatch{ { input: "registry.com:8080/myapp:tag", diff --git a/vendor/github.com/docker/distribution/registry.go b/vendor/github.com/docker/distribution/registry.go index 001776f8..dcb35c37 100644 --- a/vendor/github.com/docker/distribution/registry.go +++ b/vendor/github.com/docker/distribution/registry.go @@ -2,8 +2,7 @@ package distribution import ( "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/reference" ) // Scope defines the set of items that match a namespace. @@ -34,7 +33,7 @@ type Namespace interface { // Repository should return a reference to the named repository. The // registry may or may not have the repository but should always return a // reference. - Repository(ctx context.Context, name string) (Repository, error) + Repository(ctx context.Context, name reference.Named) (Repository, error) // Repositories fills 'repos' with a lexigraphically sorted catalog of repositories // up to the size of 'repos' and returns the value 'n' for the number of entries @@ -44,12 +43,14 @@ type Namespace interface { } // ManifestServiceOption is a function argument for Manifest Service methods -type ManifestServiceOption func(ManifestService) error +type ManifestServiceOption interface { + Apply(ManifestService) error +} // Repository is a named collection of manifests and layers. type Repository interface { // Name returns the name of the repository. - Name() string + Name() reference.Named // Manifests returns a reference to this repository's manifest service. // with the supplied options applied. @@ -62,59 +63,10 @@ type Repository interface { // be a BlobService for use with clients. This will allow such // implementations to avoid implementing ServeBlob. - // Signatures returns a reference to this repository's signatures service. - Signatures() SignatureService + // Tags returns a reference to this repositories tag service + Tags(ctx context.Context) TagService } // TODO(stevvooe): Must add close methods to all these. May want to change the // way instances are created to better reflect internal dependency // relationships. - -// ManifestService provides operations on image manifests. -type ManifestService interface { - // Exists returns true if the manifest exists. - Exists(dgst digest.Digest) (bool, error) - - // Get retrieves the identified by the digest, if it exists. 
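The rebuilt expressions above are assembled from these small helpers, and ReferenceRegexp stays anchored with capturing groups for name, tag and digest (the regexp tests above assert the group counts). A hedged sketch of splitting a raw reference with it; splitReference is an illustrative helper:

    package example

    import "github.com/docker/distribution/reference"

    // splitReference returns the three capturing groups of ReferenceRegexp:
    // name, tag and digest (empty strings when a component is absent).
    func splitReference(s string) (name, tag, dgst string, ok bool) {
    	m := reference.ReferenceRegexp.FindStringSubmatch(s)
    	if m == nil {
    		return "", "", "", false
    	}
    	return m[1], m[2], m[3], true
    }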
- Get(dgst digest.Digest) (*schema1.SignedManifest, error) - - // Delete removes the manifest, if it exists. - Delete(dgst digest.Digest) error - - // Put creates or updates the manifest. - Put(manifest *schema1.SignedManifest) error - - // TODO(stevvooe): The methods after this message should be moved to a - // discrete TagService, per active proposals. - - // Tags lists the tags under the named repository. - Tags() ([]string, error) - - // ExistsByTag returns true if the manifest exists. - ExistsByTag(tag string) (bool, error) - - // GetByTag retrieves the named manifest, if it exists. - GetByTag(tag string, options ...ManifestServiceOption) (*schema1.SignedManifest, error) - - // TODO(stevvooe): There are several changes that need to be done to this - // interface: - // - // 1. Allow explicit tagging with Tag(digest digest.Digest, tag string) - // 2. Support reading tags with a re-entrant reader to avoid large - // allocations in the registry. - // 3. Long-term: Provide All() method that lets one scroll through all of - // the manifest entries. - // 4. Long-term: break out concept of signing from manifests. This is - // really a part of the distribution sprint. - // 5. Long-term: Manifest should be an interface. This code shouldn't - // really be concerned with the storage format. -} - -// SignatureService provides operations on signatures. -type SignatureService interface { - // Get retrieves all of the signature blobs for the specified digest. - Get(dgst digest.Digest) ([][]byte, error) - - // Put stores the signature for the provided digest. - Put(dgst digest.Digest, signatures ...[]byte) error -} diff --git a/vendor/github.com/docker/distribution/registry/api/errcode/errors.go b/vendor/github.com/docker/distribution/registry/api/errcode/errors.go index fdaddbcf..6d9bb4b6 100644 --- a/vendor/github.com/docker/distribution/registry/api/errcode/errors.go +++ b/vendor/github.com/docker/distribution/registry/api/errcode/errors.go @@ -25,7 +25,8 @@ func (ec ErrorCode) ErrorCode() ErrorCode { // Error returns the ID/Value func (ec ErrorCode) Error() string { - return ec.Descriptor().Value + // NOTE(stevvooe): Cannot use message here since it may have unpopulated args. + return strings.ToLower(strings.Replace(ec.String(), "_", " ", -1)) } // Descriptor returns the descriptor for the error code. @@ -68,6 +69,15 @@ func (ec *ErrorCode) UnmarshalText(text []byte) error { return nil } +// WithMessage creates a new Error struct based on the passed-in info and +// overrides the Message property. +func (ec ErrorCode) WithMessage(message string) Error { + return Error{ + Code: ec, + Message: message, + } +} + // WithDetail creates a new Error struct based on the passed-in info and // set the Detail property appropriately func (ec ErrorCode) WithDetail(detail interface{}) Error { @@ -104,9 +114,7 @@ func (e Error) ErrorCode() ErrorCode { // Error returns a human readable representation of the error. 
func (e Error) Error() string { - return fmt.Sprintf("%s: %s", - strings.ToLower(strings.Replace(e.Code.String(), "_", " ", -1)), - e.Message) + return fmt.Sprintf("%s: %s", e.Code.Error(), e.Message) } // WithDetail will return a new Error, based on the current one, but with diff --git a/vendor/github.com/docker/distribution/registry/api/errcode/errors_test.go b/vendor/github.com/docker/distribution/registry/api/errcode/errors_test.go index 27fb1cec..54e7a736 100644 --- a/vendor/github.com/docker/distribution/registry/api/errcode/errors_test.go +++ b/vendor/github.com/docker/distribution/registry/api/errcode/errors_test.go @@ -4,9 +4,33 @@ import ( "encoding/json" "net/http" "reflect" + "strings" "testing" ) +// TestErrorsManagement does a quick check of the Errors type to ensure that +// members are properly pushed and marshaled. +var ErrorCodeTest1 = Register("test.errors", ErrorDescriptor{ + Value: "TEST1", + Message: "test error 1", + Description: `Just a test message #1.`, + HTTPStatusCode: http.StatusInternalServerError, +}) + +var ErrorCodeTest2 = Register("test.errors", ErrorDescriptor{ + Value: "TEST2", + Message: "test error 2", + Description: `Just a test message #2.`, + HTTPStatusCode: http.StatusNotFound, +}) + +var ErrorCodeTest3 = Register("test.errors", ErrorDescriptor{ + Value: "TEST3", + Message: "Sorry %q isn't valid", + Description: `Just a test message #3.`, + HTTPStatusCode: http.StatusNotFound, +}) + // TestErrorCodes ensures that error code format, mappings and // marshaling/unmarshaling. round trips are stable. func TestErrorCodes(t *testing.T) { @@ -56,33 +80,15 @@ func TestErrorCodes(t *testing.T) { if ecUnmarshaled != ec { t.Fatalf("unexpected error code during error code marshal/unmarshal: %v != %v", ecUnmarshaled, ec) } + + expectedErrorString := strings.ToLower(strings.Replace(ec.Descriptor().Value, "_", " ", -1)) + if ec.Error() != expectedErrorString { + t.Fatalf("unexpected return from %v.Error(): %q != %q", ec, ec.Error(), expectedErrorString) + } } } -// TestErrorsManagement does a quick check of the Errors type to ensure that -// members are properly pushed and marshaled. -var ErrorCodeTest1 = Register("v2.errors", ErrorDescriptor{ - Value: "TEST1", - Message: "test error 1", - Description: `Just a test message #1.`, - HTTPStatusCode: http.StatusInternalServerError, -}) - -var ErrorCodeTest2 = Register("v2.errors", ErrorDescriptor{ - Value: "TEST2", - Message: "test error 2", - Description: `Just a test message #2.`, - HTTPStatusCode: http.StatusNotFound, -}) - -var ErrorCodeTest3 = Register("v2.errors", ErrorDescriptor{ - Value: "TEST3", - Message: "Sorry %q isn't valid", - Description: `Just a test message #3.`, - HTTPStatusCode: http.StatusNotFound, -}) - func TestErrorsManagement(t *testing.T) { var errs Errors diff --git a/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go b/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go index 7eba362a..ad3da3ef 100644 --- a/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go +++ b/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go @@ -495,7 +495,7 @@ var routeDescriptors = []RouteDescriptor{ Methods: []MethodDescriptor{ { Method: "GET", - Description: "Fetch the manifest identified by `name` and `reference` where `reference` can be a tag or digest.", + Description: "Fetch the manifest identified by `name` and `reference` where `reference` can be a tag or digest. 
A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data.", Requests: []RequestDescriptor{ { Headers: []ParameterDescriptor{ @@ -1041,6 +1041,70 @@ var routeDescriptors = []RouteDescriptor{ deniedResponseDescriptor, }, }, + { + Name: "Mount Blob", + Description: "Mount a blob identified by the `mount` parameter from another repository.", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + contentLengthZeroHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + }, + QueryParameters: []ParameterDescriptor{ + { + Name: "mount", + Type: "query", + Format: "", + Regexp: digest.DigestRegexp, + Description: `Digest of blob to mount from the source repository.`, + }, + { + Name: "from", + Type: "query", + Format: "", + Regexp: reference.NameRegexp, + Description: `Name of the source repository.`, + }, + }, + Successes: []ResponseDescriptor{ + { + Description: "The blob has been mounted in the repository and is available at the provided location.", + StatusCode: http.StatusCreated, + Headers: []ParameterDescriptor{ + { + Name: "Location", + Type: "url", + Format: "", + }, + contentLengthZeroHeader, + dockerUploadUUIDHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Name: "Invalid Name or Digest", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + }, + }, + { + Name: "Not allowed", + Description: "Blob mount is not allowed because the registry is configured as a pull-through cache or for some other reason", + StatusCode: http.StatusMethodNotAllowed, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeUnsupported, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + }, + }, }, }, }, diff --git a/vendor/github.com/docker/distribution/registry/api/v2/routes_test.go b/vendor/github.com/docker/distribution/registry/api/v2/routes_test.go index f6379977..f632d981 100644 --- a/vendor/github.com/docker/distribution/registry/api/v2/routes_test.go +++ b/vendor/github.com/docker/distribution/registry/api/v2/routes_test.go @@ -87,14 +87,6 @@ func TestRouter(t *testing.T) { "name": "docker.com/foo/bar/baz", }, }, - { - RouteName: RouteNameBlob, - RequestURI: "/v2/foo/bar/blobs/tarsum.dev+foo:abcdef0919234", - Vars: map[string]string{ - "name": "foo/bar", - "digest": "tarsum.dev+foo:abcdef0919234", - }, - }, { RouteName: RouteNameBlob, RequestURI: "/v2/foo/bar/blobs/sha256:abcdef0919234", diff --git a/vendor/github.com/docker/distribution/registry/api/v2/urls.go b/vendor/github.com/docker/distribution/registry/api/v2/urls.go index 42974394..408c7b74 100644 --- a/vendor/github.com/docker/distribution/registry/api/v2/urls.go +++ b/vendor/github.com/docker/distribution/registry/api/v2/urls.go @@ -5,7 +5,7 @@ import ( "net/url" "strings" - "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" "github.com/gorilla/mux" ) @@ -113,10 +113,10 @@ func (ub *URLBuilder) BuildCatalogURL(values ...url.Values) (string, error) { } // BuildTagsURL constructs a url to list the tags in the named repository. 
-func (ub *URLBuilder) BuildTagsURL(name string) (string, error) { +func (ub *URLBuilder) BuildTagsURL(name reference.Named) (string, error) { route := ub.cloneRoute(RouteNameTags) - tagsURL, err := route.URL("name", name) + tagsURL, err := route.URL("name", name.Name()) if err != nil { return "", err } @@ -126,10 +126,18 @@ func (ub *URLBuilder) BuildTagsURL(name string) (string, error) { // BuildManifestURL constructs a url for the manifest identified by name and // reference. The argument reference may be either a tag or digest. -func (ub *URLBuilder) BuildManifestURL(name, reference string) (string, error) { +func (ub *URLBuilder) BuildManifestURL(ref reference.Named) (string, error) { route := ub.cloneRoute(RouteNameManifest) - manifestURL, err := route.URL("name", name, "reference", reference) + tagOrDigest := "" + switch v := ref.(type) { + case reference.Tagged: + tagOrDigest = v.Tag() + case reference.Digested: + tagOrDigest = v.Digest().String() + } + + manifestURL, err := route.URL("name", ref.Name(), "reference", tagOrDigest) if err != nil { return "", err } @@ -138,10 +146,10 @@ func (ub *URLBuilder) BuildManifestURL(name, reference string) (string, error) { } // BuildBlobURL constructs the url for the blob identified by name and dgst. -func (ub *URLBuilder) BuildBlobURL(name string, dgst digest.Digest) (string, error) { +func (ub *URLBuilder) BuildBlobURL(ref reference.Canonical) (string, error) { route := ub.cloneRoute(RouteNameBlob) - layerURL, err := route.URL("name", name, "digest", dgst.String()) + layerURL, err := route.URL("name", ref.Name(), "digest", ref.Digest().String()) if err != nil { return "", err } @@ -151,10 +159,10 @@ func (ub *URLBuilder) BuildBlobURL(name string, dgst digest.Digest) (string, err // BuildBlobUploadURL constructs a url to begin a blob upload in the // repository identified by name. -func (ub *URLBuilder) BuildBlobUploadURL(name string, values ...url.Values) (string, error) { +func (ub *URLBuilder) BuildBlobUploadURL(name reference.Named, values ...url.Values) (string, error) { route := ub.cloneRoute(RouteNameBlobUpload) - uploadURL, err := route.URL("name", name) + uploadURL, err := route.URL("name", name.Name()) if err != nil { return "", err } @@ -166,10 +174,10 @@ func (ub *URLBuilder) BuildBlobUploadURL(name string, values ...url.Values) (str // including any url values. This should generally not be used by clients, as // this url is provided by server implementations during the blob upload // process. -func (ub *URLBuilder) BuildBlobUploadChunkURL(name, uuid string, values ...url.Values) (string, error) { +func (ub *URLBuilder) BuildBlobUploadChunkURL(name reference.Named, uuid string, values ...url.Values) (string, error) { route := ub.cloneRoute(RouteNameBlobUploadChunk) - uploadURL, err := route.URL("name", name, "uuid", uuid) + uploadURL, err := route.URL("name", name.Name(), "uuid", uuid) if err != nil { return "", err } @@ -204,7 +212,9 @@ func (cr clonedRoute) URL(pairs ...string) (*url.URL, error) { routeURL.Path = routeURL.Path[1:] } - return cr.root.ResolveReference(routeURL), nil + url := cr.root.ResolveReference(routeURL) + url.Scheme = cr.root.Scheme + return url, nil } // appendValuesURL appends the parameters to the url. 
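With the URL builder methods above now taking reference values instead of raw strings, callers build a Named first and derive tagged or digest forms from it, as the updated test cases that follow also show. A hedged sketch; ub and dgst are assumed to come from the caller:

    package example

    import (
    	"github.com/docker/distribution/digest"
    	"github.com/docker/distribution/reference"
    	"github.com/docker/distribution/registry/api/v2"
    )

    func buildURLs(ub *v2.URLBuilder, dgst digest.Digest) (manifestURL, blobURL string, err error) {
    	named, err := reference.ParseNamed("foo/bar")
    	if err != nil {
    		return "", "", err
    	}

    	tagged, err := reference.WithTag(named, "latest")
    	if err != nil {
    		return "", "", err
    	}
    	manifestURL, err = ub.BuildManifestURL(tagged) // .../v2/foo/bar/manifests/latest
    	if err != nil {
    		return "", "", err
    	}

    	canonical, err := reference.WithDigest(named, dgst)
    	if err != nil {
    		return "", "", err
    	}
    	blobURL, err = ub.BuildBlobURL(canonical) // .../v2/foo/bar/blobs/<digest>
    	return manifestURL, blobURL, err
    }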
diff --git a/vendor/github.com/docker/distribution/registry/api/v2/urls_test.go b/vendor/github.com/docker/distribution/registry/api/v2/urls_test.go index 61d41547..1af1f261 100644 --- a/vendor/github.com/docker/distribution/registry/api/v2/urls_test.go +++ b/vendor/github.com/docker/distribution/registry/api/v2/urls_test.go @@ -4,6 +4,8 @@ import ( "net/http" "net/url" "testing" + + "github.com/docker/distribution/reference" ) type urlBuilderTestCase struct { @@ -13,6 +15,7 @@ type urlBuilderTestCase struct { } func makeURLBuilderTestCases(urlBuilder *URLBuilder) []urlBuilderTestCase { + fooBarRef, _ := reference.ParseNamed("foo/bar") return []urlBuilderTestCase{ { description: "test base url", @@ -23,37 +26,39 @@ func makeURLBuilderTestCases(urlBuilder *URLBuilder) []urlBuilderTestCase { description: "test tags url", expectedPath: "/v2/foo/bar/tags/list", build: func() (string, error) { - return urlBuilder.BuildTagsURL("foo/bar") + return urlBuilder.BuildTagsURL(fooBarRef) }, }, { description: "test manifest url", expectedPath: "/v2/foo/bar/manifests/tag", build: func() (string, error) { - return urlBuilder.BuildManifestURL("foo/bar", "tag") + ref, _ := reference.WithTag(fooBarRef, "tag") + return urlBuilder.BuildManifestURL(ref) }, }, { description: "build blob url", - expectedPath: "/v2/foo/bar/blobs/tarsum.v1+sha256:abcdef0123456789", + expectedPath: "/v2/foo/bar/blobs/sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5", build: func() (string, error) { - return urlBuilder.BuildBlobURL("foo/bar", "tarsum.v1+sha256:abcdef0123456789") + ref, _ := reference.WithDigest(fooBarRef, "sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5") + return urlBuilder.BuildBlobURL(ref) }, }, { description: "build blob upload url", expectedPath: "/v2/foo/bar/blobs/uploads/", build: func() (string, error) { - return urlBuilder.BuildBlobUploadURL("foo/bar") + return urlBuilder.BuildBlobUploadURL(fooBarRef) }, }, { description: "build blob upload url with digest and size", - expectedPath: "/v2/foo/bar/blobs/uploads/?digest=tarsum.v1%2Bsha256%3Aabcdef0123456789&size=10000", + expectedPath: "/v2/foo/bar/blobs/uploads/?digest=sha256%3A3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5&size=10000", build: func() (string, error) { - return urlBuilder.BuildBlobUploadURL("foo/bar", url.Values{ + return urlBuilder.BuildBlobUploadURL(fooBarRef, url.Values{ "size": []string{"10000"}, - "digest": []string{"tarsum.v1+sha256:abcdef0123456789"}, + "digest": []string{"sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5"}, }) }, }, @@ -61,16 +66,16 @@ func makeURLBuilderTestCases(urlBuilder *URLBuilder) []urlBuilderTestCase { description: "build blob upload chunk url", expectedPath: "/v2/foo/bar/blobs/uploads/uuid-part", build: func() (string, error) { - return urlBuilder.BuildBlobUploadChunkURL("foo/bar", "uuid-part") + return urlBuilder.BuildBlobUploadChunkURL(fooBarRef, "uuid-part") }, }, { description: "build blob upload chunk url with digest and size", - expectedPath: "/v2/foo/bar/blobs/uploads/uuid-part?digest=tarsum.v1%2Bsha256%3Aabcdef0123456789&size=10000", + expectedPath: "/v2/foo/bar/blobs/uploads/uuid-part?digest=sha256%3A3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5&size=10000", build: func() (string, error) { - return urlBuilder.BuildBlobUploadChunkURL("foo/bar", "uuid-part", url.Values{ + return urlBuilder.BuildBlobUploadChunkURL(fooBarRef, "uuid-part", url.Values{ "size": []string{"10000"}, - "digest": 
[]string{"tarsum.v1+sha256:abcdef0123456789"}, + "digest": []string{"sha256:3b3692957d439ac1928219a83fac91e7bf96c153725526874673ae1f2023f8d5"}, }) }, }, @@ -166,6 +171,11 @@ func TestBuilderFromRequest(t *testing.T) { request: &http.Request{URL: u, Host: u.Host}, base: "http://example.com", }, + + { + request: &http.Request{URL: u, Host: u.Host, Header: forwardedProtoHeader}, + base: "http://example.com", + }, { request: &http.Request{URL: u, Host: u.Host, Header: forwardedProtoHeader}, base: "https://example.com", @@ -197,15 +207,26 @@ func TestBuilderFromRequest(t *testing.T) { } for _, testCase := range makeURLBuilderTestCases(builder) { - url, err := testCase.build() + buildURL, err := testCase.build() if err != nil { t.Fatalf("%s: error building url: %v", testCase.description, err) } - expectedURL := tr.base + testCase.expectedPath + var expectedURL string + proto, ok := tr.request.Header["X-Forwarded-Proto"] + if !ok { + expectedURL = tr.base + testCase.expectedPath + } else { + urlBase, err := url.Parse(tr.base) + if err != nil { + t.Fatal(err) + } + urlBase.Scheme = proto[0] + expectedURL = urlBase.String() + testCase.expectedPath + } - if url != expectedURL { - t.Fatalf("%s: %q != %q", testCase.description, url, expectedURL) + if buildURL != expectedURL { + t.Fatalf("%s: %q != %q", testCase.description, buildURL, expectedURL) } } } @@ -229,6 +250,11 @@ func TestBuilderFromRequestWithPrefix(t *testing.T) { request: &http.Request{URL: u, Host: u.Host}, base: "http://example.com/prefix/", }, + + { + request: &http.Request{URL: u, Host: u.Host, Header: forwardedProtoHeader}, + base: "http://example.com/prefix/", + }, { request: &http.Request{URL: u, Host: u.Host, Header: forwardedProtoHeader}, base: "https://example.com/prefix/", @@ -238,7 +264,8 @@ func TestBuilderFromRequestWithPrefix(t *testing.T) { base: "https://subdomain.example.com/prefix/", configHost: url.URL{ Scheme: "https", - Host: "subdomain.example.com/prefix", + Host: "subdomain.example.com", + Path: "/prefix/", }, }, } @@ -252,15 +279,25 @@ func TestBuilderFromRequestWithPrefix(t *testing.T) { } for _, testCase := range makeURLBuilderTestCases(builder) { - url, err := testCase.build() + buildURL, err := testCase.build() if err != nil { t.Fatalf("%s: error building url: %v", testCase.description, err) } + var expectedURL string + proto, ok := tr.request.Header["X-Forwarded-Proto"] + if !ok { + expectedURL = tr.base[0:len(tr.base)-1] + testCase.expectedPath + } else { + urlBase, err := url.Parse(tr.base) + if err != nil { + t.Fatal(err) + } + urlBase.Scheme = proto[0] + expectedURL = urlBase.String()[0:len(urlBase.String())-1] + testCase.expectedPath + } - expectedURL := tr.base[0:len(tr.base)-1] + testCase.expectedPath - - if url != expectedURL { - t.Fatalf("%s: %q != %q", testCase.description, url, expectedURL) + if buildURL != expectedURL { + t.Fatalf("%s: %q != %q", testCase.description, buildURL, expectedURL) } } } diff --git a/vendor/github.com/docker/distribution/registry/client/auth/session.go b/vendor/github.com/docker/distribution/registry/client/auth/session.go index 6c92fc34..6b483c62 100644 --- a/vendor/github.com/docker/distribution/registry/client/auth/session.go +++ b/vendor/github.com/docker/distribution/registry/client/auth/session.go @@ -108,6 +108,8 @@ type tokenHandler struct { tokenLock sync.Mutex tokenCache string tokenExpiration time.Time + + additionalScopes map[string]struct{} } // tokenScope represents the scope at which a token will be requested. 
@@ -145,6 +147,7 @@ func newTokenHandler(transport http.RoundTripper, creds CredentialStore, c clock Scope: scope, Actions: actions, }, + additionalScopes: map[string]struct{}{}, } } @@ -160,7 +163,15 @@ func (th *tokenHandler) Scheme() string { } func (th *tokenHandler) AuthorizeRequest(req *http.Request, params map[string]string) error { - if err := th.refreshToken(params); err != nil { + var additionalScopes []string + if fromParam := req.URL.Query().Get("from"); fromParam != "" { + additionalScopes = append(additionalScopes, tokenScope{ + Resource: "repository", + Scope: fromParam, + Actions: []string{"pull"}, + }.String()) + } + if err := th.refreshToken(params, additionalScopes...); err != nil { return err } @@ -169,11 +180,18 @@ func (th *tokenHandler) AuthorizeRequest(req *http.Request, params map[string]st return nil } -func (th *tokenHandler) refreshToken(params map[string]string) error { +func (th *tokenHandler) refreshToken(params map[string]string, additionalScopes ...string) error { th.tokenLock.Lock() defer th.tokenLock.Unlock() + var addedScopes bool + for _, scope := range additionalScopes { + if _, ok := th.additionalScopes[scope]; !ok { + th.additionalScopes[scope] = struct{}{} + addedScopes = true + } + } now := th.clock.Now() - if now.After(th.tokenExpiration) { + if now.After(th.tokenExpiration) || addedScopes { tr, err := th.fetchToken(params) if err != nil { return err @@ -223,6 +241,10 @@ func (th *tokenHandler) fetchToken(params map[string]string) (token *tokenRespon reqParams.Add("scope", scopeField) } + for scope := range th.additionalScopes { + reqParams.Add("scope", scope) + } + if th.creds != nil { username, password := th.creds.Basic(realmURL) if username != "" && password != "" { @@ -240,7 +262,8 @@ func (th *tokenHandler) fetchToken(params map[string]string) (token *tokenRespon defer resp.Body.Close() if !client.SuccessStatus(resp.StatusCode) { - return nil, fmt.Errorf("token auth attempt for registry: %s request failed with status: %d %s", req.URL, resp.StatusCode, http.StatusText(resp.StatusCode)) + err := client.HandleErrorResponse(resp) + return nil, err } decoder := json.NewDecoder(resp.Body) diff --git a/vendor/github.com/docker/distribution/registry/client/blob_writer.go b/vendor/github.com/docker/distribution/registry/client/blob_writer.go index c7eee4e8..21a018dc 100644 --- a/vendor/github.com/docker/distribution/registry/client/blob_writer.go +++ b/vendor/github.com/docker/distribution/registry/client/blob_writer.go @@ -33,7 +33,7 @@ func (hbu *httpBlobUpload) handleErrorResponse(resp *http.Response) error { if resp.StatusCode == http.StatusNotFound { return distribution.ErrBlobUploadUnknown } - return handleErrorResponse(resp) + return HandleErrorResponse(resp) } func (hbu *httpBlobUpload) ReadFrom(r io.Reader) (n int64, err error) { diff --git a/vendor/github.com/docker/distribution/registry/client/errors.go b/vendor/github.com/docker/distribution/registry/client/errors.go index 7305c021..a528a865 100644 --- a/vendor/github.com/docker/distribution/registry/client/errors.go +++ b/vendor/github.com/docker/distribution/registry/client/errors.go @@ -31,13 +31,26 @@ func (e *UnexpectedHTTPResponseError) Error() string { return fmt.Sprintf("Error parsing HTTP response: %s: %q", e.ParseErr.Error(), string(e.Response)) } -func parseHTTPErrorResponse(r io.Reader) error { +func parseHTTPErrorResponse(statusCode int, r io.Reader) error { var errors errcode.Errors body, err := ioutil.ReadAll(r) if err != nil { return err } + // For backward compatibility, 
handle irregularly formatted + // messages that contain a "details" field. + var detailsErr struct { + Details string `json:"details"` + } + err = json.Unmarshal(body, &detailsErr) + if err == nil && detailsErr.Details != "" { + if statusCode == http.StatusUnauthorized { + return errcode.ErrorCodeUnauthorized.WithMessage(detailsErr.Details) + } + return errcode.ErrorCodeUnknown.WithMessage(detailsErr.Details) + } + if err := json.Unmarshal(body, &errors); err != nil { return &UnexpectedHTTPResponseError{ ParseErr: err, @@ -47,16 +60,20 @@ func parseHTTPErrorResponse(r io.Reader) error { return errors } -func handleErrorResponse(resp *http.Response) error { +// HandleErrorResponse returns error parsed from HTTP response for an +// unsuccessful HTTP response code (in the range 400 - 499 inclusive). An +// UnexpectedHTTPStatusError returned for response code outside of expected +// range. +func HandleErrorResponse(resp *http.Response) error { if resp.StatusCode == 401 { - err := parseHTTPErrorResponse(resp.Body) + err := parseHTTPErrorResponse(resp.StatusCode, resp.Body) if uErr, ok := err.(*UnexpectedHTTPResponseError); ok { return errcode.ErrorCodeUnauthorized.WithDetail(uErr.Response) } return err } if resp.StatusCode >= 400 && resp.StatusCode < 500 { - return parseHTTPErrorResponse(resp.Body) + return parseHTTPErrorResponse(resp.StatusCode, resp.Body) } return &UnexpectedHTTPStatusError{Status: resp.Status} } diff --git a/vendor/github.com/docker/distribution/registry/client/errors_test.go b/vendor/github.com/docker/distribution/registry/client/errors_test.go new file mode 100644 index 00000000..80241a5a --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/client/errors_test.go @@ -0,0 +1,89 @@ +package client + +import ( + "bytes" + "io" + "net/http" + "strings" + "testing" +) + +type nopCloser struct { + io.Reader +} + +func (nopCloser) Close() error { return nil } + +func TestHandleErrorResponse401ValidBody(t *testing.T) { + json := "{\"errors\":[{\"code\":\"UNAUTHORIZED\",\"message\":\"action requires authentication\"}]}" + response := &http.Response{ + Status: "401 Unauthorized", + StatusCode: 401, + Body: nopCloser{bytes.NewBufferString(json)}, + } + err := HandleErrorResponse(response) + + expectedMsg := "unauthorized: action requires authentication" + if !strings.Contains(err.Error(), expectedMsg) { + t.Errorf("Expected \"%s\", got: \"%s\"", expectedMsg, err.Error()) + } +} + +func TestHandleErrorResponse401WithInvalidBody(t *testing.T) { + json := "{invalid json}" + response := &http.Response{ + Status: "401 Unauthorized", + StatusCode: 401, + Body: nopCloser{bytes.NewBufferString(json)}, + } + err := HandleErrorResponse(response) + + expectedMsg := "unauthorized: authentication required" + if !strings.Contains(err.Error(), expectedMsg) { + t.Errorf("Expected \"%s\", got: \"%s\"", expectedMsg, err.Error()) + } +} + +func TestHandleErrorResponseExpectedStatusCode400ValidBody(t *testing.T) { + json := "{\"errors\":[{\"code\":\"DIGEST_INVALID\",\"message\":\"provided digest does not match\"}]}" + response := &http.Response{ + Status: "400 Bad Request", + StatusCode: 400, + Body: nopCloser{bytes.NewBufferString(json)}, + } + err := HandleErrorResponse(response) + + expectedMsg := "digest invalid: provided digest does not match" + if !strings.Contains(err.Error(), expectedMsg) { + t.Errorf("Expected \"%s\", got: \"%s\"", expectedMsg, err.Error()) + } +} + +func TestHandleErrorResponseExpectedStatusCode404InvalidBody(t *testing.T) { + json := "{invalid json}" + response 
:= &http.Response{ + Status: "404 Not Found", + StatusCode: 404, + Body: nopCloser{bytes.NewBufferString(json)}, + } + err := HandleErrorResponse(response) + + expectedMsg := "Error parsing HTTP response: invalid character 'i' looking for beginning of object key string: \"{invalid json}\"" + if !strings.Contains(err.Error(), expectedMsg) { + t.Errorf("Expected \"%s\", got: \"%s\"", expectedMsg, err.Error()) + } +} + +func TestHandleErrorResponseUnexpectedStatusCode501(t *testing.T) { + response := &http.Response{ + Status: "501 Not Implemented", + StatusCode: 501, + Body: nopCloser{bytes.NewBufferString("{\"Error Encountered\" : \"Function not implemented.\"}")}, + } + err := HandleErrorResponse(response) + + expectedMsg := "Received unexpected HTTP status: 501 Not Implemented" + if !strings.Contains(err.Error(), expectedMsg) { + t.Errorf("Expected \"%s\", got: \"%s\"", expectedMsg, err.Error()) + } +} diff --git a/vendor/github.com/docker/distribution/registry/client/repository.go b/vendor/github.com/docker/distribution/registry/client/repository.go index fc709ded..1f777add 100644 --- a/vendor/github.com/docker/distribution/registry/client/repository.go +++ b/vendor/github.com/docker/distribution/registry/client/repository.go @@ -3,6 +3,7 @@ package client import ( "bytes" "encoding/json" + "errors" "fmt" "io" "io/ioutil" @@ -14,7 +15,6 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest/schema1" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/api/v2" "github.com/docker/distribution/registry/client/transport" @@ -91,18 +91,14 @@ func (r *registry) Repositories(ctx context.Context, entries []string, last stri returnErr = io.EOF } } else { - return 0, handleErrorResponse(resp) + return 0, HandleErrorResponse(resp) } return numFilled, returnErr } // NewRepository creates a new Repository for the given repository name and base URL. 
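// The repository name is now a typed reference.Named rather than a raw
// string. A minimal caller-side sketch, with an illustrative registry URL
// and repository name (not taken from this patch):
//
//	named, err := reference.ParseNamed("registry.example.com/foo/bar")
//	if err != nil {
//		return err
//	}
//	repo, err := client.NewRepository(ctx, named, "https://registry.example.com", nil)
//	if err != nil {
//		return err
//	}
//	tags, err := repo.Tags(ctx).All(ctx)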
-func NewRepository(ctx context.Context, name, baseURL string, transport http.RoundTripper) (distribution.Repository, error) { - if _, err := reference.ParseNamed(name); err != nil { - return nil, err - } - +func NewRepository(ctx context.Context, name reference.Named, baseURL string, transport http.RoundTripper) (distribution.Repository, error) { ub, err := v2.NewURLBuilderFromString(baseURL) if err != nil { return nil, err @@ -125,21 +121,21 @@ type repository struct { client *http.Client ub *v2.URLBuilder context context.Context - name string + name reference.Named } -func (r *repository) Name() string { +func (r *repository) Name() reference.Named { return r.name } func (r *repository) Blobs(ctx context.Context) distribution.BlobStore { statter := &blobStatter{ - name: r.Name(), + name: r.name, ub: r.ub, client: r.client, } return &blobs{ - name: r.Name(), + name: r.name, ub: r.ub, client: r.client, statter: cache.NewCachedBlobStatter(memory.NewInMemoryBlobDescriptorCacheProvider(), statter), @@ -149,81 +145,166 @@ func (r *repository) Blobs(ctx context.Context) distribution.BlobStore { func (r *repository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { // todo(richardscothern): options should be sent over the wire return &manifests{ - name: r.Name(), + name: r.name, ub: r.ub, client: r.client, etags: make(map[string]string), }, nil } -func (r *repository) Signatures() distribution.SignatureService { - ms, _ := r.Manifests(r.context) - return &signatures{ - manifests: ms, +func (r *repository) Tags(ctx context.Context) distribution.TagService { + return &tags{ + client: r.client, + ub: r.ub, + context: r.context, + name: r.Name(), } } -type signatures struct { - manifests distribution.ManifestService +// tags implements remote tagging operations. +type tags struct { + client *http.Client + ub *v2.URLBuilder + context context.Context + name reference.Named } -func (s *signatures) Get(dgst digest.Digest) ([][]byte, error) { - m, err := s.manifests.Get(dgst) +// All returns all tags +func (t *tags) All(ctx context.Context) ([]string, error) { + var tags []string + + u, err := t.ub.BuildTagsURL(t.name) if err != nil { - return nil, err - } - return m.Signatures() -} - -func (s *signatures) Put(dgst digest.Digest, signatures ...[]byte) error { - panic("not implemented") -} - -type manifests struct { - name string - ub *v2.URLBuilder - client *http.Client - etags map[string]string -} - -func (ms *manifests) Tags() ([]string, error) { - u, err := ms.ub.BuildTagsURL(ms.name) - if err != nil { - return nil, err + return tags, err } - resp, err := ms.client.Get(u) + resp, err := t.client.Get(u) if err != nil { - return nil, err + return tags, err } defer resp.Body.Close() if SuccessStatus(resp.StatusCode) { b, err := ioutil.ReadAll(resp.Body) if err != nil { - return nil, err + return tags, err } tagsResponse := struct { Tags []string `json:"tags"` }{} if err := json.Unmarshal(b, &tagsResponse); err != nil { - return nil, err + return tags, err } - - return tagsResponse.Tags, nil + tags = tagsResponse.Tags + return tags, nil } - return nil, handleErrorResponse(resp) + return tags, HandleErrorResponse(resp) } -func (ms *manifests) Exists(dgst digest.Digest) (bool, error) { - // Call by Tag endpoint since the API uses the same - // URL endpoint for tags and digests. 
- return ms.ExistsByTag(dgst.String()) +func descriptorFromResponse(response *http.Response) (distribution.Descriptor, error) { + desc := distribution.Descriptor{} + headers := response.Header + + ctHeader := headers.Get("Content-Type") + if ctHeader == "" { + return distribution.Descriptor{}, errors.New("missing or empty Content-Type header") + } + desc.MediaType = ctHeader + + digestHeader := headers.Get("Docker-Content-Digest") + if digestHeader == "" { + bytes, err := ioutil.ReadAll(response.Body) + if err != nil { + return distribution.Descriptor{}, err + } + _, desc, err := distribution.UnmarshalManifest(ctHeader, bytes) + if err != nil { + return distribution.Descriptor{}, err + } + return desc, nil + } + + dgst, err := digest.ParseDigest(digestHeader) + if err != nil { + return distribution.Descriptor{}, err + } + desc.Digest = dgst + + lengthHeader := headers.Get("Content-Length") + if lengthHeader == "" { + return distribution.Descriptor{}, errors.New("missing or empty Content-Length header") + } + length, err := strconv.ParseInt(lengthHeader, 10, 64) + if err != nil { + return distribution.Descriptor{}, err + } + desc.Size = length + + return desc, nil + } -func (ms *manifests) ExistsByTag(tag string) (bool, error) { - u, err := ms.ub.BuildManifestURL(ms.name, tag) +// Get issues a HEAD request for a Manifest against its named endpoint in order +// to construct a descriptor for the tag. If the registry doesn't support HEADing +// a manifest, fallback to GET. +func (t *tags) Get(ctx context.Context, tag string) (distribution.Descriptor, error) { + ref, err := reference.WithTag(t.name, tag) + if err != nil { + return distribution.Descriptor{}, err + } + u, err := t.ub.BuildManifestURL(ref) + if err != nil { + return distribution.Descriptor{}, err + } + var attempts int + resp, err := t.client.Head(u) + +check: + if err != nil { + return distribution.Descriptor{}, err + } + + switch { + case resp.StatusCode >= 200 && resp.StatusCode < 400: + return descriptorFromResponse(resp) + case resp.StatusCode == http.StatusMethodNotAllowed: + resp, err = t.client.Get(u) + attempts++ + if attempts > 1 { + return distribution.Descriptor{}, err + } + goto check + default: + return distribution.Descriptor{}, HandleErrorResponse(resp) + } +} + +func (t *tags) Lookup(ctx context.Context, digest distribution.Descriptor) ([]string, error) { + panic("not implemented") +} + +func (t *tags) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error { + panic("not implemented") +} + +func (t *tags) Untag(ctx context.Context, tag string) error { + panic("not implemented") +} + +type manifests struct { + name reference.Named + ub *v2.URLBuilder + client *http.Client + etags map[string]string +} + +func (ms *manifests) Exists(ctx context.Context, dgst digest.Digest) (bool, error) { + ref, err := reference.WithDigest(ms.name, dgst) + if err != nil { + return false, err + } + u, err := ms.ub.BuildManifestURL(ref) if err != nil { return false, err } @@ -238,49 +319,75 @@ func (ms *manifests) ExistsByTag(tag string) (bool, error) { } else if resp.StatusCode == http.StatusNotFound { return false, nil } - return false, handleErrorResponse(resp) + return false, HandleErrorResponse(resp) } -func (ms *manifests) Get(dgst digest.Digest) (*schema1.SignedManifest, error) { - // Call by Tag endpoint since the API uses the same - // URL endpoint for tags and digests. 
- return ms.GetByTag(dgst.String()) -} - -// AddEtagToTag allows a client to supply an eTag to GetByTag which will be +// AddEtagToTag allows a client to supply an eTag to Get which will be // used for a conditional HTTP request. If the eTag matches, a nil manifest -// and nil error will be returned. etag is automatically quoted when added to -// this map. +// and ErrManifestNotModified error will be returned. etag is automatically +// quoted when added to this map. func AddEtagToTag(tag, etag string) distribution.ManifestServiceOption { - return func(ms distribution.ManifestService) error { - if ms, ok := ms.(*manifests); ok { - ms.etags[tag] = fmt.Sprintf(`"%s"`, etag) - return nil - } - return fmt.Errorf("etag options is a client-only option") - } + return etagOption{tag, etag} } -func (ms *manifests) GetByTag(tag string, options ...distribution.ManifestServiceOption) (*schema1.SignedManifest, error) { +type etagOption struct{ tag, etag string } + +func (o etagOption) Apply(ms distribution.ManifestService) error { + if ms, ok := ms.(*manifests); ok { + ms.etags[o.tag] = fmt.Sprintf(`"%s"`, o.etag) + return nil + } + return fmt.Errorf("etag options is a client-only option") +} + +func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { + var ( + digestOrTag string + ref reference.Named + err error + ) + for _, option := range options { - err := option(ms) + if opt, ok := option.(withTagOption); ok { + digestOrTag = opt.tag + ref, err = reference.WithTag(ms.name, opt.tag) + if err != nil { + return nil, err + } + } else { + err := option.Apply(ms) + if err != nil { + return nil, err + } + } + } + + if digestOrTag == "" { + digestOrTag = dgst.String() + ref, err = reference.WithDigest(ms.name, dgst) if err != nil { return nil, err } } - u, err := ms.ub.BuildManifestURL(ms.name, tag) + u, err := ms.ub.BuildManifestURL(ref) if err != nil { return nil, err } + req, err := http.NewRequest("GET", u, nil) if err != nil { return nil, err } - if _, ok := ms.etags[tag]; ok { - req.Header.Set("If-None-Match", ms.etags[tag]) + for _, t := range distribution.ManifestMediaTypes() { + req.Header.Add("Accept", t) } + + if _, ok := ms.etags[digestOrTag]; ok { + req.Header.Set("If-None-Match", ms.etags[digestOrTag]) + } + resp, err := ms.client.Do(req) if err != nil { return nil, err @@ -289,45 +396,98 @@ func (ms *manifests) GetByTag(tag string, options ...distribution.ManifestServic if resp.StatusCode == http.StatusNotModified { return nil, distribution.ErrManifestNotModified } else if SuccessStatus(resp.StatusCode) { - var sm schema1.SignedManifest - decoder := json.NewDecoder(resp.Body) + mt := resp.Header.Get("Content-Type") + body, err := ioutil.ReadAll(resp.Body) - if err := decoder.Decode(&sm); err != nil { + if err != nil { return nil, err } - return &sm, nil + m, _, err := distribution.UnmarshalManifest(mt, body) + if err != nil { + return nil, err + } + return m, nil } - return nil, handleErrorResponse(resp) + return nil, HandleErrorResponse(resp) } -func (ms *manifests) Put(m *schema1.SignedManifest) error { - manifestURL, err := ms.ub.BuildManifestURL(ms.name, m.Tag) - if err != nil { - return err +// WithTag allows a tag to be passed into Put which enables the client +// to build a correct URL. 
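// A sketch of a tagged push and a conditional pull using these client-only
// options (the tag name and variables are illustrative):
//
//	dgst, err := ms.Put(ctx, m, client.WithTag("latest"))
//	...
//	m, err := ms.Get(ctx, dgst, client.WithTag("latest"), client.AddEtagToTag("latest", dgst.String()))
//
// When the supplied etag still matches, Get returns a nil manifest and
// distribution.ErrManifestNotModified.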
+func WithTag(tag string) distribution.ManifestServiceOption { + return withTagOption{tag} +} + +type withTagOption struct{ tag string } + +func (o withTagOption) Apply(m distribution.ManifestService) error { + if _, ok := m.(*manifests); ok { + return nil + } + return fmt.Errorf("withTagOption is a client-only option") +} + +// Put puts a manifest. A tag can be specified using an options parameter which uses some shared state to hold the +// tag name in order to build the correct upload URL. This state is written and read under a lock. +func (ms *manifests) Put(ctx context.Context, m distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) { + ref := ms.name + + for _, option := range options { + if opt, ok := option.(withTagOption); ok { + var err error + ref, err = reference.WithTag(ref, opt.tag) + if err != nil { + return "", err + } + } else { + err := option.Apply(ms) + if err != nil { + return "", err + } + } } - // todo(richardscothern): do something with options here when they become applicable - - putRequest, err := http.NewRequest("PUT", manifestURL, bytes.NewReader(m.Raw)) + manifestURL, err := ms.ub.BuildManifestURL(ref) if err != nil { - return err + return "", err } + mediaType, p, err := m.Payload() + if err != nil { + return "", err + } + + putRequest, err := http.NewRequest("PUT", manifestURL, bytes.NewReader(p)) + if err != nil { + return "", err + } + + putRequest.Header.Set("Content-Type", mediaType) + resp, err := ms.client.Do(putRequest) if err != nil { - return err + return "", err } defer resp.Body.Close() if SuccessStatus(resp.StatusCode) { - // TODO(dmcgowan): make use of digest header - return nil + dgstHeader := resp.Header.Get("Docker-Content-Digest") + dgst, err := digest.ParseDigest(dgstHeader) + if err != nil { + return "", err + } + + return dgst, nil } - return handleErrorResponse(resp) + + return "", HandleErrorResponse(resp) } -func (ms *manifests) Delete(dgst digest.Digest) error { - u, err := ms.ub.BuildManifestURL(ms.name, dgst.String()) +func (ms *manifests) Delete(ctx context.Context, dgst digest.Digest) error { + ref, err := reference.WithDigest(ms.name, dgst) + if err != nil { + return err + } + u, err := ms.ub.BuildManifestURL(ref) if err != nil { return err } @@ -345,11 +505,16 @@ func (ms *manifests) Delete(dgst digest.Digest) error { if SuccessStatus(resp.StatusCode) { return nil } - return handleErrorResponse(resp) + return HandleErrorResponse(resp) } +// todo(richardscothern): Restore interface and implementation with merge of #1050 +/*func (ms *manifests) Enumerate(ctx context.Context, manifests []distribution.Manifest, last distribution.Manifest) (n int, err error) { + panic("not supported") +}*/ + type blobs struct { - name string + name reference.Named ub *v2.URLBuilder client *http.Client @@ -377,11 +542,7 @@ func (bs *blobs) Stat(ctx context.Context, dgst digest.Digest) (distribution.Des } func (bs *blobs) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { - desc, err := bs.Stat(ctx, dgst) - if err != nil { - return nil, err - } - reader, err := bs.Open(ctx, desc.Digest) + reader, err := bs.Open(ctx, dgst) if err != nil { return nil, err } @@ -391,17 +552,22 @@ func (bs *blobs) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { } func (bs *blobs) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { - stat, err := bs.statter.Stat(ctx, dgst) + ref, err := reference.WithDigest(bs.name, dgst) + if err != nil { + return nil, err + } + blobURL, err 
:= bs.ub.BuildBlobURL(ref) if err != nil { return nil, err } - blobURL, err := bs.ub.BuildBlobURL(bs.name, stat.Digest) - if err != nil { - return nil, err - } - - return transport.NewHTTPReadSeeker(bs.client, blobURL, stat.Size), nil + return transport.NewHTTPReadSeeker(bs.client, blobURL, + func(resp *http.Response) error { + if resp.StatusCode == http.StatusNotFound { + return distribution.ErrBlobUnknown + } + return HandleErrorResponse(resp) + }), nil } func (bs *blobs) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { @@ -431,8 +597,57 @@ func (bs *blobs) Put(ctx context.Context, mediaType string, p []byte) (distribut return writer.Commit(ctx, desc) } -func (bs *blobs) Create(ctx context.Context) (distribution.BlobWriter, error) { - u, err := bs.ub.BuildBlobUploadURL(bs.name) +// createOptions is a collection of blob creation modifiers relevant to general +// blob storage intended to be configured by the BlobCreateOption.Apply method. +type createOptions struct { + Mount struct { + ShouldMount bool + From reference.Canonical + } +} + +type optionFunc func(interface{}) error + +func (f optionFunc) Apply(v interface{}) error { + return f(v) +} + +// WithMountFrom returns a BlobCreateOption which designates that the blob should be +// mounted from the given canonical reference. +func WithMountFrom(ref reference.Canonical) distribution.BlobCreateOption { + return optionFunc(func(v interface{}) error { + opts, ok := v.(*createOptions) + if !ok { + return fmt.Errorf("unexpected options type: %T", v) + } + + opts.Mount.ShouldMount = true + opts.Mount.From = ref + + return nil + }) +} + +func (bs *blobs) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { + var opts createOptions + + for _, option := range options { + err := option.Apply(&opts) + if err != nil { + return nil, err + } + } + + var values []url.Values + + if opts.Mount.ShouldMount { + values = append(values, url.Values{"from": {opts.Mount.From.Name()}, "mount": {opts.Mount.From.Digest().String()}}) + } + + u, err := bs.ub.BuildBlobUploadURL(bs.name, values...) 
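	// When a mount was requested, the upload URL built above carries the
	// source as query parameters, i.e.
	// POST /v2/<name>/blobs/uploads/?mount=<digest>&from=<source repository>.
	// A caller-side sketch (variable names are illustrative):
	//
	//	canonicalRef, _ := reference.WithDigest(sourceRepo, dgst)
	//	_, err := repo.Blobs(ctx).Create(ctx, client.WithMountFrom(canonicalRef))
	//	if ebm, ok := err.(distribution.ErrBlobMounted); ok {
	//		// the registry mounted the existing blob; ebm.Descriptor describes it
	//	}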
+ if err != nil { + return nil, err + } resp, err := bs.client.Post(u, "", nil) if err != nil { @@ -440,7 +655,14 @@ func (bs *blobs) Create(ctx context.Context) (distribution.BlobWriter, error) { } defer resp.Body.Close() - if SuccessStatus(resp.StatusCode) { + switch resp.StatusCode { + case http.StatusCreated: + desc, err := bs.statter.Stat(ctx, opts.Mount.From.Digest()) + if err != nil { + return nil, err + } + return nil, distribution.ErrBlobMounted{From: opts.Mount.From, Descriptor: desc} + case http.StatusAccepted: // TODO(dmcgowan): Check for invalid UUID uuid := resp.Header.Get("Docker-Upload-UUID") location, err := sanitizeLocation(resp.Header.Get("Location"), u) @@ -455,8 +677,9 @@ func (bs *blobs) Create(ctx context.Context) (distribution.BlobWriter, error) { startedAt: time.Now(), location: location, }, nil + default: + return nil, HandleErrorResponse(resp) } - return nil, handleErrorResponse(resp) } func (bs *blobs) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { @@ -468,13 +691,17 @@ func (bs *blobs) Delete(ctx context.Context, dgst digest.Digest) error { } type blobStatter struct { - name string + name reference.Named ub *v2.URLBuilder client *http.Client } func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - u, err := bs.ub.BuildBlobURL(bs.name, dgst) + ref, err := reference.WithDigest(bs.name, dgst) + if err != nil { + return distribution.Descriptor{}, err + } + u, err := bs.ub.BuildBlobURL(ref) if err != nil { return distribution.Descriptor{}, err } @@ -487,6 +714,10 @@ func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distributi if SuccessStatus(resp.StatusCode) { lengthHeader := resp.Header.Get("Content-Length") + if lengthHeader == "" { + return distribution.Descriptor{}, fmt.Errorf("missing content-length header for request: %s", u) + } + length, err := strconv.ParseInt(lengthHeader, 10, 64) if err != nil { return distribution.Descriptor{}, fmt.Errorf("error parsing content-length: %v", err) @@ -500,7 +731,7 @@ func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distributi } else if resp.StatusCode == http.StatusNotFound { return distribution.Descriptor{}, distribution.ErrBlobUnknown } - return distribution.Descriptor{}, handleErrorResponse(resp) + return distribution.Descriptor{}, HandleErrorResponse(resp) } func buildCatalogValues(maxEntries int, last string) url.Values { @@ -518,7 +749,11 @@ func buildCatalogValues(maxEntries int, last string) url.Values { } func (bs *blobStatter) Clear(ctx context.Context, dgst digest.Digest) error { - blobURL, err := bs.ub.BuildBlobURL(bs.name, dgst) + ref, err := reference.WithDigest(bs.name, dgst) + if err != nil { + return err + } + blobURL, err := bs.ub.BuildBlobURL(ref) if err != nil { return err } @@ -537,7 +772,7 @@ func (bs *blobStatter) Clear(ctx context.Context, dgst digest.Digest) error { if SuccessStatus(resp.StatusCode) { return nil } - return handleErrorResponse(resp) + return HandleErrorResponse(resp) } func (bs *blobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { diff --git a/vendor/github.com/docker/distribution/registry/client/repository_test.go b/vendor/github.com/docker/distribution/registry/client/repository_test.go index 1e6eb25f..b7b782c7 100644 --- a/vendor/github.com/docker/distribution/registry/client/repository_test.go +++ b/vendor/github.com/docker/distribution/registry/client/repository_test.go @@ -18,6 +18,7 @@ import ( 
"github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/testutil" "github.com/docker/distribution/uuid" @@ -38,16 +39,10 @@ func newRandomBlob(size int) (digest.Digest, []byte) { panic("unable to read enough bytes") } - dgst, err := digest.FromBytes(b) - if err != nil { - panic(err) - } - - return dgst, b + return digest.FromBytes(b), b } func addTestFetch(repo string, dgst digest.Digest, content []byte, m *testutil.RequestResponseMap) { - *m = append(*m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "GET", @@ -103,11 +98,11 @@ func addTestCatalog(route string, content []byte, link string, m *testutil.Reque func TestBlobDelete(t *testing.T) { dgst, _ := newRandomBlob(1024) var m testutil.RequestResponseMap - repo := "test.example.com/repo1" + repo, _ := reference.ParseNamed("test.example.com/repo1") m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "DELETE", - Route: "/v2/" + repo + "/blobs/" + dgst.String(), + Route: "/v2/" + repo.Name() + "/blobs/" + dgst.String(), }, Response: testutil.Response{ StatusCode: http.StatusAccepted, @@ -142,7 +137,8 @@ func TestBlobFetch(t *testing.T) { defer c() ctx := context.Background() - r, err := NewRepository(ctx, "test.example.com/repo1", e, nil) + repo, _ := reference.ParseNamed("test.example.com/repo1") + r, err := NewRepository(ctx, repo, e, nil) if err != nil { t.Fatal(err) } @@ -159,6 +155,59 @@ func TestBlobFetch(t *testing.T) { // TODO(dmcgowan): Test for unknown blob case } +func TestBlobExistsNoContentLength(t *testing.T) { + var m testutil.RequestResponseMap + + repo, _ := reference.ParseNamed("biff") + dgst, content := newRandomBlob(1024) + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "GET", + Route: "/v2/" + repo.Name() + "/blobs/" + dgst.String(), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: content, + Headers: http.Header(map[string][]string{ + // "Content-Length": {fmt.Sprint(len(content))}, + "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + }), + }, + }) + + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "HEAD", + Route: "/v2/" + repo.Name() + "/blobs/" + dgst.String(), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Headers: http.Header(map[string][]string{ + // "Content-Length": {fmt.Sprint(len(content))}, + "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + }), + }, + }) + e, c := testServer(m) + defer c() + + ctx := context.Background() + r, err := NewRepository(ctx, repo, e, nil) + if err != nil { + t.Fatal(err) + } + l := r.Blobs(ctx) + + _, err = l.Stat(ctx, dgst) + if err == nil { + t.Fatal(err) + } + if !strings.Contains(err.Error(), "missing content-length heade") { + t.Fatalf("Expected missing content-length error message") + } + +} + func TestBlobExists(t *testing.T) { d1, b1 := newRandomBlob(1024) var m testutil.RequestResponseMap @@ -168,7 +217,8 @@ func TestBlobExists(t *testing.T) { defer c() ctx := context.Background() - r, err := NewRepository(ctx, "test.example.com/repo1", e, nil) + repo, _ := reference.ParseNamed("test.example.com/repo1") + r, err := NewRepository(ctx, repo, e, nil) if err != nil { t.Fatal(err) } @@ -199,18 +249,18 @@ func TestBlobUploadChunked(t *testing.T) { 
b1[512:513], b1[513:1024], } - repo := "test.example.com/uploadrepo" + repo, _ := reference.ParseNamed("test.example.com/uploadrepo") uuids := []string{uuid.Generate().String()} m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "POST", - Route: "/v2/" + repo + "/blobs/uploads/", + Route: "/v2/" + repo.Name() + "/blobs/uploads/", }, Response: testutil.Response{ StatusCode: http.StatusAccepted, Headers: http.Header(map[string][]string{ "Content-Length": {"0"}, - "Location": {"/v2/" + repo + "/blobs/uploads/" + uuids[0]}, + "Location": {"/v2/" + repo.Name() + "/blobs/uploads/" + uuids[0]}, "Docker-Upload-UUID": {uuids[0]}, "Range": {"0-0"}, }), @@ -223,14 +273,14 @@ func TestBlobUploadChunked(t *testing.T) { m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "PATCH", - Route: "/v2/" + repo + "/blobs/uploads/" + uuids[i], + Route: "/v2/" + repo.Name() + "/blobs/uploads/" + uuids[i], Body: chunk, }, Response: testutil.Response{ StatusCode: http.StatusAccepted, Headers: http.Header(map[string][]string{ "Content-Length": {"0"}, - "Location": {"/v2/" + repo + "/blobs/uploads/" + uuids[i+1]}, + "Location": {"/v2/" + repo.Name() + "/blobs/uploads/" + uuids[i+1]}, "Docker-Upload-UUID": {uuids[i+1]}, "Range": {fmt.Sprintf("%d-%d", offset, newOffset-1)}, }), @@ -241,7 +291,7 @@ func TestBlobUploadChunked(t *testing.T) { m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "PUT", - Route: "/v2/" + repo + "/blobs/uploads/" + uuids[len(uuids)-1], + Route: "/v2/" + repo.Name() + "/blobs/uploads/" + uuids[len(uuids)-1], QueryParams: map[string][]string{ "digest": {dgst.String()}, }, @@ -258,7 +308,7 @@ func TestBlobUploadChunked(t *testing.T) { m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "HEAD", - Route: "/v2/" + repo + "/blobs/" + dgst.String(), + Route: "/v2/" + repo.Name() + "/blobs/" + dgst.String(), }, Response: testutil.Response{ StatusCode: http.StatusOK, @@ -314,18 +364,18 @@ func TestBlobUploadChunked(t *testing.T) { func TestBlobUploadMonolithic(t *testing.T) { dgst, b1 := newRandomBlob(1024) var m testutil.RequestResponseMap - repo := "test.example.com/uploadrepo" + repo, _ := reference.ParseNamed("test.example.com/uploadrepo") uploadID := uuid.Generate().String() m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "POST", - Route: "/v2/" + repo + "/blobs/uploads/", + Route: "/v2/" + repo.Name() + "/blobs/uploads/", }, Response: testutil.Response{ StatusCode: http.StatusAccepted, Headers: http.Header(map[string][]string{ "Content-Length": {"0"}, - "Location": {"/v2/" + repo + "/blobs/uploads/" + uploadID}, + "Location": {"/v2/" + repo.Name() + "/blobs/uploads/" + uploadID}, "Docker-Upload-UUID": {uploadID}, "Range": {"0-0"}, }), @@ -334,13 +384,13 @@ func TestBlobUploadMonolithic(t *testing.T) { m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "PATCH", - Route: "/v2/" + repo + "/blobs/uploads/" + uploadID, + Route: "/v2/" + repo.Name() + "/blobs/uploads/" + uploadID, Body: b1, }, Response: testutil.Response{ StatusCode: http.StatusAccepted, Headers: http.Header(map[string][]string{ - "Location": {"/v2/" + repo + "/blobs/uploads/" + uploadID}, + "Location": {"/v2/" + repo.Name() + "/blobs/uploads/" + uploadID}, "Docker-Upload-UUID": {uploadID}, "Content-Length": {"0"}, "Docker-Content-Digest": {dgst.String()}, @@ -351,7 +401,7 @@ func TestBlobUploadMonolithic(t *testing.T) { m = append(m, 
testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "PUT", - Route: "/v2/" + repo + "/blobs/uploads/" + uploadID, + Route: "/v2/" + repo.Name() + "/blobs/uploads/" + uploadID, QueryParams: map[string][]string{ "digest": {dgst.String()}, }, @@ -368,7 +418,7 @@ func TestBlobUploadMonolithic(t *testing.T) { m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "HEAD", - Route: "/v2/" + repo + "/blobs/" + dgst.String(), + Route: "/v2/" + repo.Name() + "/blobs/" + dgst.String(), }, Response: testutil.Response{ StatusCode: http.StatusOK, @@ -419,7 +469,72 @@ func TestBlobUploadMonolithic(t *testing.T) { } } -func newRandomSchemaV1Manifest(name, tag string, blobCount int) (*schema1.SignedManifest, digest.Digest, []byte) { +func TestBlobMount(t *testing.T) { + dgst, content := newRandomBlob(1024) + var m testutil.RequestResponseMap + repo, _ := reference.ParseNamed("test.example.com/uploadrepo") + + sourceRepo, _ := reference.ParseNamed("test.example.com/sourcerepo") + canonicalRef, _ := reference.WithDigest(sourceRepo, dgst) + + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "POST", + Route: "/v2/" + repo.Name() + "/blobs/uploads/", + QueryParams: map[string][]string{"from": {sourceRepo.Name()}, "mount": {dgst.String()}}, + }, + Response: testutil.Response{ + StatusCode: http.StatusCreated, + Headers: http.Header(map[string][]string{ + "Content-Length": {"0"}, + "Location": {"/v2/" + repo.Name() + "/blobs/" + dgst.String()}, + "Docker-Content-Digest": {dgst.String()}, + }), + }, + }) + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "HEAD", + Route: "/v2/" + repo.Name() + "/blobs/" + dgst.String(), + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Headers: http.Header(map[string][]string{ + "Content-Length": {fmt.Sprint(len(content))}, + "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + }), + }, + }) + + e, c := testServer(m) + defer c() + + ctx := context.Background() + r, err := NewRepository(ctx, repo, e, nil) + if err != nil { + t.Fatal(err) + } + + l := r.Blobs(ctx) + + bw, err := l.Create(ctx, WithMountFrom(canonicalRef)) + if bw != nil { + t.Fatalf("Expected blob writer to be nil, was %v", bw) + } + + if ebm, ok := err.(distribution.ErrBlobMounted); ok { + if ebm.From.Digest() != dgst { + t.Fatalf("Unexpected digest: %s, expected %s", ebm.From.Digest(), dgst) + } + if ebm.From.Name() != sourceRepo.Name() { + t.Fatalf("Unexpected from: %s, expected %s", ebm.From.Name(), sourceRepo) + } + } else { + t.Fatalf("Unexpected error: %v, expected an ErrBlobMounted", err) + } +} + +func newRandomSchemaV1Manifest(name reference.Named, tag string, blobCount int) (*schema1.SignedManifest, digest.Digest, []byte) { blobs := make([]schema1.FSLayer, blobCount) history := make([]schema1.History, blobCount) @@ -431,7 +546,7 @@ func newRandomSchemaV1Manifest(name, tag string, blobCount int) (*schema1.Signed } m := schema1.Manifest{ - Name: name, + Name: name.String(), Tag: tag, Architecture: "x86", FSLayers: blobs, @@ -451,24 +566,14 @@ func newRandomSchemaV1Manifest(name, tag string, blobCount int) (*schema1.Signed panic(err) } - p, err := sm.Payload() - if err != nil { - panic(err) - } - - dgst, err := digest.FromBytes(p) - if err != nil { - panic(err) - } - - return sm, dgst, p + return sm, digest.FromBytes(sm.Canonical), sm.Canonical } -func addTestManifestWithEtag(repo, reference string, content []byte, m *testutil.RequestResponseMap, dgst 
string) { - actualDigest, _ := digest.FromBytes(content) +func addTestManifestWithEtag(repo reference.Named, reference string, content []byte, m *testutil.RequestResponseMap, dgst string) { + actualDigest := digest.FromBytes(content) getReqWithEtag := testutil.Request{ Method: "GET", - Route: "/v2/" + repo + "/manifests/" + reference, + Route: "/v2/" + repo.Name() + "/manifests/" + reference, Headers: http.Header(map[string][]string{ "If-None-Match": {fmt.Sprintf(`"%s"`, dgst)}, }), @@ -482,6 +587,7 @@ func addTestManifestWithEtag(repo, reference string, content []byte, m *testutil Headers: http.Header(map[string][]string{ "Content-Length": {"0"}, "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + "Content-Type": {schema1.MediaTypeSignedManifest}, }), } } else { @@ -491,6 +597,7 @@ func addTestManifestWithEtag(repo, reference string, content []byte, m *testutil Headers: http.Header(map[string][]string{ "Content-Length": {fmt.Sprint(len(content))}, "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + "Content-Type": {schema1.MediaTypeSignedManifest}, }), } @@ -498,11 +605,11 @@ func addTestManifestWithEtag(repo, reference string, content []byte, m *testutil *m = append(*m, testutil.RequestResponseMapping{Request: getReqWithEtag, Response: getRespWithEtag}) } -func addTestManifest(repo, reference string, content []byte, m *testutil.RequestResponseMap) { +func addTestManifest(repo reference.Named, reference string, mediatype string, content []byte, m *testutil.RequestResponseMap) { *m = append(*m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "GET", - Route: "/v2/" + repo + "/manifests/" + reference, + Route: "/v2/" + repo.Name() + "/manifests/" + reference, }, Response: testutil.Response{ StatusCode: http.StatusOK, @@ -510,19 +617,21 @@ func addTestManifest(repo, reference string, content []byte, m *testutil.Request Headers: http.Header(map[string][]string{ "Content-Length": {fmt.Sprint(len(content))}, "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + "Content-Type": {mediatype}, }), }, }) *m = append(*m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "HEAD", - Route: "/v2/" + repo + "/manifests/" + reference, + Route: "/v2/" + repo.Name() + "/manifests/" + reference, }, Response: testutil.Response{ StatusCode: http.StatusOK, Headers: http.Header(map[string][]string{ "Content-Length": {fmt.Sprint(len(content))}, "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + "Content-Type": {mediatype}, }), }, }) @@ -555,12 +664,18 @@ func checkEqualManifest(m1, m2 *schema1.SignedManifest) error { return nil } -func TestManifestFetch(t *testing.T) { +func TestV1ManifestFetch(t *testing.T) { ctx := context.Background() - repo := "test.example.com/repo" + repo, _ := reference.ParseNamed("test.example.com/repo") m1, dgst, _ := newRandomSchemaV1Manifest(repo, "latest", 6) var m testutil.RequestResponseMap - addTestManifest(repo, dgst.String(), m1.Raw, &m) + _, pl, err := m1.Payload() + if err != nil { + t.Fatal(err) + } + addTestManifest(repo, dgst.String(), schema1.MediaTypeSignedManifest, pl, &m) + addTestManifest(repo, "latest", schema1.MediaTypeSignedManifest, pl, &m) + addTestManifest(repo, "badcontenttype", "text/html", pl, &m) e, c := testServer(m) defer c() @@ -574,7 +689,7 @@ func TestManifestFetch(t *testing.T) { t.Fatal(err) } - ok, err := ms.Exists(dgst) + ok, err := ms.Exists(ctx, dgst) if err != nil { t.Fatal(err) } @@ -582,17 +697,48 @@ func TestManifestFetch(t 
*testing.T) { t.Fatal("Manifest does not exist") } - manifest, err := ms.Get(dgst) + manifest, err := ms.Get(ctx, dgst) if err != nil { t.Fatal(err) } - if err := checkEqualManifest(manifest, m1); err != nil { + v1manifest, ok := manifest.(*schema1.SignedManifest) + if !ok { + t.Fatalf("Unexpected manifest type from Get: %T", manifest) + } + + if err := checkEqualManifest(v1manifest, m1); err != nil { + t.Fatal(err) + } + + manifest, err = ms.Get(ctx, dgst, WithTag("latest")) + if err != nil { + t.Fatal(err) + } + v1manifest, ok = manifest.(*schema1.SignedManifest) + if !ok { + t.Fatalf("Unexpected manifest type from Get: %T", manifest) + } + + if err = checkEqualManifest(v1manifest, m1); err != nil { + t.Fatal(err) + } + + manifest, err = ms.Get(ctx, dgst, WithTag("badcontenttype")) + if err != nil { + t.Fatal(err) + } + v1manifest, ok = manifest.(*schema1.SignedManifest) + if !ok { + t.Fatalf("Unexpected manifest type from Get: %T", manifest) + } + + if err = checkEqualManifest(v1manifest, m1); err != nil { t.Fatal(err) } } func TestManifestFetchWithEtag(t *testing.T) { - repo := "test.example.com/repo/by/tag" + repo, _ := reference.ParseNamed("test.example.com/repo/by/tag") _, d1, p1 := newRandomSchemaV1Manifest(repo, "latest", 6) var m testutil.RequestResponseMap addTestManifestWithEtag(repo, "latest", p1, &m, d1.String()) @@ -600,31 +746,36 @@ func TestManifestFetchWithEtag(t *testing.T) { e, c := testServer(m) defer c() - r, err := NewRepository(context.Background(), repo, e, nil) + ctx := context.Background() + r, err := NewRepository(ctx, repo, e, nil) if err != nil { t.Fatal(err) } - ctx := context.Background() + ms, err := r.Manifests(ctx) if err != nil { t.Fatal(err) } - _, err = ms.GetByTag("latest", AddEtagToTag("latest", d1.String())) + clientManifestService, ok := ms.(*manifests) + if !ok { + panic("wrong type for client manifest service") + } + _, err = clientManifestService.Get(ctx, d1, WithTag("latest"), AddEtagToTag("latest", d1.String())) if err != distribution.ErrManifestNotModified { t.Fatal(err) } } func TestManifestDelete(t *testing.T) { - repo := "test.example.com/repo/delete" + repo, _ := reference.ParseNamed("test.example.com/repo/delete") _, dgst1, _ := newRandomSchemaV1Manifest(repo, "latest", 6) _, dgst2, _ := newRandomSchemaV1Manifest(repo, "latest", 6) var m testutil.RequestResponseMap m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "DELETE", - Route: "/v2/" + repo + "/manifests/" + dgst1.String(), + Route: "/v2/" + repo.Name() + "/manifests/" + dgst1.String(), }, Response: testutil.Response{ StatusCode: http.StatusAccepted, @@ -647,24 +798,29 @@ func TestManifestDelete(t *testing.T) { t.Fatal(err) } - if err := ms.Delete(dgst1); err != nil { + if err := ms.Delete(ctx, dgst1); err != nil { t.Fatal(err) } - if err := ms.Delete(dgst2); err == nil { + if err := ms.Delete(ctx, dgst2); err == nil { t.Fatal("Expected error deleting unknown manifest") } // TODO(dmcgowan): Check for specific unknown error } func TestManifestPut(t *testing.T) { - repo := "test.example.com/repo/delete" + repo, _ := reference.ParseNamed("test.example.com/repo/delete") m1, dgst, _ := newRandomSchemaV1Manifest(repo, "other", 6) + + _, payload, err := m1.Payload() + if err != nil { + t.Fatal(err) + } var m testutil.RequestResponseMap m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "PUT", - Route: "/v2/" + repo + "/manifests/other", - Body: m1.Raw, + Route: "/v2/" + repo.Name() + "/manifests/other", + Body: payload, }, 
Response: testutil.Response{ StatusCode: http.StatusAccepted, @@ -688,7 +844,7 @@ func TestManifestPut(t *testing.T) { t.Fatal(err) } - if err := ms.Put(m1); err != nil { + if _, err := ms.Put(ctx, m1, WithTag(m1.Tag)); err != nil { t.Fatal(err) } @@ -696,7 +852,7 @@ func TestManifestPut(t *testing.T) { } func TestManifestTags(t *testing.T) { - repo := "test.example.com/repo/tags/list" + repo, _ := reference.ParseNamed("test.example.com/repo/tags/list") tagsList := []byte(strings.TrimSpace(` { "name": "test.example.com/repo/tags/list", @@ -708,21 +864,22 @@ func TestManifestTags(t *testing.T) { } `)) var m testutil.RequestResponseMap - m = append(m, testutil.RequestResponseMapping{ - Request: testutil.Request{ - Method: "GET", - Route: "/v2/" + repo + "/tags/list", - }, - Response: testutil.Response{ - StatusCode: http.StatusOK, - Body: tagsList, - Headers: http.Header(map[string][]string{ - "Content-Length": {fmt.Sprint(len(tagsList))}, - "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, - }), - }, - }) - + for i := 0; i < 3; i++ { + m = append(m, testutil.RequestResponseMapping{ + Request: testutil.Request{ + Method: "GET", + Route: "/v2/" + repo.Name() + "/tags/list", + }, + Response: testutil.Response{ + StatusCode: http.StatusOK, + Body: tagsList, + Headers: http.Header(map[string][]string{ + "Content-Length": {fmt.Sprint(len(tagsList))}, + "Last-Modified": {time.Now().Add(-1 * time.Second).Format(time.ANSIC)}, + }), + }, + }) + } e, c := testServer(m) defer c() @@ -730,34 +887,41 @@ func TestManifestTags(t *testing.T) { if err != nil { t.Fatal(err) } + ctx := context.Background() - ms, err := r.Manifests(ctx) + tagService := r.Tags(ctx) + + tags, err := tagService.All(ctx) if err != nil { t.Fatal(err) } - - tags, err := ms.Tags() - if err != nil { - t.Fatal(err) - } - if len(tags) != 3 { t.Fatalf("Wrong number of tags returned: %d, expected 3", len(tags)) } - // TODO(dmcgowan): Check array + expected := map[string]struct{}{ + "tag1": {}, + "tag2": {}, + "funtag": {}, + } + for _, t := range tags { + delete(expected, t) + } + if len(expected) != 0 { + t.Fatalf("unexpected tags returned: %v", expected) + } // TODO(dmcgowan): Check for error cases } func TestManifestUnauthorized(t *testing.T) { - repo := "test.example.com/repo" + repo, _ := reference.ParseNamed("test.example.com/repo") _, dgst, _ := newRandomSchemaV1Manifest(repo, "latest", 6) var m testutil.RequestResponseMap m = append(m, testutil.RequestResponseMapping{ Request: testutil.Request{ Method: "GET", - Route: "/v2/" + repo + "/manifests/" + dgst.String(), + Route: "/v2/" + repo.Name() + "/manifests/" + dgst.String(), }, Response: testutil.Response{ StatusCode: http.StatusUnauthorized, @@ -778,7 +942,7 @@ func TestManifestUnauthorized(t *testing.T) { t.Fatal(err) } - _, err = ms.Get(dgst) + _, err = ms.Get(ctx, dgst) if err == nil { t.Fatal("Expected error fetching manifest") } diff --git a/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go b/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go index b2e74ddb..b27b6c23 100644 --- a/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go +++ b/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go @@ -2,11 +2,9 @@ package transport import ( "bufio" - "bytes" "errors" "fmt" "io" - "io/ioutil" "net/http" "os" ) @@ -21,11 +19,11 @@ type ReadSeekCloser interface { // request. 
When seeking and starting a read from a non-zero offset // the a "Range" header will be added which sets the offset. // TODO(dmcgowan): Move this into a separate utility package -func NewHTTPReadSeeker(client *http.Client, url string, size int64) ReadSeekCloser { +func NewHTTPReadSeeker(client *http.Client, url string, errorHandler func(*http.Response) error) ReadSeekCloser { return &httpReadSeeker{ - client: client, - url: url, - size: size, + client: client, + url: url, + errorHandler: errorHandler, } } @@ -33,12 +31,26 @@ type httpReadSeeker struct { client *http.Client url string + // errorHandler creates an error from an unsuccessful HTTP response. + // This allows the error to be created with the HTTP response body + // without leaking the body through a returned error. + errorHandler func(*http.Response) error + size int64 - rc io.ReadCloser // remote read closer - brd *bufio.Reader // internal buffered io - offset int64 - err error + // rc is the remote read closer. + rc io.ReadCloser + // brd is a buffer for internal buffered io. + brd *bufio.Reader + // readerOffset tracks the offset as of the last read. + readerOffset int64 + // seekOffset allows Seek to override the offset. Seek changes + // seekOffset instead of changing readOffset directly so that + // connection resets can be delayed and possibly avoided if the + // seek is undone (i.e. seeking to the end and then back to the + // beginning). + seekOffset int64 + err error } func (hrs *httpReadSeeker) Read(p []byte) (n int, err error) { @@ -46,16 +58,29 @@ func (hrs *httpReadSeeker) Read(p []byte) (n int, err error) { return 0, hrs.err } + // If we seeked to a different position, we need to reset the + // connection. This logic is here instead of Seek so that if + // a seek is undone before the next read, the connection doesn't + // need to be closed and reopened. A common example of this is + // seeking to the end to determine the length, and then seeking + // back to the original position. + if hrs.readerOffset != hrs.seekOffset { + hrs.reset() + } + + hrs.readerOffset = hrs.seekOffset + rd, err := hrs.reader() if err != nil { return 0, err } n, err = rd.Read(p) - hrs.offset += int64(n) + hrs.seekOffset += int64(n) + hrs.readerOffset += int64(n) // Simulate io.EOF error if we reach filesize. - if err == nil && hrs.offset >= hrs.size { + if err == nil && hrs.size >= 0 && hrs.readerOffset >= hrs.size { err = io.EOF } @@ -67,13 +92,20 @@ func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) { return 0, hrs.err } - var err error - newOffset := hrs.offset + _, err := hrs.reader() + if err != nil { + return 0, err + } + + newOffset := hrs.seekOffset switch whence { case os.SEEK_CUR: newOffset += int64(offset) case os.SEEK_END: + if hrs.size < 0 { + return 0, errors.New("content length not known") + } newOffset = hrs.size + int64(offset) case os.SEEK_SET: newOffset = int64(offset) @@ -82,15 +114,10 @@ func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) { if newOffset < 0 { err = errors.New("cannot seek to negative position") } else { - if hrs.offset != newOffset { - hrs.reset() - } - - // No problems, set the offset. - hrs.offset = newOffset + hrs.seekOffset = newOffset } - return hrs.offset, err + return hrs.seekOffset, err } func (hrs *httpReadSeeker) Close() error { @@ -130,17 +157,12 @@ func (hrs *httpReadSeeker) reader() (io.Reader, error) { return hrs.brd, nil } - // If the offset is great than or equal to size, return a empty, noop reader. 
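	// The early return being dropped here is no longer possible: the size is
	// now learned lazily from the Content-Length of the first response (and
	// left at -1 when unknown), so Read simulates io.EOF once readerOffset
	// reaches a known size instead.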
- if hrs.offset >= hrs.size { - return ioutil.NopCloser(bytes.NewReader([]byte{})), nil - } - req, err := http.NewRequest("GET", hrs.url, nil) if err != nil { return nil, err } - if hrs.offset > 0 { + if hrs.readerOffset > 0 { // TODO(stevvooe): Get this working correctly. // If we are at different offset, issue a range request from there. @@ -158,8 +180,16 @@ func (hrs *httpReadSeeker) reader() (io.Reader, error) { // import if resp.StatusCode >= 200 && resp.StatusCode <= 399 { hrs.rc = resp.Body + if resp.StatusCode == http.StatusOK { + hrs.size = resp.ContentLength + } else { + hrs.size = -1 + } } else { defer resp.Body.Close() + if hrs.errorHandler != nil { + return nil, hrs.errorHandler(resp) + } return nil, fmt.Errorf("unexpected status resolving reader: %v", resp.Status) } diff --git a/vendor/github.com/docker/distribution/registry/storage/blob_test.go b/vendor/github.com/docker/distribution/registry/storage/blob_test.go index c84c7432..246648b0 100644 --- a/vendor/github.com/docker/distribution/registry/storage/blob_test.go +++ b/vendor/github.com/docker/distribution/registry/storage/blob_test.go @@ -12,6 +12,7 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/storage/cache/memory" "github.com/docker/distribution/registry/storage/driver/inmemory" "github.com/docker/distribution/testutil" @@ -20,18 +21,13 @@ import ( // TestSimpleBlobUpload covers the blob upload process, exercising common // error paths that might be seen during an upload. func TestSimpleBlobUpload(t *testing.T) { - randomDataReader, tarSumStr, err := testutil.CreateRandomTarFile() + randomDataReader, dgst, err := testutil.CreateRandomTarFile() if err != nil { t.Fatalf("error creating random reader: %v", err) } - dgst := digest.Digest(tarSumStr) - if err != nil { - t.Fatalf("error allocating upload store: %v", err) - } - ctx := context.Background() - imageName := "foo/bar" + imageName, _ := reference.ParseNamed("foo/bar") driver := inmemory.New() registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect) if err != nil { @@ -176,10 +172,7 @@ func TestSimpleBlobUpload(t *testing.T) { if err != nil { t.Fatalf("Error reading all of blob %s", err.Error()) } - expectedDigest, err := digest.FromBytes(randomBlob) - if err != nil { - t.Fatalf("Error getting digest from bytes: %s", err) - } + expectedDigest := digest.FromBytes(randomBlob) simpleUpload(t, bs, randomBlob, expectedDigest) d, err = bs.Stat(ctx, expectedDigest) @@ -216,7 +209,7 @@ func TestSimpleBlobUpload(t *testing.T) { // other tests. func TestSimpleBlobRead(t *testing.T) { ctx := context.Background() - imageName := "foo/bar" + imageName, _ := reference.ParseNamed("foo/bar") driver := inmemory.New() registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect) if err != nil { @@ -228,13 +221,11 @@ func TestSimpleBlobRead(t *testing.T) { } bs := repository.Blobs(ctx) - randomLayerReader, tarSumStr, err := testutil.CreateRandomTarFile() // TODO(stevvooe): Consider using just a random string. + randomLayerReader, dgst, err := testutil.CreateRandomTarFile() // TODO(stevvooe): Consider using just a random string. 
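	// CreateRandomTarFile now returns the digest.Digest directly, so the old
	// tarSumStr string and the digest.Digest(tarSumStr) conversion are gone.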
if err != nil { t.Fatalf("error creating random data: %v", err) } - dgst := digest.Digest(tarSumStr) - // Test for existence. desc, err := bs.Stat(ctx, dgst) if err != distribution.ErrBlobUnknown { @@ -320,10 +311,168 @@ func TestSimpleBlobRead(t *testing.T) { } } +// TestBlobMount covers the blob mount process, exercising common +// error paths that might be seen during a mount. +func TestBlobMount(t *testing.T) { + randomDataReader, dgst, err := testutil.CreateRandomTarFile() + if err != nil { + t.Fatalf("error creating random reader: %v", err) + } + + ctx := context.Background() + imageName, _ := reference.ParseNamed("foo/bar") + sourceImageName, _ := reference.ParseNamed("foo/source") + driver := inmemory.New() + registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect) + if err != nil { + t.Fatalf("error creating registry: %v", err) + } + + repository, err := registry.Repository(ctx, imageName) + if err != nil { + t.Fatalf("unexpected error getting repo: %v", err) + } + sourceRepository, err := registry.Repository(ctx, sourceImageName) + if err != nil { + t.Fatalf("unexpected error getting repo: %v", err) + } + + sbs := sourceRepository.Blobs(ctx) + + blobUpload, err := sbs.Create(ctx) + + if err != nil { + t.Fatalf("unexpected error starting layer upload: %s", err) + } + + // Get the size of our random tarfile + randomDataSize, err := seekerSize(randomDataReader) + if err != nil { + t.Fatalf("error getting seeker size of random data: %v", err) + } + + nn, err := io.Copy(blobUpload, randomDataReader) + if err != nil { + t.Fatalf("unexpected error uploading layer data: %v", err) + } + + desc, err := blobUpload.Commit(ctx, distribution.Descriptor{Digest: dgst}) + if err != nil { + t.Fatalf("unexpected error finishing layer upload: %v", err) + } + + // Test for existence. + statDesc, err := sbs.Stat(ctx, desc.Digest) + if err != nil { + t.Fatalf("unexpected error checking for existence: %v, %#v", err, sbs) + } + + if statDesc != desc { + t.Fatalf("descriptors not equal: %v != %v", statDesc, desc) + } + + bs := repository.Blobs(ctx) + // Test destination for existence. + statDesc, err = bs.Stat(ctx, desc.Digest) + if err == nil { + t.Fatalf("unexpected non-error stating unmounted blob: %v", desc) + } + + canonicalRef, err := reference.WithDigest(sourceRepository.Name(), desc.Digest) + if err != nil { + t.Fatal(err) + } + + bw, err := bs.Create(ctx, WithMountFrom(canonicalRef)) + if bw != nil { + t.Fatal("unexpected blobwriter returned from Create call, should mount instead") + } + + ebm, ok := err.(distribution.ErrBlobMounted) + if !ok { + t.Fatalf("unexpected error mounting layer: %v", err) + } + + if ebm.Descriptor != desc { + t.Fatalf("descriptors not equal: %v != %v", ebm.Descriptor, desc) + } + + // Test for existence. 
+ statDesc, err = bs.Stat(ctx, desc.Digest) + if err != nil { + t.Fatalf("unexpected error checking for existence: %v, %#v", err, bs) + } + + if statDesc != desc { + t.Fatalf("descriptors not equal: %v != %v", statDesc, desc) + } + + rc, err := bs.Open(ctx, desc.Digest) + if err != nil { + t.Fatalf("unexpected error opening blob for read: %v", err) + } + defer rc.Close() + + h := sha256.New() + nn, err = io.Copy(h, rc) + if err != nil { + t.Fatalf("error reading layer: %v", err) + } + + if nn != randomDataSize { + t.Fatalf("incorrect read length") + } + + if digest.NewDigest("sha256", h) != dgst { + t.Fatalf("unexpected digest from uploaded layer: %q != %q", digest.NewDigest("sha256", h), dgst) + } + + // Delete the blob from the source repo + err = sbs.Delete(ctx, desc.Digest) + if err != nil { + t.Fatalf("Unexpected error deleting blob") + } + + d, err := bs.Stat(ctx, desc.Digest) + if err != nil { + t.Fatalf("unexpected error stating blob deleted from source repository: %v", err) + } + + d, err = sbs.Stat(ctx, desc.Digest) + if err == nil { + t.Fatalf("unexpected non-error stating deleted blob: %v", d) + } + + switch err { + case distribution.ErrBlobUnknown: + break + default: + t.Errorf("Unexpected error type stat-ing deleted manifest: %#v", err) + } + + // Delete the blob from the dest repo + err = bs.Delete(ctx, desc.Digest) + if err != nil { + t.Fatalf("Unexpected error deleting blob") + } + + d, err = bs.Stat(ctx, desc.Digest) + if err == nil { + t.Fatalf("unexpected non-error stating deleted blob: %v", d) + } + + switch err { + case distribution.ErrBlobUnknown: + break + default: + t.Errorf("Unexpected error type stat-ing deleted manifest: %#v", err) + } +} + // TestLayerUploadZeroLength uploads zero-length func TestLayerUploadZeroLength(t *testing.T) { ctx := context.Background() - imageName := "foo/bar" + imageName, _ := reference.ParseNamed("foo/bar") driver := inmemory.New() registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect) if err != nil { @@ -361,7 +510,7 @@ func simpleUpload(t *testing.T, bs distribution.BlobIngester, blob []byte, expec if dgst != expectedDigest { // sanity check on zero digest - t.Fatalf("digest not as expected: %v != %v", dgst, digest.DigestTarSumV1EmptyTar) + t.Fatalf("digest not as expected: %v != %v", dgst, expectedDigest) } desc, err := wr.Commit(ctx, distribution.Descriptor{Digest: dgst}) diff --git a/vendor/github.com/docker/distribution/registry/storage/blobserver.go b/vendor/github.com/docker/distribution/registry/storage/blobserver.go index 24aeba69..2655e011 100644 --- a/vendor/github.com/docker/distribution/registry/storage/blobserver.go +++ b/vendor/github.com/docker/distribution/registry/storage/blobserver.go @@ -34,46 +34,45 @@ func (bs *blobServer) ServeBlob(ctx context.Context, w http.ResponseWriter, r *h return err } - redirectURL, err := bs.driver.URLFor(ctx, path, map[string]interface{}{"method": r.Method}) - - switch err { - case nil: - if bs.redirect { + if bs.redirect { + redirectURL, err := bs.driver.URLFor(ctx, path, map[string]interface{}{"method": r.Method}) + switch err.(type) { + case nil: // Redirect to storage URL. http.Redirect(w, r, redirectURL, http.StatusTemporaryRedirect) return err - } - fallthrough - case driver.ErrUnsupportedMethod: - // Fallback to serving the content directly. 
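		// With this restructuring, URLFor is only consulted when redirects
		// are enabled; driver.ErrUnsupportedMethod drops through to the
		// direct newFileReader path below, and any other error is returned
		// as-is.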
- br, err := newFileReader(ctx, bs.driver, path, desc.Size) - if err != nil { + case driver.ErrUnsupportedMethod: + // Fallback to serving the content directly. + default: + // Some unexpected error. return err } - defer br.Close() - - w.Header().Set("ETag", fmt.Sprintf(`"%s"`, desc.Digest)) // If-None-Match handled by ServeContent - w.Header().Set("Cache-Control", fmt.Sprintf("max-age=%.f", blobCacheControlMaxAge.Seconds())) - - if w.Header().Get("Docker-Content-Digest") == "" { - w.Header().Set("Docker-Content-Digest", desc.Digest.String()) - } - - if w.Header().Get("Content-Type") == "" { - // Set the content type if not already set. - w.Header().Set("Content-Type", desc.MediaType) - } - - if w.Header().Get("Content-Length") == "" { - // Set the content length if not already set. - w.Header().Set("Content-Length", fmt.Sprint(desc.Size)) - } - - http.ServeContent(w, r, desc.Digest.String(), time.Time{}, br) - return nil } - // Some unexpected error. - return err + br, err := newFileReader(ctx, bs.driver, path, desc.Size) + if err != nil { + return err + } + defer br.Close() + + w.Header().Set("ETag", fmt.Sprintf(`"%s"`, desc.Digest)) // If-None-Match handled by ServeContent + w.Header().Set("Cache-Control", fmt.Sprintf("max-age=%.f", blobCacheControlMaxAge.Seconds())) + + if w.Header().Get("Docker-Content-Digest") == "" { + w.Header().Set("Docker-Content-Digest", desc.Digest.String()) + } + + if w.Header().Get("Content-Type") == "" { + // Set the content type if not already set. + w.Header().Set("Content-Type", desc.MediaType) + } + + if w.Header().Get("Content-Length") == "" { + // Set the content length if not already set. + w.Header().Set("Content-Length", fmt.Sprint(desc.Size)) + } + + http.ServeContent(w, r, desc.Digest.String(), time.Time{}, br) + return nil } diff --git a/vendor/github.com/docker/distribution/registry/storage/blobstore.go b/vendor/github.com/docker/distribution/registry/storage/blobstore.go index f6a8ac43..f8fe23fe 100644 --- a/vendor/github.com/docker/distribution/registry/storage/blobstore.go +++ b/vendor/github.com/docker/distribution/registry/storage/blobstore.go @@ -56,12 +56,7 @@ func (bs *blobStore) Open(ctx context.Context, dgst digest.Digest) (distribution // content is already present, only the digest will be returned. This should // only be used for small objects, such as manifests. 
This implemented as a convenience for other Put implementations func (bs *blobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { - dgst, err := digest.FromBytes(p) - if err != nil { - context.GetLogger(ctx).Errorf("blobStore: error digesting content: %v, %s", err, string(p)) - return distribution.Descriptor{}, err - } - + dgst := digest.FromBytes(p) desc, err := bs.statter.Stat(ctx, dgst) if err == nil { // content already present diff --git a/vendor/github.com/docker/distribution/registry/storage/blobwriter.go b/vendor/github.com/docker/distribution/registry/storage/blobwriter.go index b384fa8a..e485cc6d 100644 --- a/vendor/github.com/docker/distribution/registry/storage/blobwriter.go +++ b/vendor/github.com/docker/distribution/registry/storage/blobwriter.go @@ -227,6 +227,7 @@ func (bw *blobWriter) validateBlob(ctx context.Context, desc distribution.Descri if err != nil { return distribution.Descriptor{}, err } + defer fr.Close() tr := io.TeeReader(fr, digester.Hash()) @@ -301,7 +302,7 @@ func (bw *blobWriter) moveBlob(ctx context.Context, desc distribution.Descriptor // get a hash, then the underlying file is deleted, we risk moving // a zero-length blob into a nonzero-length blob location. To // prevent this horrid thing, we employ the hack of only allowing - // to this happen for the zero tarsum. + // to this happen for the digest of an empty tar. if desc.Digest == digest.DigestSha256EmptyTar { return bw.blobStore.driver.PutContent(ctx, blobPath, []byte{}) } @@ -325,7 +326,7 @@ func (bw *blobWriter) moveBlob(ctx context.Context, desc distribution.Descriptor // resources are already not present, no error will be returned. func (bw *blobWriter) removeResources(ctx context.Context) error { dataPath, err := pathFor(uploadDataPathSpec{ - name: bw.blobStore.repository.Name(), + name: bw.blobStore.repository.Name().Name(), id: bw.id, }) diff --git a/vendor/github.com/docker/distribution/registry/storage/blobwriter_resumable.go b/vendor/github.com/docker/distribution/registry/storage/blobwriter_resumable.go index 26d3beab..fc62bcc4 100644 --- a/vendor/github.com/docker/distribution/registry/storage/blobwriter_resumable.go +++ b/vendor/github.com/docker/distribution/registry/storage/blobwriter_resumable.go @@ -91,6 +91,7 @@ func (bw *blobWriter) resumeDigestAt(ctx context.Context, offset int64) error { if err != nil { return err } + defer fr.Close() if _, err = fr.Seek(int64(h.Len()), os.SEEK_SET); err != nil { return fmt.Errorf("unable to seek to layer reader offset %d: %s", h.Len(), err) @@ -112,7 +113,7 @@ type hashStateEntry struct { // getStoredHashStates returns a slice of hashStateEntries for this upload. 
func (bw *blobWriter) getStoredHashStates(ctx context.Context) ([]hashStateEntry, error) { uploadHashStatePathPrefix, err := pathFor(uploadHashStatePathSpec{ - name: bw.blobStore.repository.Name(), + name: bw.blobStore.repository.Name().String(), id: bw.id, alg: bw.digester.Digest().Algorithm(), list: true, @@ -158,7 +159,7 @@ func (bw *blobWriter) storeHashState(ctx context.Context) error { } uploadHashStatePath, err := pathFor(uploadHashStatePathSpec{ - name: bw.blobStore.repository.Name(), + name: bw.blobStore.repository.Name().String(), id: bw.id, alg: bw.digester.Digest().Algorithm(), offset: int64(h.Len()), diff --git a/vendor/github.com/docker/distribution/registry/storage/catalog.go b/vendor/github.com/docker/distribution/registry/storage/catalog.go index b6768012..481489f2 100644 --- a/vendor/github.com/docker/distribution/registry/storage/catalog.go +++ b/vendor/github.com/docker/distribution/registry/storage/catalog.go @@ -4,19 +4,22 @@ import ( "errors" "io" "path" - "sort" "strings" "github.com/docker/distribution/context" "github.com/docker/distribution/registry/storage/driver" ) +// ErrFinishedWalk is used when the called walk function no longer wants +// to accept any more values. This is used for pagination when the +// required number of repos have been found. +var ErrFinishedWalk = errors.New("finished walk") + // Returns a list, or partial list, of repositories in the registry. // Because it's a quite expensive operation, it should only be used when building up // an initial set of repositories. -func (reg *registry) Repositories(ctx context.Context, repos []string, last string) (n int, err error) { +func (reg *registry) Repositories(ctx context.Context, repos []string, last string) (n int, errVal error) { var foundRepos []string - var errVal error if len(repos) == 0 { return 0, errors.New("no space in slice") @@ -27,12 +30,7 @@ func (reg *registry) Repositories(ctx context.Context, repos []string, last stri return 0, err } - // Walk each of the directories in our storage. Unfortunately since there's no - // guarantee that storage will return files in lexigraphical order, we have - // to store everything another slice, sort it and then copy it back to our - // passed in slice. 
- - Walk(ctx, reg.blobStore.driver, root, func(fileInfo driver.FileInfo) error { + err = Walk(ctx, reg.blobStore.driver, root, func(fileInfo driver.FileInfo) error { filePath := fileInfo.Path() // lop the base path off @@ -49,17 +47,20 @@ func (reg *registry) Repositories(ctx context.Context, repos []string, last stri return ErrSkipDir } + // if we've filled our array, no need to walk any further + if len(foundRepos) == len(repos) { + return ErrFinishedWalk + } + return nil }) - sort.Strings(foundRepos) n = copy(repos, foundRepos) // Signal that we have no more entries by setting EOF - if len(foundRepos) <= len(repos) { + if len(foundRepos) <= len(repos) && err != ErrFinishedWalk { errVal = io.EOF } return n, errVal - } diff --git a/vendor/github.com/docker/distribution/registry/storage/linkedblobstore.go b/vendor/github.com/docker/distribution/registry/storage/linkedblobstore.go index f01088ba..0c0c622c 100644 --- a/vendor/github.com/docker/distribution/registry/storage/linkedblobstore.go +++ b/vendor/github.com/docker/distribution/registry/storage/linkedblobstore.go @@ -1,12 +1,14 @@ package storage import ( + "fmt" "net/http" "time" "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" + "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/uuid" ) @@ -20,6 +22,7 @@ type linkPathFunc func(name string, dgst digest.Digest) (string, error) // that grant access to the global blob store. type linkedBlobStore struct { *blobStore + registry *registry blobServer distribution.BlobServer blobAccessController distribution.BlobDescriptorService repository distribution.Repository @@ -75,10 +78,7 @@ func (lbs *linkedBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter } func (lbs *linkedBlobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { - dgst, err := digest.FromBytes(p) - if err != nil { - return distribution.Descriptor{}, err - } + dgst := digest.FromBytes(p) // Place the data in the blob store first. desc, err := lbs.blobStore.Put(ctx, mediaType, p) if err != nil { @@ -97,15 +97,63 @@ func (lbs *linkedBlobStore) Put(ctx context.Context, mediaType string, p []byte) return desc, lbs.linkBlob(ctx, desc) } +// createOptions is a collection of blob creation modifiers relevant to general +// blob storage intended to be configured by the BlobCreateOption.Apply method. +type createOptions struct { + Mount struct { + ShouldMount bool + From reference.Canonical + } +} + +type optionFunc func(interface{}) error + +func (f optionFunc) Apply(v interface{}) error { + return f(v) +} + +// WithMountFrom returns a BlobCreateOption which designates that the blob should be +// mounted from the given canonical reference. +func WithMountFrom(ref reference.Canonical) distribution.BlobCreateOption { + return optionFunc(func(v interface{}) error { + opts, ok := v.(*createOptions) + if !ok { + return fmt.Errorf("unexpected options type: %T", v) + } + + opts.Mount.ShouldMount = true + opts.Mount.From = ref + + return nil + }) +} + // Writer begins a blob write session, returning a handle. 
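The catalog change above stops the repository walk with a sentinel error (ErrFinishedWalk) once the caller's slice is full, and reports io.EOF only when the walk actually ran to completion. A self-contained sketch of that pagination contract over a plain sorted slice (errFinished, listRepositories and the sample names are illustrative):

package main

import (
	"errors"
	"fmt"
	"io"
)

// errFinished signals that the walk callback no longer wants more values,
// mirroring ErrFinishedWalk in the catalog code above.
var errFinished = errors.New("finished walk")

// listRepositories copies names greater than last into repos and returns
// io.EOF only when the underlying listing was exhausted.
func listRepositories(all []string, repos []string, last string) (int, error) {
	if len(repos) == 0 {
		return 0, errors.New("no space in slice")
	}

	var found []string
	err := walk(all, func(name string) error {
		if name > last {
			found = append(found, name)
		}
		if len(found) == len(repos) {
			return errFinished // stop early; there may be more entries
		}
		return nil
	})

	n := copy(repos, found)
	if err != errFinished {
		err = io.EOF // the walk completed, so there is nothing left to page
	} else {
		err = nil
	}
	return n, err
}

// walk visits every entry in order, stopping on the first non-nil error.
func walk(entries []string, fn func(string) error) error {
	for _, e := range entries {
		if err := fn(e); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	all := []string{"a/one", "b/two", "c/three", "d/four"}
	page := make([]string, 3)
	n, err := listRepositories(all, page, "")
	fmt.Println(page[:n], err) // [a/one b/two c/three] <nil>  (stopped early)
	n, err = listRepositories(all, page, "c/three")
	fmt.Println(page[:n], err) // [d/four] EOF                 (listing exhausted)
}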
-func (lbs *linkedBlobStore) Create(ctx context.Context) (distribution.BlobWriter, error) { +func (lbs *linkedBlobStore) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { context.GetLogger(ctx).Debug("(*linkedBlobStore).Writer") + var opts createOptions + + for _, option := range options { + err := option.Apply(&opts) + if err != nil { + return nil, err + } + } + + if opts.Mount.ShouldMount { + desc, err := lbs.mount(ctx, opts.Mount.From, opts.Mount.From.Digest()) + if err == nil { + // Mount successful, no need to initiate an upload session + return nil, distribution.ErrBlobMounted{From: opts.Mount.From, Descriptor: desc} + } + } + uuid := uuid.Generate().String() startedAt := time.Now().UTC() path, err := pathFor(uploadDataPathSpec{ - name: lbs.repository.Name(), + name: lbs.repository.Name().Name(), id: uuid, }) @@ -114,7 +162,7 @@ func (lbs *linkedBlobStore) Create(ctx context.Context) (distribution.BlobWriter } startedAtPath, err := pathFor(uploadStartedAtPathSpec{ - name: lbs.repository.Name(), + name: lbs.repository.Name().Name(), id: uuid, }) @@ -134,7 +182,7 @@ func (lbs *linkedBlobStore) Resume(ctx context.Context, id string) (distribution context.GetLogger(ctx).Debug("(*linkedBlobStore).Resume") startedAtPath, err := pathFor(uploadStartedAtPathSpec{ - name: lbs.repository.Name(), + name: lbs.repository.Name().Name(), id: id, }) @@ -158,7 +206,7 @@ func (lbs *linkedBlobStore) Resume(ctx context.Context, id string) (distribution } path, err := pathFor(uploadDataPathSpec{ - name: lbs.repository.Name(), + name: lbs.repository.Name().Name(), id: id, }) @@ -188,6 +236,28 @@ func (lbs *linkedBlobStore) Delete(ctx context.Context, dgst digest.Digest) erro return nil } +func (lbs *linkedBlobStore) mount(ctx context.Context, sourceRepo reference.Named, dgst digest.Digest) (distribution.Descriptor, error) { + repo, err := lbs.registry.Repository(ctx, sourceRepo) + if err != nil { + return distribution.Descriptor{}, err + } + stat, err := repo.Blobs(ctx).Stat(ctx, dgst) + if err != nil { + return distribution.Descriptor{}, err + } + + desc := distribution.Descriptor{ + Size: stat.Size, + + // NOTE(stevvooe): The central blob store firewalls media types from + // other users. The caller should look this up and override the value + // for the specific repository. + MediaType: "application/octet-stream", + Digest: dgst, + } + return desc, lbs.linkBlob(ctx, desc) +} + // newBlobUpload allocates a new upload controller with the given state. func (lbs *linkedBlobStore) newBlobUpload(ctx context.Context, uuid, path string, startedAt time.Time) (distribution.BlobWriter, error) { fw, err := newFileWriter(ctx, lbs.driver, path) @@ -228,7 +298,7 @@ func (lbs *linkedBlobStore) linkBlob(ctx context.Context, canonical distribution } seenDigests[dgst] = struct{}{} - blobLinkPath, err := linkPathFn(lbs.repository.Name(), dgst) + blobLinkPath, err := linkPathFn(lbs.repository.Name().Name(), dgst) if err != nil { return err } @@ -285,7 +355,7 @@ func (lbs *linkedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (dis } if target != dgst { - // Track when we are doing cross-digest domain lookups. ie, tarsum to sha256. + // Track when we are doing cross-digest domain lookups. ie, sha512 to sha256. 
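The new Create option above lets a caller request a cross-repository blob mount; on success the registry returns distribution.ErrBlobMounted instead of opening an upload session. A hedged caller-side sketch, assuming the vendored distribution packages; createOrMount and its arguments are illustrative names:

package main

import (
	"fmt"

	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/reference"
	"github.com/docker/distribution/registry/storage"
)

// createOrMount starts a blob upload, first asking the registry to mount the
// blob from another repository. On a successful mount no upload session is
// needed and ErrBlobMounted is returned instead of a writer.
func createOrMount(ctx context.Context, blobs distribution.BlobIngester, from reference.Canonical) (distribution.BlobWriter, error) {
	wr, err := blobs.Create(ctx, storage.WithMountFrom(from))
	switch err := err.(type) {
	case nil:
		// Nothing to mount; proceed with a normal upload session.
		return wr, nil
	case distribution.ErrBlobMounted:
		// The blob was linked from the source repository; no data transfer needed.
		fmt.Printf("mounted %s from %s\n", err.Descriptor.Digest, err.From.Name())
		return nil, nil
	default:
		return nil, err
	}
}

func main() {}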
context.GetLogger(ctx).Warnf("looking up blob with canonical target: %v -> %v", dgst, target) } @@ -298,7 +368,7 @@ func (lbs *linkedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (dis func (lbs *linkedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) (err error) { // clear any possible existence of a link described in linkPathFns for _, linkPathFn := range lbs.linkPathFns { - blobLinkPath, err := linkPathFn(lbs.repository.Name(), dgst) + blobLinkPath, err := linkPathFn(lbs.repository.Name().Name(), dgst) if err != nil { return err } @@ -321,7 +391,7 @@ func (lbs *linkedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) (er // linkPathFuncs to let us try a few different paths before returning not // found. func (lbs *linkedBlobStatter) resolveWithLinkFunc(ctx context.Context, dgst digest.Digest, linkPathFn linkPathFunc) (digest.Digest, error) { - blobLinkPath, err := linkPathFn(lbs.repository.Name(), dgst) + blobLinkPath, err := linkPathFn(lbs.repository.Name().Name(), dgst) if err != nil { return "", err } diff --git a/vendor/github.com/docker/distribution/registry/storage/manifestlisthandler.go b/vendor/github.com/docker/distribution/registry/storage/manifestlisthandler.go new file mode 100644 index 00000000..42027d13 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/manifestlisthandler.go @@ -0,0 +1,96 @@ +package storage + +import ( + "fmt" + + "encoding/json" + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest/manifestlist" +) + +// manifestListHandler is a ManifestHandler that covers schema2 manifest lists. +type manifestListHandler struct { + repository *repository + blobStore *linkedBlobStore + ctx context.Context +} + +var _ ManifestHandler = &manifestListHandler{} + +func (ms *manifestListHandler) Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) { + context.GetLogger(ms.ctx).Debug("(*manifestListHandler).Unmarshal") + + var m manifestlist.DeserializedManifestList + if err := json.Unmarshal(content, &m); err != nil { + return nil, err + } + + return &m, nil +} + +func (ms *manifestListHandler) Put(ctx context.Context, manifestList distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) { + context.GetLogger(ms.ctx).Debug("(*manifestListHandler).Put") + + m, ok := manifestList.(*manifestlist.DeserializedManifestList) + if !ok { + return "", fmt.Errorf("wrong type put to manifestListHandler: %T", manifestList) + } + + if err := ms.verifyManifest(ms.ctx, *m, skipDependencyVerification); err != nil { + return "", err + } + + mt, payload, err := m.Payload() + if err != nil { + return "", err + } + + revision, err := ms.blobStore.Put(ctx, mt, payload) + if err != nil { + context.GetLogger(ctx).Errorf("error putting payload into blobstore: %v", err) + return "", err + } + + // Link the revision into the repository. + if err := ms.blobStore.linkBlob(ctx, revision); err != nil { + return "", err + } + + return revision.Digest, nil +} + +// verifyManifest ensures that the manifest content is valid from the +// perspective of the registry. As a policy, the registry only tries to +// store valid content, leaving trust policies of that content up to +// consumers. 
+func (ms *manifestListHandler) verifyManifest(ctx context.Context, mnfst manifestlist.DeserializedManifestList, skipDependencyVerification bool) error { + var errs distribution.ErrManifestVerification + + if !skipDependencyVerification { + // This manifest service is different from the blob service + // returned by Blob. It uses a linked blob store to ensure that + // only manifests are accessible. + manifestService, err := ms.repository.Manifests(ctx) + if err != nil { + return err + } + + for _, manifestDescriptor := range mnfst.References() { + exists, err := manifestService.Exists(ctx, manifestDescriptor.Digest) + if err != nil && err != distribution.ErrBlobUnknown { + errs = append(errs, err) + } + if err != nil || !exists { + // On error here, we always append unknown blob errors. + errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: manifestDescriptor.Digest}) + } + } + } + if len(errs) != 0 { + return errs + } + + return nil +} diff --git a/vendor/github.com/docker/distribution/registry/storage/manifeststore.go b/vendor/github.com/docker/distribution/registry/storage/manifeststore.go index db49aaa4..33c0c351 100644 --- a/vendor/github.com/docker/distribution/registry/storage/manifeststore.go +++ b/vendor/github.com/docker/distribution/registry/storage/manifeststore.go @@ -3,27 +3,59 @@ package storage import ( "fmt" + "encoding/json" "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest" + "github.com/docker/distribution/manifest/manifestlist" "github.com/docker/distribution/manifest/schema1" - "github.com/docker/libtrust" + "github.com/docker/distribution/manifest/schema2" ) +// A ManifestHandler gets and puts manifests of a particular type. +type ManifestHandler interface { + // Unmarshal unmarshals the manifest from a byte slice. + Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) + + // Put creates or updates the given manifest returning the manifest digest. 
+ Put(ctx context.Context, manifest distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) +} + +// SkipLayerVerification allows a manifest to be Put before its +// layers are on the filesystem +func SkipLayerVerification() distribution.ManifestServiceOption { + return skipLayerOption{} +} + +type skipLayerOption struct{} + +func (o skipLayerOption) Apply(m distribution.ManifestService) error { + if ms, ok := m.(*manifestStore); ok { + ms.skipDependencyVerification = true + return nil + } + return fmt.Errorf("skip layer verification only valid for manifestStore") +} + type manifestStore struct { - repository *repository - revisionStore *revisionStore - tagStore *tagStore - ctx context.Context + repository *repository + blobStore *linkedBlobStore + ctx context.Context + skipDependencyVerification bool + + schema1Handler ManifestHandler + schema2Handler ManifestHandler + manifestListHandler ManifestHandler } var _ distribution.ManifestService = &manifestStore{} -func (ms *manifestStore) Exists(dgst digest.Digest) (bool, error) { +func (ms *manifestStore) Exists(ctx context.Context, dgst digest.Digest) (bool, error) { context.GetLogger(ms.ctx).Debug("(*manifestStore).Exists") - _, err := ms.revisionStore.blobStore.Stat(ms.ctx, dgst) + _, err := ms.blobStore.Stat(ms.ctx, dgst) if err != nil { if err == distribution.ErrBlobUnknown { return false, nil @@ -35,110 +67,68 @@ func (ms *manifestStore) Exists(dgst digest.Digest) (bool, error) { return true, nil } -func (ms *manifestStore) Get(dgst digest.Digest) (*schema1.SignedManifest, error) { +func (ms *manifestStore) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { context.GetLogger(ms.ctx).Debug("(*manifestStore).Get") - return ms.revisionStore.get(ms.ctx, dgst) -} -// SkipLayerVerification allows a manifest to be Put before it's -// layers are on the filesystem -func SkipLayerVerification(ms distribution.ManifestService) error { - if ms, ok := ms.(*manifestStore); ok { - ms.skipDependencyVerification = true - return nil - } - return fmt.Errorf("skip layer verification only valid for manifeststore") -} + // TODO(stevvooe): Need to check descriptor from above to ensure that the + // mediatype is as we expect for the manifest store. -func (ms *manifestStore) Put(manifest *schema1.SignedManifest) error { - context.GetLogger(ms.ctx).Debug("(*manifestStore).Put") - - if err := ms.verifyManifest(ms.ctx, manifest); err != nil { - return err - } - - // Store the revision of the manifest - revision, err := ms.revisionStore.put(ms.ctx, manifest) + content, err := ms.blobStore.Get(ctx, dgst) if err != nil { - return err - } - - // Now, tag the manifest - return ms.tagStore.tag(manifest.Tag, revision.Digest) -} - -// Delete removes the revision of the specified manfiest. 
-func (ms *manifestStore) Delete(dgst digest.Digest) error { - context.GetLogger(ms.ctx).Debug("(*manifestStore).Delete") - return ms.revisionStore.delete(ms.ctx, dgst) -} - -func (ms *manifestStore) Tags() ([]string, error) { - context.GetLogger(ms.ctx).Debug("(*manifestStore).Tags") - return ms.tagStore.tags() -} - -func (ms *manifestStore) ExistsByTag(tag string) (bool, error) { - context.GetLogger(ms.ctx).Debug("(*manifestStore).ExistsByTag") - return ms.tagStore.exists(tag) -} - -func (ms *manifestStore) GetByTag(tag string, options ...distribution.ManifestServiceOption) (*schema1.SignedManifest, error) { - for _, option := range options { - err := option(ms) - if err != nil { - return nil, err + if err == distribution.ErrBlobUnknown { + return nil, distribution.ErrManifestUnknownRevision{ + Name: ms.repository.Name().Name(), + Revision: dgst, + } } - } - context.GetLogger(ms.ctx).Debug("(*manifestStore).GetByTag") - dgst, err := ms.tagStore.resolve(tag) - if err != nil { return nil, err } - return ms.revisionStore.get(ms.ctx, dgst) -} - -// verifyManifest ensures that the manifest content is valid from the -// perspective of the registry. It ensures that the signature is valid for the -// enclosed payload. As a policy, the registry only tries to store valid -// content, leaving trust policies of that content up to consumers. -func (ms *manifestStore) verifyManifest(ctx context.Context, mnfst *schema1.SignedManifest) error { - var errs distribution.ErrManifestVerification - if mnfst.Name != ms.repository.Name() { - errs = append(errs, fmt.Errorf("repository name does not match manifest name")) + var versioned manifest.Versioned + if err = json.Unmarshal(content, &versioned); err != nil { + return nil, err } - if _, err := schema1.Verify(mnfst); err != nil { - switch err { - case libtrust.ErrMissingSignatureKey, libtrust.ErrInvalidJSONContent, libtrust.ErrMissingSignatureKey: - errs = append(errs, distribution.ErrManifestUnverified{}) + switch versioned.SchemaVersion { + case 1: + return ms.schema1Handler.Unmarshal(ctx, dgst, content) + case 2: + // This can be an image manifest or a manifest list + switch versioned.MediaType { + case schema2.MediaTypeManifest: + return ms.schema2Handler.Unmarshal(ctx, dgst, content) + case manifestlist.MediaTypeManifestList: + return ms.manifestListHandler.Unmarshal(ctx, dgst, content) default: - if err.Error() == "invalid signature" { // TODO(stevvooe): This should be exported by libtrust - errs = append(errs, distribution.ErrManifestUnverified{}) - } else { - errs = append(errs, err) - } + return nil, distribution.ErrManifestVerification{fmt.Errorf("unrecognized manifest content type %s", versioned.MediaType)} } } - if !ms.skipDependencyVerification { - for _, fsLayer := range mnfst.FSLayers { - _, err := ms.repository.Blobs(ctx).Stat(ctx, fsLayer.BlobSum) - if err != nil { - if err != distribution.ErrBlobUnknown { - errs = append(errs, err) - } - - // On error here, we always append unknown blob errors. 
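The rewritten manifestStore.Get above dispatches on the schemaVersion/mediaType envelope before handing the payload to the matching handler. A small sketch of that dispatch, assuming the vendored manifest packages; manifestKind is an illustrative helper:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/distribution/manifest"
	"github.com/docker/distribution/manifest/manifestlist"
	"github.com/docker/distribution/manifest/schema2"
)

// manifestKind peeks at the schemaVersion/mediaType envelope the same way
// (*manifestStore).Get does above, without fully decoding the manifest.
func manifestKind(content []byte) (string, error) {
	var versioned manifest.Versioned
	if err := json.Unmarshal(content, &versioned); err != nil {
		return "", err
	}

	switch versioned.SchemaVersion {
	case 1:
		return "schema1 signed manifest", nil
	case 2:
		switch versioned.MediaType {
		case schema2.MediaTypeManifest:
			return "schema2 image manifest", nil
		case manifestlist.MediaTypeManifestList:
			return "manifest list", nil
		default:
			return "", fmt.Errorf("unrecognized manifest content type %s", versioned.MediaType)
		}
	}
	return "", fmt.Errorf("unrecognized manifest schema version %d", versioned.SchemaVersion)
}

func main() {
	kind, err := manifestKind([]byte(`{"schemaVersion": 2, "mediaType": "` + schema2.MediaTypeManifest + `"}`))
	fmt.Println(kind, err) // schema2 image manifest <nil>
}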
- errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: fsLayer.BlobSum}) - } - } - } - if len(errs) != 0 { - return errs - } - - return nil + return nil, fmt.Errorf("unrecognized manifest schema version %d", versioned.SchemaVersion) +} + +func (ms *manifestStore) Put(ctx context.Context, manifest distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) { + context.GetLogger(ms.ctx).Debug("(*manifestStore).Put") + + switch manifest.(type) { + case *schema1.SignedManifest: + return ms.schema1Handler.Put(ctx, manifest, ms.skipDependencyVerification) + case *schema2.DeserializedManifest: + return ms.schema2Handler.Put(ctx, manifest, ms.skipDependencyVerification) + case *manifestlist.DeserializedManifestList: + return ms.manifestListHandler.Put(ctx, manifest, ms.skipDependencyVerification) + } + + return "", fmt.Errorf("unrecognized manifest type %T", manifest) +} + +// Delete removes the revision of the specified manfiest. +func (ms *manifestStore) Delete(ctx context.Context, dgst digest.Digest) error { + context.GetLogger(ms.ctx).Debug("(*manifestStore).Delete") + return ms.blobStore.Delete(ctx, dgst) +} + +func (ms *manifestStore) Enumerate(ctx context.Context, manifests []distribution.Manifest, last distribution.Manifest) (n int, err error) { + return 0, distribution.ErrUnsupported } diff --git a/vendor/github.com/docker/distribution/registry/storage/manifeststore_test.go b/vendor/github.com/docker/distribution/registry/storage/manifeststore_test.go index 30126e4b..7885c466 100644 --- a/vendor/github.com/docker/distribution/registry/storage/manifeststore_test.go +++ b/vendor/github.com/docker/distribution/registry/storage/manifeststore_test.go @@ -11,6 +11,7 @@ import ( "github.com/docker/distribution/digest" "github.com/docker/distribution/manifest" "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/storage/cache/memory" "github.com/docker/distribution/registry/storage/driver" "github.com/docker/distribution/registry/storage/driver/inmemory" @@ -23,14 +24,15 @@ type manifestStoreTestEnv struct { driver driver.StorageDriver registry distribution.Namespace repository distribution.Repository - name string + name reference.Named tag string } -func newManifestStoreTestEnv(t *testing.T, name, tag string) *manifestStoreTestEnv { +func newManifestStoreTestEnv(t *testing.T, name reference.Named, tag string) *manifestStoreTestEnv { ctx := context.Background() driver := inmemory.New() - registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect) + registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider( + memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect) if err != nil { t.Fatalf("error creating registry: %v", err) } @@ -51,36 +53,19 @@ func newManifestStoreTestEnv(t *testing.T, name, tag string) *manifestStoreTestE } func TestManifestStorage(t *testing.T) { - env := newManifestStoreTestEnv(t, "foo/bar", "thetag") + repoName, _ := reference.ParseNamed("foo/bar") + env := newManifestStoreTestEnv(t, repoName, "thetag") ctx := context.Background() ms, err := env.repository.Manifests(ctx) if err != nil { t.Fatal(err) } - exists, err := ms.ExistsByTag(env.tag) - if err != nil { - t.Fatalf("unexpected error checking manifest existence: %v", err) - } - - if exists { - t.Fatalf("manifest should not exist") - } - - if _, err := 
ms.GetByTag(env.tag); true { - switch err.(type) { - case distribution.ErrManifestUnknown: - break - default: - t.Fatalf("expected manifest unknown error: %#v", err) - } - } - m := schema1.Manifest{ Versioned: manifest.Versioned{ SchemaVersion: 1, }, - Name: env.name, + Name: env.name.Name(), Tag: env.tag, } @@ -98,6 +83,10 @@ func TestManifestStorage(t *testing.T) { m.FSLayers = append(m.FSLayers, schema1.FSLayer{ BlobSum: dgst, }) + m.History = append(m.History, schema1.History{ + V1Compatibility: "", + }) + } pk, err := libtrust.GenerateECP256PrivateKey() @@ -110,7 +99,7 @@ func TestManifestStorage(t *testing.T) { t.Fatalf("error signing manifest: %v", err) } - err = ms.Put(sm) + _, err = ms.Put(ctx, sm) if err == nil { t.Fatalf("expected errors putting manifest with full verification") } @@ -146,30 +135,40 @@ func TestManifestStorage(t *testing.T) { } } - if err = ms.Put(sm); err != nil { + var manifestDigest digest.Digest + if manifestDigest, err = ms.Put(ctx, sm); err != nil { t.Fatalf("unexpected error putting manifest: %v", err) } - exists, err = ms.ExistsByTag(env.tag) + exists, err := ms.Exists(ctx, manifestDigest) if err != nil { - t.Fatalf("unexpected error checking manifest existence: %v", err) + t.Fatalf("unexpected error checking manifest existence: %#v", err) } if !exists { t.Fatalf("manifest should exist") } - fetchedManifest, err := ms.GetByTag(env.tag) - + fromStore, err := ms.Get(ctx, manifestDigest) if err != nil { t.Fatalf("unexpected error fetching manifest: %v", err) } + fetchedManifest, ok := fromStore.(*schema1.SignedManifest) + if !ok { + t.Fatalf("unexpected manifest type from signedstore") + } + if !reflect.DeepEqual(fetchedManifest, sm) { t.Fatalf("fetched manifest not equal: %#v != %#v", fetchedManifest, sm) } - fetchedJWS, err := libtrust.ParsePrettySignature(fetchedManifest.Raw, "signatures") + _, pl, err := fetchedManifest.Payload() + if err != nil { + t.Fatalf("error getting payload %#v", err) + } + + fetchedJWS, err := libtrust.ParsePrettySignature(pl, "signatures") if err != nil { t.Fatalf("unexpected error parsing jws: %v", err) } @@ -181,12 +180,9 @@ func TestManifestStorage(t *testing.T) { // Now that we have a payload, take a moment to check that the manifest is // return by the payload digest. 
- dgst, err := digest.FromBytes(payload) - if err != nil { - t.Fatalf("error getting manifest digest: %v", err) - } - exists, err = ms.Exists(dgst) + dgst := digest.FromBytes(payload) + exists, err = ms.Exists(ctx, dgst) if err != nil { t.Fatalf("error checking manifest existence by digest: %v", err) } @@ -195,7 +191,7 @@ func TestManifestStorage(t *testing.T) { t.Fatalf("manifest %s should exist", dgst) } - fetchedByDigest, err := ms.Get(dgst) + fetchedByDigest, err := ms.Get(ctx, dgst) if err != nil { t.Fatalf("unexpected error fetching manifest by digest: %v", err) } @@ -213,20 +209,6 @@ func TestManifestStorage(t *testing.T) { t.Fatalf("unexpected number of signatures: %d != %d", len(sigs), 1) } - // Grabs the tags and check that this tagged manifest is present - tags, err := ms.Tags() - if err != nil { - t.Fatalf("unexpected error fetching tags: %v", err) - } - - if len(tags) != 1 { - t.Fatalf("unexpected tags returned: %v", tags) - } - - if tags[0] != env.tag { - t.Fatalf("unexpected tag found in tags: %v != %v", tags, []string{env.tag}) - } - // Now, push the same manifest with a different key pk2, err := libtrust.GenerateECP256PrivateKey() if err != nil { @@ -237,8 +219,12 @@ func TestManifestStorage(t *testing.T) { if err != nil { t.Fatalf("unexpected error signing manifest: %v", err) } + _, pl, err = sm2.Payload() + if err != nil { + t.Fatalf("error getting payload %#v", err) + } - jws2, err := libtrust.ParsePrettySignature(sm2.Raw, "signatures") + jws2, err := libtrust.ParsePrettySignature(pl, "signatures") if err != nil { t.Fatalf("error parsing signature: %v", err) } @@ -252,15 +238,20 @@ func TestManifestStorage(t *testing.T) { t.Fatalf("unexpected number of signatures: %d != %d", len(sigs2), 1) } - if err = ms.Put(sm2); err != nil { + if manifestDigest, err = ms.Put(ctx, sm2); err != nil { t.Fatalf("unexpected error putting manifest: %v", err) } - fetched, err := ms.GetByTag(env.tag) + fromStore, err = ms.Get(ctx, manifestDigest) if err != nil { t.Fatalf("unexpected error fetching manifest: %v", err) } + fetched, ok := fromStore.(*schema1.SignedManifest) + if !ok { + t.Fatalf("unexpected type from signed manifeststore : %T", fetched) + } + if _, err := schema1.Verify(fetched); err != nil { t.Fatalf("unexpected error verifying manifest: %v", err) } @@ -276,7 +267,12 @@ func TestManifestStorage(t *testing.T) { t.Fatalf("unexpected error getting expected signatures: %v", err) } - receivedJWS, err := libtrust.ParsePrettySignature(fetched.Raw, "signatures") + _, pl, err = fetched.Payload() + if err != nil { + t.Fatalf("error getting payload %#v", err) + } + + receivedJWS, err := libtrust.ParsePrettySignature(pl, "signatures") if err != nil { t.Fatalf("unexpected error parsing jws: %v", err) } @@ -302,12 +298,12 @@ func TestManifestStorage(t *testing.T) { } // Test deleting manifests - err = ms.Delete(dgst) + err = ms.Delete(ctx, dgst) if err != nil { t.Fatalf("unexpected an error deleting manifest by digest: %v", err) } - exists, err = ms.Exists(dgst) + exists, err = ms.Exists(ctx, dgst) if err != nil { t.Fatalf("Error querying manifest existence") } @@ -315,7 +311,7 @@ func TestManifestStorage(t *testing.T) { t.Errorf("Deleted manifest should not exist") } - deletedManifest, err := ms.Get(dgst) + deletedManifest, err := ms.Get(ctx, dgst) if err == nil { t.Errorf("Unexpected success getting deleted manifest") } @@ -331,12 +327,12 @@ func TestManifestStorage(t *testing.T) { } // Re-upload should restore manifest to a good state - err = ms.Put(sm) + _, err = ms.Put(ctx, sm) if err 
!= nil { t.Errorf("Error re-uploading deleted manifest") } - exists, err = ms.Exists(dgst) + exists, err = ms.Exists(ctx, dgst) if err != nil { t.Fatalf("Error querying manifest existence") } @@ -344,7 +340,7 @@ func TestManifestStorage(t *testing.T) { t.Errorf("Restored manifest should exist") } - deletedManifest, err = ms.Get(dgst) + deletedManifest, err = ms.Get(ctx, dgst) if err != nil { t.Errorf("Unexpected error getting manifest") } @@ -364,7 +360,7 @@ func TestManifestStorage(t *testing.T) { if err != nil { t.Fatal(err) } - err = ms.Delete(dgst) + err = ms.Delete(ctx, dgst) if err == nil { t.Errorf("Unexpected success deleting while disabled") } @@ -381,15 +377,15 @@ func TestLinkPathFuncs(t *testing.T) { }{ { repo: "foo/bar", - digest: "sha256:deadbeaf", + digest: "sha256:deadbeaf98fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", linkPathFn: blobLinkPath, - expected: "/docker/registry/v2/repositories/foo/bar/_layers/sha256/deadbeaf/link", + expected: "/docker/registry/v2/repositories/foo/bar/_layers/sha256/deadbeaf98fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855/link", }, { repo: "foo/bar", - digest: "sha256:deadbeaf", + digest: "sha256:deadbeaf98fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", linkPathFn: manifestRevisionLinkPath, - expected: "/docker/registry/v2/repositories/foo/bar/_manifests/revisions/sha256/deadbeaf/link", + expected: "/docker/registry/v2/repositories/foo/bar/_manifests/revisions/sha256/deadbeaf98fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855/link", }, } { p, err := testcase.linkPathFn(testcase.repo, testcase.digest) diff --git a/vendor/github.com/docker/distribution/registry/storage/paths.go b/vendor/github.com/docker/distribution/registry/storage/paths.go index e90a1993..4d2d48c1 100644 --- a/vendor/github.com/docker/distribution/registry/storage/paths.go +++ b/vendor/github.com/docker/distribution/registry/storage/paths.go @@ -396,9 +396,8 @@ type layerLinkPathSpec struct { func (layerLinkPathSpec) pathSpec() {} // blobAlgorithmReplacer does some very simple path sanitization for user -// input. Mostly, this is to provide some hierarchy for tarsum digests. Paths -// should be "safe" before getting this far due to strict digest requirements -// but we can add further path conversion here, if needed. +// input. Paths should be "safe" before getting this far due to strict digest +// requirements but we can add further path conversion here, if needed. var blobAlgorithmReplacer = strings.NewReplacer( "+", "/", ".", "/", @@ -468,10 +467,6 @@ func (repositoriesRootPathSpec) pathSpec() {} // // / // -// Most importantly, for tarsum, the layout looks like this: -// -// tarsum/// -// // If multilevel is true, the first two bytes of the digest will separate // groups of digest folder. It will be as follows: // @@ -494,19 +489,5 @@ func digestPathComponents(dgst digest.Digest, multilevel bool) ([]string, error) suffix = append(suffix, hex) - if tsi, err := digest.ParseTarSum(dgst.String()); err == nil { - // We have a tarsum! 
- version := tsi.Version - if version == "" { - version = "v0" - } - - prefix = []string{ - "tarsum", - version, - tsi.Algorithm, - } - } - return append(prefix, suffix...), nil } diff --git a/vendor/github.com/docker/distribution/registry/storage/paths_test.go b/vendor/github.com/docker/distribution/registry/storage/paths_test.go index 9e91a3fa..2ad78e9d 100644 --- a/vendor/github.com/docker/distribution/registry/storage/paths_test.go +++ b/vendor/github.com/docker/distribution/registry/storage/paths_test.go @@ -2,8 +2,6 @@ package storage import ( "testing" - - "github.com/docker/distribution/digest" ) func TestPathMapper(t *testing.T) { @@ -15,31 +13,31 @@ func TestPathMapper(t *testing.T) { { spec: manifestRevisionPathSpec{ name: "foo/bar", - revision: "sha256:abcdef0123456789", + revision: "sha256:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789", }, - expected: "/docker/registry/v2/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789", + expected: "/docker/registry/v2/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789", }, { spec: manifestRevisionLinkPathSpec{ name: "foo/bar", - revision: "sha256:abcdef0123456789", + revision: "sha256:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789", }, - expected: "/docker/registry/v2/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789/link", + expected: "/docker/registry/v2/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789/link", }, { spec: manifestSignatureLinkPathSpec{ name: "foo/bar", - revision: "sha256:abcdef0123456789", - signature: "sha256:abcdef0123456789", + revision: "sha256:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789", + signature: "sha256:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789", }, - expected: "/docker/registry/v2/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789/signatures/sha256/abcdef0123456789/link", + expected: "/docker/registry/v2/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789/signatures/sha256/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789/link", }, { spec: manifestSignaturesPathSpec{ name: "foo/bar", - revision: "sha256:abcdef0123456789", + revision: "sha256:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789", }, - expected: "/docker/registry/v2/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789/signatures", + expected: "/docker/registry/v2/repositories/foo/bar/_manifests/revisions/sha256/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789/signatures", }, { spec: manifestTagsPathSpec{ @@ -72,36 +70,17 @@ func TestPathMapper(t *testing.T) { spec: manifestTagIndexEntryPathSpec{ name: "foo/bar", tag: "thetag", - revision: "sha256:abcdef0123456789", + revision: "sha256:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789", }, - expected: "/docker/registry/v2/repositories/foo/bar/_manifests/tags/thetag/index/sha256/abcdef0123456789", + expected: "/docker/registry/v2/repositories/foo/bar/_manifests/tags/thetag/index/sha256/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789", }, { spec: manifestTagIndexEntryLinkPathSpec{ name: "foo/bar", tag: "thetag", - revision: "sha256:abcdef0123456789", + revision: "sha256:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789", }, - expected: 
"/docker/registry/v2/repositories/foo/bar/_manifests/tags/thetag/index/sha256/abcdef0123456789/link", - }, - { - spec: layerLinkPathSpec{ - name: "foo/bar", - digest: "tarsum.v1+test:abcdef", - }, - expected: "/docker/registry/v2/repositories/foo/bar/_layers/tarsum/v1/test/abcdef/link", - }, - { - spec: blobDataPathSpec{ - digest: digest.Digest("tarsum.dev+sha512:abcdefabcdefabcdef908909909"), - }, - expected: "/docker/registry/v2/blobs/tarsum/dev/sha512/ab/abcdefabcdefabcdef908909909/data", - }, - { - spec: blobDataPathSpec{ - digest: digest.Digest("tarsum.v1+sha256:abcdefabcdefabcdef908909909"), - }, - expected: "/docker/registry/v2/blobs/tarsum/v1/sha256/ab/abcdefabcdefabcdef908909909/data", + expected: "/docker/registry/v2/repositories/foo/bar/_manifests/tags/thetag/index/sha256/abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789/link", }, { diff --git a/vendor/github.com/docker/distribution/registry/storage/registry.go b/vendor/github.com/docker/distribution/registry/storage/registry.go index 5ef06d53..be570cbc 100644 --- a/vendor/github.com/docker/distribution/registry/storage/registry.go +++ b/vendor/github.com/docker/distribution/registry/storage/registry.go @@ -107,18 +107,11 @@ func (reg *registry) Scope() distribution.Scope { // Repository returns an instance of the repository tied to the registry. // Instances should not be shared between goroutines but are cheap to // allocate. In general, they should be request scoped. -func (reg *registry) Repository(ctx context.Context, canonicalName string) (distribution.Repository, error) { - if _, err := reference.ParseNamed(canonicalName); err != nil { - return nil, distribution.ErrRepositoryNameInvalid{ - Name: canonicalName, - Reason: err, - } - } - +func (reg *registry) Repository(ctx context.Context, canonicalName reference.Named) (distribution.Repository, error) { var descriptorCache distribution.BlobDescriptorService if reg.blobDescriptorCacheProvider != nil { var err error - descriptorCache, err = reg.blobDescriptorCacheProvider.RepositoryScoped(canonicalName) + descriptorCache, err = reg.blobDescriptorCacheProvider.RepositoryScoped(canonicalName.Name()) if err != nil { return nil, err } @@ -136,15 +129,24 @@ func (reg *registry) Repository(ctx context.Context, canonicalName string) (dist type repository struct { *registry ctx context.Context - name string + name reference.Named descriptorCache distribution.BlobDescriptorService } // Name returns the name of the repository. -func (repo *repository) Name() string { +func (repo *repository) Name() reference.Named { return repo.name } +func (repo *repository) Tags(ctx context.Context) distribution.TagService { + tags := &tagStore{ + repository: repo, + blobStore: repo.registry.blobStore, + } + + return tags +} + // Manifests returns an instance of ManifestService. Instantiation is cheap and // may be context sensitive in the future. The instance should be used similar // to a request local. @@ -156,39 +158,51 @@ func (repo *repository) Manifests(ctx context.Context, options ...distribution.M blobLinkPath, } + blobStore := &linkedBlobStore{ + ctx: ctx, + blobStore: repo.blobStore, + repository: repo, + deleteEnabled: repo.registry.deleteEnabled, + blobAccessController: &linkedBlobStatter{ + blobStore: repo.blobStore, + repository: repo, + linkPathFns: manifestLinkPathFns, + }, + + // TODO(stevvooe): linkPath limits this blob store to only + // manifests. This instance cannot be used for blob checks. 
+ linkPathFns: manifestLinkPathFns, + } + ms := &manifestStore{ ctx: ctx, repository: repo, - revisionStore: &revisionStore{ + blobStore: blobStore, + schema1Handler: &signedManifestHandler{ ctx: ctx, repository: repo, - blobStore: &linkedBlobStore{ - ctx: ctx, - blobStore: repo.blobStore, - repository: repo, - deleteEnabled: repo.registry.deleteEnabled, - blobAccessController: &linkedBlobStatter{ - blobStore: repo.blobStore, - repository: repo, - linkPathFns: manifestLinkPathFns, - }, - - // TODO(stevvooe): linkPath limits this blob store to only - // manifests. This instance cannot be used for blob checks. - linkPathFns: manifestLinkPathFns, - resumableDigestEnabled: repo.resumableDigestEnabled, + blobStore: blobStore, + signatures: &signatureStore{ + ctx: ctx, + repository: repo, + blobStore: repo.blobStore, }, }, - tagStore: &tagStore{ + schema2Handler: &schema2ManifestHandler{ ctx: ctx, repository: repo, - blobStore: repo.registry.blobStore, + blobStore: blobStore, + }, + manifestListHandler: &manifestListHandler{ + ctx: ctx, + repository: repo, + blobStore: blobStore, }, } // Apply options for _, option := range options { - err := option(ms) + err := option.Apply(ms) if err != nil { return nil, err } @@ -212,6 +226,7 @@ func (repo *repository) Blobs(ctx context.Context) distribution.BlobStore { } return &linkedBlobStore{ + registry: repo.registry, blobStore: repo.blobStore, blobServer: repo.blobServer, blobAccessController: statter, @@ -225,11 +240,3 @@ func (repo *repository) Blobs(ctx context.Context) distribution.BlobStore { resumableDigestEnabled: repo.resumableDigestEnabled, } } - -func (repo *repository) Signatures() distribution.SignatureService { - return &signatureStore{ - repository: repo, - blobStore: repo.blobStore, - ctx: repo.ctx, - } -} diff --git a/vendor/github.com/docker/distribution/registry/storage/revisionstore.go b/vendor/github.com/docker/distribution/registry/storage/revisionstore.go deleted file mode 100644 index ed2d5dd3..00000000 --- a/vendor/github.com/docker/distribution/registry/storage/revisionstore.go +++ /dev/null @@ -1,111 +0,0 @@ -package storage - -import ( - "encoding/json" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest/schema1" - "github.com/docker/libtrust" -) - -// revisionStore supports storing and managing manifest revisions. -type revisionStore struct { - repository *repository - blobStore *linkedBlobStore - ctx context.Context -} - -// get retrieves the manifest, keyed by revision digest. -func (rs *revisionStore) get(ctx context.Context, revision digest.Digest) (*schema1.SignedManifest, error) { - // Ensure that this revision is available in this repository. - _, err := rs.blobStore.Stat(ctx, revision) - if err != nil { - if err == distribution.ErrBlobUnknown { - return nil, distribution.ErrManifestUnknownRevision{ - Name: rs.repository.Name(), - Revision: revision, - } - } - - return nil, err - } - - // TODO(stevvooe): Need to check descriptor from above to ensure that the - // mediatype is as we expect for the manifest store. 
- - content, err := rs.blobStore.Get(ctx, revision) - if err != nil { - if err == distribution.ErrBlobUnknown { - return nil, distribution.ErrManifestUnknownRevision{ - Name: rs.repository.Name(), - Revision: revision, - } - } - - return nil, err - } - - // Fetch the signatures for the manifest - signatures, err := rs.repository.Signatures().Get(revision) - if err != nil { - return nil, err - } - - jsig, err := libtrust.NewJSONSignature(content, signatures...) - if err != nil { - return nil, err - } - - // Extract the pretty JWS - raw, err := jsig.PrettySignature("signatures") - if err != nil { - return nil, err - } - - var sm schema1.SignedManifest - if err := json.Unmarshal(raw, &sm); err != nil { - return nil, err - } - - return &sm, nil -} - -// put stores the manifest in the repository, if not already present. Any -// updated signatures will be stored, as well. -func (rs *revisionStore) put(ctx context.Context, sm *schema1.SignedManifest) (distribution.Descriptor, error) { - // Resolve the payload in the manifest. - payload, err := sm.Payload() - if err != nil { - return distribution.Descriptor{}, err - } - - // Digest and store the manifest payload in the blob store. - revision, err := rs.blobStore.Put(ctx, schema1.ManifestMediaType, payload) - if err != nil { - context.GetLogger(ctx).Errorf("error putting payload into blobstore: %v", err) - return distribution.Descriptor{}, err - } - - // Link the revision into the repository. - if err := rs.blobStore.linkBlob(ctx, revision); err != nil { - return distribution.Descriptor{}, err - } - - // Grab each json signature and store them. - signatures, err := sm.Signatures() - if err != nil { - return distribution.Descriptor{}, err - } - - if err := rs.repository.Signatures().Put(revision.Digest, signatures...); err != nil { - return distribution.Descriptor{}, err - } - - return revision, nil -} - -func (rs *revisionStore) delete(ctx context.Context, revision digest.Digest) error { - return rs.blobStore.Delete(ctx, revision) -} diff --git a/vendor/github.com/docker/distribution/registry/storage/schema2manifesthandler.go b/vendor/github.com/docker/distribution/registry/storage/schema2manifesthandler.go new file mode 100644 index 00000000..115786e2 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/schema2manifesthandler.go @@ -0,0 +1,99 @@ +package storage + +import ( + "fmt" + + "encoding/json" + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest/schema2" +) + +//schema2ManifestHandler is a ManifestHandler that covers schema2 manifests. 
+type schema2ManifestHandler struct { + repository *repository + blobStore *linkedBlobStore + ctx context.Context +} + +var _ ManifestHandler = &schema2ManifestHandler{} + +func (ms *schema2ManifestHandler) Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) { + context.GetLogger(ms.ctx).Debug("(*schema2ManifestHandler).Unmarshal") + + var m schema2.DeserializedManifest + if err := json.Unmarshal(content, &m); err != nil { + return nil, err + } + + return &m, nil +} + +func (ms *schema2ManifestHandler) Put(ctx context.Context, manifest distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) { + context.GetLogger(ms.ctx).Debug("(*schema2ManifestHandler).Put") + + m, ok := manifest.(*schema2.DeserializedManifest) + if !ok { + return "", fmt.Errorf("non-schema2 manifest put to schema2ManifestHandler: %T", manifest) + } + + if err := ms.verifyManifest(ms.ctx, *m, skipDependencyVerification); err != nil { + return "", err + } + + mt, payload, err := m.Payload() + if err != nil { + return "", err + } + + revision, err := ms.blobStore.Put(ctx, mt, payload) + if err != nil { + context.GetLogger(ctx).Errorf("error putting payload into blobstore: %v", err) + return "", err + } + + // Link the revision into the repository. + if err := ms.blobStore.linkBlob(ctx, revision); err != nil { + return "", err + } + + return revision.Digest, nil +} + +// verifyManifest ensures that the manifest content is valid from the +// perspective of the registry. As a policy, the registry only tries to store +// valid content, leaving trust policies of that content up to consumers. +func (ms *schema2ManifestHandler) verifyManifest(ctx context.Context, mnfst schema2.DeserializedManifest, skipDependencyVerification bool) error { + var errs distribution.ErrManifestVerification + + if !skipDependencyVerification { + target := mnfst.Target() + _, err := ms.repository.Blobs(ctx).Stat(ctx, target.Digest) + if err != nil { + if err != distribution.ErrBlobUnknown { + errs = append(errs, err) + } + + // On error here, we always append unknown blob errors. + errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: target.Digest}) + } + + for _, fsLayer := range mnfst.References() { + _, err := ms.repository.Blobs(ctx).Stat(ctx, fsLayer.Digest) + if err != nil { + if err != distribution.ErrBlobUnknown { + errs = append(errs, err) + } + + // On error here, we always append unknown blob errors. 
+ errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: fsLayer.Digest}) + } + } + } + if len(errs) != 0 { + return errs + } + + return nil +} diff --git a/vendor/github.com/docker/distribution/registry/storage/signaturestore.go b/vendor/github.com/docker/distribution/registry/storage/signaturestore.go index f5888f64..205d6009 100644 --- a/vendor/github.com/docker/distribution/registry/storage/signaturestore.go +++ b/vendor/github.com/docker/distribution/registry/storage/signaturestore.go @@ -4,7 +4,6 @@ import ( "path" "sync" - "github.com/docker/distribution" "github.com/docker/distribution/context" "github.com/docker/distribution/digest" ) @@ -15,19 +14,9 @@ type signatureStore struct { ctx context.Context } -func newSignatureStore(ctx context.Context, repo *repository, blobStore *blobStore) *signatureStore { - return &signatureStore{ - ctx: ctx, - repository: repo, - blobStore: blobStore, - } -} - -var _ distribution.SignatureService = &signatureStore{} - func (s *signatureStore) Get(dgst digest.Digest) ([][]byte, error) { signaturesPath, err := pathFor(manifestSignaturesPathSpec{ - name: s.repository.Name(), + name: s.repository.Name().Name(), revision: dgst, }) diff --git a/vendor/github.com/docker/distribution/registry/storage/signedmanifesthandler.go b/vendor/github.com/docker/distribution/registry/storage/signedmanifesthandler.go new file mode 100644 index 00000000..02663226 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/signedmanifesthandler.go @@ -0,0 +1,150 @@ +package storage + +import ( + "encoding/json" + "fmt" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/digest" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/reference" + "github.com/docker/libtrust" +) + +// signedManifestHandler is a ManifestHandler that covers schema1 manifests. It +// can unmarshal and put schema1 manifests that have been signed by libtrust. +type signedManifestHandler struct { + repository *repository + blobStore *linkedBlobStore + ctx context.Context + signatures *signatureStore +} + +var _ ManifestHandler = &signedManifestHandler{} + +func (ms *signedManifestHandler) Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) { + context.GetLogger(ms.ctx).Debug("(*signedManifestHandler).Unmarshal") + // Fetch the signatures for the manifest + signatures, err := ms.signatures.Get(dgst) + if err != nil { + return nil, err + } + + jsig, err := libtrust.NewJSONSignature(content, signatures...) 
+ if err != nil { + return nil, err + } + + // Extract the pretty JWS + raw, err := jsig.PrettySignature("signatures") + if err != nil { + return nil, err + } + + var sm schema1.SignedManifest + if err := json.Unmarshal(raw, &sm); err != nil { + return nil, err + } + return &sm, nil +} + +func (ms *signedManifestHandler) Put(ctx context.Context, manifest distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) { + context.GetLogger(ms.ctx).Debug("(*signedManifestHandler).Put") + + sm, ok := manifest.(*schema1.SignedManifest) + if !ok { + return "", fmt.Errorf("non-schema1 manifest put to signedManifestHandler: %T", manifest) + } + + if err := ms.verifyManifest(ms.ctx, *sm, skipDependencyVerification); err != nil { + return "", err + } + + mt := schema1.MediaTypeManifest + payload := sm.Canonical + + revision, err := ms.blobStore.Put(ctx, mt, payload) + if err != nil { + context.GetLogger(ctx).Errorf("error putting payload into blobstore: %v", err) + return "", err + } + + // Link the revision into the repository. + if err := ms.blobStore.linkBlob(ctx, revision); err != nil { + return "", err + } + + // Grab each json signature and store them. + signatures, err := sm.Signatures() + if err != nil { + return "", err + } + + if err := ms.signatures.Put(revision.Digest, signatures...); err != nil { + return "", err + } + + return revision.Digest, nil +} + +// verifyManifest ensures that the manifest content is valid from the +// perspective of the registry. It ensures that the signature is valid for the +// enclosed payload. As a policy, the registry only tries to store valid +// content, leaving trust policies of that content up to consumers. +func (ms *signedManifestHandler) verifyManifest(ctx context.Context, mnfst schema1.SignedManifest, skipDependencyVerification bool) error { + var errs distribution.ErrManifestVerification + + if len(mnfst.Name) > reference.NameTotalLengthMax { + errs = append(errs, + distribution.ErrManifestNameInvalid{ + Name: mnfst.Name, + Reason: fmt.Errorf("manifest name must not be more than %v characters", reference.NameTotalLengthMax), + }) + } + + if !reference.NameRegexp.MatchString(mnfst.Name) { + errs = append(errs, + distribution.ErrManifestNameInvalid{ + Name: mnfst.Name, + Reason: fmt.Errorf("invalid manifest name format"), + }) + } + + if len(mnfst.History) != len(mnfst.FSLayers) { + errs = append(errs, fmt.Errorf("mismatched history and fslayer cardinality %d != %d", + len(mnfst.History), len(mnfst.FSLayers))) + } + + if _, err := schema1.Verify(&mnfst); err != nil { + switch err { + case libtrust.ErrMissingSignatureKey, libtrust.ErrInvalidJSONContent, libtrust.ErrMissingSignatureKey: + errs = append(errs, distribution.ErrManifestUnverified{}) + default: + if err.Error() == "invalid signature" { // TODO(stevvooe): This should be exported by libtrust + errs = append(errs, distribution.ErrManifestUnverified{}) + } else { + errs = append(errs, err) + } + } + } + + if !skipDependencyVerification { + for _, fsLayer := range mnfst.References() { + _, err := ms.repository.Blobs(ctx).Stat(ctx, fsLayer.Digest) + if err != nil { + if err != distribution.ErrBlobUnknown { + errs = append(errs, err) + } + + // On error here, we always append unknown blob errors. 
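signedManifestHandler.Unmarshal above rebuilds a pretty-signed schema1 manifest from the stored canonical JSON plus the detached signatures kept in the signature store. A hedged sketch of that reassembly, assuming the vendored libtrust and schema1 packages; reassemble is an illustrative name:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/distribution/manifest/schema1"
	"github.com/docker/libtrust"
)

// reassemble rebuilds a pretty-signed schema1 manifest from its stored JSON
// content and the detached JWS signatures, following the Unmarshal path above.
func reassemble(content []byte, signatures [][]byte) (*schema1.SignedManifest, error) {
	jsig, err := libtrust.NewJSONSignature(content, signatures...)
	if err != nil {
		return nil, err
	}

	// Extract the pretty JWS form that clients expect to pull.
	raw, err := jsig.PrettySignature("signatures")
	if err != nil {
		return nil, err
	}

	var sm schema1.SignedManifest
	if err := json.Unmarshal(raw, &sm); err != nil {
		return nil, err
	}
	return &sm, nil
}

func main() {
	fmt.Println("see signedManifestHandler.Unmarshal above for the production path")
}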
+ errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: fsLayer.Digest}) + } + } + } + if len(errs) != 0 { + return errs + } + + return nil +} diff --git a/vendor/github.com/docker/distribution/registry/storage/tagstore.go b/vendor/github.com/docker/distribution/registry/storage/tagstore.go index aec95286..8381d244 100644 --- a/vendor/github.com/docker/distribution/registry/storage/tagstore.go +++ b/vendor/github.com/docker/distribution/registry/storage/tagstore.go @@ -9,37 +9,41 @@ import ( storagedriver "github.com/docker/distribution/registry/storage/driver" ) +var _ distribution.TagService = &tagStore{} + // tagStore provides methods to manage manifest tags in a backend storage driver. +// This implementation uses the same on-disk layout as the (now deleted) tag +// store. This provides backward compatibility with current registry deployments +// which only makes use of the Digest field of the returned distribution.Descriptor +// but does not enable full roundtripping of Descriptor objects type tagStore struct { repository *repository blobStore *blobStore - ctx context.Context } -// tags lists the manifest tags for the specified repository. -func (ts *tagStore) tags() ([]string, error) { - p, err := pathFor(manifestTagPathSpec{ - name: ts.repository.Name(), - }) +// All returns all tags +func (ts *tagStore) All(ctx context.Context) ([]string, error) { + var tags []string + pathSpec, err := pathFor(manifestTagPathSpec{ + name: ts.repository.Name().Name(), + }) if err != nil { - return nil, err + return tags, err } - var tags []string - entries, err := ts.blobStore.driver.List(ts.ctx, p) + entries, err := ts.blobStore.driver.List(ctx, pathSpec) if err != nil { switch err := err.(type) { case storagedriver.PathNotFoundError: - return nil, distribution.ErrRepositoryUnknown{Name: ts.repository.Name()} + return tags, distribution.ErrRepositoryUnknown{Name: ts.repository.Name().Name()} default: - return nil, err + return tags, err } } for _, entry := range entries { _, filename := path.Split(entry) - tags = append(tags, filename) } @@ -47,9 +51,9 @@ func (ts *tagStore) tags() ([]string, error) { } // exists returns true if the specified manifest tag exists in the repository. -func (ts *tagStore) exists(tag string) (bool, error) { +func (ts *tagStore) exists(ctx context.Context, tag string) (bool, error) { tagPath, err := pathFor(manifestTagCurrentPathSpec{ - name: ts.repository.Name(), + name: ts.repository.Name().Name(), tag: tag, }) @@ -57,7 +61,7 @@ func (ts *tagStore) exists(tag string) (bool, error) { return false, err } - exists, err := exists(ts.ctx, ts.blobStore.driver, tagPath) + exists, err := exists(ctx, ts.blobStore.driver, tagPath) if err != nil { return false, err } @@ -65,11 +69,11 @@ func (ts *tagStore) exists(tag string) (bool, error) { return exists, nil } -// tag tags the digest with the given tag, updating the the store to point at +// Tag tags the digest with the given tag, updating the the store to point at // the current tag. The digest must point to a manifest. 
-func (ts *tagStore) tag(tag string, revision digest.Digest) error { +func (ts *tagStore) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error { currentPath, err := pathFor(manifestTagCurrentPathSpec{ - name: ts.repository.Name(), + name: ts.repository.Name().Name(), tag: tag, }) @@ -77,53 +81,58 @@ func (ts *tagStore) tag(tag string, revision digest.Digest) error { return err } - nbs := ts.linkedBlobStore(ts.ctx, tag) + lbs := ts.linkedBlobStore(ctx, tag) + // Link into the index - if err := nbs.linkBlob(ts.ctx, distribution.Descriptor{Digest: revision}); err != nil { + if err := lbs.linkBlob(ctx, desc); err != nil { return err } // Overwrite the current link - return ts.blobStore.link(ts.ctx, currentPath, revision) + return ts.blobStore.link(ctx, currentPath, desc.Digest) } // resolve the current revision for name and tag. -func (ts *tagStore) resolve(tag string) (digest.Digest, error) { +func (ts *tagStore) Get(ctx context.Context, tag string) (distribution.Descriptor, error) { currentPath, err := pathFor(manifestTagCurrentPathSpec{ - name: ts.repository.Name(), + name: ts.repository.Name().Name(), tag: tag, }) if err != nil { - return "", err + return distribution.Descriptor{}, err } - revision, err := ts.blobStore.readlink(ts.ctx, currentPath) + revision, err := ts.blobStore.readlink(ctx, currentPath) if err != nil { switch err.(type) { case storagedriver.PathNotFoundError: - return "", distribution.ErrManifestUnknown{Name: ts.repository.Name(), Tag: tag} + return distribution.Descriptor{}, distribution.ErrTagUnknown{Tag: tag} } - return "", err + return distribution.Descriptor{}, err } - return revision, nil + return distribution.Descriptor{Digest: revision}, nil } -// delete removes the tag from repository, including the history of all -// revisions that have the specified tag. -func (ts *tagStore) delete(tag string) error { +// Untag removes the tag association +func (ts *tagStore) Untag(ctx context.Context, tag string) error { tagPath, err := pathFor(manifestTagPathSpec{ - name: ts.repository.Name(), + name: ts.repository.Name().Name(), tag: tag, }) - if err != nil { + switch err.(type) { + case storagedriver.PathNotFoundError: + return distribution.ErrTagUnknown{Tag: tag} + case nil: + break + default: return err } - return ts.blobStore.driver.Delete(ts.ctx, tagPath) + return ts.blobStore.driver.Delete(ctx, tagPath) } // linkedBlobStore returns the linkedBlobStore for the named tag, allowing one @@ -145,3 +154,38 @@ func (ts *tagStore) linkedBlobStore(ctx context.Context, tag string) *linkedBlob }}, } } + +// Lookup recovers a list of tags which refer to this digest. When a manifest is deleted by +// digest, tag entries which point to it need to be recovered to avoid dangling tags. 
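+// This implementation scans linearly over All(ctx): it reads the current tag
+// link for every tag and compares the linked digest against desc.Digest, so
+// its cost grows with the number of tags in the repository.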
+func (ts *tagStore) Lookup(ctx context.Context, desc distribution.Descriptor) ([]string, error) { + allTags, err := ts.All(ctx) + switch err.(type) { + case distribution.ErrRepositoryUnknown: + // This tag store has been initialized but not yet populated + break + case nil: + break + default: + return nil, err + } + + var tags []string + for _, tag := range allTags { + tagLinkPathSpec := manifestTagCurrentPathSpec{ + name: ts.repository.Name().Name(), + tag: tag, + } + + tagLinkPath, err := pathFor(tagLinkPathSpec) + tagDigest, err := ts.blobStore.readlink(ctx, tagLinkPath) + if err != nil { + return nil, err + } + + if tagDigest == desc.Digest { + tags = append(tags, tag) + } + } + + return tags, nil +} diff --git a/vendor/github.com/docker/distribution/registry/storage/tagstore_test.go b/vendor/github.com/docker/distribution/registry/storage/tagstore_test.go new file mode 100644 index 00000000..52873a69 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/tagstore_test.go @@ -0,0 +1,208 @@ +package storage + +import ( + "testing" + + "github.com/docker/distribution" + "github.com/docker/distribution/context" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/storage/driver/inmemory" +) + +type tagsTestEnv struct { + ts distribution.TagService + ctx context.Context +} + +func testTagStore(t *testing.T) *tagsTestEnv { + ctx := context.Background() + d := inmemory.New() + reg, err := NewRegistry(ctx, d) + if err != nil { + t.Fatal(err) + } + + repoRef, _ := reference.ParseNamed("a/b") + repo, err := reg.Repository(ctx, repoRef) + if err != nil { + t.Fatal(err) + } + + return &tagsTestEnv{ + ctx: ctx, + ts: repo.Tags(ctx), + } +} + +func TestTagStoreTag(t *testing.T) { + env := testTagStore(t) + tags := env.ts + ctx := env.ctx + + d := distribution.Descriptor{} + err := tags.Tag(ctx, "latest", d) + if err == nil { + t.Errorf("unexpected error putting malformed descriptor : %s", err) + } + + d.Digest = "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + err = tags.Tag(ctx, "latest", d) + if err != nil { + t.Error(err) + } + + d1, err := tags.Get(ctx, "latest") + if err != nil { + t.Error(err) + } + + if d1.Digest != d.Digest { + t.Error("put and get digest differ") + } + + // Overwrite existing + d.Digest = "sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + err = tags.Tag(ctx, "latest", d) + if err != nil { + t.Error(err) + } + + d1, err = tags.Get(ctx, "latest") + if err != nil { + t.Error(err) + } + + if d1.Digest != d.Digest { + t.Error("put and get digest differ") + } +} + +func TestTagStoreUnTag(t *testing.T) { + env := testTagStore(t) + tags := env.ts + ctx := env.ctx + desc := distribution.Descriptor{Digest: "sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"} + + err := tags.Untag(ctx, "latest") + if err == nil { + t.Errorf("Expected error untagging non-existant tag") + } + + err = tags.Tag(ctx, "latest", desc) + if err != nil { + t.Error(err) + } + + err = tags.Untag(ctx, "latest") + if err != nil { + t.Error(err) + } + + _, err = tags.Get(ctx, "latest") + if err == nil { + t.Error("Expected error getting untagged tag") + } +} + +func TestTagStoreAll(t *testing.T) { + env := testTagStore(t) + tagStore := env.ts + ctx := env.ctx + + alpha := "abcdefghijklmnopqrstuvwxyz" + for i := 0; i < len(alpha); i++ { + tag := alpha[i] + desc := distribution.Descriptor{Digest: "sha256:eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee"} + err := 
tagStore.Tag(ctx, string(tag), desc) + if err != nil { + t.Error(err) + } + } + + all, err := tagStore.All(ctx) + if err != nil { + t.Error(err) + } + if len(all) != len(alpha) { + t.Errorf("Unexpected count returned from enumerate") + } + + for i, c := range all { + if c != string(alpha[i]) { + t.Errorf("unexpected tag in enumerate %s", c) + } + } + + removed := "a" + err = tagStore.Untag(ctx, removed) + if err != nil { + t.Error(err) + } + + all, err = tagStore.All(ctx) + if err != nil { + t.Error(err) + } + for _, tag := range all { + if tag == removed { + t.Errorf("unexpected tag in enumerate %s", removed) + } + } + +} + +func TestTagLookup(t *testing.T) { + env := testTagStore(t) + tagStore := env.ts + ctx := env.ctx + + descA := distribution.Descriptor{Digest: "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"} + desc0 := distribution.Descriptor{Digest: "sha256:0000000000000000000000000000000000000000000000000000000000000000"} + + tags, err := tagStore.Lookup(ctx, descA) + if err != nil { + t.Fatal(err) + } + if len(tags) != 0 { + t.Fatalf("Lookup returned > 0 tags from empty store") + } + + err = tagStore.Tag(ctx, "a", descA) + if err != nil { + t.Fatal(err) + } + + err = tagStore.Tag(ctx, "b", descA) + if err != nil { + t.Fatal(err) + } + + err = tagStore.Tag(ctx, "0", desc0) + if err != nil { + t.Fatal(err) + } + + err = tagStore.Tag(ctx, "1", desc0) + if err != nil { + t.Fatal(err) + } + + tags, err = tagStore.Lookup(ctx, descA) + if err != nil { + t.Fatal(err) + } + + if len(tags) != 2 { + t.Errorf("Lookup of descA returned %d tags, expected 2", len(tags)) + } + + tags, err = tagStore.Lookup(ctx, desc0) + if err != nil { + t.Fatal(err) + } + + if len(tags) != 2 { + t.Errorf("Lookup of descB returned %d tags, expected 2", len(tags)) + } + +} diff --git a/vendor/github.com/docker/distribution/registry/storage/walk.go b/vendor/github.com/docker/distribution/registry/storage/walk.go index 3d891276..d979796e 100644 --- a/vendor/github.com/docker/distribution/registry/storage/walk.go +++ b/vendor/github.com/docker/distribution/registry/storage/walk.go @@ -3,6 +3,7 @@ package storage import ( "errors" "fmt" + "sort" "github.com/docker/distribution/context" storageDriver "github.com/docker/distribution/registry/storage/driver" @@ -26,7 +27,12 @@ func Walk(ctx context.Context, driver storageDriver.StorageDriver, from string, if err != nil { return err } + sort.Stable(sort.StringSlice(children)) for _, child := range children { + // TODO(stevvooe): Calling driver.Stat for every entry is quite + // expensive when running against backends with a slow Stat + // implementation, such as s3. This is very likely a serious + // performance bottleneck. 
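+ // Note: children are visited in sorted order (see sort.Stable above), and
+ // an error from Stat or from the recursive Walk call below aborts the walk
+ // and is returned to the caller.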
fileInfo, err := driver.Stat(ctx, child) if err != nil { return err @@ -38,7 +44,9 @@ func Walk(ctx context.Context, driver storageDriver.StorageDriver, from string, } if fileInfo.IsDir() && !skipDir { - Walk(ctx, driver, child, f) + if err := Walk(ctx, driver, child, f); err != nil { + return err + } } } return nil diff --git a/vendor/github.com/docker/distribution/registry/storage/walk_test.go b/vendor/github.com/docker/distribution/registry/storage/walk_test.go index 40b8547c..42f67dba 100644 --- a/vendor/github.com/docker/distribution/registry/storage/walk_test.go +++ b/vendor/github.com/docker/distribution/registry/storage/walk_test.go @@ -2,6 +2,7 @@ package storage import ( "fmt" + "sort" "testing" "github.com/docker/distribution/context" @@ -11,14 +12,7 @@ import ( func testFS(t *testing.T) (driver.StorageDriver, map[string]string, context.Context) { d := inmemory.New() - c := []byte("") ctx := context.Background() - if err := d.PutContent(ctx, "/a/b/c/d", c); err != nil { - t.Fatalf("Unable to put to inmemory fs") - } - if err := d.PutContent(ctx, "/a/b/c/e", c); err != nil { - t.Fatalf("Unable to put to inmemory fs") - } expected := map[string]string{ "/a": "dir", @@ -26,6 +20,22 @@ func testFS(t *testing.T) (driver.StorageDriver, map[string]string, context.Cont "/a/b/c": "dir", "/a/b/c/d": "file", "/a/b/c/e": "file", + "/a/b/f": "dir", + "/a/b/f/g": "file", + "/a/b/f/h": "file", + "/a/b/f/i": "file", + "/z": "dir", + "/z/y": "file", + } + + for p, typ := range expected { + if typ != "file" { + continue + } + + if err := d.PutContent(ctx, p, []byte(p)); err != nil { + t.Fatalf("unable to put content into fixture: %v", err) + } } return d, expected, ctx @@ -41,19 +51,26 @@ func TestWalkErrors(t *testing.T) { t.Error("Expected invalid root err") } + errEarlyExpected := fmt.Errorf("Early termination") + err = Walk(ctx, d, "/", func(fileInfo driver.FileInfo) error { // error on the 2nd file if fileInfo.Path() == "/a/b" { - return fmt.Errorf("Early termination") + return errEarlyExpected } + delete(expected, fileInfo.Path()) return nil }) if len(expected) != fileCount-1 { t.Error("Walk failed to terminate with error") } - if err != nil { - t.Error(err.Error()) + if err != errEarlyExpected { + if err == nil { + t.Fatalf("expected an error due to early termination") + } else { + t.Error(err.Error()) + } } err = Walk(ctx, d, "/nonexistant", func(fileInfo driver.FileInfo) error { @@ -67,6 +84,7 @@ func TestWalkErrors(t *testing.T) { func TestWalk(t *testing.T) { d, expected, ctx := testFS(t) + var traversed []string err := Walk(ctx, d, "/", func(fileInfo driver.FileInfo) error { filePath := fileInfo.Path() filetype, ok := expected[filePath] @@ -82,13 +100,26 @@ func TestWalk(t *testing.T) { if filetype != "file" { t.Errorf("Unexpected file type: %q", filePath) } + + // each file has its own path as the contents. If the length + // doesn't match the path length, fail. 
+ if fileInfo.Size() != int64(len(fileInfo.Path())) { + t.Fatalf("unexpected size for %q: %v != %v", + fileInfo.Path(), fileInfo.Size(), len(fileInfo.Path())) + } } delete(expected, filePath) + traversed = append(traversed, filePath) return nil }) if len(expected) > 0 { t.Errorf("Missed files in walk: %q", expected) } + + if !sort.StringsAreSorted(traversed) { + t.Errorf("result should be sorted: %v", traversed) + } + if err != nil { t.Fatalf(err.Error()) } diff --git a/vendor/github.com/docker/distribution/tags.go b/vendor/github.com/docker/distribution/tags.go new file mode 100644 index 00000000..50305659 --- /dev/null +++ b/vendor/github.com/docker/distribution/tags.go @@ -0,0 +1,27 @@ +package distribution + +import ( + "github.com/docker/distribution/context" +) + +// TagService provides access to information about tagged objects. +type TagService interface { + // Get retrieves the descriptor identified by the tag. Some + // implementations may differentiate between "trusted" tags and + // "untrusted" tags. If a tag is "untrusted", the mapping will be returned + // as an ErrTagUntrusted error, with the target descriptor. + Get(ctx context.Context, tag string) (Descriptor, error) + + // Tag associates the tag with the provided descriptor, updating the + // current association, if needed. + Tag(ctx context.Context, tag string, desc Descriptor) error + + // Untag removes the given tag association + Untag(ctx context.Context, tag string) error + + // All returns the set of tags managed by this tag service + All(ctx context.Context) ([]string, error) + + // Lookup returns the set of tags referencing the given digest. + Lookup(ctx context.Context, digest Descriptor) ([]string, error) +} diff --git a/vendor/github.com/docker/libnetwork/.dockerignore b/vendor/github.com/docker/libnetwork/.dockerignore new file mode 100644 index 00000000..72e8ffc0 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/.dockerignore @@ -0,0 +1 @@ +* diff --git a/vendor/github.com/docker/libnetwork/.gitignore b/vendor/github.com/docker/libnetwork/.gitignore index 0e0d4892..f9cd104f 100644 --- a/vendor/github.com/docker/libnetwork/.gitignore +++ b/vendor/github.com/docker/libnetwork/.gitignore @@ -2,11 +2,14 @@ *.o *.a *.so +bin/ # Folders integration-tmp/ _obj _test +.vagrant + # Architecture specific extensions/prefixes *.[568vq] @@ -33,4 +36,4 @@ cmd/dnet/dnet .project .settings/ -libnetwork-build.created +libnetworkbuild.created diff --git a/vendor/github.com/docker/libnetwork/CHANGELOG.md b/vendor/github.com/docker/libnetwork/CHANGELOG.md new file mode 100644 index 00000000..ea136da5 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/CHANGELOG.md @@ -0,0 +1,55 @@ +# Changelog + +## 0.5.6 (2016-01-14) +- Setup embedded DNS server correctly on container restart. Fixes docker/docker#19354 + +## 0.5.5 (2016-01-14) +- Allow network-scoped alias to be resolved for anonymous endpoint +- Self repair corrupted IP database that could happen in 1.9.0 & 1.9.1 +- Skip IPTables cleanup if --iptables=false is set. 
Fixes docker/docker#19063 + +## 0.5.4 (2016-01-12) +- Removed the isNodeAlive protection when user forces an endpoint delete + +## 0.5.3 (2016-01-12) +- Bridge driver supporting internal network option +- Backend implementation to support "force" option to network disconnect +- Fixing a regex in etchosts package to fix docker/docker#19080 + +## 0.5.2 (2016-01-08) +- Embedded DNS replacing /etc/hosts based Service Discovery +- Container local alias and Network-scoped alias support +- Backend support for internal network mode +- Support for IPAM driver options +- Fixes overlay veth cleanup issue : docker/docker#18814 +- fixes docker/docker#19139 +- disable IPv6 Duplicate Address Detection + +## 0.5.1 (2015-12-07) +- Allowing user to assign IP Address for containers +- Fixes docker/docker#18214 +- Fixes docker/docker#18380 + +## 0.5.0 (2015-10-30) + +- Docker multi-host networking exiting experimental channel +- Introduced IP Address Management and IPAM drivers +- DEPRECATE service discovery from default bridge network +- Introduced new network UX +- Support for multiple networks in bridge driver +- Local persistance with boltdb + +## 0.4.0 (2015-07-24) + +- Introduce experimental version of Overlay driver +- Introduce experimental version of network plugins +- Introduce experimental version of network & service UX +- Introduced experimental /etc/hosts based service discovery +- Integrated with libkv +- Improving test coverage +- Fixed a bunch of issues with osl namespace mgmt + +## 0.3.0 (2015-05-27) + +- Introduce CNM (Container Networking Model) +- Replace docker networking with CNM & Bridge driver diff --git a/vendor/github.com/docker/libnetwork/Dockerfile.build b/vendor/github.com/docker/libnetwork/Dockerfile.build new file mode 100644 index 00000000..2b767c2a --- /dev/null +++ b/vendor/github.com/docker/libnetwork/Dockerfile.build @@ -0,0 +1,13 @@ +FROM golang:1.4-cross +RUN apt-get update && apt-get -y install iptables + +RUN cd /go/src && mkdir -p golang.org/x && \ + cd golang.org/x && git clone https://github.com/golang/tools && \ + cd tools && git checkout release-branch.go1.5 + +RUN go get github.com/tools/godep \ + github.com/golang/lint/golint \ + golang.org/x/tools/cmd/vet \ + golang.org/x/tools/cmd/goimports \ + golang.org/x/tools/cmd/cover\ + github.com/mattn/goveralls diff --git a/vendor/github.com/docker/libnetwork/MAINTAINERS b/vendor/github.com/docker/libnetwork/MAINTAINERS index 69f1e9b8..33f2dd2e 100644 --- a/vendor/github.com/docker/libnetwork/MAINTAINERS +++ b/vendor/github.com/docker/libnetwork/MAINTAINERS @@ -1,5 +1,52 @@ -Alessandro Boch (@aboch) -Alexandr Morozov (@LK4D4) -Arnaud Porterie (@icecrime) -Jana Radhakrishnan (@mrjana) -Madhu Venugopal (@mavenugo) +# Libnetwork maintainers file +# +# This file describes who runs the docker/libnetwork project and how. +# This is a living document - if you see something out of date or missing, speak up! +# +# It is structured to be consumable by both humans and programs. +# To extract its contents programmatically, use any TOML-compliant parser. +# +# This file is compiled into the MAINTAINERS file in docker/opensource. +# +[Org] + [Org."Core maintainers"] + people = [ + "aboch", + "LK4D4", + "icecrime", + "mrjana", + "mavenugo", + ] + +[people] + +# A reference list of all people associated with the project. +# All other sections should refer to people by their canonical key +# in the people section. 
+ + # ADD YOURSELF HERE IN ALPHABETICAL ORDER + + [people.aboch] + Name = "Alessandro Boch" + Email = "aboch@docker.com" + GitHub = "aboch" + + [people.LK4D4] + Name = "Alexandr Morozov" + Email = "lk4d4@docker.com" + GitHub = "LK4D4" + + [people.icecrime] + Name = "Arnaud Porterie" + Email = "arnaud@docker.com" + GitHub = "icecrime" + + [people.mrjana] + Name = "Jana Radhakrishnan" + Email = "mrjana@docker.com" + GitHub = "mrjana" + + [people.mavenugo] + Name = "Madhu Venugopal" + Email = "madhu@docker.com" + GitHub = "mavenugo" diff --git a/vendor/github.com/docker/libnetwork/Makefile b/vendor/github.com/docker/libnetwork/Makefile index b1eabf52..0741af2f 100644 --- a/vendor/github.com/docker/libnetwork/Makefile +++ b/vendor/github.com/docker/libnetwork/Makefile @@ -1,32 +1,19 @@ -.PHONY: all all-local build build-local check check-code check-format run-tests check-local integration-tests install-deps coveralls circle-ci start-services clean +.PHONY: all all-local build build-local clean cross cross-local check check-code check-format run-tests integration-tests check-local coveralls circle-ci-cross circle-ci-build circle-ci-check circle-ci SHELL=/bin/bash build_image=libnetworkbuild dockerargs = --privileged -v $(shell pwd):/go/src/github.com/docker/libnetwork -w /go/src/github.com/docker/libnetwork container_env = -e "INSIDECONTAINER=-incontainer=true" -docker = docker run --rm -it ${dockerargs} ${container_env} ${build_image} -ciargs = -e "COVERALLS_TOKEN=$$COVERALLS_TOKEN" -e "INSIDECONTAINER=-incontainer=true" -cidocker = docker run ${ciargs} ${dockerargs} golang:1.4 +docker = docker run --rm -it ${dockerargs} $$EXTRA_ARGS ${container_env} ${build_image} +ciargs = -e CIRCLECI -e "COVERALLS_TOKEN=$$COVERALLS_TOKEN" -e "INSIDECONTAINER=-incontainer=true" +cidocker = docker run ${dockerargs} ${ciargs} ${container_env} ${build_image} +CROSS_PLATFORMS = linux/amd64 linux/386 linux/arm windows/amd64 windows/386 all: ${build_image}.created build check integration-tests clean -integration-tests: ./cmd/dnet/dnet - @./test/integration/dnet/run-integration-tests.sh - -./cmd/dnet/dnet: - make build - -clean: - @if [ -e ./cmd/dnet/dnet ]; then \ - echo "Removing dnet binary"; \ - rm -rf ./cmd/dnet/dnet; \ - fi - -all-local: check-local build-local +all-local: build-local check-local integration-tests-local clean ${build_image}.created: - docker run --name=libnetworkbuild -v $(shell pwd):/go/src/github.com/docker/libnetwork -w /go/src/github.com/docker/libnetwork golang:1.4 make install-deps - docker commit libnetworkbuild ${build_image} - docker rm libnetworkbuild + docker build -f Dockerfile.build -t ${build_image} . touch ${build_image}.created build: ${build_image}.created @@ -35,8 +22,25 @@ build: ${build_image}.created @echo "Done building code" build-local: - @$(shell which godep) go build ./... - @$(shell which godep) go build -o ./cmd/dnet/dnet ./cmd/dnet + @mkdir -p "bin" + $(shell which godep) go build -o "bin/dnet" ./cmd/dnet + +clean: + @if [ -d bin ]; then \ + echo "Removing dnet binaries"; \ + rm -rf bin; \ + fi + +cross: ${build_image}.created + @mkdir -p "bin" + @for platform in ${CROSS_PLATFORMS}; do \ + EXTRA_ARGS="-e GOOS=$${platform%/*} -e GOARCH=$${platform##*/}" ; \ + echo "$${platform}..." 
; \ + ${docker} make cross-local ; \ + done + +cross-local: + $(shell which godep) go build -o "bin/dnet-$$GOOS-$$GOARCH" ./cmd/dnet check: ${build_image}.created @${docker} ./wrapmake.sh check-local @@ -71,27 +75,31 @@ run-tests: done @echo "Done running tests" -check-local: check-format check-code start-services run-tests +check-local: check-format check-code run-tests -install-deps: - apt-get update && apt-get -y install iptables zookeeperd - git clone https://github.com/golang/tools /go/src/golang.org/x/tools - go install golang.org/x/tools/cmd/vet - go install golang.org/x/tools/cmd/goimports - go install golang.org/x/tools/cmd/cover - go get github.com/tools/godep - go get github.com/golang/lint/golint - go get github.com/mattn/goveralls +integration-tests: ./bin/dnet + @./test/integration/dnet/run-integration-tests.sh + +./bin/dnet: + make build coveralls: -@goveralls -service circleci -coverprofile=coverage.coverprofile -repotoken $$COVERALLS_TOKEN # CircleCI's Docker fails when cleaning up using the --rm flag -# The following target is a workaround for this +# The following targets are a workaround for this +circle-ci-cross: ${build_image}.created + @mkdir -p "bin" + @for platform in ${CROSS_PLATFORMS}; do \ + EXTRA_ARGS="-e GOOS=$${platform%/*} -e GOARCH=$${platform##*/}" ; \ + echo "$${platform}..." ; \ + ${cidocker} make cross-local ; \ + done -circle-ci: - @${cidocker} make install-deps build-local check-local coveralls - make integration-tests +circle-ci-check: ${build_image}.created + @${cidocker} make check-local coveralls -start-services: - service zookeeper start +circle-ci-build: ${build_image}.created + @${cidocker} make build-local + +circle-ci: circle-ci-check circle-ci-build integration-tests diff --git a/vendor/github.com/docker/libnetwork/README.md b/vendor/github.com/docker/libnetwork/README.md index aa3fcbce..3f10a031 100644 --- a/vendor/github.com/docker/libnetwork/README.md +++ b/vendor/github.com/docker/libnetwork/README.md @@ -6,8 +6,6 @@ Libnetwork provides a native Go implementation for connecting containers The goal of libnetwork is to deliver a robust Container Network Model that provides a consistent programming interface and the required network abstractions for applications. -**NOTE**: libnetwork project is under heavy development and is not ready for general use. - #### Design Please refer to the [design](docs/design.md) for more information. @@ -17,6 +15,11 @@ There are many networking solutions available to suit a broad range of use-cases ```go +func main() { + if reexec.Init() { + return + } + // Select and configure the network driver networkType := "bridge" @@ -26,14 +29,14 @@ There are many networking solutions available to suit a broad range of use-cases genericOption[netlabel.GenericData] = driverOptions controller, err := libnetwork.New(config.OptionDriverConfig(networkType, genericOption)) if err != nil { - return + log.Fatalf("libnetwork.New: %s", err) } // Create a network for containers to join. // NewNetwork accepts Variadic optional arguments that libnetwork and Drivers can use. network, err := controller.NewNetwork(networkType, "network1") if err != nil { - return + log.Fatalf("controller.NewNetwork: %s", err) } // For each new container: allocate IP and interfaces. The returned network @@ -42,7 +45,7 @@ There are many networking solutions available to suit a broad range of use-cases // from the returned endpoint. 
ep, err := network.CreateEndpoint("Endpoint1") if err != nil { - return + log.Fatalf("network.CreateEndpoint: %s", err) } // Create the sandbox for the container. @@ -50,27 +53,30 @@ There are many networking solutions available to suit a broad range of use-cases sbx, err := controller.NewSandbox("container1", libnetwork.OptionHostname("test"), libnetwork.OptionDomainname("docker.io")) + if err != nil { + log.Fatalf("controller.NewSandbox: %s", err) + } // A sandbox can join the endpoint via the join api. err = ep.Join(sbx) if err != nil { - return + log.Fatalf("ep.Join: %s", err) } // libnetwork client can check the endpoint's operational data via the Info() API epInfo, err := ep.DriverInfo() - mapData, ok := epInfo[netlabel.PortMap] - if ok { - portMapping, ok := mapData.([]types.PortBinding) - if ok { - fmt.Printf("Current port mapping for endpoint %s: %v", ep.Name(), portMapping) - } + if err != nil { + log.Fatalf("ep.DriverInfo: %s", err) } -``` -#### Current Status -Please watch this space for updates on the progress. -Currently libnetwork is nothing more than an attempt to modularize the Docker platform's networking subsystem by moving it into libnetwork as a library. + macAddress, ok := epInfo[netlabel.MacAddress] + if !ok { + log.Fatalf("failed to get mac address from endpoint info") + } + + fmt.Printf("Joined endpoint %s (%s) to sandbox %s (%s)\n", ep.Name(), macAddress, sbx.ContainerID(), sbx.Key()) +} +``` ## Future Please refer to [roadmap](ROADMAP.md) for more information. diff --git a/vendor/github.com/docker/libnetwork/circle.yml b/vendor/github.com/docker/libnetwork/circle.yml index d02f6a92..a454d212 100644 --- a/vendor/github.com/docker/libnetwork/circle.yml +++ b/vendor/github.com/docker/libnetwork/circle.yml @@ -1,12 +1,18 @@ machine: - services: - - docker + services: + - docker dependencies: - override: - - echo "Nothing to install" + override: + - sudo apt-get update; sudo apt-get install -y iptables zookeeperd + - go get golang.org/x/tools/cmd/vet + - go get golang.org/x/tools/cmd/goimports + - go get golang.org/x/tools/cmd/cover + - go get github.com/tools/godep + - go get github.com/golang/lint/golint + - go get github.com/mattn/goveralls test: - override: - - make circle-ci + override: + - make circle-ci diff --git a/vendor/github.com/docker/libnetwork/controller.go b/vendor/github.com/docker/libnetwork/controller.go index a0cb4cb5..7efc4093 100644 --- a/vendor/github.com/docker/libnetwork/controller.go +++ b/vendor/github.com/docker/libnetwork/controller.go @@ -121,21 +121,19 @@ type driverData struct { } type ipamData struct { - driver ipamapi.Ipam + driver ipamapi.Ipam + capability *ipamapi.Capability // default address spaces are provided by ipam driver at registration time defaultLocalAddressSpace, defaultGlobalAddressSpace string } type driverTable map[string]*driverData -//type networkTable map[string]*network -//type endpointTable map[string]*endpoint type ipamTable map[string]*ipamData type sandboxTable map[string]*sandbox type controller struct { - id string - //networks networkTable + id string drivers driverTable ipamDrivers ipamTable sandboxes sandboxTable @@ -145,7 +143,7 @@ type controller struct { extKeyListener net.Listener watchCh chan *endpoint unWatchCh chan *endpoint - svcDb map[string]svcMap + svcDb map[string]svcInfo nmap map[string]*netWatch defOsSbox osl.Sandbox sboxOnce sync.Once @@ -173,7 +171,7 @@ func New(cfgOptions ...config.Option) (NetworkController, error) { sandboxes: sandboxTable{}, drivers: driverTable{}, ipamDrivers: 
ipamTable{}, - svcDb: make(map[string]svcMap), + svcDb: make(map[string]svcInfo), } if err := c.initStores(); err != nil { @@ -218,6 +216,31 @@ func (c *controller) validateHostDiscoveryConfig() bool { return true } +func (c *controller) clusterHostID() string { + c.Lock() + defer c.Unlock() + if c.cfg == nil || c.cfg.Cluster.Address == "" { + return "" + } + addr := strings.Split(c.cfg.Cluster.Address, ":") + return addr[0] +} + +func (c *controller) isNodeAlive(node string) bool { + if c.discovery == nil { + return false + } + + nodes := c.discovery.Fetch() + for _, n := range nodes { + if n.String() == node { + return true + } + } + + return false +} + func (c *controller) initDiscovery(watcher discovery.Watcher) error { if c.cfg == nil { return fmt.Errorf("discovery initialization requires a valid configuration") @@ -309,7 +332,7 @@ func (c *controller) RegisterDriver(networkType string, driver driverapi.Driver, return nil } -func (c *controller) RegisterIpamDriver(name string, driver ipamapi.Ipam) error { +func (c *controller) registerIpamDriver(name string, driver ipamapi.Ipam, caps *ipamapi.Capability) error { if !config.IsValidName(name) { return ErrInvalidName(name) } @@ -318,21 +341,29 @@ func (c *controller) RegisterIpamDriver(name string, driver ipamapi.Ipam) error _, ok := c.ipamDrivers[name] c.Unlock() if ok { - return driverapi.ErrActiveRegistration(name) + return types.ForbiddenErrorf("ipam driver %q already registered", name) } locAS, glbAS, err := driver.GetDefaultAddressSpaces() if err != nil { - return fmt.Errorf("ipam driver %s failed to return default address spaces: %v", name, err) + return types.InternalErrorf("ipam driver %q failed to return default address spaces: %v", name, err) } c.Lock() - c.ipamDrivers[name] = &ipamData{driver: driver, defaultLocalAddressSpace: locAS, defaultGlobalAddressSpace: glbAS} + c.ipamDrivers[name] = &ipamData{driver: driver, defaultLocalAddressSpace: locAS, defaultGlobalAddressSpace: glbAS, capability: caps} c.Unlock() - log.Debugf("Registering ipam provider: %s", name) + log.Debugf("Registering ipam driver: %q", name) return nil } +func (c *controller) RegisterIpamDriver(name string, driver ipamapi.Ipam) error { + return c.registerIpamDriver(name, driver, &ipamapi.Capability{}) +} + +func (c *controller) RegisterIpamDriverWithCapabilities(name string, driver ipamapi.Ipam, caps *ipamapi.Capability) error { + return c.registerIpamDriver(name, driver, caps) +} + // NewNetwork creates a new network of the specified network type. The options // are network specific and modeled in a generic way. 
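+// A typical call now passes the extra IPAM options argument, e.g. (hypothetical
+// values, mirroring the updated test helpers):
+//
+//	n, err := c.NewNetwork("bridge", "net1",
+//		NetworkOptionIpam(ipamapi.DefaultIPAM, "", v4Confs, v6Confs, nil))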
func (c *controller) NewNetwork(networkType, name string, options ...NetworkOption) (Network, error) { @@ -670,7 +701,7 @@ func (c *controller) loadIpamDriver(name string) (*ipamData, error) { id, ok := c.ipamDrivers[name] c.Unlock() if !ok { - return nil, ErrInvalidNetworkDriver(name) + return nil, types.BadRequestErrorf("invalid ipam driver: %q", name) } return id, nil } diff --git a/vendor/github.com/docker/libnetwork/default_gateway.go b/vendor/github.com/docker/libnetwork/default_gateway.go index 5d58b061..bfd7b725 100644 --- a/vendor/github.com/docker/libnetwork/default_gateway.go +++ b/vendor/github.com/docker/libnetwork/default_gateway.go @@ -87,7 +87,7 @@ func (sb *sandbox) clearDefaultGW() error { if err := ep.sbLeave(sb); err != nil { return fmt.Errorf("container %s: endpoint leaving GW Network failed: %v", sb.containerID, err) } - if err := ep.Delete(); err != nil { + if err := ep.Delete(false); err != nil { return fmt.Errorf("container %s: deleting endpoint on GW Network failed: %v", sb.containerID, err) } return nil @@ -103,10 +103,21 @@ func (sb *sandbox) needDefaultGW() bool { if ep.getNetwork().Type() == "null" || ep.getNetwork().Type() == "host" { continue } + if ep.getNetwork().Internal() { + return false + } + if ep.joinInfo.disableGatewayService { + return false + } // TODO v6 needs to be handled. if len(ep.Gateway()) > 0 { return false } + for _, r := range ep.StaticRoutes() { + if r.Destination.String() == "0.0.0.0/0" { + return false + } + } needGW = true } return needGW diff --git a/vendor/github.com/docker/libnetwork/endpoint.go b/vendor/github.com/docker/libnetwork/endpoint.go index de7b652d..88312e9c 100644 --- a/vendor/github.com/docker/libnetwork/endpoint.go +++ b/vendor/github.com/docker/libnetwork/endpoint.go @@ -41,7 +41,7 @@ type Endpoint interface { DriverInfo() (map[string]interface{}, error) // Delete and detaches this endpoint from the network. 
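+ // When force is true the endpoint is deleted even if it is still joined to
+ // a sandbox: the active-container check is skipped, the endpoint leaves the
+ // sandbox first, and most cleanup failures are logged instead of aborting.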
- Delete() error + Delete(force bool) error } // EndpointOption is a option setter function type used to pass varios options to Network @@ -50,18 +50,25 @@ type Endpoint interface { type EndpointOption func(ep *endpoint) type endpoint struct { - name string - id string - network *network - iface *endpointInterface - joinInfo *endpointJoinInfo - sandboxID string - exposedPorts []types.TransportPort - anonymous bool - generic map[string]interface{} - joinLeaveDone chan struct{} - dbIndex uint64 - dbExists bool + name string + id string + network *network + iface *endpointInterface + joinInfo *endpointJoinInfo + sandboxID string + locator string + exposedPorts []types.TransportPort + anonymous bool + disableResolution bool + generic map[string]interface{} + joinLeaveDone chan struct{} + prefAddress net.IP + prefAddressV6 net.IP + ipamOptions map[string]string + aliases map[string]string + myAliases []string + dbIndex uint64 + dbExists bool sync.Mutex } @@ -78,7 +85,10 @@ func (ep *endpoint) MarshalJSON() ([]byte, error) { epMap["generic"] = ep.generic } epMap["sandbox"] = ep.sandboxID + epMap["locator"] = ep.locator epMap["anonymous"] = ep.anonymous + epMap["disableResolution"] = ep.disableResolution + epMap["myAliases"] = ep.myAliases return json.Marshal(epMap) } @@ -156,6 +166,16 @@ func (ep *endpoint) UnmarshalJSON(b []byte) (err error) { if v, ok := epMap["anonymous"]; ok { ep.anonymous = v.(bool) } + if v, ok := epMap["disableResolution"]; ok { + ep.disableResolution = v.(bool) + } + if l, ok := epMap["locator"]; ok { + ep.locator = l.(string) + } + ma, _ := json.Marshal(epMap["myAliases"]) + var myAliases []string + json.Unmarshal(ma, &myAliases) + ep.myAliases = myAliases return nil } @@ -171,9 +191,11 @@ func (ep *endpoint) CopyTo(o datastore.KVObject) error { dstEp.name = ep.name dstEp.id = ep.id dstEp.sandboxID = ep.sandboxID + dstEp.locator = ep.locator dstEp.dbIndex = ep.dbIndex dstEp.dbExists = ep.dbExists dstEp.anonymous = ep.anonymous + dstEp.disableResolution = ep.disableResolution if ep.iface != nil { dstEp.iface = &endpointInterface{} @@ -183,6 +205,9 @@ func (ep *endpoint) CopyTo(o datastore.KVObject) error { dstEp.exposedPorts = make([]types.TransportPort, len(ep.exposedPorts)) copy(dstEp.exposedPorts, ep.exposedPorts) + dstEp.myAliases = make([]string, len(ep.myAliases)) + copy(dstEp.myAliases, ep.myAliases) + dstEp.generic = options.Generic{} for k, v := range ep.generic { dstEp.generic[k] = v @@ -205,6 +230,13 @@ func (ep *endpoint) Name() string { return ep.name } +func (ep *endpoint) MyAliases() []string { + ep.Lock() + defer ep.Unlock() + + return ep.myAliases +} + func (ep *endpoint) Network() string { if ep.network == nil { return "" @@ -219,6 +251,12 @@ func (ep *endpoint) isAnonymous() bool { return ep.anonymous } +func (ep *endpoint) needResolver() bool { + ep.Lock() + defer ep.Unlock() + return !ep.disableResolution +} + // endpoint Key structure : endpoint/network-id/endpoint-id func (ep *endpoint) Key() []string { if ep.network == nil { @@ -386,17 +424,16 @@ func (ep *endpoint) sbJoin(sbox Sandbox, options ...EndpointOption) error { } }() + // Watch for service records + network.getController().watchSvcRecord(ep) + address := "" if ip := ep.getFirstInterfaceAddress(); ip != nil { address = ip.String() } - if err = sb.updateHostsFile(address, network.getSvcRecords(ep)); err != nil { + if err = sb.updateHostsFile(address); err != nil { return err } - - // Watch for service records - network.getController().watchSvcRecord(ep) - if err = 
sb.updateDNS(network.enableIPv6); err != nil { return err } @@ -559,7 +596,7 @@ func (ep *endpoint) sbLeave(sbox Sandbox, options ...EndpointOption) error { sb.deleteHostsEntries(n.getSvcRecords(ep)) - if sb.needDefaultGW() { + if !sb.inDelete && sb.needDefaultGW() { ep := sb.getEPwithoutGateway() if ep == nil { return fmt.Errorf("endpoint without GW expected, but not found") @@ -569,7 +606,19 @@ func (ep *endpoint) sbLeave(sbox Sandbox, options ...EndpointOption) error { return sb.clearDefaultGW() } -func (ep *endpoint) Delete() error { +func (n *network) validateForceDelete(locator string) error { + if n.Scope() == datastore.LocalScope { + return nil + } + + if locator == "" { + return fmt.Errorf("invalid endpoint locator identifier") + } + + return nil +} + +func (ep *endpoint) Delete(force bool) error { var err error n, err := ep.getNetworkFromStore() if err != nil { @@ -584,17 +633,33 @@ func (ep *endpoint) Delete() error { ep.Lock() epid := ep.id name := ep.name - if ep.sandboxID != "" { - ep.Unlock() + sbid := ep.sandboxID + locator := ep.locator + ep.Unlock() + + if force { + if err = n.validateForceDelete(locator); err != nil { + return fmt.Errorf("unable to force delete endpoint %s: %v", name, err) + } + } + + sb, _ := n.getController().SandboxByID(sbid) + if sb != nil && !force { return &ActiveContainerError{name: name, id: epid} } - ep.Unlock() + + if sb != nil { + if e := ep.sbLeave(sb); e != nil { + log.Warnf("failed to leave sandbox for endpoint %s : %v", name, e) + } + } if err = n.getController().deleteFromStore(ep); err != nil { return err } + defer func() { - if err != nil { + if err != nil && !force { ep.dbExists = false if e := n.getController().updateToStore(ep); e != nil { log.Warnf("failed to recreate endpoint in store %s : %v", name, e) @@ -602,11 +667,11 @@ func (ep *endpoint) Delete() error { } }() - if err = n.getEpCnt().DecEndpointCnt(); err != nil { + if err = n.getEpCnt().DecEndpointCnt(); err != nil && !force { return err } defer func() { - if err != nil { + if err != nil && !force { if e := n.getEpCnt().IncEndpointCnt(); e != nil { log.Warnf("failed to update network %s : %v", n.name, e) } @@ -616,7 +681,7 @@ func (ep *endpoint) Delete() error { // unwatch for service records n.getController().unWatchSvcRecord(ep) - if err = ep.deleteEndpoint(); err != nil { + if err = ep.deleteEndpoint(); err != nil && !force { return err } @@ -651,8 +716,8 @@ func (ep *endpoint) deleteEndpoint() error { } func (ep *endpoint) getSandbox() (*sandbox, bool) { - ep.Lock() c := ep.network.getController() + ep.Lock() sid := ep.sandboxID ep.Unlock() @@ -684,6 +749,15 @@ func EndpointOptionGeneric(generic map[string]interface{}) EndpointOption { } } +// CreateOptionIpam function returns an option setter for the ipam configuration for this endpoint +func CreateOptionIpam(ipV4, ipV6 net.IP, ipamOptions map[string]string) EndpointOption { + return func(ep *endpoint) { + ep.prefAddress = ipV4 + ep.prefAddressV6 = ipV6 + ep.ipamOptions = ipamOptions + } +} + // CreateOptionExposedPorts function returns an option setter for the container exposed // ports option to be passed to network.CreateEndpoint() method. 
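+// For example (hypothetical sketch), an endpoint can request a specific IPv4
+// address via the CreateOptionIpam setter defined above:
+//
+//	ep, err := n.CreateEndpoint("db",
+//		CreateOptionIpam(net.ParseIP("10.2.0.10"), nil, nil))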
func CreateOptionExposedPorts(exposedPorts []types.TransportPort) EndpointOption { @@ -716,6 +790,31 @@ func CreateOptionAnonymous() EndpointOption { } } +// CreateOptionDisableResolution function returns an option setter to indicate +// this endpoint doesn't want embedded DNS server functionality +func CreateOptionDisableResolution() EndpointOption { + return func(ep *endpoint) { + ep.disableResolution = true + } +} + +//CreateOptionAlias function returns an option setter for setting endpoint alias +func CreateOptionAlias(name string, alias string) EndpointOption { + return func(ep *endpoint) { + if ep.aliases == nil { + ep.aliases = make(map[string]string) + } + ep.aliases[alias] = name + } +} + +//CreateOptionMyAlias function returns an option setter for setting endpoint's self alias +func CreateOptionMyAlias(alias string) EndpointOption { + return func(ep *endpoint) { + ep.myAliases = append(ep.myAliases, alias) + } +} + // JoinOptionPriority function returns an option setter for priority option to // be passed to the endpoint.Join() method. func JoinOptionPriority(ep Endpoint, prio int) EndpointOption { @@ -737,11 +836,8 @@ func (ep *endpoint) DataScope() string { return ep.getNetwork().DataScope() } -func (ep *endpoint) assignAddress() error { - var ( - ipam ipamapi.Ipam - err error - ) +func (ep *endpoint) assignAddress(ipam ipamapi.Ipam, assignIPv4, assignIPv6 bool) error { + var err error n := ep.getNetwork() if n.Type() == "host" || n.Type() == "null" { @@ -750,21 +846,25 @@ func (ep *endpoint) assignAddress() error { log.Debugf("Assigning addresses for endpoint %s's interface on network %s", ep.Name(), n.Name()) - ipam, err = n.getController().getIpamDriver(n.ipamType) - if err != nil { - return err + if assignIPv4 { + if err = ep.assignAddressVersion(4, ipam); err != nil { + return err + } } - err = ep.assignAddressVersion(4, ipam) - if err != nil { - return err + + if assignIPv6 { + err = ep.assignAddressVersion(6, ipam) } - return ep.assignAddressVersion(6, ipam) + + return err } func (ep *endpoint) assignAddressVersion(ipVer int, ipam ipamapi.Ipam) error { var ( poolID *string address **net.IPNet + prefAdd net.IP + progAdd net.IP ) n := ep.getNetwork() @@ -772,9 +872,11 @@ func (ep *endpoint) assignAddressVersion(ipVer int, ipam ipamapi.Ipam) error { case 4: poolID = &ep.iface.v4PoolID address = &ep.iface.addr + prefAdd = ep.prefAddress case 6: poolID = &ep.iface.v6PoolID address = &ep.iface.addrv6 + prefAdd = ep.prefAddressV6 default: return types.InternalErrorf("incorrect ip version number passed: %d", ipVer) } @@ -786,8 +888,19 @@ func (ep *endpoint) assignAddressVersion(ipVer int, ipam ipamapi.Ipam) error { return nil } + // The address to program may be chosen by the user or by the network driver in one specific + // case to support backward compatibility with `docker daemon --fixed-cidrv6` use case + if prefAdd != nil { + progAdd = prefAdd + } else if *address != nil { + progAdd = (*address).IP + } + for _, d := range ipInfo { - addr, _, err := ipam.RequestAddress(d.PoolID, nil, nil) + if progAdd != nil && !d.Pool.Contains(progAdd) { + continue + } + addr, _, err := ipam.RequestAddress(d.PoolID, progAdd, ep.ipamOptions) if err == nil { ep.Lock() *address = addr @@ -795,10 +908,13 @@ func (ep *endpoint) assignAddressVersion(ipVer int, ipam ipamapi.Ipam) error { ep.Unlock() return nil } - if err != ipamapi.ErrNoAvailableIPs { + if err != ipamapi.ErrNoAvailableIPs || progAdd != nil { return err } } + if progAdd != nil { + return types.BadRequestErrorf("Invalid preferred 
address %s: It does not belong to any of this network's subnets") + } return fmt.Errorf("no available IPv%d addresses on this network's address pools: %s (%s)", ipVer, n.Name(), n.ID()) } @@ -840,7 +956,7 @@ func (c *controller) cleanupLocalEndpoints() { } for _, ep := range epl { - if err := ep.Delete(); err != nil { + if err := ep.Delete(false); err != nil { log.Warnf("Could not delete local endpoint %s during endpoint cleanup: %v", ep.name, err) } } diff --git a/vendor/github.com/docker/libnetwork/endpoint_info.go b/vendor/github.com/docker/libnetwork/endpoint_info.go index db23eb73..624bc533 100644 --- a/vendor/github.com/docker/libnetwork/endpoint_info.go +++ b/vendor/github.com/docker/libnetwork/endpoint_info.go @@ -25,6 +25,10 @@ type EndpointInfo interface { // This will only return a valid value if a container has joined the endpoint. GatewayIPv6() net.IP + // StaticRoutes returns the list of static routes configured by the network + // driver when the container joins a network + StaticRoutes() []*types.StaticRoute + // Sandbox returns the attached sandbox if there, nil otherwise. Sandbox() Sandbox } @@ -136,9 +140,10 @@ func (epi *endpointInterface) CopyTo(dstEpi *endpointInterface) error { } type endpointJoinInfo struct { - gw net.IP - gw6 net.IP - StaticRoutes []*types.StaticRoute + gw net.IP + gw6 net.IP + StaticRoutes []*types.StaticRoute + disableGatewayService bool } func (ep *endpoint) Info() EndpointInfo { @@ -159,7 +164,11 @@ func (ep *endpoint) Info() EndpointInfo { return ep } - return sb.getEndpoint(ep.ID()) + if epi := sb.getEndpoint(ep.ID()); epi != nil { + return epi + } + + return nil } func (ep *endpoint) DriverInfo() (map[string]interface{}, error) { @@ -291,6 +300,17 @@ func (ep *endpoint) Sandbox() Sandbox { return cnt } +func (ep *endpoint) StaticRoutes() []*types.StaticRoute { + ep.Lock() + defer ep.Unlock() + + if ep.joinInfo == nil { + return nil + } + + return ep.joinInfo.StaticRoutes +} + func (ep *endpoint) Gateway() net.IP { ep.Lock() defer ep.Unlock() @@ -336,3 +356,10 @@ func (ep *endpoint) retrieveFromStore() (*endpoint, error) { } return n.getEndpointFromStore(ep.ID()) } + +func (ep *endpoint) DisableGatewayService() { + ep.Lock() + defer ep.Unlock() + + ep.joinInfo.disableGatewayService = true +} diff --git a/vendor/github.com/docker/libnetwork/libnetwork_internal_test.go b/vendor/github.com/docker/libnetwork/libnetwork_internal_test.go index 7eb259f2..44ee68f9 100644 --- a/vendor/github.com/docker/libnetwork/libnetwork_internal_test.go +++ b/vendor/github.com/docker/libnetwork/libnetwork_internal_test.go @@ -34,6 +34,30 @@ func TestDriverRegistration(t *testing.T) { } } +func TestIpamDriverRegistration(t *testing.T) { + c, err := New() + if err != nil { + t.Fatal(err) + } + defer c.Stop() + + err = c.(*controller).RegisterIpamDriver("", nil) + if err == nil { + t.Fatalf("Expected failure, but suceeded") + } + if _, ok := err.(types.BadRequestError); !ok { + t.Fatalf("Failed for unexpected reason: %v", err) + } + + err = c.(*controller).RegisterIpamDriver(ipamapi.DefaultIPAM, nil) + if err == nil { + t.Fatalf("Expected failure, but suceeded") + } + if _, ok := err.(types.ForbiddenError); !ok { + t.Fatalf("Failed for unexpected reason: %v", err) + } +} + func TestNetworkMarshalling(t *testing.T) { n := &network{ name: "Miao", @@ -43,20 +67,19 @@ func TestNetworkMarshalling(t *testing.T) { networkType: "bridge", enableIPv6: true, persist: true, + ipamOptions: map[string]string{ + netlabel.MacAddress: "a:b:c:d:e:f", + }, ipamV4Config: []*IpamConf{ 
&IpamConf{ PreferredPool: "10.2.0.0/16", SubPool: "10.2.0.0/24", - Options: map[string]string{ - netlabel.MacAddress: "a:b:c:d:e:f", - }, - Gateway: "", - AuxAddresses: nil, + Gateway: "", + AuxAddresses: nil, }, &IpamConf{ PreferredPool: "10.2.0.0/16", SubPool: "10.2.1.0/24", - Options: nil, Gateway: "10.2.1.254", }, }, @@ -241,7 +264,6 @@ func compareIpamConfList(listA, listB []*IpamConf) bool { b = listB[i] if a.PreferredPool != b.PreferredPool || a.SubPool != b.SubPool || - !compareStringMaps(a.Options, b.Options) || a.Gateway != b.Gateway || !compareStringMaps(a.AuxAddresses, b.AuxAddresses) { return false } @@ -350,7 +372,7 @@ func TestIpamReleaseOnNetDriverFailures(t *testing.T) { // Test whether ipam state release is invoked on network create failure from net driver // by checking whether subsequent network creation requesting same gateway IP succeeds - ipamOpt := NetworkOptionIpam(ipamapi.DefaultIPAM, "", []*IpamConf{&IpamConf{PreferredPool: "10.34.0.0/16", Gateway: "10.34.255.254"}}, nil) + ipamOpt := NetworkOptionIpam(ipamapi.DefaultIPAM, "", []*IpamConf{&IpamConf{PreferredPool: "10.34.0.0/16", Gateway: "10.34.255.254"}}, nil, nil) if _, err := c.NewNetwork(badDriverName, "badnet1", ipamOpt); err == nil { t.Fatalf("bad network driver should have failed network creation") } @@ -374,7 +396,7 @@ func TestIpamReleaseOnNetDriverFailures(t *testing.T) { } // Now create good bridge network with different gateway - ipamOpt2 := NetworkOptionIpam(ipamapi.DefaultIPAM, "", []*IpamConf{&IpamConf{PreferredPool: "10.34.0.0/16", Gateway: "10.34.255.253"}}, nil) + ipamOpt2 := NetworkOptionIpam(ipamapi.DefaultIPAM, "", []*IpamConf{&IpamConf{PreferredPool: "10.34.0.0/16", Gateway: "10.34.255.253"}}, nil, nil) gnw, err = c.NewNetwork("bridge", "goodnet2", ipamOpt2) if err != nil { t.Fatal(err) @@ -385,7 +407,7 @@ func TestIpamReleaseOnNetDriverFailures(t *testing.T) { if err != nil { t.Fatal(err) } - defer ep.Delete() + defer ep.Delete(false) expectedIP, _ := types.ParseCIDR("10.34.0.1/16") if !types.CompareIPNet(ep.Info().Iface().Address(), expectedIP) { diff --git a/vendor/github.com/docker/libnetwork/libnetwork_test.go b/vendor/github.com/docker/libnetwork/libnetwork_test.go index 255261b5..5f305e02 100644 --- a/vendor/github.com/docker/libnetwork/libnetwork_test.go +++ b/vendor/github.com/docker/libnetwork/libnetwork_test.go @@ -6,6 +6,7 @@ import ( "flag" "fmt" "io/ioutil" + "net" "net/http" "net/http/httptest" "os" @@ -86,7 +87,7 @@ func createController() error { func createTestNetwork(networkType, networkName string, netOption options.Generic, ipamV4Configs, ipamV6Configs []*libnetwork.IpamConf) (libnetwork.Network, error) { return controller.NewNetwork(networkType, networkName, libnetwork.NetworkOptionGeneric(netOption), - libnetwork.NetworkOptionIpam(ipamapi.DefaultIPAM, "", ipamV4Configs, ipamV6Configs)) + libnetwork.NetworkOptionIpam(ipamapi.DefaultIPAM, "", ipamV4Configs, ipamV6Configs, nil)) } func getEmptyGenericOption() map[string]interface{} { @@ -134,7 +135,7 @@ func TestNull(t *testing.T) { t.Fatal(err) } - if err := ep.Delete(); err != nil { + if err := ep.Delete(false); err != nil { t.Fatal(err) } @@ -212,11 +213,11 @@ func TestHost(t *testing.T) { t.Fatal(err) } - if err := ep1.Delete(); err != nil { + if err := ep1.Delete(false); err != nil { t.Fatal(err) } - if err := ep2.Delete(); err != nil { + if err := ep2.Delete(false); err != nil { t.Fatal(err) } @@ -248,7 +249,7 @@ func TestHost(t *testing.T) { t.Fatal(err) } - if err := ep3.Delete(); err != nil { + if err := 
ep3.Delete(false); err != nil { t.Fatal(err) } @@ -304,7 +305,60 @@ func TestBridge(t *testing.T) { t.Fatalf("Incomplete data for port mapping in endpoint operational data: %d", len(pm)) } - if err := ep.Delete(); err != nil { + if err := ep.Delete(false); err != nil { + t.Fatal(err) + } + + if err := network.Delete(); err != nil { + t.Fatal(err) + } +} + +// Testing IPV6 from MAC address +func TestBridgeIpv6FromMac(t *testing.T) { + if !testutils.IsRunningInContainer() { + defer testutils.SetupTestOSContext(t)() + } + + netOption := options.Generic{ + netlabel.GenericData: options.Generic{ + "BridgeName": "testipv6mac", + "EnableIPv6": true, + "EnableICC": true, + "EnableIPMasquerade": true, + }, + } + ipamV4ConfList := []*libnetwork.IpamConf{&libnetwork.IpamConf{PreferredPool: "192.168.100.0/24", Gateway: "192.168.100.1"}} + ipamV6ConfList := []*libnetwork.IpamConf{&libnetwork.IpamConf{PreferredPool: "fe90::/64", Gateway: "fe90::22"}} + + network, err := controller.NewNetwork(bridgeNetType, "testipv6mac", + libnetwork.NetworkOptionGeneric(netOption), + libnetwork.NetworkOptionIpam(ipamapi.DefaultIPAM, "", ipamV4ConfList, ipamV6ConfList, nil), + libnetwork.NetworkOptionDeferIPv6Alloc(true)) + if err != nil { + t.Fatal(err) + } + + mac := net.HardwareAddr{0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff} + epOption := options.Generic{netlabel.MacAddress: mac} + + ep, err := network.CreateEndpoint("testep", libnetwork.EndpointOptionGeneric(epOption)) + if err != nil { + t.Fatal(err) + } + + iface := ep.Info().Iface() + if !bytes.Equal(iface.MacAddress(), mac) { + t.Fatalf("Unexpected mac address: %v", iface.MacAddress()) + } + + ip, expIP, _ := net.ParseCIDR("fe90::aabb:ccdd:eeff/64") + expIP.IP = ip + if !types.CompareIPNet(expIP, iface.AddressIPv6()) { + t.Fatalf("Expected %v. Got: %v", expIP, iface.AddressIPv6()) + } + + if err := ep.Delete(false); err != nil { t.Fatal(err) } @@ -460,7 +514,7 @@ func TestDeleteNetworkWithActiveEndpoints(t *testing.T) { } // Done testing. Now cleanup. 
- if err := ep.Delete(); err != nil { + if err := ep.Delete(false); err != nil { t.Fatal(err) } @@ -532,7 +586,7 @@ func TestUnknownEndpoint(t *testing.T) { t.Fatal(err) } - err = ep.Delete() + err = ep.Delete(false) if err != nil { t.Fatal(err) } @@ -570,7 +624,7 @@ func TestNetworkEndpointsWalkers(t *testing.T) { t.Fatal(err) } defer func() { - if err := ep11.Delete(); err != nil { + if err := ep11.Delete(false); err != nil { t.Fatal(err) } }() @@ -580,7 +634,7 @@ func TestNetworkEndpointsWalkers(t *testing.T) { t.Fatal(err) } defer func() { - if err := ep12.Delete(); err != nil { + if err := ep12.Delete(false); err != nil { t.Fatal(err) } }() @@ -698,7 +752,7 @@ func TestDuplicateEndpoint(t *testing.T) { t.Fatal(err) } defer func() { - if err := ep.Delete(); err != nil { + if err := ep.Delete(false); err != nil { t.Fatal(err) } }() @@ -707,7 +761,7 @@ func TestDuplicateEndpoint(t *testing.T) { defer func() { // Cleanup ep2 as well, else network cleanup might fail for failure cases if ep2 != nil { - if err := ep2.Delete(); err != nil { + if err := ep2.Delete(false); err != nil { t.Fatal(err) } } @@ -850,7 +904,7 @@ func TestNetworkQuery(t *testing.T) { t.Fatal(err) } defer func() { - if err := ep11.Delete(); err != nil { + if err := ep11.Delete(false); err != nil { t.Fatal(err) } }() @@ -860,7 +914,7 @@ func TestNetworkQuery(t *testing.T) { t.Fatal(err) } defer func() { - if err := ep12.Delete(); err != nil { + if err := ep12.Delete(false); err != nil { t.Fatal(err) } }() @@ -951,11 +1005,19 @@ func TestEndpointJoin(t *testing.T) { } // Create network 1 and add 2 endpoint: ep11, ep12 - n1, err := createTestNetwork(bridgeNetType, "testnetwork1", options.Generic{ + netOption := options.Generic{ netlabel.GenericData: options.Generic{ - "BridgeName": "testnetwork1", + "BridgeName": "testnetwork1", + "EnableIPv6": true, + "EnableICC": true, + "EnableIPMasquerade": true, }, - }, nil, nil) + } + ipamV6ConfList := []*libnetwork.IpamConf{&libnetwork.IpamConf{PreferredPool: "fe90::/64", Gateway: "fe90::22"}} + n1, err := controller.NewNetwork(bridgeNetType, "testnetwork1", + libnetwork.NetworkOptionGeneric(netOption), + libnetwork.NetworkOptionIpam(ipamapi.DefaultIPAM, "", nil, ipamV6ConfList, nil), + libnetwork.NetworkOptionDeferIPv6Alloc(true)) if err != nil { t.Fatal(err) } @@ -970,7 +1032,7 @@ func TestEndpointJoin(t *testing.T) { t.Fatal(err) } defer func() { - if err := ep1.Delete(); err != nil { + if err := ep1.Delete(false); err != nil { t.Fatal(err) } }() @@ -981,10 +1043,16 @@ func TestEndpointJoin(t *testing.T) { if iface.Address() != nil && iface.Address().IP.To4() == nil { t.Fatalf("Invalid IP address returned: %v", iface.Address()) } + if iface.AddressIPv6() != nil && iface.AddressIPv6().IP == nil { + t.Fatalf("Invalid IPv6 address returned: %v", iface.Address()) + } - if info.Gateway().To4() != nil { + if len(info.Gateway()) != 0 { t.Fatalf("Expected empty gateway for an empty endpoint. Instead found a gateway: %v", info.Gateway()) } + if len(info.GatewayIPv6()) != 0 { + t.Fatalf("Expected empty gateway for an empty ipv6 endpoint. Instead found a gateway: %v", info.GatewayIPv6()) + } if info.Sandbox() != nil { t.Fatalf("Expected an empty sandbox key for an empty endpoint. Instead found a non-empty sandbox key: %s", info.Sandbox().Key()) @@ -1036,9 +1104,12 @@ func TestEndpointJoin(t *testing.T) { // Validate if ep.Info() only gives valid gateway and sandbox key after has container has joined. 
info = ep1.Info() - if info.Gateway().To4() == nil { + if len(info.Gateway()) == 0 { t.Fatalf("Expected a valid gateway for a joined endpoint. Instead found an invalid gateway: %v", info.Gateway()) } + if len(info.GatewayIPv6()) == 0 { + t.Fatalf("Expected a valid ipv6 gateway for a joined endpoint. Instead found an invalid gateway: %v", info.GatewayIPv6()) + } if info.Sandbox() == nil { t.Fatalf("Expected an non-empty sandbox key for a joined endpoint. Instead found a empty sandbox key") @@ -1079,7 +1150,7 @@ func TestEndpointJoin(t *testing.T) { t.Fatal(err) } defer func() { - if err := ep2.Delete(); err != nil { + if err := ep2.Delete(false); err != nil { t.Fatal(err) } }() @@ -1142,6 +1213,14 @@ func (f *fakeSandbox) SetKey(key string) error { return nil } +func (f *fakeSandbox) ResolveName(name string) net.IP { + return nil +} + +func (f *fakeSandbox) ResolveIP(ip string) string { + return "" +} + func TestExternalKey(t *testing.T) { externalKeyTest(t, false) } @@ -1174,7 +1253,7 @@ func externalKeyTest(t *testing.T, reexec bool) { t.Fatal(err) } defer func() { - err = ep.Delete() + err = ep.Delete(false) if err != nil { t.Fatal(err) } @@ -1185,7 +1264,7 @@ func externalKeyTest(t *testing.T, reexec bool) { t.Fatal(err) } defer func() { - err = ep2.Delete() + err = ep2.Delete(false) if err != nil { t.Fatal(err) } @@ -1323,7 +1402,7 @@ func TestEndpointDeleteWithActiveContainer(t *testing.T) { t.Fatal(err) } defer func() { - err = ep.Delete() + err = ep.Delete(false) if err != nil { t.Fatal(err) } @@ -1352,7 +1431,7 @@ func TestEndpointDeleteWithActiveContainer(t *testing.T) { } }() - err = ep.Delete() + err = ep.Delete(false) if err == nil { t.Fatal("Expected to fail. But instead succeeded") } @@ -1386,7 +1465,7 @@ func TestEndpointMultipleJoins(t *testing.T) { t.Fatal(err) } defer func() { - if err := ep.Delete(); err != nil { + if err := ep.Delete(false); err != nil { t.Fatal(err) } }() @@ -1510,7 +1589,7 @@ func TestontainerInvalidLeave(t *testing.T) { t.Fatal(err) } defer func() { - if err := ep.Delete(); err != nil { + if err := ep.Delete(false); err != nil { t.Fatal(err) } }() @@ -1627,6 +1706,7 @@ func TestEnableIPv6(t *testing.T) { } tmpResolvConf := []byte("search pommesfrites.fr\nnameserver 12.34.56.78\nnameserver 2001:4860:4860::8888\n") + expectedResolvConf := []byte("search pommesfrites.fr\nnameserver 127.0.0.11\noptions ndots:0\n") //take a copy of resolv.conf for restoring after test completes resolvConfSystem, err := ioutil.ReadFile("/etc/resolv.conf") if err != nil { @@ -1645,7 +1725,7 @@ func TestEnableIPv6(t *testing.T) { "BridgeName": "testnetwork", }, } - ipamV6ConfList := []*libnetwork.IpamConf{&libnetwork.IpamConf{PreferredPool: "fe80::/64"}} + ipamV6ConfList := []*libnetwork.IpamConf{&libnetwork.IpamConf{PreferredPool: "fe99::/64", Gateway: "fe99::9"}} n, err := createTestNetwork("bridge", "testnetwork", netOption, nil, ipamV6ConfList) if err != nil { @@ -1689,8 +1769,8 @@ func TestEnableIPv6(t *testing.T) { t.Fatal(err) } - if !bytes.Equal(content, tmpResolvConf) { - t.Fatalf("Expected:\n%s\nGot:\n%s", string(tmpResolvConf), string(content)) + if !bytes.Equal(content, expectedResolvConf) { + t.Fatalf("Expected:\n%s\nGot:\n%s", string(expectedResolvConf), string(content)) } if err != nil { @@ -1722,7 +1802,7 @@ func TestResolvConfHost(t *testing.T) { t.Fatal(err) } - ep1, err := n.CreateEndpoint("ep1", nil) + ep1, err := n.CreateEndpoint("ep1", libnetwork.CreateOptionDisableResolution()) if err != nil { t.Fatal(err) } @@ -1783,9 +1863,8 @@ func TestResolvConf(t 
*testing.T) { } tmpResolvConf1 := []byte("search pommesfrites.fr\nnameserver 12.34.56.78\nnameserver 2001:4860:4860::8888\n") - expectedResolvConf1 := []byte("search pommesfrites.fr\nnameserver 12.34.56.78\n") tmpResolvConf2 := []byte("search pommesfrites.fr\nnameserver 112.34.56.78\nnameserver 2001:4860:4860::8888\n") - expectedResolvConf2 := []byte("search pommesfrites.fr\nnameserver 112.34.56.78\n") + expectedResolvConf1 := []byte("search pommesfrites.fr\nnameserver 127.0.0.11\noptions ndots:0\n") tmpResolvConf3 := []byte("search pommesfrites.fr\nnameserver 113.34.56.78\n") //take a copy of resolv.conf for restoring after test completes @@ -1894,8 +1973,8 @@ func TestResolvConf(t *testing.T) { t.Fatal(err) } - if !bytes.Equal(content, expectedResolvConf2) { - t.Fatalf("Expected:\n%s\nGot:\n%s", string(expectedResolvConf2), string(content)) + if !bytes.Equal(content, expectedResolvConf1) { + t.Fatalf("Expected:\n%s\nGot:\n%s", string(expectedResolvConf1), string(content)) } if err := ioutil.WriteFile(resolvConfPath, tmpResolvConf3, 0644); err != nil { @@ -2255,7 +2334,7 @@ func runParallelTests(t *testing.T, thrNumber int) { t.Fatal(err) } } else { - err = ep.Delete() + err = ep.Delete(false) if err != nil { t.Fatal(err) } diff --git a/vendor/github.com/docker/libnetwork/machines b/vendor/github.com/docker/libnetwork/machines new file mode 100755 index 00000000..4d4b407e --- /dev/null +++ b/vendor/github.com/docker/libnetwork/machines @@ -0,0 +1,111 @@ +#/bin/sh + +set -e + +usage() +{ +cat << EOF +NAME: + machines - Create Test Environments for Docker Networking + +VERSION: + 0.1 + +USAGE: + $0 [command_options] [arguments...] + +COMMANDS: + help + Help and usage + + up + Create environment with given KV store + zookeeper | etcd | consul (default) + Create N nodes, default = 2 + + destroy + Destroy Environment + +EOF +} + +step() { + printf "\033[0;36m-----> $@\033[0m\n" +} + +up() +{ + step "Creating KV Store Machine" + docker-machine create \ + -d virtualbox \ + mh-kv + + step "KV Store is $1" + step "Starting KV Container" + case "$1" in + etcd) + cluster_store="cluster-store=etcd://$(docker-machine ip mh-kv):2379" + docker $(docker-machine config mh-kv) run -d \ + -p "2379:2379" \ + -h "etcd" \ + --name "etcd" \ + quay.io/coreos/etcd:v2.2.1 \ + --listen-client-urls="http://0.0.0.0:2379" \ + --advertise-client-urls="http://$(docker-machine ip mh-kv):2379" + ;; + zookeeper) + cluster_store="cluster-store=zk://$(docker-machine ip mh-kv):2181" + docker $(docker-machine config mh-kv) run -d \ + -p "2181:2181" \ + -h "zookeeper" \ + --name "zookeeper" \ + tianon/zookeeper + ;; + *) + cluster_store="cluster-store=consul://$(docker-machine ip mh-kv):8500" + docker $(docker-machine config mh-kv) run -d \ + -p "8500:8500" \ + -h "consul" \ + --name "consul" \ + progrium/consul -server -bootstrap-expect 1 + ;; + esac + + machines=$2 + if [ -z machines ]; then + machines=2 + fi + step "Creating $machines Machines" + + for i in $(seq $machines); do + step "Creating machine $i" + docker-machine create \ + -d virtualbox \ + --engine-opt="cluster-advertise=eth1:2376" \ + --engine-opt="$cluster_store" \ + mh-$i + done +} + +destroy() +{ + for x in $(docker-machine ls | grep mh- | awk '{ print $1 }'); do + docker-machine rm $x + done +} + +case "$1" in + up) + shift + up $@ + ;; + destroy) + destroy $@ + ;; + help) + usage + ;; + *) + usage + ;; +esac diff --git a/vendor/github.com/docker/libnetwork/network.go b/vendor/github.com/docker/libnetwork/network.go index 25846028..7449c90a 100644 --- 
a/vendor/github.com/docker/libnetwork/network.go +++ b/vendor/github.com/docker/libnetwork/network.go @@ -16,6 +16,7 @@ import ( "github.com/docker/libnetwork/etchosts" "github.com/docker/libnetwork/ipamapi" "github.com/docker/libnetwork/netlabel" + "github.com/docker/libnetwork/netutils" "github.com/docker/libnetwork/options" "github.com/docker/libnetwork/types" ) @@ -57,16 +58,21 @@ type Network interface { // NetworkInfo returns some configuration and operational information about the network type NetworkInfo interface { - IpamConfig() (string, []*IpamConf, []*IpamConf) + IpamConfig() (string, map[string]string, []*IpamConf, []*IpamConf) + IpamInfo() ([]*IpamInfo, []*IpamInfo) DriverOptions() map[string]string Scope() string + Internal() bool } // EndpointWalker is a client provided function which will be used to walk the Endpoints. // When the function returns true, the walk will stop. type EndpointWalker func(ep Endpoint) bool -type svcMap map[string]net.IP +type svcInfo struct { + svcMap map[string][]net.IP + ipMap map[string]string +} // IpamConf contains all the ipam related configurations for a network type IpamConf struct { @@ -75,8 +81,6 @@ type IpamConf struct { // A subset of the master pool. If specified, // this becomes the container pool SubPool string - // Input options for IPAM Driver (optional) - Options map[string]string // Preferred Network Gateway address (optional) Gateway string // Auxiliary addresses for network driver. Must be within the master pool. @@ -146,20 +150,22 @@ type network struct { networkType string id string ipamType string + ipamOptions map[string]string addrSpace string ipamV4Config []*IpamConf ipamV6Config []*IpamConf ipamV4Info []*IpamInfo ipamV6Info []*IpamInfo enableIPv6 bool + postIPv6 bool epCnt *endpointCnt generic options.Generic dbIndex uint64 - svcRecords svcMap dbExists bool persist bool stopWatchCh chan struct{} drvOnce *sync.Once + internal bool sync.Mutex } @@ -248,12 +254,6 @@ func (c *IpamConf) CopyTo(dstC *IpamConf) error { dstC.PreferredPool = c.PreferredPool dstC.SubPool = c.SubPool dstC.Gateway = c.Gateway - if c.Options != nil { - dstC.Options = make(map[string]string, len(c.Options)) - for k, v := range c.Options { - dstC.Options[k] = v - } - } if c.AuxAddresses != nil { dstC.AuxAddresses = make(map[string]string, len(c.AuxAddresses)) for k, v := range c.AuxAddresses { @@ -298,9 +298,11 @@ func (n *network) CopyTo(o datastore.KVObject) error { dstN.ipamType = n.ipamType dstN.enableIPv6 = n.enableIPv6 dstN.persist = n.persist + dstN.postIPv6 = n.postIPv6 dstN.dbIndex = n.dbIndex dstN.dbExists = n.dbExists dstN.drvOnce = n.drvOnce + dstN.internal = n.internal for _, v4conf := range n.ipamV4Config { dstV4Conf := &IpamConf{} @@ -358,6 +360,7 @@ func (n *network) MarshalJSON() ([]byte, error) { netMap["generic"] = n.generic } netMap["persist"] = n.persist + netMap["postIPv6"] = n.postIPv6 if len(n.ipamV4Config) > 0 { ics, err := json.Marshal(n.ipamV4Config) if err != nil { @@ -386,6 +389,7 @@ func (n *network) MarshalJSON() ([]byte, error) { } netMap["ipamV6Info"] = string(iis) } + netMap["internal"] = n.internal return json.Marshal(netMap) } @@ -418,6 +422,9 @@ func (n *network) UnmarshalJSON(b []byte) (err error) { if v, ok := netMap["persist"]; ok { n.persist = v.(bool) } + if v, ok := netMap["postIPv6"]; ok { + n.postIPv6 = v.(bool) + } if v, ok := netMap["ipamType"]; ok { n.ipamType = v.(string) } else { @@ -446,6 +453,9 @@ func (n *network) UnmarshalJSON(b []byte) (err error) { return err } } + if v, ok := netMap["internal"]; 
ok { + n.internal = v.(bool) + } return nil } @@ -472,12 +482,25 @@ func NetworkOptionPersist(persist bool) NetworkOption { } } +// NetworkOptionInternalNetwork returns an option setter to config the network +// to be internal which disables default gateway service +func NetworkOptionInternalNetwork() NetworkOption { + return func(n *network) { + n.internal = true + if n.generic == nil { + n.generic = make(map[string]interface{}) + } + n.generic[netlabel.Internal] = true + } +} + // NetworkOptionIpam function returns an option setter for the ipam configuration for this network -func NetworkOptionIpam(ipamDriver string, addrSpace string, ipV4 []*IpamConf, ipV6 []*IpamConf) NetworkOption { +func NetworkOptionIpam(ipamDriver string, addrSpace string, ipV4 []*IpamConf, ipV6 []*IpamConf, opts map[string]string) NetworkOption { return func(n *network) { if ipamDriver != "" { n.ipamType = ipamDriver } + n.ipamOptions = opts n.addrSpace = addrSpace n.ipamV4Config = ipV4 n.ipamV6Config = ipV6 @@ -505,6 +528,16 @@ func NetworkOptionDriverOpts(opts map[string]string) NetworkOption { } } +// NetworkOptionDeferIPv6Alloc instructs the network to defer the IPV6 address allocation until after the endpoint has been created +// It is being provided to support the specific docker daemon flags where user can deterministically assign an IPv6 address +// to a container as combination of fixed-cidr-v6 + mac-address +// TODO: Remove this option setter once we support endpoint ipam options +func NetworkOptionDeferIPv6Alloc(enable bool) NetworkOption { + return func(n *network) { + n.postIPv6 = enable + } +} + func (n *network) processOptions(options ...NetworkOption) { for _, opt := range options { if opt != nil { @@ -587,12 +620,13 @@ func (n *network) Delete() error { if err = n.getController().deleteFromStore(n.getEpCnt()); err != nil { return fmt.Errorf("error deleting network endpoint count from store: %v", err) } + + n.ipamRelease() + if err = n.getController().deleteFromStore(n); err != nil { return fmt.Errorf("error deleting network from store: %v", err) } - n.ipamRelease() - return nil } @@ -647,6 +681,7 @@ func (n *network) CreateEndpoint(name string, options ...EndpointOption) (Endpoi // Initialize ep.network with a possibly stale copy of n. We need this to get network from // store. But once we get it from store we will have the most uptodate copy possible. ep.network = n + ep.locator = n.getController().clusterHostID() ep.network, err = ep.getNetworkFromStore() if err != nil { return nil, fmt.Errorf("failed to get network during CreateEndpoint: %v", err) @@ -655,7 +690,28 @@ func (n *network) CreateEndpoint(name string, options ...EndpointOption) (Endpoi ep.processOptions(options...) 
- if err = ep.assignAddress(); err != nil { + if opt, ok := ep.generic[netlabel.MacAddress]; ok { + if mac, ok := opt.(net.HardwareAddr); ok { + ep.iface.mac = mac + } + } + + ipam, err := n.getController().getIPAM(n.ipamType) + if err != nil { + return nil, err + } + + if ipam.capability.RequiresMACAddress { + if ep.iface.mac == nil { + ep.iface.mac = netutils.GenerateRandomMAC() + } + if ep.ipamOptions == nil { + ep.ipamOptions = make(map[string]string) + } + ep.ipamOptions[netlabel.MacAddress] = ep.iface.mac.String() + } + + if err = ep.assignAddress(ipam.driver, true, !n.postIPv6); err != nil { return nil, err } defer func() { @@ -675,6 +731,10 @@ func (n *network) CreateEndpoint(name string, options ...EndpointOption) (Endpoi } }() + if err = ep.assignAddress(ipam.driver, false, n.postIPv6); err != nil { + return nil, err + } + if err = n.getController().updateToStore(ep); err != nil { return nil, err } @@ -762,69 +822,81 @@ func (n *network) EndpointByID(id string) (Endpoint, error) { } func (n *network) updateSvcRecord(ep *endpoint, localEps []*endpoint, isAdd bool) { - if ep.isAnonymous() { - return + epName := ep.Name() + if iface := ep.Iface(); iface.Address() != nil { + myAliases := ep.MyAliases() + if isAdd { + if !ep.isAnonymous() { + n.addSvcRecords(epName, iface.Address().IP, true) + } + for _, alias := range myAliases { + n.addSvcRecords(alias, iface.Address().IP, false) + } + } else { + if !ep.isAnonymous() { + n.deleteSvcRecords(epName, iface.Address().IP, true) + } + for _, alias := range myAliases { + n.deleteSvcRecords(alias, iface.Address().IP, false) + } + } } +} +func (n *network) addSvcRecords(name string, epIP net.IP, ipMapUpdate bool) { c := n.getController() + c.Lock() + defer c.Unlock() sr, ok := c.svcDb[n.ID()] if !ok { - c.svcDb[n.ID()] = svcMap{} - sr = c.svcDb[n.ID()] - } - - n.Lock() - var recs []etchosts.Record - if iface := ep.Iface(); iface.Address() != nil { - if isAdd { - // If we already have this endpoint in service db just return - if _, ok := sr[ep.Name()]; ok { - n.Unlock() - return - } - - sr[ep.Name()] = iface.Address().IP - sr[ep.Name()+"."+n.name] = iface.Address().IP - } else { - delete(sr, ep.Name()) - delete(sr, ep.Name()+"."+n.name) + sr = svcInfo{ + svcMap: make(map[string][]net.IP), + ipMap: make(map[string]string), } - - recs = append(recs, etchosts.Record{ - Hosts: ep.Name(), - IP: iface.Address().IP.String(), - }) - - recs = append(recs, etchosts.Record{ - Hosts: ep.Name() + "." 
+ n.name, - IP: iface.Address().IP.String(), - }) + c.svcDb[n.ID()] = sr } - n.Unlock() - // If there are no records to add or delete then simply return here - if len(recs) == 0 { + if ipMapUpdate { + reverseIP := netutils.ReverseIP(epIP.String()) + if _, ok := sr.ipMap[reverseIP]; !ok { + sr.ipMap[reverseIP] = name + } + } + + ipList := sr.svcMap[name] + for _, ip := range ipList { + if ip.Equal(epIP) { + return + } + } + sr.svcMap[name] = append(sr.svcMap[name], epIP) +} + +func (n *network) deleteSvcRecords(name string, epIP net.IP, ipMapUpdate bool) { + c := n.getController() + c.Lock() + defer c.Unlock() + sr, ok := c.svcDb[n.ID()] + if !ok { return } - var sbList []*sandbox - for _, lEp := range localEps { - if ep.ID() == lEp.ID() { - continue - } - - if sb, hasSandbox := lEp.getSandbox(); hasSandbox { - sbList = append(sbList, sb) - } + if ipMapUpdate { + delete(sr.ipMap, netutils.ReverseIP(epIP.String())) } - for _, sb := range sbList { - if isAdd { - sb.addHostsEntries(recs) - } else { - sb.deleteHostsEntries(recs) + ipList := sr.svcMap[name] + for i, ip := range ipList { + if ip.Equal(epIP) { + ipList = append(ipList[:i], ipList[i+1:]...) + break } } + sr.svcMap[name] = ipList + + if len(ipList) == 0 { + delete(sr.svcMap, name) + } } func (n *network) getSvcRecords(ep *endpoint) []etchosts.Record { @@ -834,14 +906,14 @@ func (n *network) getSvcRecords(ep *endpoint) []etchosts.Record { var recs []etchosts.Record sr, _ := n.ctrlr.svcDb[n.id] - for h, ip := range sr { + for h, ip := range sr.svcMap { if ep != nil && strings.Split(h, ".")[0] == ep.Name() { continue } recs = append(recs, etchosts.Record{ Hosts: h, - IP: ip.String(), + IP: ip[0].String(), }) } @@ -921,7 +993,7 @@ func (n *network) ipamAllocateVersion(ipVer int, ipam ipamapi.Ipam) error { d := &IpamInfo{} (*infoList)[i] = d - d.PoolID, d.Pool, d.Meta, err = ipam.RequestPool(n.addrSpace, cfg.PreferredPool, cfg.SubPool, cfg.Options, ipVer == 6) + d.PoolID, d.Pool, d.Meta, err = ipam.RequestPool(n.addrSpace, cfg.PreferredPool, cfg.SubPool, n.ipamOptions, ipVer == 6) if err != nil { return err } @@ -944,7 +1016,10 @@ func (n *network) ipamAllocateVersion(ipVer int, ipam ipamapi.Ipam) error { // irrespective of whether ipam driver returned a gateway already. // If none of the above is true, libnetwork will allocate one. 
if cfg.Gateway != "" || d.Gateway == nil { - if d.Gateway, _, err = ipam.RequestAddress(d.PoolID, net.ParseIP(cfg.Gateway), nil); err != nil { + var gatewayOpts = map[string]string{ + ipamapi.RequestAddressType: netlabel.Gateway, + } + if d.Gateway, _, err = ipam.RequestAddress(d.PoolID, net.ParseIP(cfg.Gateway), gatewayOpts); err != nil { return types.InternalErrorf("failed to allocate gateway (%v): %v", cfg.Gateway, err) } } @@ -1097,7 +1172,7 @@ func (n *network) Scope() string { return n.driverScope() } -func (n *network) IpamConfig() (string, []*IpamConf, []*IpamConf) { +func (n *network) IpamConfig() (string, map[string]string, []*IpamConf, []*IpamConf) { n.Lock() defer n.Unlock() @@ -1116,5 +1191,34 @@ func (n *network) IpamConfig() (string, []*IpamConf, []*IpamConf) { v6L[i] = cc } - return n.ipamType, v4L, v6L + return n.ipamType, n.ipamOptions, v4L, v6L +} + +func (n *network) IpamInfo() ([]*IpamInfo, []*IpamInfo) { + n.Lock() + defer n.Unlock() + + v4Info := make([]*IpamInfo, len(n.ipamV4Info)) + v6Info := make([]*IpamInfo, len(n.ipamV6Info)) + + for i, info := range n.ipamV4Info { + ic := &IpamInfo{} + info.CopyTo(ic) + v4Info[i] = ic + } + + for i, info := range n.ipamV6Info { + ic := &IpamInfo{} + info.CopyTo(ic) + v6Info[i] = ic + } + + return v4Info, v6Info +} + +func (n *network) Internal() bool { + n.Lock() + defer n.Unlock() + + return n.internal } diff --git a/vendor/github.com/docker/libnetwork/resolvconf/dns/resolvconf.go b/vendor/github.com/docker/libnetwork/resolvconf/dns/resolvconf.go index f981b1fc..6c6dac58 100644 --- a/vendor/github.com/docker/libnetwork/resolvconf/dns/resolvconf.go +++ b/vendor/github.com/docker/libnetwork/resolvconf/dns/resolvconf.go @@ -5,7 +5,7 @@ import ( ) // IPLocalhost is a regex patter for localhost IP address range. -const IPLocalhost = `((127\.([0-9]{1,3}\.){2}[0-9]{1,3})|(::1))` +const IPLocalhost = `((127\.([0-9]{1,3}\.){2}[0-9]{1,3})|(::1)$)` var localhostIPRegexp = regexp.MustCompile(IPLocalhost) diff --git a/vendor/github.com/docker/libnetwork/resolver.go b/vendor/github.com/docker/libnetwork/resolver.go new file mode 100644 index 00000000..d395ab46 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/resolver.go @@ -0,0 +1,208 @@ +package libnetwork + +import ( + "fmt" + "net" + "strings" + + log "github.com/Sirupsen/logrus" + "github.com/docker/libnetwork/iptables" + "github.com/miekg/dns" +) + +// Resolver represents the embedded DNS server in Docker. It operates +// by listening on container's loopback interface for DNS queries. +type Resolver interface { + // Start starts the name server for the container + Start() error + // Stop stops the name server for the container. Stopped resolver + // can be reused after running the SetupFunc again. + Stop() + // SetupFunc() provides the setup function that should be run + // in the container's network namespace. + SetupFunc() func() + // NameServer() returns the IP of the DNS resolver for the + // containers. + NameServer() string + // To configure external name servers the resolver should use + SetExtServers([]string) + // ResolverOptions returns resolv.conf options that should be set + ResolverOptions() []string +} + +const ( + resolverIP = "127.0.0.11" + dnsPort = "53" + ptrIPv4domain = ".in-addr.arpa." + ptrIPv6domain = ".ip6.arpa." 
+ respTTL = 1800 +) + +// resolver implements the Resolver interface +type resolver struct { + sb *sandbox + extDNS []string + server *dns.Server + conn *net.UDPConn + err error +} + +// NewResolver creates a new instance of the Resolver +func NewResolver(sb *sandbox) Resolver { + return &resolver{ + sb: sb, + err: fmt.Errorf("setup not done yet"), + } +} + +func (r *resolver) SetupFunc() func() { + return (func() { + var err error + + addr := &net.UDPAddr{ + IP: net.ParseIP(resolverIP), + } + + r.conn, err = net.ListenUDP("udp", addr) + if err != nil { + r.err = fmt.Errorf("error in opening name server socket %v", err) + return + } + laddr := r.conn.LocalAddr() + _, ipPort, _ := net.SplitHostPort(laddr.String()) + + rules := [][]string{ + {"-t", "nat", "-A", "OUTPUT", "-d", resolverIP, "-p", "udp", "--dport", dnsPort, "-j", "DNAT", "--to-destination", laddr.String()}, + {"-t", "nat", "-A", "POSTROUTING", "-s", resolverIP, "-p", "udp", "--sport", ipPort, "-j", "SNAT", "--to-source", ":" + dnsPort}, + } + + for _, rule := range rules { + r.err = iptables.RawCombinedOutput(rule...) + if r.err != nil { + return + } + } + r.err = nil + }) +} + +func (r *resolver) Start() error { + // make sure the resolver has been setup before starting + if r.err != nil { + return r.err + } + s := &dns.Server{Handler: r, PacketConn: r.conn} + r.server = s + go func() { + s.ActivateAndServe() + }() + return nil +} + +func (r *resolver) Stop() { + if r.server != nil { + r.server.Shutdown() + } + r.conn = nil + r.err = fmt.Errorf("setup not done yet") +} + +func (r *resolver) SetExtServers(dns []string) { + r.extDNS = dns +} + +func (r *resolver) NameServer() string { + return resolverIP +} + +func (r *resolver) ResolverOptions() []string { + return []string{"ndots:0"} +} + +func (r *resolver) handleIPv4Query(name string, query *dns.Msg) (*dns.Msg, error) { + addr := r.sb.ResolveName(name) + if addr == nil { + return nil, nil + } + + log.Debugf("Lookup for %s: IP %s", name, addr.String()) + + resp := new(dns.Msg) + resp.SetReply(query) + + rr := new(dns.A) + rr.Hdr = dns.RR_Header{Name: name, Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: respTTL} + rr.A = addr + resp.Answer = append(resp.Answer, rr) + return resp, nil +} + +func (r *resolver) handlePTRQuery(ptr string, query *dns.Msg) (*dns.Msg, error) { + parts := []string{} + + if strings.HasSuffix(ptr, ptrIPv4domain) { + parts = strings.Split(ptr, ptrIPv4domain) + } else if strings.HasSuffix(ptr, ptrIPv6domain) { + parts = strings.Split(ptr, ptrIPv6domain) + } else { + return nil, fmt.Errorf("invalid PTR query, %v", ptr) + } + + host := r.sb.ResolveIP(parts[0]) + if len(host) == 0 { + return nil, nil + } + + log.Debugf("Lookup for IP %s: name %s", parts[0], host) + fqdn := dns.Fqdn(host) + + resp := new(dns.Msg) + resp.SetReply(query) + + rr := new(dns.PTR) + rr.Hdr = dns.RR_Header{Name: ptr, Rrtype: dns.TypePTR, Class: dns.ClassINET, Ttl: respTTL} + rr.Ptr = fqdn + resp.Answer = append(resp.Answer, rr) + return resp, nil +} + +func (r *resolver) ServeDNS(w dns.ResponseWriter, query *dns.Msg) { + var ( + resp *dns.Msg + err error + ) + + name := query.Question[0].Name + if query.Question[0].Qtype == dns.TypeA { + resp, err = r.handleIPv4Query(name, query) + } else if query.Question[0].Qtype == dns.TypePTR { + resp, err = r.handlePTRQuery(name, query) + } + + if err != nil { + log.Error(err) + return + } + + if resp == nil { + if len(r.extDNS) == 0 { + return + } + log.Debugf("Querying ext dns %s for %s[%d]", r.extDNS[0], name, query.Question[0].Qtype) + + c 
:= &dns.Client{Net: "udp"} + addr := fmt.Sprintf("%s:%d", r.extDNS[0], 53) + + // TODO: iterate over avilable servers in case of error + resp, _, err = c.Exchange(query, addr) + if err != nil { + log.Errorf("external resolution failed, %s", err) + return + } + } + + err = w.WriteMsg(resp) + if err != nil { + log.Errorf("error writing resolver resp, %s", err) + } +} diff --git a/vendor/github.com/docker/libnetwork/sandbox.go b/vendor/github.com/docker/libnetwork/sandbox.go index b29c67fd..9dbb100e 100644 --- a/vendor/github.com/docker/libnetwork/sandbox.go +++ b/vendor/github.com/docker/libnetwork/sandbox.go @@ -5,9 +5,11 @@ import ( "encoding/json" "fmt" "io/ioutil" + "net" "os" "path" "path/filepath" + "strings" "sync" log "github.com/Sirupsen/logrus" @@ -38,6 +40,12 @@ type Sandbox interface { Rename(name string) error // Delete destroys this container after detaching it from all connected endpoints. Delete() error + // ResolveName searches for the service name in the networks to which the sandbox + // is connected to. + ResolveName(name string) net.IP + // ResolveIP returns the service name for the passed in IP. IP is in reverse dotted + // notation; the format used for DNS PTR records + ResolveIP(name string) string } // SandboxOption is a option setter function type used to pass varios options to @@ -59,10 +67,12 @@ type sandbox struct { id string containerID string config containerConfig + extDNS []string osSbox osl.Sandbox controller *controller + resolver Resolver + resolverOnce sync.Once refCnt int - hostsOnce sync.Once endpoints epHeap epPriority map[string]int joinLeaveDone chan struct{} @@ -188,7 +198,7 @@ func (sb *sandbox) Delete() error { log.Warnf("Failed detaching sandbox %s from endpoint %s: %v\n", sb.ID(), ep.ID(), err) } - if err := ep.Delete(); err != nil { + if err := ep.Delete(false); err != nil { log.Warnf("Failed deleting endpoint %s: %v\n", ep.ID(), err) } } @@ -203,6 +213,10 @@ func (sb *sandbox) Delete() error { // likely not required any more. Drop it. etchosts.Drop(sb.config.hostsPath) + if sb.resolver != nil { + sb.resolver.Stop() + } + if sb.osSbox != nil && !sb.config.useDefaultSandBox { sb.osSbox.Destroy() } @@ -292,6 +306,26 @@ func (sb *sandbox) UnmarshalJSON(b []byte) (err error) { return nil } +func (sb *sandbox) startResolver() { + sb.resolverOnce.Do(func() { + var err error + sb.resolver = NewResolver(sb) + defer func() { + if err != nil { + sb.resolver = nil + } + }() + + sb.rebuildDNS() + sb.resolver.SetExtServers(sb.extDNS) + + sb.osSbox.InvokeFunc(sb.resolver.SetupFunc()) + if err := sb.resolver.Start(); err != nil { + log.Errorf("Resolver Setup/Start failed for container %s, %q", sb.ContainerID(), err) + } + }) +} + func (sb *sandbox) setupResolutionFiles() error { if err := sb.buildHostsFile(); err != nil { return err @@ -362,24 +396,114 @@ func (sb *sandbox) updateGateway(ep *endpoint) error { return nil } +func (sb *sandbox) ResolveIP(ip string) string { + var svc string + log.Debugf("IP To resolve %v", ip) + + for _, ep := range sb.getConnectedEndpoints() { + n := ep.getNetwork() + + sr, ok := n.getController().svcDb[n.ID()] + if !ok { + continue + } + + nwName := n.Name() + n.Lock() + svc, ok = sr.ipMap[ip] + n.Unlock() + if ok { + return svc + "." 
+ nwName + } + } + return svc +} + +func (sb *sandbox) ResolveName(name string) net.IP { + var ip net.IP + parts := strings.Split(name, ".") + log.Debugf("To resolve %v", parts) + + reqName := parts[0] + networkName := "" + if len(parts) > 1 { + networkName = parts[1] + } + epList := sb.getConnectedEndpoints() + // First check for local container alias + ip = sb.resolveName(reqName, networkName, epList, true) + if ip != nil { + return ip + } + + // Resolve the actual container name + return sb.resolveName(reqName, networkName, epList, false) +} + +func (sb *sandbox) resolveName(req string, networkName string, epList []*endpoint, alias bool) net.IP { + for _, ep := range epList { + name := req + n := ep.getNetwork() + + if networkName != "" && networkName != n.Name() { + continue + } + + if alias { + if ep.aliases == nil { + continue + } + + var ok bool + ep.Lock() + name, ok = ep.aliases[req] + ep.Unlock() + if !ok { + continue + } + } else { + // If it is a regular lookup and if the requested name is an alias + // dont perform a svc lookup for this endpoint. + ep.Lock() + if _, ok := ep.aliases[req]; ok { + ep.Unlock() + continue + } + ep.Unlock() + } + + sr, ok := n.getController().svcDb[n.ID()] + if !ok { + continue + } + + n.Lock() + ip, ok := sr.svcMap[name] + n.Unlock() + if ok { + return ip[0] + } + } + return nil +} + func (sb *sandbox) SetKey(basePath string) error { - var err error if basePath == "" { return types.BadRequestErrorf("invalid sandbox key") } sb.Lock() - osSbox := sb.osSbox + oldosSbox := sb.osSbox sb.Unlock() - if osSbox != nil { + if oldosSbox != nil { // If we already have an OS sandbox, release the network resources from that // and destroy the OS snab. We are moving into a new home further down. Note that none // of the network resources gets destroyed during the move. sb.releaseOSSbox() } - osSbox, err = osl.GetSandboxForExternalKey(basePath, sb.Key()) + osSbox, err := osl.GetSandboxForExternalKey(basePath, sb.Key()) if err != nil { return err } @@ -395,6 +519,17 @@ func (sb *sandbox) SetKey(basePath string) error { } }() + // If the resolver was setup before stop it and set it up in the + // new osl sandbox. 
+ if oldosSbox != nil && sb.resolver != nil { + sb.resolver.Stop() + + sb.osSbox.InvokeFunc(sb.resolver.SetupFunc()) + if err := sb.resolver.Start(); err != nil { + log.Errorf("Resolver Setup/Start failed for container %s, %q", sb.ContainerID(), err) + } + } + for _, ep := range sb.getConnectedEndpoints() { if err = sb.populateNetworkResources(ep); err != nil { return err @@ -460,6 +595,10 @@ func (sb *sandbox) populateNetworkResources(ep *endpoint) error { i := ep.iface ep.Unlock() + if ep.needResolver() { + sb.startResolver() + } + if i != nil && i.srcName != "" { var ifaceOptions []osl.IfaceOption @@ -600,45 +739,21 @@ func (sb *sandbox) buildHostsFile() error { return etchosts.Build(sb.config.hostsPath, "", sb.config.hostName, sb.config.domainName, extraContent) } -func (sb *sandbox) updateHostsFile(ifaceIP string, svcRecords []etchosts.Record) error { - var err error +func (sb *sandbox) updateHostsFile(ifaceIP string) error { + var mhost string if sb.config.originHostsPath != "" { return nil } - max := func(a, b int) int { - if a < b { - return b - } - - return a + if sb.config.domainName != "" { + mhost = fmt.Sprintf("%s.%s %s", sb.config.hostName, sb.config.domainName, + sb.config.hostName) + } else { + mhost = sb.config.hostName } - extraContent := make([]etchosts.Record, 0, - max(len(sb.config.extraHosts), len(svcRecords))) - - sb.hostsOnce.Do(func() { - // Rebuild the hosts file accounting for the passed - // interface IP and service records - - for _, extraHost := range sb.config.extraHosts { - extraContent = append(extraContent, - etchosts.Record{Hosts: extraHost.name, IP: extraHost.IP}) - } - - err = etchosts.Build(sb.config.hostsPath, ifaceIP, - sb.config.hostName, sb.config.domainName, extraContent) - }) - - if err != nil { - return err - } - - extraContent = extraContent[:0] - for _, svc := range svcRecords { - extraContent = append(extraContent, svc) - } + extraContent := []etchosts.Record{{Hosts: mhost, IP: ifaceIP}} sb.addHostsEntries(extraContent) return nil @@ -808,6 +923,48 @@ func (sb *sandbox) updateDNS(ipv6Enabled bool) error { return os.Rename(tmpResolvFile.Name(), sb.config.resolvConfPath) } +// Embedded DNS server has to be enabled for this sandbox. Rebuild the container's +// resolv.conf by doing the follwing +// - Save the external name servers in resolv.conf in the sandbox +// - Add only the embedded server's IP to container's resolv.conf +// - If the embedded server needs any resolv.conf options add it to the current list +func (sb *sandbox) rebuildDNS() error { + currRC, err := resolvconf.GetSpecific(sb.config.resolvConfPath) + if err != nil { + return err + } + + // localhost entries have already been filtered out from the list + sb.extDNS = resolvconf.GetNameservers(currRC.Content) + + var ( + dnsList = []string{sb.resolver.NameServer()} + dnsOptionsList = resolvconf.GetOptions(currRC.Content) + dnsSearchList = resolvconf.GetSearchDomains(currRC.Content) + ) + + // Resolver returns the options in the format resolv.conf expects + dnsOptionsList = append(dnsOptionsList, sb.resolver.ResolverOptions()...) 
+ + dir := path.Dir(sb.config.resolvConfPath) + tmpResolvFile, err := ioutil.TempFile(dir, "resolv") + if err != nil { + return err + } + + // Change the perms to filePerm (0644) since ioutil.TempFile creates it by default as 0600 + if err := os.Chmod(tmpResolvFile.Name(), filePerm); err != nil { + return err + } + + _, err = resolvconf.Build(tmpResolvFile.Name(), dnsList, dnsSearchList, dnsOptionsList) + if err != nil { + return err + } + + return os.Rename(tmpResolvFile.Name(), sb.config.resolvConfPath) +} + // joinLeaveStart waits to ensure there are no joins or leaves in progress and // marks this join/leave in progress without race func (sb *sandbox) joinLeaveStart() { diff --git a/vendor/github.com/docker/libnetwork/sandbox_externalkey.go b/vendor/github.com/docker/libnetwork/sandbox_externalkey.go index 0b2385b3..3c362f30 100644 --- a/vendor/github.com/docker/libnetwork/sandbox_externalkey.go +++ b/vendor/github.com/docker/libnetwork/sandbox_externalkey.go @@ -1,19 +1,6 @@ package libnetwork -import ( - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net" - "os" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/pkg/reexec" - "github.com/docker/libnetwork/types" - "github.com/opencontainers/runc/libcontainer" - "github.com/opencontainers/runc/libcontainer/configs" -) +import "github.com/docker/docker/pkg/reexec" type setKeyData struct { ContainerID string @@ -23,163 +10,3 @@ type setKeyData struct { func init() { reexec.Register("libnetwork-setkey", processSetKeyReexec) } - -const udsBase = "/var/lib/docker/network/files/" -const success = "success" - -// processSetKeyReexec is a private function that must be called only on an reexec path -// It expects 3 args { [0] = "libnetwork-setkey", [1] = , [2] = } -// It also expects libcontainer.State as a json string in -// Refer to https://github.com/opencontainers/runc/pull/160/ for more information -func processSetKeyReexec() { - var err error - - // Return a failure to the calling process via ExitCode - defer func() { - if err != nil { - logrus.Fatalf("%v", err) - } - }() - - // expecting 3 args {[0]="libnetwork-setkey", [1]=, [2]= } - if len(os.Args) < 3 { - err = fmt.Errorf("Re-exec expects 3 args, received : %d", len(os.Args)) - return - } - containerID := os.Args[1] - - // We expect libcontainer.State as a json string in - stateBuf, err := ioutil.ReadAll(os.Stdin) - if err != nil { - return - } - var state libcontainer.State - if err = json.Unmarshal(stateBuf, &state); err != nil { - return - } - - controllerID := os.Args[2] - key := state.NamespacePaths[configs.NamespaceType("NEWNET")] - - err = SetExternalKey(controllerID, containerID, key) - return -} - -// SetExternalKey provides a convenient way to set an External key to a sandbox -func SetExternalKey(controllerID string, containerID string, key string) error { - keyData := setKeyData{ - ContainerID: containerID, - Key: key} - - c, err := net.Dial("unix", udsBase+controllerID+".sock") - if err != nil { - return err - } - defer c.Close() - - if err = sendKey(c, keyData); err != nil { - return fmt.Errorf("sendKey failed with : %v", err) - } - return processReturn(c) -} - -func sendKey(c net.Conn, data setKeyData) error { - var err error - defer func() { - if err != nil { - c.Close() - } - }() - - var b []byte - if b, err = json.Marshal(data); err != nil { - return err - } - - _, err = c.Write(b) - return err -} - -func processReturn(r io.Reader) error { - buf := make([]byte, 1024) - n, err := r.Read(buf[:]) - if err != nil { - return fmt.Errorf("failed to read buf in 
processReturn : %v", err) - } - if string(buf[0:n]) != success { - return fmt.Errorf(string(buf[0:n])) - } - return nil -} - -func (c *controller) startExternalKeyListener() error { - if err := os.MkdirAll(udsBase, 0600); err != nil { - return err - } - uds := udsBase + c.id + ".sock" - l, err := net.Listen("unix", uds) - if err != nil { - return err - } - if err := os.Chmod(uds, 0600); err != nil { - l.Close() - return err - } - c.Lock() - c.extKeyListener = l - c.Unlock() - - go c.acceptClientConnections(uds, l) - return nil -} - -func (c *controller) acceptClientConnections(sock string, l net.Listener) { - for { - conn, err := l.Accept() - if err != nil { - if _, err1 := os.Stat(sock); os.IsNotExist(err1) { - logrus.Debugf("Unix socket %s doesnt exist. cannot accept client connections", sock) - return - } - logrus.Errorf("Error accepting connection %v", err) - continue - } - go func() { - err := c.processExternalKey(conn) - ret := success - if err != nil { - ret = err.Error() - } - - _, err = conn.Write([]byte(ret)) - if err != nil { - logrus.Errorf("Error returning to the client %v", err) - } - }() - } -} - -func (c *controller) processExternalKey(conn net.Conn) error { - buf := make([]byte, 1280) - nr, err := conn.Read(buf) - if err != nil { - return err - } - var s setKeyData - if err = json.Unmarshal(buf[0:nr], &s); err != nil { - return err - } - - var sandbox Sandbox - search := SandboxContainerWalker(&sandbox, s.ContainerID) - c.WalkSandboxes(search) - if sandbox == nil { - return types.BadRequestErrorf("no sandbox present for %s", s.ContainerID) - } - - return sandbox.SetKey(s.Key) -} - -func (c *controller) stopExternalKeyListener() { - c.extKeyListener.Close() -} diff --git a/vendor/github.com/docker/libnetwork/sandbox_externalkey_unix.go b/vendor/github.com/docker/libnetwork/sandbox_externalkey_unix.go new file mode 100644 index 00000000..74ae2af7 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/sandbox_externalkey_unix.go @@ -0,0 +1,177 @@ +// +build !windows + +package libnetwork + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net" + "os" + + "github.com/Sirupsen/logrus" + "github.com/docker/libnetwork/types" + "github.com/opencontainers/runc/libcontainer" + "github.com/opencontainers/runc/libcontainer/configs" +) + +const udsBase = "/var/lib/docker/network/files/" +const success = "success" + +// processSetKeyReexec is a private function that must be called only on an reexec path +// It expects 3 args { [0] = "libnetwork-setkey", [1] = , [2] = } +// It also expects libcontainer.State as a json string in +// Refer to https://github.com/opencontainers/runc/pull/160/ for more information +func processSetKeyReexec() { + var err error + + // Return a failure to the calling process via ExitCode + defer func() { + if err != nil { + logrus.Fatalf("%v", err) + } + }() + + // expecting 3 args {[0]="libnetwork-setkey", [1]=, [2]= } + if len(os.Args) < 3 { + err = fmt.Errorf("Re-exec expects 3 args, received : %d", len(os.Args)) + return + } + containerID := os.Args[1] + + // We expect libcontainer.State as a json string in + stateBuf, err := ioutil.ReadAll(os.Stdin) + if err != nil { + return + } + var state libcontainer.State + if err = json.Unmarshal(stateBuf, &state); err != nil { + return + } + + controllerID := os.Args[2] + key := state.NamespacePaths[configs.NamespaceType("NEWNET")] + + err = SetExternalKey(controllerID, containerID, key) + return +} + +// SetExternalKey provides a convenient way to set an External key to a sandbox +func 
SetExternalKey(controllerID string, containerID string, key string) error { + keyData := setKeyData{ + ContainerID: containerID, + Key: key} + + c, err := net.Dial("unix", udsBase+controllerID+".sock") + if err != nil { + return err + } + defer c.Close() + + if err = sendKey(c, keyData); err != nil { + return fmt.Errorf("sendKey failed with : %v", err) + } + return processReturn(c) +} + +func sendKey(c net.Conn, data setKeyData) error { + var err error + defer func() { + if err != nil { + c.Close() + } + }() + + var b []byte + if b, err = json.Marshal(data); err != nil { + return err + } + + _, err = c.Write(b) + return err +} + +func processReturn(r io.Reader) error { + buf := make([]byte, 1024) + n, err := r.Read(buf[:]) + if err != nil { + return fmt.Errorf("failed to read buf in processReturn : %v", err) + } + if string(buf[0:n]) != success { + return fmt.Errorf(string(buf[0:n])) + } + return nil +} + +func (c *controller) startExternalKeyListener() error { + if err := os.MkdirAll(udsBase, 0600); err != nil { + return err + } + uds := udsBase + c.id + ".sock" + l, err := net.Listen("unix", uds) + if err != nil { + return err + } + if err := os.Chmod(uds, 0600); err != nil { + l.Close() + return err + } + c.Lock() + c.extKeyListener = l + c.Unlock() + + go c.acceptClientConnections(uds, l) + return nil +} + +func (c *controller) acceptClientConnections(sock string, l net.Listener) { + for { + conn, err := l.Accept() + if err != nil { + if _, err1 := os.Stat(sock); os.IsNotExist(err1) { + logrus.Debugf("Unix socket %s doesnt exist. cannot accept client connections", sock) + return + } + logrus.Errorf("Error accepting connection %v", err) + continue + } + go func() { + err := c.processExternalKey(conn) + ret := success + if err != nil { + ret = err.Error() + } + + _, err = conn.Write([]byte(ret)) + if err != nil { + logrus.Errorf("Error returning to the client %v", err) + } + }() + } +} + +func (c *controller) processExternalKey(conn net.Conn) error { + buf := make([]byte, 1280) + nr, err := conn.Read(buf) + if err != nil { + return err + } + var s setKeyData + if err = json.Unmarshal(buf[0:nr], &s); err != nil { + return err + } + + var sandbox Sandbox + search := SandboxContainerWalker(&sandbox, s.ContainerID) + c.WalkSandboxes(search) + if sandbox == nil { + return types.BadRequestErrorf("no sandbox present for %s", s.ContainerID) + } + + return sandbox.SetKey(s.Key) +} + +func (c *controller) stopExternalKeyListener() { + c.extKeyListener.Close() +} diff --git a/vendor/github.com/docker/libnetwork/sandbox_externalkey_windows.go b/vendor/github.com/docker/libnetwork/sandbox_externalkey_windows.go new file mode 100644 index 00000000..3c4113e2 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/sandbox_externalkey_windows.go @@ -0,0 +1,45 @@ +// +build windows + +package libnetwork + +import ( + "io" + "net" + + "github.com/docker/libnetwork/types" +) + +// processSetKeyReexec is a private function that must be called only on an reexec path +// It expects 3 args { [0] = "libnetwork-setkey", [1] = , [2] = } +// It also expects libcontainer.State as a json string in +// Refer to https://github.com/opencontainers/runc/pull/160/ for more information +func processSetKeyReexec() { +} + +// SetExternalKey provides a convenient way to set an External key to a sandbox +func SetExternalKey(controllerID string, containerID string, key string) error { + return types.NotImplementedErrorf("SetExternalKey isn't supported on non linux systems") +} + +func sendKey(c net.Conn, data setKeyData) error { 
+ return types.NotImplementedErrorf("sendKey isn't supported on non linux systems") +} + +func processReturn(r io.Reader) error { + return types.NotImplementedErrorf("processReturn isn't supported on non linux systems") +} + +// no-op on non linux systems +func (c *controller) startExternalKeyListener() error { + return nil +} + +func (c *controller) acceptClientConnections(sock string, l net.Listener) { +} + +func (c *controller) processExternalKey(conn net.Conn) error { + return types.NotImplementedErrorf("processExternalKey isn't supported on non linux systems") +} + +func (c *controller) stopExternalKeyListener() { +} diff --git a/vendor/github.com/docker/libnetwork/store.go b/vendor/github.com/docker/libnetwork/store.go index 1ea2f7ae..be3e8ae6 100644 --- a/vendor/github.com/docker/libnetwork/store.go +++ b/vendor/github.com/docker/libnetwork/store.go @@ -129,7 +129,9 @@ func (c *controller) getNetworksFromStore() ([]*network, error) { for _, kvo := range kvol { n := kvo.(*network) + n.Lock() n.ctrlr = c + n.Unlock() ec := &endpointCnt{n: n} err = store.GetObject(datastore.Key(ec.Key()...), ec) @@ -176,7 +178,6 @@ func (n *network) getEndpointsFromStore() ([]*endpoint, error) { for _, kvo := range kvol { ep := kvo.(*endpoint) - ep.network = n epl = append(epl, ep) } } diff --git a/vendor/github.com/docker/libtrust/CONTRIBUTING.md b/vendor/github.com/docker/libtrust/CONTRIBUTING.md deleted file mode 100644 index 05be0f8a..00000000 --- a/vendor/github.com/docker/libtrust/CONTRIBUTING.md +++ /dev/null @@ -1,13 +0,0 @@ -# Contributing to libtrust - -Want to hack on libtrust? Awesome! Here are instructions to get you -started. - -libtrust is a part of the [Docker](https://www.docker.com) project, and follows -the same rules and principles. If you're already familiar with the way -Docker does things, you'll feel right at home. - -Otherwise, go read -[Docker's contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md). - -Happy hacking! diff --git a/vendor/github.com/docker/libtrust/LICENSE b/vendor/github.com/docker/libtrust/LICENSE deleted file mode 100644 index 27448585..00000000 --- a/vendor/github.com/docker/libtrust/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2014 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/docker/libtrust/MAINTAINERS b/vendor/github.com/docker/libtrust/MAINTAINERS deleted file mode 100644 index 9768175f..00000000 --- a/vendor/github.com/docker/libtrust/MAINTAINERS +++ /dev/null @@ -1,3 +0,0 @@ -Solomon Hykes -Josh Hawn (github: jlhawn) -Derek McGowan (github: dmcgowan) diff --git a/vendor/github.com/docker/libtrust/README.md b/vendor/github.com/docker/libtrust/README.md deleted file mode 100644 index 8e7db381..00000000 --- a/vendor/github.com/docker/libtrust/README.md +++ /dev/null @@ -1,18 +0,0 @@ -# libtrust - -Libtrust is library for managing authentication and authorization using public key cryptography. - -Authentication is handled using the identity attached to the public key. -Libtrust provides multiple methods to prove possession of the private key associated with an identity. - - TLS x509 certificates - - Signature verification - - Key Challenge - -Authorization and access control is managed through a distributed trust graph. -Trust servers are used as the authorities of the trust graph and allow caching portions of the graph for faster access. - -## Copyright and license - -Code and documentation copyright 2014 Docker, inc. Code released under the Apache 2.0 license. -Docs released under Creative commons. 
- diff --git a/vendor/github.com/docker/libtrust/certificates.go b/vendor/github.com/docker/libtrust/certificates.go deleted file mode 100644 index 3dcca33c..00000000 --- a/vendor/github.com/docker/libtrust/certificates.go +++ /dev/null @@ -1,175 +0,0 @@ -package libtrust - -import ( - "crypto/rand" - "crypto/x509" - "crypto/x509/pkix" - "encoding/pem" - "fmt" - "io/ioutil" - "math/big" - "net" - "time" -) - -type certTemplateInfo struct { - commonName string - domains []string - ipAddresses []net.IP - isCA bool - clientAuth bool - serverAuth bool -} - -func generateCertTemplate(info *certTemplateInfo) *x509.Certificate { - // Generate a certificate template which is valid from the past week to - // 10 years from now. The usage of the certificate depends on the - // specified fields in the given certTempInfo object. - var ( - keyUsage x509.KeyUsage - extKeyUsage []x509.ExtKeyUsage - ) - - if info.isCA { - keyUsage = x509.KeyUsageCertSign - } - - if info.clientAuth { - extKeyUsage = append(extKeyUsage, x509.ExtKeyUsageClientAuth) - } - - if info.serverAuth { - extKeyUsage = append(extKeyUsage, x509.ExtKeyUsageServerAuth) - } - - return &x509.Certificate{ - SerialNumber: big.NewInt(0), - Subject: pkix.Name{ - CommonName: info.commonName, - }, - NotBefore: time.Now().Add(-time.Hour * 24 * 7), - NotAfter: time.Now().Add(time.Hour * 24 * 365 * 10), - DNSNames: info.domains, - IPAddresses: info.ipAddresses, - IsCA: info.isCA, - KeyUsage: keyUsage, - ExtKeyUsage: extKeyUsage, - BasicConstraintsValid: info.isCA, - } -} - -func generateCert(pub PublicKey, priv PrivateKey, subInfo, issInfo *certTemplateInfo) (cert *x509.Certificate, err error) { - pubCertTemplate := generateCertTemplate(subInfo) - privCertTemplate := generateCertTemplate(issInfo) - - certDER, err := x509.CreateCertificate( - rand.Reader, pubCertTemplate, privCertTemplate, - pub.CryptoPublicKey(), priv.CryptoPrivateKey(), - ) - if err != nil { - return nil, fmt.Errorf("failed to create certificate: %s", err) - } - - cert, err = x509.ParseCertificate(certDER) - if err != nil { - return nil, fmt.Errorf("failed to parse certificate: %s", err) - } - - return -} - -// GenerateSelfSignedServerCert creates a self-signed certificate for the -// given key which is to be used for TLS servers with the given domains and -// IP addresses. -func GenerateSelfSignedServerCert(key PrivateKey, domains []string, ipAddresses []net.IP) (*x509.Certificate, error) { - info := &certTemplateInfo{ - commonName: key.KeyID(), - domains: domains, - ipAddresses: ipAddresses, - serverAuth: true, - } - - return generateCert(key.PublicKey(), key, info, info) -} - -// GenerateSelfSignedClientCert creates a self-signed certificate for the -// given key which is to be used for TLS clients. -func GenerateSelfSignedClientCert(key PrivateKey) (*x509.Certificate, error) { - info := &certTemplateInfo{ - commonName: key.KeyID(), - clientAuth: true, - } - - return generateCert(key.PublicKey(), key, info, info) -} - -// GenerateCACert creates a certificate which can be used as a trusted -// certificate authority. -func GenerateCACert(signer PrivateKey, trustedKey PublicKey) (*x509.Certificate, error) { - subjectInfo := &certTemplateInfo{ - commonName: trustedKey.KeyID(), - isCA: true, - } - issuerInfo := &certTemplateInfo{ - commonName: signer.KeyID(), - } - - return generateCert(trustedKey, signer, subjectInfo, issuerInfo) -} - -// GenerateCACertPool creates a certificate authority pool to be used for a -// TLS configuration. 
Any self-signed certificates issued by the specified -// trusted keys will be verified during a TLS handshake -func GenerateCACertPool(signer PrivateKey, trustedKeys []PublicKey) (*x509.CertPool, error) { - certPool := x509.NewCertPool() - - for _, trustedKey := range trustedKeys { - cert, err := GenerateCACert(signer, trustedKey) - if err != nil { - return nil, fmt.Errorf("failed to generate CA certificate: %s", err) - } - - certPool.AddCert(cert) - } - - return certPool, nil -} - -// LoadCertificateBundle loads certificates from the given file. The file should be pem encoded -// containing one or more certificates. The expected pem type is "CERTIFICATE". -func LoadCertificateBundle(filename string) ([]*x509.Certificate, error) { - b, err := ioutil.ReadFile(filename) - if err != nil { - return nil, err - } - certificates := []*x509.Certificate{} - var block *pem.Block - block, b = pem.Decode(b) - for ; block != nil; block, b = pem.Decode(b) { - if block.Type == "CERTIFICATE" { - cert, err := x509.ParseCertificate(block.Bytes) - if err != nil { - return nil, err - } - certificates = append(certificates, cert) - } else { - return nil, fmt.Errorf("invalid pem block type: %s", block.Type) - } - } - - return certificates, nil -} - -// LoadCertificatePool loads a CA pool from the given file. The file should be pem encoded -// containing one or more certificates. The expected pem type is "CERTIFICATE". -func LoadCertificatePool(filename string) (*x509.CertPool, error) { - certs, err := LoadCertificateBundle(filename) - if err != nil { - return nil, err - } - pool := x509.NewCertPool() - for _, cert := range certs { - pool.AddCert(cert) - } - return pool, nil -} diff --git a/vendor/github.com/docker/libtrust/certificates_test.go b/vendor/github.com/docker/libtrust/certificates_test.go deleted file mode 100644 index c111f353..00000000 --- a/vendor/github.com/docker/libtrust/certificates_test.go +++ /dev/null @@ -1,111 +0,0 @@ -package libtrust - -import ( - "encoding/pem" - "io/ioutil" - "net" - "os" - "path" - "testing" -) - -func TestGenerateCertificates(t *testing.T) { - key, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatal(err) - } - - _, err = GenerateSelfSignedServerCert(key, []string{"localhost"}, []net.IP{net.ParseIP("127.0.0.1")}) - if err != nil { - t.Fatal(err) - } - - _, err = GenerateSelfSignedClientCert(key) - if err != nil { - t.Fatal(err) - } -} - -func TestGenerateCACertPool(t *testing.T) { - key, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatal(err) - } - - caKey1, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatal(err) - } - - caKey2, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatal(err) - } - - _, err = GenerateCACertPool(key, []PublicKey{caKey1.PublicKey(), caKey2.PublicKey()}) - if err != nil { - t.Fatal(err) - } -} - -func TestLoadCertificates(t *testing.T) { - key, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatal(err) - } - - caKey1, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatal(err) - } - caKey2, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatal(err) - } - - cert1, err := GenerateCACert(caKey1, key) - if err != nil { - t.Fatal(err) - } - cert2, err := GenerateCACert(caKey2, key) - if err != nil { - t.Fatal(err) - } - - d, err := ioutil.TempDir("/tmp", "cert-test") - if err != nil { - t.Fatal(err) - } - caFile := path.Join(d, "ca.pem") - f, err := os.OpenFile(caFile, os.O_CREATE|os.O_WRONLY, 0644) - if err != nil { - t.Fatal(err) - } - - err = pem.Encode(f, &pem.Block{Type: 
"CERTIFICATE", Bytes: cert1.Raw}) - if err != nil { - t.Fatal(err) - } - err = pem.Encode(f, &pem.Block{Type: "CERTIFICATE", Bytes: cert2.Raw}) - if err != nil { - t.Fatal(err) - } - f.Close() - - certs, err := LoadCertificateBundle(caFile) - if err != nil { - t.Fatal(err) - } - if len(certs) != 2 { - t.Fatalf("Wrong number of certs received, expected: %d, received %d", 2, len(certs)) - } - - pool, err := LoadCertificatePool(caFile) - if err != nil { - t.Fatal(err) - } - - if len(pool.Subjects()) != 2 { - t.Fatalf("Invalid certificate pool") - } -} diff --git a/vendor/github.com/docker/libtrust/doc.go b/vendor/github.com/docker/libtrust/doc.go deleted file mode 100644 index ec5d2159..00000000 --- a/vendor/github.com/docker/libtrust/doc.go +++ /dev/null @@ -1,9 +0,0 @@ -/* -Package libtrust provides an interface for managing authentication and -authorization using public key cryptography. Authentication is handled -using the identity attached to the public key and verified through TLS -x509 certificates, a key challenge, or signature. Authorization and -access control is managed through a trust graph distributed between -both remote trust servers and locally cached and managed data. -*/ -package libtrust diff --git a/vendor/github.com/docker/libtrust/ec_key.go b/vendor/github.com/docker/libtrust/ec_key.go deleted file mode 100644 index 00bbe4b3..00000000 --- a/vendor/github.com/docker/libtrust/ec_key.go +++ /dev/null @@ -1,428 +0,0 @@ -package libtrust - -import ( - "crypto" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/x509" - "encoding/json" - "encoding/pem" - "errors" - "fmt" - "io" - "math/big" -) - -/* - * EC DSA PUBLIC KEY - */ - -// ecPublicKey implements a libtrust.PublicKey using elliptic curve digital -// signature algorithms. -type ecPublicKey struct { - *ecdsa.PublicKey - curveName string - signatureAlgorithm *signatureAlgorithm - extended map[string]interface{} -} - -func fromECPublicKey(cryptoPublicKey *ecdsa.PublicKey) (*ecPublicKey, error) { - curve := cryptoPublicKey.Curve - - switch { - case curve == elliptic.P256(): - return &ecPublicKey{cryptoPublicKey, "P-256", es256, map[string]interface{}{}}, nil - case curve == elliptic.P384(): - return &ecPublicKey{cryptoPublicKey, "P-384", es384, map[string]interface{}{}}, nil - case curve == elliptic.P521(): - return &ecPublicKey{cryptoPublicKey, "P-521", es512, map[string]interface{}{}}, nil - default: - return nil, errors.New("unsupported elliptic curve") - } -} - -// KeyType returns the key type for elliptic curve keys, i.e., "EC". -func (k *ecPublicKey) KeyType() string { - return "EC" -} - -// CurveName returns the elliptic curve identifier. -// Possible values are "P-256", "P-384", and "P-521". -func (k *ecPublicKey) CurveName() string { - return k.curveName -} - -// KeyID returns a distinct identifier which is unique to this Public Key. -func (k *ecPublicKey) KeyID() string { - return keyIDFromCryptoKey(k) -} - -func (k *ecPublicKey) String() string { - return fmt.Sprintf("EC Public Key <%s>", k.KeyID()) -} - -// Verify verifyies the signature of the data in the io.Reader using this -// PublicKey. The alg parameter should identify the digital signature -// algorithm which was used to produce the signature and should be supported -// by this public key. Returns a nil error if the signature is valid. -func (k *ecPublicKey) Verify(data io.Reader, alg string, signature []byte) error { - // For EC keys there is only one supported signature algorithm depending - // on the curve parameters. 
- if k.signatureAlgorithm.HeaderParam() != alg { - return fmt.Errorf("unable to verify signature: EC Public Key with curve %q does not support signature algorithm %q", k.curveName, alg) - } - - // signature is the concatenation of (r, s), base64Url encoded. - sigLength := len(signature) - expectedOctetLength := 2 * ((k.Params().BitSize + 7) >> 3) - if sigLength != expectedOctetLength { - return fmt.Errorf("signature length is %d octets long, should be %d", sigLength, expectedOctetLength) - } - - rBytes, sBytes := signature[:sigLength/2], signature[sigLength/2:] - r := new(big.Int).SetBytes(rBytes) - s := new(big.Int).SetBytes(sBytes) - - hasher := k.signatureAlgorithm.HashID().New() - _, err := io.Copy(hasher, data) - if err != nil { - return fmt.Errorf("error reading data to sign: %s", err) - } - hash := hasher.Sum(nil) - - if !ecdsa.Verify(k.PublicKey, hash, r, s) { - return errors.New("invalid signature") - } - - return nil -} - -// CryptoPublicKey returns the internal object which can be used as a -// crypto.PublicKey for use with other standard library operations. The type -// is either *rsa.PublicKey or *ecdsa.PublicKey -func (k *ecPublicKey) CryptoPublicKey() crypto.PublicKey { - return k.PublicKey -} - -func (k *ecPublicKey) toMap() map[string]interface{} { - jwk := make(map[string]interface{}) - for k, v := range k.extended { - jwk[k] = v - } - jwk["kty"] = k.KeyType() - jwk["kid"] = k.KeyID() - jwk["crv"] = k.CurveName() - - xBytes := k.X.Bytes() - yBytes := k.Y.Bytes() - octetLength := (k.Params().BitSize + 7) >> 3 - // MUST include leading zeros in the output so that x, y are each - // *octetLength* bytes long. - xBuf := make([]byte, octetLength-len(xBytes), octetLength) - yBuf := make([]byte, octetLength-len(yBytes), octetLength) - xBuf = append(xBuf, xBytes...) - yBuf = append(yBuf, yBytes...) - - jwk["x"] = joseBase64UrlEncode(xBuf) - jwk["y"] = joseBase64UrlEncode(yBuf) - - return jwk -} - -// MarshalJSON serializes this Public Key using the JWK JSON serialization format for -// elliptic curve keys. -func (k *ecPublicKey) MarshalJSON() (data []byte, err error) { - return json.Marshal(k.toMap()) -} - -// PEMBlock serializes this Public Key to DER-encoded PKIX format. -func (k *ecPublicKey) PEMBlock() (*pem.Block, error) { - derBytes, err := x509.MarshalPKIXPublicKey(k.PublicKey) - if err != nil { - return nil, fmt.Errorf("unable to serialize EC PublicKey to DER-encoded PKIX format: %s", err) - } - k.extended["kid"] = k.KeyID() // For display purposes. - return createPemBlock("PUBLIC KEY", derBytes, k.extended) -} - -func (k *ecPublicKey) AddExtendedField(field string, value interface{}) { - k.extended[field] = value -} - -func (k *ecPublicKey) GetExtendedField(field string) interface{} { - v, ok := k.extended[field] - if !ok { - return nil - } - return v -} - -func ecPublicKeyFromMap(jwk map[string]interface{}) (*ecPublicKey, error) { - // JWK key type (kty) has already been determined to be "EC". - // Need to extract 'crv', 'x', 'y', and 'kid' and check for - // consistency. - - // Get the curve identifier value. 
- crv, err := stringFromMap(jwk, "crv") - if err != nil { - return nil, fmt.Errorf("JWK EC Public Key curve identifier: %s", err) - } - - var ( - curve elliptic.Curve - sigAlg *signatureAlgorithm - ) - - switch { - case crv == "P-256": - curve = elliptic.P256() - sigAlg = es256 - case crv == "P-384": - curve = elliptic.P384() - sigAlg = es384 - case crv == "P-521": - curve = elliptic.P521() - sigAlg = es512 - default: - return nil, fmt.Errorf("JWK EC Public Key curve identifier not supported: %q\n", crv) - } - - // Get the X and Y coordinates for the public key point. - xB64Url, err := stringFromMap(jwk, "x") - if err != nil { - return nil, fmt.Errorf("JWK EC Public Key x-coordinate: %s", err) - } - x, err := parseECCoordinate(xB64Url, curve) - if err != nil { - return nil, fmt.Errorf("JWK EC Public Key x-coordinate: %s", err) - } - - yB64Url, err := stringFromMap(jwk, "y") - if err != nil { - return nil, fmt.Errorf("JWK EC Public Key y-coordinate: %s", err) - } - y, err := parseECCoordinate(yB64Url, curve) - if err != nil { - return nil, fmt.Errorf("JWK EC Public Key y-coordinate: %s", err) - } - - key := &ecPublicKey{ - PublicKey: &ecdsa.PublicKey{Curve: curve, X: x, Y: y}, - curveName: crv, signatureAlgorithm: sigAlg, - } - - // Key ID is optional too, but if it exists, it should match the key. - _, ok := jwk["kid"] - if ok { - kid, err := stringFromMap(jwk, "kid") - if err != nil { - return nil, fmt.Errorf("JWK EC Public Key ID: %s", err) - } - if kid != key.KeyID() { - return nil, fmt.Errorf("JWK EC Public Key ID does not match: %s", kid) - } - } - - key.extended = jwk - - return key, nil -} - -/* - * EC DSA PRIVATE KEY - */ - -// ecPrivateKey implements a JWK Private Key using elliptic curve digital signature -// algorithms. -type ecPrivateKey struct { - ecPublicKey - *ecdsa.PrivateKey -} - -func fromECPrivateKey(cryptoPrivateKey *ecdsa.PrivateKey) (*ecPrivateKey, error) { - publicKey, err := fromECPublicKey(&cryptoPrivateKey.PublicKey) - if err != nil { - return nil, err - } - - return &ecPrivateKey{*publicKey, cryptoPrivateKey}, nil -} - -// PublicKey returns the Public Key data associated with this Private Key. -func (k *ecPrivateKey) PublicKey() PublicKey { - return &k.ecPublicKey -} - -func (k *ecPrivateKey) String() string { - return fmt.Sprintf("EC Private Key <%s>", k.KeyID()) -} - -// Sign signs the data read from the io.Reader using a signature algorithm supported -// by the elliptic curve private key. If the specified hashing algorithm is -// supported by this key, that hash function is used to generate the signature -// otherwise the the default hashing algorithm for this key is used. Returns -// the signature and the name of the JWK signature algorithm used, e.g., -// "ES256", "ES384", "ES512". -func (k *ecPrivateKey) Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) { - // Generate a signature of the data using the internal alg. - // The given hashId is only a suggestion, and since EC keys only support - // on signature/hash algorithm given the curve name, we disregard it for - // the elliptic curve JWK signature implementation. 
- hasher := k.signatureAlgorithm.HashID().New() - _, err = io.Copy(hasher, data) - if err != nil { - return nil, "", fmt.Errorf("error reading data to sign: %s", err) - } - hash := hasher.Sum(nil) - - r, s, err := ecdsa.Sign(rand.Reader, k.PrivateKey, hash) - if err != nil { - return nil, "", fmt.Errorf("error producing signature: %s", err) - } - rBytes, sBytes := r.Bytes(), s.Bytes() - octetLength := (k.ecPublicKey.Params().BitSize + 7) >> 3 - // MUST include leading zeros in the output - rBuf := make([]byte, octetLength-len(rBytes), octetLength) - sBuf := make([]byte, octetLength-len(sBytes), octetLength) - - rBuf = append(rBuf, rBytes...) - sBuf = append(sBuf, sBytes...) - - signature = append(rBuf, sBuf...) - alg = k.signatureAlgorithm.HeaderParam() - - return -} - -// CryptoPrivateKey returns the internal object which can be used as a -// crypto.PublicKey for use with other standard library operations. The type -// is either *rsa.PublicKey or *ecdsa.PublicKey -func (k *ecPrivateKey) CryptoPrivateKey() crypto.PrivateKey { - return k.PrivateKey -} - -func (k *ecPrivateKey) toMap() map[string]interface{} { - jwk := k.ecPublicKey.toMap() - - dBytes := k.D.Bytes() - // The length of this octet string MUST be ceiling(log-base-2(n)/8) - // octets (where n is the order of the curve). This is because the private - // key d must be in the interval [1, n-1] so the bitlength of d should be - // no larger than the bitlength of n-1. The easiest way to find the octet - // length is to take bitlength(n-1), add 7 to force a carry, and shift this - // bit sequence right by 3, which is essentially dividing by 8 and adding - // 1 if there is any remainder. Thus, the private key value d should be - // output to (bitlength(n-1)+7)>>3 octets. - n := k.ecPublicKey.Params().N - octetLength := (new(big.Int).Sub(n, big.NewInt(1)).BitLen() + 7) >> 3 - // Create a buffer with the necessary zero-padding. - dBuf := make([]byte, octetLength-len(dBytes), octetLength) - dBuf = append(dBuf, dBytes...) - - jwk["d"] = joseBase64UrlEncode(dBuf) - - return jwk -} - -// MarshalJSON serializes this Private Key using the JWK JSON serialization format for -// elliptic curve keys. -func (k *ecPrivateKey) MarshalJSON() (data []byte, err error) { - return json.Marshal(k.toMap()) -} - -// PEMBlock serializes this Private Key to DER-encoded PKIX format. -func (k *ecPrivateKey) PEMBlock() (*pem.Block, error) { - derBytes, err := x509.MarshalECPrivateKey(k.PrivateKey) - if err != nil { - return nil, fmt.Errorf("unable to serialize EC PrivateKey to DER-encoded PKIX format: %s", err) - } - k.extended["keyID"] = k.KeyID() // For display purposes. - return createPemBlock("EC PRIVATE KEY", derBytes, k.extended) -} - -func ecPrivateKeyFromMap(jwk map[string]interface{}) (*ecPrivateKey, error) { - dB64Url, err := stringFromMap(jwk, "d") - if err != nil { - return nil, fmt.Errorf("JWK EC Private Key: %s", err) - } - - // JWK key type (kty) has already been determined to be "EC". - // Need to extract the public key information, then extract the private - // key value 'd'. - publicKey, err := ecPublicKeyFromMap(jwk) - if err != nil { - return nil, err - } - - d, err := parseECPrivateParam(dB64Url, publicKey.Curve) - if err != nil { - return nil, fmt.Errorf("JWK EC Private Key d-param: %s", err) - } - - key := &ecPrivateKey{ - ecPublicKey: *publicKey, - PrivateKey: &ecdsa.PrivateKey{ - PublicKey: *publicKey.PublicKey, - D: d, - }, - } - - return key, nil -} - -/* - * Key Generation Functions. 
- */ - -func generateECPrivateKey(curve elliptic.Curve) (k *ecPrivateKey, err error) { - k = new(ecPrivateKey) - k.PrivateKey, err = ecdsa.GenerateKey(curve, rand.Reader) - if err != nil { - return nil, err - } - - k.ecPublicKey.PublicKey = &k.PrivateKey.PublicKey - k.extended = make(map[string]interface{}) - - return -} - -// GenerateECP256PrivateKey generates a key pair using elliptic curve P-256. -func GenerateECP256PrivateKey() (PrivateKey, error) { - k, err := generateECPrivateKey(elliptic.P256()) - if err != nil { - return nil, fmt.Errorf("error generating EC P-256 key: %s", err) - } - - k.curveName = "P-256" - k.signatureAlgorithm = es256 - - return k, nil -} - -// GenerateECP384PrivateKey generates a key pair using elliptic curve P-384. -func GenerateECP384PrivateKey() (PrivateKey, error) { - k, err := generateECPrivateKey(elliptic.P384()) - if err != nil { - return nil, fmt.Errorf("error generating EC P-384 key: %s", err) - } - - k.curveName = "P-384" - k.signatureAlgorithm = es384 - - return k, nil -} - -// GenerateECP521PrivateKey generates aß key pair using elliptic curve P-521. -func GenerateECP521PrivateKey() (PrivateKey, error) { - k, err := generateECPrivateKey(elliptic.P521()) - if err != nil { - return nil, fmt.Errorf("error generating EC P-521 key: %s", err) - } - - k.curveName = "P-521" - k.signatureAlgorithm = es512 - - return k, nil -} diff --git a/vendor/github.com/docker/libtrust/ec_key_test.go b/vendor/github.com/docker/libtrust/ec_key_test.go deleted file mode 100644 index 26ac3814..00000000 --- a/vendor/github.com/docker/libtrust/ec_key_test.go +++ /dev/null @@ -1,157 +0,0 @@ -package libtrust - -import ( - "bytes" - "encoding/json" - "testing" -) - -func generateECTestKeys(t *testing.T) []PrivateKey { - p256Key, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatal(err) - } - - p384Key, err := GenerateECP384PrivateKey() - if err != nil { - t.Fatal(err) - } - - p521Key, err := GenerateECP521PrivateKey() - if err != nil { - t.Fatal(err) - } - - return []PrivateKey{p256Key, p384Key, p521Key} -} - -func TestECKeys(t *testing.T) { - ecKeys := generateECTestKeys(t) - - for _, ecKey := range ecKeys { - if ecKey.KeyType() != "EC" { - t.Fatalf("key type must be %q, instead got %q", "EC", ecKey.KeyType()) - } - } -} - -func TestECSignVerify(t *testing.T) { - ecKeys := generateECTestKeys(t) - - message := "Hello, World!" - data := bytes.NewReader([]byte(message)) - - sigAlgs := []*signatureAlgorithm{es256, es384, es512} - - for i, ecKey := range ecKeys { - sigAlg := sigAlgs[i] - - t.Logf("%s signature of %q with kid: %s\n", sigAlg.HeaderParam(), message, ecKey.KeyID()) - - data.Seek(0, 0) // Reset the byte reader - - // Sign - sig, alg, err := ecKey.Sign(data, sigAlg.HashID()) - if err != nil { - t.Fatal(err) - } - - data.Seek(0, 0) // Reset the byte reader - - // Verify - err = ecKey.Verify(data, alg, sig) - if err != nil { - t.Fatal(err) - } - } -} - -func TestMarshalUnmarshalECKeys(t *testing.T) { - ecKeys := generateECTestKeys(t) - data := bytes.NewReader([]byte("This is a test. 
I repeat: this is only a test.")) - sigAlgs := []*signatureAlgorithm{es256, es384, es512} - - for i, ecKey := range ecKeys { - sigAlg := sigAlgs[i] - privateJWKJSON, err := json.MarshalIndent(ecKey, "", " ") - if err != nil { - t.Fatal(err) - } - - publicJWKJSON, err := json.MarshalIndent(ecKey.PublicKey(), "", " ") - if err != nil { - t.Fatal(err) - } - - t.Logf("JWK Private Key: %s", string(privateJWKJSON)) - t.Logf("JWK Public Key: %s", string(publicJWKJSON)) - - privKey2, err := UnmarshalPrivateKeyJWK(privateJWKJSON) - if err != nil { - t.Fatal(err) - } - - pubKey2, err := UnmarshalPublicKeyJWK(publicJWKJSON) - if err != nil { - t.Fatal(err) - } - - // Ensure we can sign/verify a message with the unmarshalled keys. - data.Seek(0, 0) // Reset the byte reader - signature, alg, err := privKey2.Sign(data, sigAlg.HashID()) - if err != nil { - t.Fatal(err) - } - - data.Seek(0, 0) // Reset the byte reader - err = pubKey2.Verify(data, alg, signature) - if err != nil { - t.Fatal(err) - } - } -} - -func TestFromCryptoECKeys(t *testing.T) { - ecKeys := generateECTestKeys(t) - - for _, ecKey := range ecKeys { - cryptoPrivateKey := ecKey.CryptoPrivateKey() - cryptoPublicKey := ecKey.CryptoPublicKey() - - pubKey, err := FromCryptoPublicKey(cryptoPublicKey) - if err != nil { - t.Fatal(err) - } - - if pubKey.KeyID() != ecKey.KeyID() { - t.Fatal("public key key ID mismatch") - } - - privKey, err := FromCryptoPrivateKey(cryptoPrivateKey) - if err != nil { - t.Fatal(err) - } - - if privKey.KeyID() != ecKey.KeyID() { - t.Fatal("public key key ID mismatch") - } - } -} - -func TestExtendedFields(t *testing.T) { - key, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatal(err) - } - - key.AddExtendedField("test", "foobar") - val := key.GetExtendedField("test") - - gotVal, ok := val.(string) - if !ok { - t.Fatalf("value is not a string") - } else if gotVal != val { - t.Fatalf("value %q is not equal to %q", gotVal, val) - } - -} diff --git a/vendor/github.com/docker/libtrust/filter.go b/vendor/github.com/docker/libtrust/filter.go deleted file mode 100644 index 5b2b4fca..00000000 --- a/vendor/github.com/docker/libtrust/filter.go +++ /dev/null @@ -1,50 +0,0 @@ -package libtrust - -import ( - "path/filepath" -) - -// FilterByHosts filters the list of PublicKeys to only those which contain a -// 'hosts' pattern which matches the given host. If *includeEmpty* is true, -// then keys which do not specify any hosts are also returned. 
-func FilterByHosts(keys []PublicKey, host string, includeEmpty bool) ([]PublicKey, error) { - filtered := make([]PublicKey, 0, len(keys)) - - for _, pubKey := range keys { - var hosts []string - switch v := pubKey.GetExtendedField("hosts").(type) { - case []string: - hosts = v - case []interface{}: - for _, value := range v { - h, ok := value.(string) - if !ok { - continue - } - hosts = append(hosts, h) - } - } - - if len(hosts) == 0 { - if includeEmpty { - filtered = append(filtered, pubKey) - } - continue - } - - // Check if any hosts match pattern - for _, hostPattern := range hosts { - match, err := filepath.Match(hostPattern, host) - if err != nil { - return nil, err - } - - if match { - filtered = append(filtered, pubKey) - continue - } - } - } - - return filtered, nil -} diff --git a/vendor/github.com/docker/libtrust/filter_test.go b/vendor/github.com/docker/libtrust/filter_test.go deleted file mode 100644 index 997e554c..00000000 --- a/vendor/github.com/docker/libtrust/filter_test.go +++ /dev/null @@ -1,81 +0,0 @@ -package libtrust - -import ( - "testing" -) - -func compareKeySlices(t *testing.T, sliceA, sliceB []PublicKey) { - if len(sliceA) != len(sliceB) { - t.Fatalf("slice size %d, expected %d", len(sliceA), len(sliceB)) - } - - for i, itemA := range sliceA { - itemB := sliceB[i] - if itemA != itemB { - t.Fatalf("slice index %d not equal: %#v != %#v", i, itemA, itemB) - } - } -} - -func TestFilter(t *testing.T) { - keys := make([]PublicKey, 0, 8) - - // Create 8 keys and add host entries. - for i := 0; i < cap(keys); i++ { - key, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatal(err) - } - - // we use both []interface{} and []string here because jwt uses - // []interface{} format, while PEM uses []string - switch { - case i == 0: - // Don't add entries for this key, key 0. - break - case i%2 == 0: - // Should catch keys 2, 4, and 6. - key.AddExtendedField("hosts", []interface{}{"*.even.example.com"}) - case i == 7: - // Should catch only the last key, and make it match any hostname. - key.AddExtendedField("hosts", []string{"*"}) - default: - // should catch keys 1, 3, 5. - key.AddExtendedField("hosts", []string{"*.example.com"}) - } - - keys = append(keys, key) - } - - // Should match 2 keys, the empty one, and the one that matches all hosts. - matchedKeys, err := FilterByHosts(keys, "foo.bar.com", true) - if err != nil { - t.Fatal(err) - } - expectedMatch := []PublicKey{keys[0], keys[7]} - compareKeySlices(t, expectedMatch, matchedKeys) - - // Should match 1 key, the one that matches any host. - matchedKeys, err = FilterByHosts(keys, "foo.bar.com", false) - if err != nil { - t.Fatal(err) - } - expectedMatch = []PublicKey{keys[7]} - compareKeySlices(t, expectedMatch, matchedKeys) - - // Should match keys that end in "example.com", and the key that matches anything. - matchedKeys, err = FilterByHosts(keys, "foo.example.com", false) - if err != nil { - t.Fatal(err) - } - expectedMatch = []PublicKey{keys[1], keys[3], keys[5], keys[7]} - compareKeySlices(t, expectedMatch, matchedKeys) - - // Should match all of the keys except the empty key. 
- matchedKeys, err = FilterByHosts(keys, "foo.even.example.com", false) - if err != nil { - t.Fatal(err) - } - expectedMatch = keys[1:] - compareKeySlices(t, expectedMatch, matchedKeys) -} diff --git a/vendor/github.com/docker/libtrust/hash.go b/vendor/github.com/docker/libtrust/hash.go deleted file mode 100644 index a2df787d..00000000 --- a/vendor/github.com/docker/libtrust/hash.go +++ /dev/null @@ -1,56 +0,0 @@ -package libtrust - -import ( - "crypto" - _ "crypto/sha256" // Registrer SHA224 and SHA256 - _ "crypto/sha512" // Registrer SHA384 and SHA512 - "fmt" -) - -type signatureAlgorithm struct { - algHeaderParam string - hashID crypto.Hash -} - -func (h *signatureAlgorithm) HeaderParam() string { - return h.algHeaderParam -} - -func (h *signatureAlgorithm) HashID() crypto.Hash { - return h.hashID -} - -var ( - rs256 = &signatureAlgorithm{"RS256", crypto.SHA256} - rs384 = &signatureAlgorithm{"RS384", crypto.SHA384} - rs512 = &signatureAlgorithm{"RS512", crypto.SHA512} - es256 = &signatureAlgorithm{"ES256", crypto.SHA256} - es384 = &signatureAlgorithm{"ES384", crypto.SHA384} - es512 = &signatureAlgorithm{"ES512", crypto.SHA512} -) - -func rsaSignatureAlgorithmByName(alg string) (*signatureAlgorithm, error) { - switch { - case alg == "RS256": - return rs256, nil - case alg == "RS384": - return rs384, nil - case alg == "RS512": - return rs512, nil - default: - return nil, fmt.Errorf("RSA Digital Signature Algorithm %q not supported", alg) - } -} - -func rsaPKCS1v15SignatureAlgorithmForHashID(hashID crypto.Hash) *signatureAlgorithm { - switch { - case hashID == crypto.SHA512: - return rs512 - case hashID == crypto.SHA384: - return rs384 - case hashID == crypto.SHA256: - fallthrough - default: - return rs256 - } -} diff --git a/vendor/github.com/docker/libtrust/jsonsign.go b/vendor/github.com/docker/libtrust/jsonsign.go deleted file mode 100644 index cb2ca9a7..00000000 --- a/vendor/github.com/docker/libtrust/jsonsign.go +++ /dev/null @@ -1,657 +0,0 @@ -package libtrust - -import ( - "bytes" - "crypto" - "crypto/x509" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "sort" - "time" - "unicode" -) - -var ( - // ErrInvalidSignContent is used when the content to be signed is invalid. - ErrInvalidSignContent = errors.New("invalid sign content") - - // ErrInvalidJSONContent is used when invalid json is encountered. - ErrInvalidJSONContent = errors.New("invalid json content") - - // ErrMissingSignatureKey is used when the specified signature key - // does not exist in the JSON content. - ErrMissingSignatureKey = errors.New("missing signature key") -) - -type jsHeader struct { - JWK PublicKey `json:"jwk,omitempty"` - Algorithm string `json:"alg"` - Chain []string `json:"x5c,omitempty"` -} - -type jsSignature struct { - Header jsHeader `json:"header"` - Signature string `json:"signature"` - Protected string `json:"protected,omitempty"` -} - -type jsSignaturesSorted []jsSignature - -func (jsbkid jsSignaturesSorted) Swap(i, j int) { jsbkid[i], jsbkid[j] = jsbkid[j], jsbkid[i] } -func (jsbkid jsSignaturesSorted) Len() int { return len(jsbkid) } - -func (jsbkid jsSignaturesSorted) Less(i, j int) bool { - ki, kj := jsbkid[i].Header.JWK.KeyID(), jsbkid[j].Header.JWK.KeyID() - si, sj := jsbkid[i].Signature, jsbkid[j].Signature - - if ki == kj { - return si < sj - } - - return ki < kj -} - -type signKey struct { - PrivateKey - Chain []*x509.Certificate -} - -// JSONSignature represents a signature of a json object. 
-type JSONSignature struct { - payload string - signatures []jsSignature - indent string - formatLength int - formatTail []byte -} - -func newJSONSignature() *JSONSignature { - return &JSONSignature{ - signatures: make([]jsSignature, 0, 1), - } -} - -// Payload returns the encoded payload of the signature. This -// payload should not be signed directly -func (js *JSONSignature) Payload() ([]byte, error) { - return joseBase64UrlDecode(js.payload) -} - -func (js *JSONSignature) protectedHeader() (string, error) { - protected := map[string]interface{}{ - "formatLength": js.formatLength, - "formatTail": joseBase64UrlEncode(js.formatTail), - "time": time.Now().UTC().Format(time.RFC3339), - } - protectedBytes, err := json.Marshal(protected) - if err != nil { - return "", err - } - - return joseBase64UrlEncode(protectedBytes), nil -} - -func (js *JSONSignature) signBytes(protectedHeader string) ([]byte, error) { - buf := make([]byte, len(js.payload)+len(protectedHeader)+1) - copy(buf, protectedHeader) - buf[len(protectedHeader)] = '.' - copy(buf[len(protectedHeader)+1:], js.payload) - return buf, nil -} - -// Sign adds a signature using the given private key. -func (js *JSONSignature) Sign(key PrivateKey) error { - protected, err := js.protectedHeader() - if err != nil { - return err - } - signBytes, err := js.signBytes(protected) - if err != nil { - return err - } - sigBytes, algorithm, err := key.Sign(bytes.NewReader(signBytes), crypto.SHA256) - if err != nil { - return err - } - - js.signatures = append(js.signatures, jsSignature{ - Header: jsHeader{ - JWK: key.PublicKey(), - Algorithm: algorithm, - }, - Signature: joseBase64UrlEncode(sigBytes), - Protected: protected, - }) - - return nil -} - -// SignWithChain adds a signature using the given private key -// and setting the x509 chain. The public key of the first element -// in the chain must be the public key corresponding with the sign key. -func (js *JSONSignature) SignWithChain(key PrivateKey, chain []*x509.Certificate) error { - // Ensure key.Chain[0] is public key for key - //key.Chain.PublicKey - //key.PublicKey().CryptoPublicKey() - - // Verify chain - protected, err := js.protectedHeader() - if err != nil { - return err - } - signBytes, err := js.signBytes(protected) - if err != nil { - return err - } - sigBytes, algorithm, err := key.Sign(bytes.NewReader(signBytes), crypto.SHA256) - if err != nil { - return err - } - - header := jsHeader{ - Chain: make([]string, len(chain)), - Algorithm: algorithm, - } - - for i, cert := range chain { - header.Chain[i] = base64.StdEncoding.EncodeToString(cert.Raw) - } - - js.signatures = append(js.signatures, jsSignature{ - Header: header, - Signature: joseBase64UrlEncode(sigBytes), - Protected: protected, - }) - - return nil -} - -// Verify verifies all the signatures and returns the list of -// public keys used to sign. Any x509 chains are not checked. 
-func (js *JSONSignature) Verify() ([]PublicKey, error) { - keys := make([]PublicKey, len(js.signatures)) - for i, signature := range js.signatures { - signBytes, err := js.signBytes(signature.Protected) - if err != nil { - return nil, err - } - var publicKey PublicKey - if len(signature.Header.Chain) > 0 { - certBytes, err := base64.StdEncoding.DecodeString(signature.Header.Chain[0]) - if err != nil { - return nil, err - } - cert, err := x509.ParseCertificate(certBytes) - if err != nil { - return nil, err - } - publicKey, err = FromCryptoPublicKey(cert.PublicKey) - if err != nil { - return nil, err - } - } else if signature.Header.JWK != nil { - publicKey = signature.Header.JWK - } else { - return nil, errors.New("missing public key") - } - - sigBytes, err := joseBase64UrlDecode(signature.Signature) - if err != nil { - return nil, err - } - - err = publicKey.Verify(bytes.NewReader(signBytes), signature.Header.Algorithm, sigBytes) - if err != nil { - return nil, err - } - - keys[i] = publicKey - } - return keys, nil -} - -// VerifyChains verifies all the signatures and the chains associated -// with each signature and returns the list of verified chains. -// Signatures without an x509 chain are not checked. -func (js *JSONSignature) VerifyChains(ca *x509.CertPool) ([][]*x509.Certificate, error) { - chains := make([][]*x509.Certificate, 0, len(js.signatures)) - for _, signature := range js.signatures { - signBytes, err := js.signBytes(signature.Protected) - if err != nil { - return nil, err - } - var publicKey PublicKey - if len(signature.Header.Chain) > 0 { - certBytes, err := base64.StdEncoding.DecodeString(signature.Header.Chain[0]) - if err != nil { - return nil, err - } - cert, err := x509.ParseCertificate(certBytes) - if err != nil { - return nil, err - } - publicKey, err = FromCryptoPublicKey(cert.PublicKey) - if err != nil { - return nil, err - } - intermediates := x509.NewCertPool() - if len(signature.Header.Chain) > 1 { - intermediateChain := signature.Header.Chain[1:] - for i := range intermediateChain { - certBytes, err := base64.StdEncoding.DecodeString(intermediateChain[i]) - if err != nil { - return nil, err - } - intermediate, err := x509.ParseCertificate(certBytes) - if err != nil { - return nil, err - } - intermediates.AddCert(intermediate) - } - } - - verifyOptions := x509.VerifyOptions{ - Intermediates: intermediates, - Roots: ca, - } - - verifiedChains, err := cert.Verify(verifyOptions) - if err != nil { - return nil, err - } - chains = append(chains, verifiedChains...) 
- - sigBytes, err := joseBase64UrlDecode(signature.Signature) - if err != nil { - return nil, err - } - - err = publicKey.Verify(bytes.NewReader(signBytes), signature.Header.Algorithm, sigBytes) - if err != nil { - return nil, err - } - } - - } - return chains, nil -} - -// JWS returns JSON serialized JWS according to -// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-7.2 -func (js *JSONSignature) JWS() ([]byte, error) { - if len(js.signatures) == 0 { - return nil, errors.New("missing signature") - } - - sort.Sort(jsSignaturesSorted(js.signatures)) - - jsonMap := map[string]interface{}{ - "payload": js.payload, - "signatures": js.signatures, - } - - return json.MarshalIndent(jsonMap, "", " ") -} - -func notSpace(r rune) bool { - return !unicode.IsSpace(r) -} - -func detectJSONIndent(jsonContent []byte) (indent string) { - if len(jsonContent) > 2 && jsonContent[0] == '{' && jsonContent[1] == '\n' { - quoteIndex := bytes.IndexRune(jsonContent[1:], '"') - if quoteIndex > 0 { - indent = string(jsonContent[2 : quoteIndex+1]) - } - } - return -} - -type jsParsedHeader struct { - JWK json.RawMessage `json:"jwk"` - Algorithm string `json:"alg"` - Chain []string `json:"x5c"` -} - -type jsParsedSignature struct { - Header jsParsedHeader `json:"header"` - Signature string `json:"signature"` - Protected string `json:"protected"` -} - -// ParseJWS parses a JWS serialized JSON object into a Json Signature. -func ParseJWS(content []byte) (*JSONSignature, error) { - type jsParsed struct { - Payload string `json:"payload"` - Signatures []jsParsedSignature `json:"signatures"` - } - parsed := &jsParsed{} - err := json.Unmarshal(content, parsed) - if err != nil { - return nil, err - } - if len(parsed.Signatures) == 0 { - return nil, errors.New("missing signatures") - } - payload, err := joseBase64UrlDecode(parsed.Payload) - if err != nil { - return nil, err - } - - js, err := NewJSONSignature(payload) - if err != nil { - return nil, err - } - js.signatures = make([]jsSignature, len(parsed.Signatures)) - for i, signature := range parsed.Signatures { - header := jsHeader{ - Algorithm: signature.Header.Algorithm, - } - if signature.Header.Chain != nil { - header.Chain = signature.Header.Chain - } - if signature.Header.JWK != nil { - publicKey, err := UnmarshalPublicKeyJWK([]byte(signature.Header.JWK)) - if err != nil { - return nil, err - } - header.JWK = publicKey - } - js.signatures[i] = jsSignature{ - Header: header, - Signature: signature.Signature, - Protected: signature.Protected, - } - } - - return js, nil -} - -// NewJSONSignature returns a new unsigned JWS from a json byte array. -// JSONSignature will need to be signed before serializing or storing. -// Optionally, one or more signatures can be provided as byte buffers, -// containing serialized JWS signatures, to assemble a fully signed JWS -// package. It is the callers responsibility to ensure uniqueness of the -// provided signatures. 
-func NewJSONSignature(content []byte, signatures ...[]byte) (*JSONSignature, error) { - var dataMap map[string]interface{} - err := json.Unmarshal(content, &dataMap) - if err != nil { - return nil, err - } - - js := newJSONSignature() - js.indent = detectJSONIndent(content) - - js.payload = joseBase64UrlEncode(content) - - // Find trailing } and whitespace, put in protected header - closeIndex := bytes.LastIndexFunc(content, notSpace) - if content[closeIndex] != '}' { - return nil, ErrInvalidJSONContent - } - lastRuneIndex := bytes.LastIndexFunc(content[:closeIndex], notSpace) - if content[lastRuneIndex] == ',' { - return nil, ErrInvalidJSONContent - } - js.formatLength = lastRuneIndex + 1 - js.formatTail = content[js.formatLength:] - - if len(signatures) > 0 { - for _, signature := range signatures { - var parsedJSig jsParsedSignature - - if err := json.Unmarshal(signature, &parsedJSig); err != nil { - return nil, err - } - - // TODO(stevvooe): A lot of the code below is repeated in - // ParseJWS. It will require more refactoring to fix that. - jsig := jsSignature{ - Header: jsHeader{ - Algorithm: parsedJSig.Header.Algorithm, - }, - Signature: parsedJSig.Signature, - Protected: parsedJSig.Protected, - } - - if parsedJSig.Header.Chain != nil { - jsig.Header.Chain = parsedJSig.Header.Chain - } - - if parsedJSig.Header.JWK != nil { - publicKey, err := UnmarshalPublicKeyJWK([]byte(parsedJSig.Header.JWK)) - if err != nil { - return nil, err - } - jsig.Header.JWK = publicKey - } - - js.signatures = append(js.signatures, jsig) - } - } - - return js, nil -} - -// NewJSONSignatureFromMap returns a new unsigned JSONSignature from a map or -// struct. JWS will need to be signed before serializing or storing. -func NewJSONSignatureFromMap(content interface{}) (*JSONSignature, error) { - switch content.(type) { - case map[string]interface{}: - case struct{}: - default: - return nil, errors.New("invalid data type") - } - - js := newJSONSignature() - js.indent = " " - - payload, err := json.MarshalIndent(content, "", js.indent) - if err != nil { - return nil, err - } - js.payload = joseBase64UrlEncode(payload) - - // Remove '\n}' from formatted section, put in protected header - js.formatLength = len(payload) - 2 - js.formatTail = payload[js.formatLength:] - - return js, nil -} - -func readIntFromMap(key string, m map[string]interface{}) (int, bool) { - value, ok := m[key] - if !ok { - return 0, false - } - switch v := value.(type) { - case int: - return v, true - case float64: - return int(v), true - default: - return 0, false - } -} - -func readStringFromMap(key string, m map[string]interface{}) (v string, ok bool) { - value, ok := m[key] - if !ok { - return "", false - } - v, ok = value.(string) - return -} - -// ParsePrettySignature parses a formatted signature into a -// JSON signature. If the signatures are missing the format information -// an error is thrown. The formatted signature must be created by -// the same method as format signature. 
-func ParsePrettySignature(content []byte, signatureKey string) (*JSONSignature, error) { - var contentMap map[string]json.RawMessage - err := json.Unmarshal(content, &contentMap) - if err != nil { - return nil, fmt.Errorf("error unmarshalling content: %s", err) - } - sigMessage, ok := contentMap[signatureKey] - if !ok { - return nil, ErrMissingSignatureKey - } - - var signatureBlocks []jsParsedSignature - err = json.Unmarshal([]byte(sigMessage), &signatureBlocks) - if err != nil { - return nil, fmt.Errorf("error unmarshalling signatures: %s", err) - } - - js := newJSONSignature() - js.signatures = make([]jsSignature, len(signatureBlocks)) - - for i, signatureBlock := range signatureBlocks { - protectedBytes, err := joseBase64UrlDecode(signatureBlock.Protected) - if err != nil { - return nil, fmt.Errorf("base64 decode error: %s", err) - } - var protectedHeader map[string]interface{} - err = json.Unmarshal(protectedBytes, &protectedHeader) - if err != nil { - return nil, fmt.Errorf("error unmarshalling protected header: %s", err) - } - - formatLength, ok := readIntFromMap("formatLength", protectedHeader) - if !ok { - return nil, errors.New("missing formatted length") - } - encodedTail, ok := readStringFromMap("formatTail", protectedHeader) - if !ok { - return nil, errors.New("missing formatted tail") - } - formatTail, err := joseBase64UrlDecode(encodedTail) - if err != nil { - return nil, fmt.Errorf("base64 decode error on tail: %s", err) - } - if js.formatLength == 0 { - js.formatLength = formatLength - } else if js.formatLength != formatLength { - return nil, errors.New("conflicting format length") - } - if len(js.formatTail) == 0 { - js.formatTail = formatTail - } else if bytes.Compare(js.formatTail, formatTail) != 0 { - return nil, errors.New("conflicting format tail") - } - - header := jsHeader{ - Algorithm: signatureBlock.Header.Algorithm, - Chain: signatureBlock.Header.Chain, - } - if signatureBlock.Header.JWK != nil { - publicKey, err := UnmarshalPublicKeyJWK([]byte(signatureBlock.Header.JWK)) - if err != nil { - return nil, fmt.Errorf("error unmarshalling public key: %s", err) - } - header.JWK = publicKey - } - js.signatures[i] = jsSignature{ - Header: header, - Signature: signatureBlock.Signature, - Protected: signatureBlock.Protected, - } - } - if js.formatLength > len(content) { - return nil, errors.New("invalid format length") - } - formatted := make([]byte, js.formatLength+len(js.formatTail)) - copy(formatted, content[:js.formatLength]) - copy(formatted[js.formatLength:], js.formatTail) - js.indent = detectJSONIndent(formatted) - js.payload = joseBase64UrlEncode(formatted) - - return js, nil -} - -// PrettySignature formats a json signature into an easy to read -// single json serialized object. 
-func (js *JSONSignature) PrettySignature(signatureKey string) ([]byte, error) { - if len(js.signatures) == 0 { - return nil, errors.New("no signatures") - } - payload, err := joseBase64UrlDecode(js.payload) - if err != nil { - return nil, err - } - payload = payload[:js.formatLength] - - sort.Sort(jsSignaturesSorted(js.signatures)) - - var marshalled []byte - var marshallErr error - if js.indent != "" { - marshalled, marshallErr = json.MarshalIndent(js.signatures, js.indent, js.indent) - } else { - marshalled, marshallErr = json.Marshal(js.signatures) - } - if marshallErr != nil { - return nil, marshallErr - } - - buf := bytes.NewBuffer(make([]byte, 0, len(payload)+len(marshalled)+34)) - buf.Write(payload) - buf.WriteByte(',') - if js.indent != "" { - buf.WriteByte('\n') - buf.WriteString(js.indent) - buf.WriteByte('"') - buf.WriteString(signatureKey) - buf.WriteString("\": ") - buf.Write(marshalled) - buf.WriteByte('\n') - } else { - buf.WriteByte('"') - buf.WriteString(signatureKey) - buf.WriteString("\":") - buf.Write(marshalled) - } - buf.WriteByte('}') - - return buf.Bytes(), nil -} - -// Signatures provides the signatures on this JWS as opaque blobs, sorted by -// keyID. These blobs can be stored and reassembled with payloads. Internally, -// they are simply marshaled json web signatures but implementations should -// not rely on this. -func (js *JSONSignature) Signatures() ([][]byte, error) { - sort.Sort(jsSignaturesSorted(js.signatures)) - - var sb [][]byte - for _, jsig := range js.signatures { - p, err := json.Marshal(jsig) - if err != nil { - return nil, err - } - - sb = append(sb, p) - } - - return sb, nil -} - -// Merge combines the signatures from one or more other signatures into the -// method receiver. If the payloads differ for any argument, an error will be -// returned and the receiver will not be modified. -func (js *JSONSignature) Merge(others ...*JSONSignature) error { - merged := js.signatures - for _, other := range others { - if js.payload != other.payload { - return fmt.Errorf("payloads differ from merge target") - } - merged = append(merged, other.signatures...) 
- } - - js.signatures = merged - return nil -} diff --git a/vendor/github.com/docker/libtrust/jsonsign_test.go b/vendor/github.com/docker/libtrust/jsonsign_test.go deleted file mode 100644 index b4f26979..00000000 --- a/vendor/github.com/docker/libtrust/jsonsign_test.go +++ /dev/null @@ -1,380 +0,0 @@ -package libtrust - -import ( - "bytes" - "crypto/rand" - "crypto/x509" - "encoding/json" - "fmt" - "io" - "testing" - - "github.com/docker/libtrust/testutil" -) - -func createTestJSON(sigKey string, indent string) (map[string]interface{}, []byte) { - testMap := map[string]interface{}{ - "name": "dmcgowan/mycontainer", - "config": map[string]interface{}{ - "ports": []int{9101, 9102}, - "run": "/bin/echo \"Hello\"", - }, - "layers": []string{ - "2893c080-27f5-11e4-8c21-0800200c9a66", - "c54bc25b-fbb2-497b-a899-a8bc1b5b9d55", - "4d5d7e03-f908-49f3-a7f6-9ba28dfe0fb4", - "0b6da891-7f7f-4abf-9c97-7887549e696c", - "1d960389-ae4f-4011-85fd-18d0f96a67ad", - }, - } - formattedSection := `{"config":{"ports":[9101,9102],"run":"/bin/echo \"Hello\""},"layers":["2893c080-27f5-11e4-8c21-0800200c9a66","c54bc25b-fbb2-497b-a899-a8bc1b5b9d55","4d5d7e03-f908-49f3-a7f6-9ba28dfe0fb4","0b6da891-7f7f-4abf-9c97-7887549e696c","1d960389-ae4f-4011-85fd-18d0f96a67ad"],"name":"dmcgowan/mycontainer","%s":[{"header":{` - formattedSection = fmt.Sprintf(formattedSection, sigKey) - if indent != "" { - buf := bytes.NewBuffer(nil) - json.Indent(buf, []byte(formattedSection), "", indent) - return testMap, buf.Bytes() - } - return testMap, []byte(formattedSection) - -} - -func TestSignJSON(t *testing.T) { - key, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatalf("Error generating EC key: %s", err) - } - - testMap, _ := createTestJSON("buildSignatures", " ") - indented, err := json.MarshalIndent(testMap, "", " ") - if err != nil { - t.Fatalf("Marshall error: %s", err) - } - - js, err := NewJSONSignature(indented) - if err != nil { - t.Fatalf("Error creating JSON signature: %s", err) - } - err = js.Sign(key) - if err != nil { - t.Fatalf("Error signing content: %s", err) - } - - keys, err := js.Verify() - if err != nil { - t.Fatalf("Error verifying signature: %s", err) - } - if len(keys) != 1 { - t.Fatalf("Error wrong number of keys returned") - } - if keys[0].KeyID() != key.KeyID() { - t.Fatalf("Unexpected public key returned") - } - -} - -func TestSignMap(t *testing.T) { - key, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatalf("Error generating EC key: %s", err) - } - - testMap, _ := createTestJSON("buildSignatures", " ") - js, err := NewJSONSignatureFromMap(testMap) - if err != nil { - t.Fatalf("Error creating JSON signature: %s", err) - } - err = js.Sign(key) - if err != nil { - t.Fatalf("Error signing JSON signature: %s", err) - } - - keys, err := js.Verify() - if err != nil { - t.Fatalf("Error verifying signature: %s", err) - } - if len(keys) != 1 { - t.Fatalf("Error wrong number of keys returned") - } - if keys[0].KeyID() != key.KeyID() { - t.Fatalf("Unexpected public key returned") - } -} - -func TestFormattedJson(t *testing.T) { - key, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatalf("Error generating EC key: %s", err) - } - - testMap, firstSection := createTestJSON("buildSignatures", " ") - indented, err := json.MarshalIndent(testMap, "", " ") - if err != nil { - t.Fatalf("Marshall error: %s", err) - } - - js, err := NewJSONSignature(indented) - if err != nil { - t.Fatalf("Error creating JSON signature: %s", err) - } - err = js.Sign(key) - if err != nil { - t.Fatalf("Error signing 
content: %s", err) - } - - b, err := js.PrettySignature("buildSignatures") - if err != nil { - t.Fatalf("Error signing map: %s", err) - } - - if bytes.Compare(b[:len(firstSection)], firstSection) != 0 { - t.Fatalf("Wrong signed value\nExpected:\n%s\nActual:\n%s", firstSection, b[:len(firstSection)]) - } - - parsed, err := ParsePrettySignature(b, "buildSignatures") - if err != nil { - t.Fatalf("Error parsing formatted signature: %s", err) - } - - keys, err := parsed.Verify() - if err != nil { - t.Fatalf("Error verifying signature: %s", err) - } - if len(keys) != 1 { - t.Fatalf("Error wrong number of keys returned") - } - if keys[0].KeyID() != key.KeyID() { - t.Fatalf("Unexpected public key returned") - } - - var unmarshalled map[string]interface{} - err = json.Unmarshal(b, &unmarshalled) - if err != nil { - t.Fatalf("Could not unmarshall after parse: %s", err) - } - -} - -func TestFormattedFlatJson(t *testing.T) { - key, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatalf("Error generating EC key: %s", err) - } - - testMap, firstSection := createTestJSON("buildSignatures", "") - unindented, err := json.Marshal(testMap) - if err != nil { - t.Fatalf("Marshall error: %s", err) - } - - js, err := NewJSONSignature(unindented) - if err != nil { - t.Fatalf("Error creating JSON signature: %s", err) - } - err = js.Sign(key) - if err != nil { - t.Fatalf("Error signing JSON signature: %s", err) - } - - b, err := js.PrettySignature("buildSignatures") - if err != nil { - t.Fatalf("Error signing map: %s", err) - } - - if bytes.Compare(b[:len(firstSection)], firstSection) != 0 { - t.Fatalf("Wrong signed value\nExpected:\n%s\nActual:\n%s", firstSection, b[:len(firstSection)]) - } - - parsed, err := ParsePrettySignature(b, "buildSignatures") - if err != nil { - t.Fatalf("Error parsing formatted signature: %s", err) - } - - keys, err := parsed.Verify() - if err != nil { - t.Fatalf("Error verifying signature: %s", err) - } - if len(keys) != 1 { - t.Fatalf("Error wrong number of keys returned") - } - if keys[0].KeyID() != key.KeyID() { - t.Fatalf("Unexpected public key returned") - } -} - -func generateTrustChain(t *testing.T, key PrivateKey, ca *x509.Certificate) (PrivateKey, []*x509.Certificate) { - parent := ca - parentKey := key - chain := make([]*x509.Certificate, 6) - for i := 5; i > 0; i-- { - intermediatekey, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatalf("Error generate key: %s", err) - } - chain[i], err = testutil.GenerateIntermediate(intermediatekey.CryptoPublicKey(), parentKey.CryptoPrivateKey(), parent) - if err != nil { - t.Fatalf("Error generating intermdiate certificate: %s", err) - } - parent = chain[i] - parentKey = intermediatekey - } - trustKey, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatalf("Error generate key: %s", err) - } - chain[0], err = testutil.GenerateTrustCert(trustKey.CryptoPublicKey(), parentKey.CryptoPrivateKey(), parent) - if err != nil { - t.Fatalf("Error generate trust cert: %s", err) - } - - return trustKey, chain -} - -func TestChainVerify(t *testing.T) { - caKey, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatalf("Error generating key: %s", err) - } - ca, err := testutil.GenerateTrustCA(caKey.CryptoPublicKey(), caKey.CryptoPrivateKey()) - if err != nil { - t.Fatalf("Error generating ca: %s", err) - } - trustKey, chain := generateTrustChain(t, caKey, ca) - - testMap, _ := createTestJSON("verifySignatures", " ") - js, err := NewJSONSignatureFromMap(testMap) - if err != nil { - t.Fatalf("Error creating JSONSignature from 
map: %s", err) - } - - err = js.SignWithChain(trustKey, chain) - if err != nil { - t.Fatalf("Error signing with chain: %s", err) - } - - pool := x509.NewCertPool() - pool.AddCert(ca) - chains, err := js.VerifyChains(pool) - if err != nil { - t.Fatalf("Error verifying content: %s", err) - } - if len(chains) != 1 { - t.Fatalf("Unexpected chains length: %d", len(chains)) - } - if len(chains[0]) != 7 { - t.Fatalf("Unexpected chain length: %d", len(chains[0])) - } -} - -func TestInvalidChain(t *testing.T) { - caKey, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatalf("Error generating key: %s", err) - } - ca, err := testutil.GenerateTrustCA(caKey.CryptoPublicKey(), caKey.CryptoPrivateKey()) - if err != nil { - t.Fatalf("Error generating ca: %s", err) - } - trustKey, chain := generateTrustChain(t, caKey, ca) - - testMap, _ := createTestJSON("verifySignatures", " ") - js, err := NewJSONSignatureFromMap(testMap) - if err != nil { - t.Fatalf("Error creating JSONSignature from map: %s", err) - } - - err = js.SignWithChain(trustKey, chain[:5]) - if err != nil { - t.Fatalf("Error signing with chain: %s", err) - } - - pool := x509.NewCertPool() - pool.AddCert(ca) - chains, err := js.VerifyChains(pool) - if err == nil { - t.Fatalf("Expected error verifying with bad chain") - } - if len(chains) != 0 { - t.Fatalf("Unexpected chains returned from invalid verify") - } -} - -func TestMergeSignatures(t *testing.T) { - pk1, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatalf("unexpected error generating private key 1: %v", err) - } - - pk2, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatalf("unexpected error generating private key 2: %v", err) - } - - payload := make([]byte, 1<<10) - if _, err = io.ReadFull(rand.Reader, payload); err != nil { - t.Fatalf("error generating payload: %v", err) - } - - payload, _ = json.Marshal(map[string]interface{}{"data": payload}) - - sig1, err := NewJSONSignature(payload) - if err != nil { - t.Fatalf("unexpected error creating signature 1: %v", err) - } - - if err := sig1.Sign(pk1); err != nil { - t.Fatalf("unexpected error signing with pk1: %v", err) - } - - sig2, err := NewJSONSignature(payload) - if err != nil { - t.Fatalf("unexpected error creating signature 2: %v", err) - } - - if err := sig2.Sign(pk2); err != nil { - t.Fatalf("unexpected error signing with pk2: %v", err) - } - - // Now, we actually merge into sig1 - if err := sig1.Merge(sig2); err != nil { - t.Fatalf("unexpected error merging: %v", err) - } - - // Verify the new signature package - pubkeys, err := sig1.Verify() - if err != nil { - t.Fatalf("unexpected error during verify: %v", err) - } - - // Make sure the pubkeys match the two private keys from before - privkeys := map[string]PrivateKey{ - pk1.KeyID(): pk1, - pk2.KeyID(): pk2, - } - - found := map[string]struct{}{} - - for _, pubkey := range pubkeys { - if _, ok := privkeys[pubkey.KeyID()]; !ok { - t.Fatalf("unexpected public key found during verification: %v", pubkey) - } - - found[pubkey.KeyID()] = struct{}{} - } - - // Make sure we've found all the private keys from verification - for keyid, _ := range privkeys { - if _, ok := found[keyid]; !ok { - t.Fatalf("public key %v not found during verification", keyid) - } - } - - // Create another signature, with a different payload, and ensure we get an error. 
- sig3, err := NewJSONSignature([]byte("{}")) - if err != nil { - t.Fatalf("unexpected error making signature for sig3: %v", err) - } - - if err := sig1.Merge(sig3); err == nil { - t.Fatalf("error expected during invalid merge with different payload") - } -} diff --git a/vendor/github.com/docker/libtrust/key.go b/vendor/github.com/docker/libtrust/key.go deleted file mode 100644 index 73642db2..00000000 --- a/vendor/github.com/docker/libtrust/key.go +++ /dev/null @@ -1,253 +0,0 @@ -package libtrust - -import ( - "crypto" - "crypto/ecdsa" - "crypto/rsa" - "crypto/x509" - "encoding/json" - "encoding/pem" - "errors" - "fmt" - "io" -) - -// PublicKey is a generic interface for a Public Key. -type PublicKey interface { - // KeyType returns the key type for this key. For elliptic curve keys, - // this value should be "EC". For RSA keys, this value should be "RSA". - KeyType() string - // KeyID returns a distinct identifier which is unique to this Public Key. - // The format generated by this library is a base32 encoding of a 240 bit - // hash of the public key data divided into 12 groups like so: - // ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP - KeyID() string - // Verify verifyies the signature of the data in the io.Reader using this - // Public Key. The alg parameter should identify the digital signature - // algorithm which was used to produce the signature and should be - // supported by this public key. Returns a nil error if the signature - // is valid. - Verify(data io.Reader, alg string, signature []byte) error - // CryptoPublicKey returns the internal object which can be used as a - // crypto.PublicKey for use with other standard library operations. The type - // is either *rsa.PublicKey or *ecdsa.PublicKey - CryptoPublicKey() crypto.PublicKey - // These public keys can be serialized to the standard JSON encoding for - // JSON Web Keys. See section 6 of the IETF draft RFC for JOSE JSON Web - // Algorithms. - MarshalJSON() ([]byte, error) - // These keys can also be serialized to the standard PEM encoding. - PEMBlock() (*pem.Block, error) - // The string representation of a key is its key type and ID. - String() string - AddExtendedField(string, interface{}) - GetExtendedField(string) interface{} -} - -// PrivateKey is a generic interface for a Private Key. -type PrivateKey interface { - // A PrivateKey contains all fields and methods of a PublicKey of the - // same type. The MarshalJSON method also outputs the private key as a - // JSON Web Key, and the PEMBlock method outputs the private key as a - // PEM block. - PublicKey - // PublicKey returns the PublicKey associated with this PrivateKey. - PublicKey() PublicKey - // Sign signs the data read from the io.Reader using a signature algorithm - // supported by the private key. If the specified hashing algorithm is - // supported by this key, that hash function is used to generate the - // signature otherwise the the default hashing algorithm for this key is - // used. Returns the signature and identifier of the algorithm used. - Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) - // CryptoPrivateKey returns the internal object which can be used as a - // crypto.PublicKey for use with other standard library operations. The - // type is either *rsa.PublicKey or *ecdsa.PublicKey - CryptoPrivateKey() crypto.PrivateKey -} - -// FromCryptoPublicKey returns a libtrust PublicKey representation of the given -// *ecdsa.PublicKey or *rsa.PublicKey. 
Returns a non-nil error when the given -// key is of an unsupported type. -func FromCryptoPublicKey(cryptoPublicKey crypto.PublicKey) (PublicKey, error) { - switch cryptoPublicKey := cryptoPublicKey.(type) { - case *ecdsa.PublicKey: - return fromECPublicKey(cryptoPublicKey) - case *rsa.PublicKey: - return fromRSAPublicKey(cryptoPublicKey), nil - default: - return nil, fmt.Errorf("public key type %T is not supported", cryptoPublicKey) - } -} - -// FromCryptoPrivateKey returns a libtrust PrivateKey representation of the given -// *ecdsa.PrivateKey or *rsa.PrivateKey. Returns a non-nil error when the given -// key is of an unsupported type. -func FromCryptoPrivateKey(cryptoPrivateKey crypto.PrivateKey) (PrivateKey, error) { - switch cryptoPrivateKey := cryptoPrivateKey.(type) { - case *ecdsa.PrivateKey: - return fromECPrivateKey(cryptoPrivateKey) - case *rsa.PrivateKey: - return fromRSAPrivateKey(cryptoPrivateKey), nil - default: - return nil, fmt.Errorf("private key type %T is not supported", cryptoPrivateKey) - } -} - -// UnmarshalPublicKeyPEM parses the PEM encoded data and returns a libtrust -// PublicKey or an error if there is a problem with the encoding. -func UnmarshalPublicKeyPEM(data []byte) (PublicKey, error) { - pemBlock, _ := pem.Decode(data) - if pemBlock == nil { - return nil, errors.New("unable to find PEM encoded data") - } else if pemBlock.Type != "PUBLIC KEY" { - return nil, fmt.Errorf("unable to get PublicKey from PEM type: %s", pemBlock.Type) - } - - return pubKeyFromPEMBlock(pemBlock) -} - -// UnmarshalPublicKeyPEMBundle parses the PEM encoded data as a bundle of -// PEM blocks appended one after the other and returns a slice of PublicKey -// objects that it finds. -func UnmarshalPublicKeyPEMBundle(data []byte) ([]PublicKey, error) { - pubKeys := []PublicKey{} - - for { - var pemBlock *pem.Block - pemBlock, data = pem.Decode(data) - if pemBlock == nil { - break - } else if pemBlock.Type != "PUBLIC KEY" { - return nil, fmt.Errorf("unable to get PublicKey from PEM type: %s", pemBlock.Type) - } - - pubKey, err := pubKeyFromPEMBlock(pemBlock) - if err != nil { - return nil, err - } - - pubKeys = append(pubKeys, pubKey) - } - - return pubKeys, nil -} - -// UnmarshalPrivateKeyPEM parses the PEM encoded data and returns a libtrust -// PrivateKey or an error if there is a problem with the encoding. -func UnmarshalPrivateKeyPEM(data []byte) (PrivateKey, error) { - pemBlock, _ := pem.Decode(data) - if pemBlock == nil { - return nil, errors.New("unable to find PEM encoded data") - } - - var key PrivateKey - - switch { - case pemBlock.Type == "RSA PRIVATE KEY": - rsaPrivateKey, err := x509.ParsePKCS1PrivateKey(pemBlock.Bytes) - if err != nil { - return nil, fmt.Errorf("unable to decode RSA Private Key PEM data: %s", err) - } - key = fromRSAPrivateKey(rsaPrivateKey) - case pemBlock.Type == "EC PRIVATE KEY": - ecPrivateKey, err := x509.ParseECPrivateKey(pemBlock.Bytes) - if err != nil { - return nil, fmt.Errorf("unable to decode EC Private Key PEM data: %s", err) - } - key, err = fromECPrivateKey(ecPrivateKey) - if err != nil { - return nil, err - } - default: - return nil, fmt.Errorf("unable to get PrivateKey from PEM type: %s", pemBlock.Type) - } - - addPEMHeadersToKey(pemBlock, key.PublicKey()) - - return key, nil -} - -// UnmarshalPublicKeyJWK unmarshals the given JSON Web Key into a generic -// Public Key to be used with libtrust. 
-func UnmarshalPublicKeyJWK(data []byte) (PublicKey, error) { - jwk := make(map[string]interface{}) - - err := json.Unmarshal(data, &jwk) - if err != nil { - return nil, fmt.Errorf( - "decoding JWK Public Key JSON data: %s\n", err, - ) - } - - // Get the Key Type value. - kty, err := stringFromMap(jwk, "kty") - if err != nil { - return nil, fmt.Errorf("JWK Public Key type: %s", err) - } - - switch { - case kty == "EC": - // Call out to unmarshal EC public key. - return ecPublicKeyFromMap(jwk) - case kty == "RSA": - // Call out to unmarshal RSA public key. - return rsaPublicKeyFromMap(jwk) - default: - return nil, fmt.Errorf( - "JWK Public Key type not supported: %q\n", kty, - ) - } -} - -// UnmarshalPublicKeyJWKSet parses the JSON encoded data as a JSON Web Key Set -// and returns a slice of Public Key objects. -func UnmarshalPublicKeyJWKSet(data []byte) ([]PublicKey, error) { - rawKeys, err := loadJSONKeySetRaw(data) - if err != nil { - return nil, err - } - - pubKeys := make([]PublicKey, 0, len(rawKeys)) - - for _, rawKey := range rawKeys { - pubKey, err := UnmarshalPublicKeyJWK(rawKey) - if err != nil { - return nil, err - } - pubKeys = append(pubKeys, pubKey) - } - - return pubKeys, nil -} - -// UnmarshalPrivateKeyJWK unmarshals the given JSON Web Key into a generic -// Private Key to be used with libtrust. -func UnmarshalPrivateKeyJWK(data []byte) (PrivateKey, error) { - jwk := make(map[string]interface{}) - - err := json.Unmarshal(data, &jwk) - if err != nil { - return nil, fmt.Errorf( - "decoding JWK Private Key JSON data: %s\n", err, - ) - } - - // Get the Key Type value. - kty, err := stringFromMap(jwk, "kty") - if err != nil { - return nil, fmt.Errorf("JWK Private Key type: %s", err) - } - - switch { - case kty == "EC": - // Call out to unmarshal EC private key. - return ecPrivateKeyFromMap(jwk) - case kty == "RSA": - // Call out to unmarshal RSA private key. - return rsaPrivateKeyFromMap(jwk) - default: - return nil, fmt.Errorf( - "JWK Private Key type not supported: %q\n", kty, - ) - } -} diff --git a/vendor/github.com/docker/libtrust/key_files.go b/vendor/github.com/docker/libtrust/key_files.go deleted file mode 100644 index c526de54..00000000 --- a/vendor/github.com/docker/libtrust/key_files.go +++ /dev/null @@ -1,255 +0,0 @@ -package libtrust - -import ( - "encoding/json" - "encoding/pem" - "errors" - "fmt" - "io/ioutil" - "os" - "strings" -) - -var ( - // ErrKeyFileDoesNotExist indicates that the private key file does not exist. - ErrKeyFileDoesNotExist = errors.New("key file does not exist") -) - -func readKeyFileBytes(filename string) ([]byte, error) { - data, err := ioutil.ReadFile(filename) - if err != nil { - if os.IsNotExist(err) { - err = ErrKeyFileDoesNotExist - } else { - err = fmt.Errorf("unable to read key file %s: %s", filename, err) - } - - return nil, err - } - - return data, nil -} - -/* - Loading and Saving of Public and Private Keys in either PEM or JWK format. -*/ - -// LoadKeyFile opens the given filename and attempts to read a Private Key -// encoded in either PEM or JWK format (if .json or .jwk file extension). 
-func LoadKeyFile(filename string) (PrivateKey, error) { - contents, err := readKeyFileBytes(filename) - if err != nil { - return nil, err - } - - var key PrivateKey - - if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { - key, err = UnmarshalPrivateKeyJWK(contents) - if err != nil { - return nil, fmt.Errorf("unable to decode private key JWK: %s", err) - } - } else { - key, err = UnmarshalPrivateKeyPEM(contents) - if err != nil { - return nil, fmt.Errorf("unable to decode private key PEM: %s", err) - } - } - - return key, nil -} - -// LoadPublicKeyFile opens the given filename and attempts to read a Public Key -// encoded in either PEM or JWK format (if .json or .jwk file extension). -func LoadPublicKeyFile(filename string) (PublicKey, error) { - contents, err := readKeyFileBytes(filename) - if err != nil { - return nil, err - } - - var key PublicKey - - if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { - key, err = UnmarshalPublicKeyJWK(contents) - if err != nil { - return nil, fmt.Errorf("unable to decode public key JWK: %s", err) - } - } else { - key, err = UnmarshalPublicKeyPEM(contents) - if err != nil { - return nil, fmt.Errorf("unable to decode public key PEM: %s", err) - } - } - - return key, nil -} - -// SaveKey saves the given key to a file using the provided filename. -// This process will overwrite any existing file at the provided location. -func SaveKey(filename string, key PrivateKey) error { - var encodedKey []byte - var err error - - if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { - // Encode in JSON Web Key format. - encodedKey, err = json.MarshalIndent(key, "", " ") - if err != nil { - return fmt.Errorf("unable to encode private key JWK: %s", err) - } - } else { - // Encode in PEM format. - pemBlock, err := key.PEMBlock() - if err != nil { - return fmt.Errorf("unable to encode private key PEM: %s", err) - } - encodedKey = pem.EncodeToMemory(pemBlock) - } - - err = ioutil.WriteFile(filename, encodedKey, os.FileMode(0600)) - if err != nil { - return fmt.Errorf("unable to write private key file %s: %s", filename, err) - } - - return nil -} - -// SavePublicKey saves the given public key to the file. -func SavePublicKey(filename string, key PublicKey) error { - var encodedKey []byte - var err error - - if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { - // Encode in JSON Web Key format. - encodedKey, err = json.MarshalIndent(key, "", " ") - if err != nil { - return fmt.Errorf("unable to encode public key JWK: %s", err) - } - } else { - // Encode in PEM format. - pemBlock, err := key.PEMBlock() - if err != nil { - return fmt.Errorf("unable to encode public key PEM: %s", err) - } - encodedKey = pem.EncodeToMemory(pemBlock) - } - - err = ioutil.WriteFile(filename, encodedKey, os.FileMode(0644)) - if err != nil { - return fmt.Errorf("unable to write public key file %s: %s", filename, err) - } - - return nil -} - -// Public Key Set files - -type jwkSet struct { - Keys []json.RawMessage `json:"keys"` -} - -// LoadKeySetFile loads a key set -func LoadKeySetFile(filename string) ([]PublicKey, error) { - if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { - return loadJSONKeySetFile(filename) - } - - // Must be a PEM format file - return loadPEMKeySetFile(filename) -} - -func loadJSONKeySetRaw(data []byte) ([]json.RawMessage, error) { - if len(data) == 0 { - // This is okay, just return an empty slice. 
- return []json.RawMessage{}, nil - } - - keySet := jwkSet{} - - err := json.Unmarshal(data, &keySet) - if err != nil { - return nil, fmt.Errorf("unable to decode JSON Web Key Set: %s", err) - } - - return keySet.Keys, nil -} - -func loadJSONKeySetFile(filename string) ([]PublicKey, error) { - contents, err := readKeyFileBytes(filename) - if err != nil && err != ErrKeyFileDoesNotExist { - return nil, err - } - - return UnmarshalPublicKeyJWKSet(contents) -} - -func loadPEMKeySetFile(filename string) ([]PublicKey, error) { - data, err := readKeyFileBytes(filename) - if err != nil && err != ErrKeyFileDoesNotExist { - return nil, err - } - - return UnmarshalPublicKeyPEMBundle(data) -} - -// AddKeySetFile adds a key to a key set -func AddKeySetFile(filename string, key PublicKey) error { - if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { - return addKeySetJSONFile(filename, key) - } - - // Must be a PEM format file - return addKeySetPEMFile(filename, key) -} - -func addKeySetJSONFile(filename string, key PublicKey) error { - encodedKey, err := json.Marshal(key) - if err != nil { - return fmt.Errorf("unable to encode trusted client key: %s", err) - } - - contents, err := readKeyFileBytes(filename) - if err != nil && err != ErrKeyFileDoesNotExist { - return err - } - - rawEntries, err := loadJSONKeySetRaw(contents) - if err != nil { - return err - } - - rawEntries = append(rawEntries, json.RawMessage(encodedKey)) - entriesWrapper := jwkSet{Keys: rawEntries} - - encodedEntries, err := json.MarshalIndent(entriesWrapper, "", " ") - if err != nil { - return fmt.Errorf("unable to encode trusted client keys: %s", err) - } - - err = ioutil.WriteFile(filename, encodedEntries, os.FileMode(0644)) - if err != nil { - return fmt.Errorf("unable to write trusted client keys file %s: %s", filename, err) - } - - return nil -} - -func addKeySetPEMFile(filename string, key PublicKey) error { - // Encode to PEM, open file for appending, write PEM. 
- file, err := os.OpenFile(filename, os.O_CREATE|os.O_APPEND|os.O_RDWR, os.FileMode(0644)) - if err != nil { - return fmt.Errorf("unable to open trusted client keys file %s: %s", filename, err) - } - defer file.Close() - - pemBlock, err := key.PEMBlock() - if err != nil { - return fmt.Errorf("unable to encoded trusted key: %s", err) - } - - _, err = file.Write(pem.EncodeToMemory(pemBlock)) - if err != nil { - return fmt.Errorf("unable to write trusted keys file: %s", err) - } - - return nil -} diff --git a/vendor/github.com/docker/libtrust/key_files_test.go b/vendor/github.com/docker/libtrust/key_files_test.go deleted file mode 100644 index 57e691f2..00000000 --- a/vendor/github.com/docker/libtrust/key_files_test.go +++ /dev/null @@ -1,220 +0,0 @@ -package libtrust - -import ( - "errors" - "io/ioutil" - "os" - "testing" -) - -func makeTempFile(t *testing.T, prefix string) (filename string) { - file, err := ioutil.TempFile("", prefix) - if err != nil { - t.Fatal(err) - } - - filename = file.Name() - file.Close() - - return -} - -func TestKeyFiles(t *testing.T) { - key, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatal(err) - } - - testKeyFiles(t, key) - - key, err = GenerateRSA2048PrivateKey() - if err != nil { - t.Fatal(err) - } - - testKeyFiles(t, key) -} - -func testKeyFiles(t *testing.T, key PrivateKey) { - var err error - - privateKeyFilename := makeTempFile(t, "private_key") - privateKeyFilenamePEM := privateKeyFilename + ".pem" - privateKeyFilenameJWK := privateKeyFilename + ".jwk" - - publicKeyFilename := makeTempFile(t, "public_key") - publicKeyFilenamePEM := publicKeyFilename + ".pem" - publicKeyFilenameJWK := publicKeyFilename + ".jwk" - - if err = SaveKey(privateKeyFilenamePEM, key); err != nil { - t.Fatal(err) - } - - if err = SaveKey(privateKeyFilenameJWK, key); err != nil { - t.Fatal(err) - } - - if err = SavePublicKey(publicKeyFilenamePEM, key.PublicKey()); err != nil { - t.Fatal(err) - } - - if err = SavePublicKey(publicKeyFilenameJWK, key.PublicKey()); err != nil { - t.Fatal(err) - } - - loadedPEMKey, err := LoadKeyFile(privateKeyFilenamePEM) - if err != nil { - t.Fatal(err) - } - - loadedJWKKey, err := LoadKeyFile(privateKeyFilenameJWK) - if err != nil { - t.Fatal(err) - } - - loadedPEMPublicKey, err := LoadPublicKeyFile(publicKeyFilenamePEM) - if err != nil { - t.Fatal(err) - } - - loadedJWKPublicKey, err := LoadPublicKeyFile(publicKeyFilenameJWK) - if err != nil { - t.Fatal(err) - } - - if key.KeyID() != loadedPEMKey.KeyID() { - t.Fatal(errors.New("key IDs do not match")) - } - - if key.KeyID() != loadedJWKKey.KeyID() { - t.Fatal(errors.New("key IDs do not match")) - } - - if key.KeyID() != loadedPEMPublicKey.KeyID() { - t.Fatal(errors.New("key IDs do not match")) - } - - if key.KeyID() != loadedJWKPublicKey.KeyID() { - t.Fatal(errors.New("key IDs do not match")) - } - - os.Remove(privateKeyFilename) - os.Remove(privateKeyFilenamePEM) - os.Remove(privateKeyFilenameJWK) - os.Remove(publicKeyFilename) - os.Remove(publicKeyFilenamePEM) - os.Remove(publicKeyFilenameJWK) -} - -func TestTrustedHostKeysFile(t *testing.T) { - trustedHostKeysFilename := makeTempFile(t, "trusted_host_keys") - trustedHostKeysFilenamePEM := trustedHostKeysFilename + ".pem" - trustedHostKeysFilenameJWK := trustedHostKeysFilename + ".json" - - testTrustedHostKeysFile(t, trustedHostKeysFilenamePEM) - testTrustedHostKeysFile(t, trustedHostKeysFilenameJWK) - - os.Remove(trustedHostKeysFilename) - os.Remove(trustedHostKeysFilenamePEM) - os.Remove(trustedHostKeysFilenameJWK) -} - -func 
testTrustedHostKeysFile(t *testing.T, trustedHostKeysFilename string) { - hostAddress1 := "docker.example.com:2376" - hostKey1, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatal(err) - } - - hostKey1.AddExtendedField("hosts", []string{hostAddress1}) - err = AddKeySetFile(trustedHostKeysFilename, hostKey1.PublicKey()) - if err != nil { - t.Fatal(err) - } - - trustedHostKeysMapping, err := LoadKeySetFile(trustedHostKeysFilename) - if err != nil { - t.Fatal(err) - } - - for addr, hostKey := range trustedHostKeysMapping { - t.Logf("Host Address: %d\n", addr) - t.Logf("Host Key: %s\n\n", hostKey) - } - - hostAddress2 := "192.168.59.103:2376" - hostKey2, err := GenerateRSA2048PrivateKey() - if err != nil { - t.Fatal(err) - } - - hostKey2.AddExtendedField("hosts", hostAddress2) - err = AddKeySetFile(trustedHostKeysFilename, hostKey2.PublicKey()) - if err != nil { - t.Fatal(err) - } - - trustedHostKeysMapping, err = LoadKeySetFile(trustedHostKeysFilename) - if err != nil { - t.Fatal(err) - } - - for addr, hostKey := range trustedHostKeysMapping { - t.Logf("Host Address: %d\n", addr) - t.Logf("Host Key: %s\n\n", hostKey) - } - -} - -func TestTrustedClientKeysFile(t *testing.T) { - trustedClientKeysFilename := makeTempFile(t, "trusted_client_keys") - trustedClientKeysFilenamePEM := trustedClientKeysFilename + ".pem" - trustedClientKeysFilenameJWK := trustedClientKeysFilename + ".json" - - testTrustedClientKeysFile(t, trustedClientKeysFilenamePEM) - testTrustedClientKeysFile(t, trustedClientKeysFilenameJWK) - - os.Remove(trustedClientKeysFilename) - os.Remove(trustedClientKeysFilenamePEM) - os.Remove(trustedClientKeysFilenameJWK) -} - -func testTrustedClientKeysFile(t *testing.T, trustedClientKeysFilename string) { - clientKey1, err := GenerateECP256PrivateKey() - if err != nil { - t.Fatal(err) - } - - err = AddKeySetFile(trustedClientKeysFilename, clientKey1.PublicKey()) - if err != nil { - t.Fatal(err) - } - - trustedClientKeys, err := LoadKeySetFile(trustedClientKeysFilename) - if err != nil { - t.Fatal(err) - } - - for _, clientKey := range trustedClientKeys { - t.Logf("Client Key: %s\n", clientKey) - } - - clientKey2, err := GenerateRSA2048PrivateKey() - if err != nil { - t.Fatal(err) - } - - err = AddKeySetFile(trustedClientKeysFilename, clientKey2.PublicKey()) - if err != nil { - t.Fatal(err) - } - - trustedClientKeys, err = LoadKeySetFile(trustedClientKeysFilename) - if err != nil { - t.Fatal(err) - } - - for _, clientKey := range trustedClientKeys { - t.Logf("Client Key: %s\n", clientKey) - } -} diff --git a/vendor/github.com/docker/libtrust/key_manager.go b/vendor/github.com/docker/libtrust/key_manager.go deleted file mode 100644 index 9a98ae35..00000000 --- a/vendor/github.com/docker/libtrust/key_manager.go +++ /dev/null @@ -1,175 +0,0 @@ -package libtrust - -import ( - "crypto/tls" - "crypto/x509" - "fmt" - "io/ioutil" - "net" - "os" - "path" - "sync" -) - -// ClientKeyManager manages client keys on the filesystem -type ClientKeyManager struct { - key PrivateKey - clientFile string - clientDir string - - clientLock sync.RWMutex - clients []PublicKey - - configLock sync.Mutex - configs []*tls.Config -} - -// NewClientKeyManager loads a new manager from a set of key files -// and managed by the given private key. 
-func NewClientKeyManager(trustKey PrivateKey, clientFile, clientDir string) (*ClientKeyManager, error) { - m := &ClientKeyManager{ - key: trustKey, - clientFile: clientFile, - clientDir: clientDir, - } - if err := m.loadKeys(); err != nil { - return nil, err - } - // TODO Start watching file and directory - - return m, nil -} - -func (c *ClientKeyManager) loadKeys() (err error) { - // Load authorized keys file - var clients []PublicKey - if c.clientFile != "" { - clients, err = LoadKeySetFile(c.clientFile) - if err != nil { - return fmt.Errorf("unable to load authorized keys: %s", err) - } - } - - // Add clients from authorized keys directory - files, err := ioutil.ReadDir(c.clientDir) - if err != nil && !os.IsNotExist(err) { - return fmt.Errorf("unable to open authorized keys directory: %s", err) - } - for _, f := range files { - if !f.IsDir() { - publicKey, err := LoadPublicKeyFile(path.Join(c.clientDir, f.Name())) - if err != nil { - return fmt.Errorf("unable to load authorized key file: %s", err) - } - clients = append(clients, publicKey) - } - } - - c.clientLock.Lock() - c.clients = clients - c.clientLock.Unlock() - - return nil -} - -// RegisterTLSConfig registers a tls configuration to manager -// such that any changes to the keys may be reflected in -// the tls client CA pool -func (c *ClientKeyManager) RegisterTLSConfig(tlsConfig *tls.Config) error { - c.clientLock.RLock() - certPool, err := GenerateCACertPool(c.key, c.clients) - if err != nil { - return fmt.Errorf("CA pool generation error: %s", err) - } - c.clientLock.RUnlock() - - tlsConfig.ClientCAs = certPool - - c.configLock.Lock() - c.configs = append(c.configs, tlsConfig) - c.configLock.Unlock() - - return nil -} - -// NewIdentityAuthTLSConfig creates a tls.Config for the server to use for -// libtrust identity authentication for the domain specified -func NewIdentityAuthTLSConfig(trustKey PrivateKey, clients *ClientKeyManager, addr string, domain string) (*tls.Config, error) { - tlsConfig := newTLSConfig() - - tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert - if err := clients.RegisterTLSConfig(tlsConfig); err != nil { - return nil, err - } - - // Generate cert - ips, domains, err := parseAddr(addr) - if err != nil { - return nil, err - } - // add domain that it expects clients to use - domains = append(domains, domain) - x509Cert, err := GenerateSelfSignedServerCert(trustKey, domains, ips) - if err != nil { - return nil, fmt.Errorf("certificate generation error: %s", err) - } - tlsConfig.Certificates = []tls.Certificate{{ - Certificate: [][]byte{x509Cert.Raw}, - PrivateKey: trustKey.CryptoPrivateKey(), - Leaf: x509Cert, - }} - - return tlsConfig, nil -} - -// NewCertAuthTLSConfig creates a tls.Config for the server to use for -// certificate authentication -func NewCertAuthTLSConfig(caPath, certPath, keyPath string) (*tls.Config, error) { - tlsConfig := newTLSConfig() - - cert, err := tls.LoadX509KeyPair(certPath, keyPath) - if err != nil { - return nil, fmt.Errorf("Couldn't load X509 key pair (%s, %s): %s. Key encrypted?", certPath, keyPath, err) - } - tlsConfig.Certificates = []tls.Certificate{cert} - - // Verify client certificates against a CA? 
- if caPath != "" { - certPool := x509.NewCertPool() - file, err := ioutil.ReadFile(caPath) - if err != nil { - return nil, fmt.Errorf("Couldn't read CA certificate: %s", err) - } - certPool.AppendCertsFromPEM(file) - - tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert - tlsConfig.ClientCAs = certPool - } - - return tlsConfig, nil -} - -func newTLSConfig() *tls.Config { - return &tls.Config{ - NextProtos: []string{"http/1.1"}, - // Avoid fallback on insecure SSL protocols - MinVersion: tls.VersionTLS10, - } -} - -// parseAddr parses an address into an array of IPs and domains -func parseAddr(addr string) ([]net.IP, []string, error) { - host, _, err := net.SplitHostPort(addr) - if err != nil { - return nil, nil, err - } - var domains []string - var ips []net.IP - ip := net.ParseIP(host) - if ip != nil { - ips = []net.IP{ip} - } else { - domains = []string{host} - } - return ips, domains, nil -} diff --git a/vendor/github.com/docker/libtrust/key_test.go b/vendor/github.com/docker/libtrust/key_test.go deleted file mode 100644 index f6c59cc4..00000000 --- a/vendor/github.com/docker/libtrust/key_test.go +++ /dev/null @@ -1,80 +0,0 @@ -package libtrust - -import ( - "testing" -) - -type generateFunc func() (PrivateKey, error) - -func runGenerateBench(b *testing.B, f generateFunc, name string) { - for i := 0; i < b.N; i++ { - _, err := f() - if err != nil { - b.Fatalf("Error generating %s: %s", name, err) - } - } -} - -func runFingerprintBench(b *testing.B, f generateFunc, name string) { - b.StopTimer() - // Don't count this relatively slow generation call. - key, err := f() - if err != nil { - b.Fatalf("Error generating %s: %s", name, err) - } - b.StartTimer() - - for i := 0; i < b.N; i++ { - if key.KeyID() == "" { - b.Fatalf("Error generating key ID for %s", name) - } - } -} - -func BenchmarkECP256Generate(b *testing.B) { - runGenerateBench(b, GenerateECP256PrivateKey, "P256") -} - -func BenchmarkECP384Generate(b *testing.B) { - runGenerateBench(b, GenerateECP384PrivateKey, "P384") -} - -func BenchmarkECP521Generate(b *testing.B) { - runGenerateBench(b, GenerateECP521PrivateKey, "P521") -} - -func BenchmarkRSA2048Generate(b *testing.B) { - runGenerateBench(b, GenerateRSA2048PrivateKey, "RSA2048") -} - -func BenchmarkRSA3072Generate(b *testing.B) { - runGenerateBench(b, GenerateRSA3072PrivateKey, "RSA3072") -} - -func BenchmarkRSA4096Generate(b *testing.B) { - runGenerateBench(b, GenerateRSA4096PrivateKey, "RSA4096") -} - -func BenchmarkECP256Fingerprint(b *testing.B) { - runFingerprintBench(b, GenerateECP256PrivateKey, "P256") -} - -func BenchmarkECP384Fingerprint(b *testing.B) { - runFingerprintBench(b, GenerateECP384PrivateKey, "P384") -} - -func BenchmarkECP521Fingerprint(b *testing.B) { - runFingerprintBench(b, GenerateECP521PrivateKey, "P521") -} - -func BenchmarkRSA2048Fingerprint(b *testing.B) { - runFingerprintBench(b, GenerateRSA2048PrivateKey, "RSA2048") -} - -func BenchmarkRSA3072Fingerprint(b *testing.B) { - runFingerprintBench(b, GenerateRSA3072PrivateKey, "RSA3072") -} - -func BenchmarkRSA4096Fingerprint(b *testing.B) { - runFingerprintBench(b, GenerateRSA4096PrivateKey, "RSA4096") -} diff --git a/vendor/github.com/docker/libtrust/rsa_key.go b/vendor/github.com/docker/libtrust/rsa_key.go deleted file mode 100644 index dac4cacf..00000000 --- a/vendor/github.com/docker/libtrust/rsa_key.go +++ /dev/null @@ -1,427 +0,0 @@ -package libtrust - -import ( - "crypto" - "crypto/rand" - "crypto/rsa" - "crypto/x509" - "encoding/json" - "encoding/pem" - "errors" - "fmt" - "io" - "math/big" 
-) - -/* - * RSA DSA PUBLIC KEY - */ - -// rsaPublicKey implements a JWK Public Key using RSA digital signature algorithms. -type rsaPublicKey struct { - *rsa.PublicKey - extended map[string]interface{} -} - -func fromRSAPublicKey(cryptoPublicKey *rsa.PublicKey) *rsaPublicKey { - return &rsaPublicKey{cryptoPublicKey, map[string]interface{}{}} -} - -// KeyType returns the JWK key type for RSA keys, i.e., "RSA". -func (k *rsaPublicKey) KeyType() string { - return "RSA" -} - -// KeyID returns a distinct identifier which is unique to this Public Key. -func (k *rsaPublicKey) KeyID() string { - return keyIDFromCryptoKey(k) -} - -func (k *rsaPublicKey) String() string { - return fmt.Sprintf("RSA Public Key <%s>", k.KeyID()) -} - -// Verify verifyies the signature of the data in the io.Reader using this Public Key. -// The alg parameter should be the name of the JWA digital signature algorithm -// which was used to produce the signature and should be supported by this -// public key. Returns a nil error if the signature is valid. -func (k *rsaPublicKey) Verify(data io.Reader, alg string, signature []byte) error { - // Verify the signature of the given date, return non-nil error if valid. - sigAlg, err := rsaSignatureAlgorithmByName(alg) - if err != nil { - return fmt.Errorf("unable to verify Signature: %s", err) - } - - hasher := sigAlg.HashID().New() - _, err = io.Copy(hasher, data) - if err != nil { - return fmt.Errorf("error reading data to sign: %s", err) - } - hash := hasher.Sum(nil) - - err = rsa.VerifyPKCS1v15(k.PublicKey, sigAlg.HashID(), hash, signature) - if err != nil { - return fmt.Errorf("invalid %s signature: %s", sigAlg.HeaderParam(), err) - } - - return nil -} - -// CryptoPublicKey returns the internal object which can be used as a -// crypto.PublicKey for use with other standard library operations. The type -// is either *rsa.PublicKey or *ecdsa.PublicKey -func (k *rsaPublicKey) CryptoPublicKey() crypto.PublicKey { - return k.PublicKey -} - -func (k *rsaPublicKey) toMap() map[string]interface{} { - jwk := make(map[string]interface{}) - for k, v := range k.extended { - jwk[k] = v - } - jwk["kty"] = k.KeyType() - jwk["kid"] = k.KeyID() - jwk["n"] = joseBase64UrlEncode(k.N.Bytes()) - jwk["e"] = joseBase64UrlEncode(serializeRSAPublicExponentParam(k.E)) - - return jwk -} - -// MarshalJSON serializes this Public Key using the JWK JSON serialization format for -// RSA keys. -func (k *rsaPublicKey) MarshalJSON() (data []byte, err error) { - return json.Marshal(k.toMap()) -} - -// PEMBlock serializes this Public Key to DER-encoded PKIX format. -func (k *rsaPublicKey) PEMBlock() (*pem.Block, error) { - derBytes, err := x509.MarshalPKIXPublicKey(k.PublicKey) - if err != nil { - return nil, fmt.Errorf("unable to serialize RSA PublicKey to DER-encoded PKIX format: %s", err) - } - k.extended["kid"] = k.KeyID() // For display purposes. - return createPemBlock("PUBLIC KEY", derBytes, k.extended) -} - -func (k *rsaPublicKey) AddExtendedField(field string, value interface{}) { - k.extended[field] = value -} - -func (k *rsaPublicKey) GetExtendedField(field string) interface{} { - v, ok := k.extended[field] - if !ok { - return nil - } - return v -} - -func rsaPublicKeyFromMap(jwk map[string]interface{}) (*rsaPublicKey, error) { - // JWK key type (kty) has already been determined to be "RSA". - // Need to extract 'n', 'e', and 'kid' and check for - // consistency. - - // Get the modulus parameter N. 
- nB64Url, err := stringFromMap(jwk, "n") - if err != nil { - return nil, fmt.Errorf("JWK RSA Public Key modulus: %s", err) - } - - n, err := parseRSAModulusParam(nB64Url) - if err != nil { - return nil, fmt.Errorf("JWK RSA Public Key modulus: %s", err) - } - - // Get the public exponent E. - eB64Url, err := stringFromMap(jwk, "e") - if err != nil { - return nil, fmt.Errorf("JWK RSA Public Key exponent: %s", err) - } - - e, err := parseRSAPublicExponentParam(eB64Url) - if err != nil { - return nil, fmt.Errorf("JWK RSA Public Key exponent: %s", err) - } - - key := &rsaPublicKey{ - PublicKey: &rsa.PublicKey{N: n, E: e}, - } - - // Key ID is optional, but if it exists, it should match the key. - _, ok := jwk["kid"] - if ok { - kid, err := stringFromMap(jwk, "kid") - if err != nil { - return nil, fmt.Errorf("JWK RSA Public Key ID: %s", err) - } - if kid != key.KeyID() { - return nil, fmt.Errorf("JWK RSA Public Key ID does not match: %s", kid) - } - } - - if _, ok := jwk["d"]; ok { - return nil, fmt.Errorf("JWK RSA Public Key cannot contain private exponent") - } - - key.extended = jwk - - return key, nil -} - -/* - * RSA DSA PRIVATE KEY - */ - -// rsaPrivateKey implements a JWK Private Key using RSA digital signature algorithms. -type rsaPrivateKey struct { - rsaPublicKey - *rsa.PrivateKey -} - -func fromRSAPrivateKey(cryptoPrivateKey *rsa.PrivateKey) *rsaPrivateKey { - return &rsaPrivateKey{ - *fromRSAPublicKey(&cryptoPrivateKey.PublicKey), - cryptoPrivateKey, - } -} - -// PublicKey returns the Public Key data associated with this Private Key. -func (k *rsaPrivateKey) PublicKey() PublicKey { - return &k.rsaPublicKey -} - -func (k *rsaPrivateKey) String() string { - return fmt.Sprintf("RSA Private Key <%s>", k.KeyID()) -} - -// Sign signs the data read from the io.Reader using a signature algorithm supported -// by the RSA private key. If the specified hashing algorithm is supported by -// this key, that hash function is used to generate the signature otherwise the -// the default hashing algorithm for this key is used. Returns the signature -// and the name of the JWK signature algorithm used, e.g., "RS256", "RS384", -// "RS512". -func (k *rsaPrivateKey) Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) { - // Generate a signature of the data using the internal alg. - sigAlg := rsaPKCS1v15SignatureAlgorithmForHashID(hashID) - hasher := sigAlg.HashID().New() - - _, err = io.Copy(hasher, data) - if err != nil { - return nil, "", fmt.Errorf("error reading data to sign: %s", err) - } - hash := hasher.Sum(nil) - - signature, err = rsa.SignPKCS1v15(rand.Reader, k.PrivateKey, sigAlg.HashID(), hash) - if err != nil { - return nil, "", fmt.Errorf("error producing signature: %s", err) - } - - alg = sigAlg.HeaderParam() - - return -} - -// CryptoPrivateKey returns the internal object which can be used as a -// crypto.PublicKey for use with other standard library operations. The type -// is either *rsa.PublicKey or *ecdsa.PublicKey -func (k *rsaPrivateKey) CryptoPrivateKey() crypto.PrivateKey { - return k.PrivateKey -} - -func (k *rsaPrivateKey) toMap() map[string]interface{} { - k.Precompute() // Make sure the precomputed values are stored. 
- jwk := k.rsaPublicKey.toMap() - - jwk["d"] = joseBase64UrlEncode(k.D.Bytes()) - jwk["p"] = joseBase64UrlEncode(k.Primes[0].Bytes()) - jwk["q"] = joseBase64UrlEncode(k.Primes[1].Bytes()) - jwk["dp"] = joseBase64UrlEncode(k.Precomputed.Dp.Bytes()) - jwk["dq"] = joseBase64UrlEncode(k.Precomputed.Dq.Bytes()) - jwk["qi"] = joseBase64UrlEncode(k.Precomputed.Qinv.Bytes()) - - otherPrimes := k.Primes[2:] - - if len(otherPrimes) > 0 { - otherPrimesInfo := make([]interface{}, len(otherPrimes)) - for i, r := range otherPrimes { - otherPrimeInfo := make(map[string]string, 3) - otherPrimeInfo["r"] = joseBase64UrlEncode(r.Bytes()) - crtVal := k.Precomputed.CRTValues[i] - otherPrimeInfo["d"] = joseBase64UrlEncode(crtVal.Exp.Bytes()) - otherPrimeInfo["t"] = joseBase64UrlEncode(crtVal.Coeff.Bytes()) - otherPrimesInfo[i] = otherPrimeInfo - } - jwk["oth"] = otherPrimesInfo - } - - return jwk -} - -// MarshalJSON serializes this Private Key using the JWK JSON serialization format for -// RSA keys. -func (k *rsaPrivateKey) MarshalJSON() (data []byte, err error) { - return json.Marshal(k.toMap()) -} - -// PEMBlock serializes this Private Key to DER-encoded PKIX format. -func (k *rsaPrivateKey) PEMBlock() (*pem.Block, error) { - derBytes := x509.MarshalPKCS1PrivateKey(k.PrivateKey) - k.extended["keyID"] = k.KeyID() // For display purposes. - return createPemBlock("RSA PRIVATE KEY", derBytes, k.extended) -} - -func rsaPrivateKeyFromMap(jwk map[string]interface{}) (*rsaPrivateKey, error) { - // The JWA spec for RSA Private Keys (draft rfc section 5.3.2) states that - // only the private key exponent 'd' is REQUIRED, the others are just for - // signature/decryption optimizations and SHOULD be included when the JWK - // is produced. We MAY choose to accept a JWK which only includes 'd', but - // we're going to go ahead and not choose to accept it without the extra - // fields. Only the 'oth' field will be optional (for multi-prime keys). - privateExponent, err := parseRSAPrivateKeyParamFromMap(jwk, "d") - if err != nil { - return nil, fmt.Errorf("JWK RSA Private Key exponent: %s", err) - } - firstPrimeFactor, err := parseRSAPrivateKeyParamFromMap(jwk, "p") - if err != nil { - return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err) - } - secondPrimeFactor, err := parseRSAPrivateKeyParamFromMap(jwk, "q") - if err != nil { - return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err) - } - firstFactorCRT, err := parseRSAPrivateKeyParamFromMap(jwk, "dp") - if err != nil { - return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err) - } - secondFactorCRT, err := parseRSAPrivateKeyParamFromMap(jwk, "dq") - if err != nil { - return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err) - } - crtCoeff, err := parseRSAPrivateKeyParamFromMap(jwk, "qi") - if err != nil { - return nil, fmt.Errorf("JWK RSA Private Key CRT coefficient: %s", err) - } - - var oth interface{} - if _, ok := jwk["oth"]; ok { - oth = jwk["oth"] - delete(jwk, "oth") - } - - // JWK key type (kty) has already been determined to be "RSA". - // Need to extract the public key information, then extract the private - // key values. 
- publicKey, err := rsaPublicKeyFromMap(jwk) - if err != nil { - return nil, err - } - - privateKey := &rsa.PrivateKey{ - PublicKey: *publicKey.PublicKey, - D: privateExponent, - Primes: []*big.Int{firstPrimeFactor, secondPrimeFactor}, - Precomputed: rsa.PrecomputedValues{ - Dp: firstFactorCRT, - Dq: secondFactorCRT, - Qinv: crtCoeff, - }, - } - - if oth != nil { - // Should be an array of more JSON objects. - otherPrimesInfo, ok := oth.([]interface{}) - if !ok { - return nil, errors.New("JWK RSA Private Key: Invalid other primes info: must be an array") - } - numOtherPrimeFactors := len(otherPrimesInfo) - if numOtherPrimeFactors == 0 { - return nil, errors.New("JWK RSA Privake Key: Invalid other primes info: must be absent or non-empty") - } - otherPrimeFactors := make([]*big.Int, numOtherPrimeFactors) - productOfPrimes := new(big.Int).Mul(firstPrimeFactor, secondPrimeFactor) - crtValues := make([]rsa.CRTValue, numOtherPrimeFactors) - - for i, val := range otherPrimesInfo { - otherPrimeinfo, ok := val.(map[string]interface{}) - if !ok { - return nil, errors.New("JWK RSA Private Key: Invalid other prime info: must be a JSON object") - } - - otherPrimeFactor, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "r") - if err != nil { - return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err) - } - otherFactorCRT, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "d") - if err != nil { - return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err) - } - otherCrtCoeff, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "t") - if err != nil { - return nil, fmt.Errorf("JWK RSA Private Key CRT coefficient: %s", err) - } - - crtValue := crtValues[i] - crtValue.Exp = otherFactorCRT - crtValue.Coeff = otherCrtCoeff - crtValue.R = productOfPrimes - otherPrimeFactors[i] = otherPrimeFactor - productOfPrimes = new(big.Int).Mul(productOfPrimes, otherPrimeFactor) - } - - privateKey.Primes = append(privateKey.Primes, otherPrimeFactors...) - privateKey.Precomputed.CRTValues = crtValues - } - - key := &rsaPrivateKey{ - rsaPublicKey: *publicKey, - PrivateKey: privateKey, - } - - return key, nil -} - -/* - * Key Generation Functions. - */ - -func generateRSAPrivateKey(bits int) (k *rsaPrivateKey, err error) { - k = new(rsaPrivateKey) - k.PrivateKey, err = rsa.GenerateKey(rand.Reader, bits) - if err != nil { - return nil, err - } - - k.rsaPublicKey.PublicKey = &k.PrivateKey.PublicKey - k.extended = make(map[string]interface{}) - - return -} - -// GenerateRSA2048PrivateKey generates a key pair using 2048-bit RSA. -func GenerateRSA2048PrivateKey() (PrivateKey, error) { - k, err := generateRSAPrivateKey(2048) - if err != nil { - return nil, fmt.Errorf("error generating RSA 2048-bit key: %s", err) - } - - return k, nil -} - -// GenerateRSA3072PrivateKey generates a key pair using 3072-bit RSA. -func GenerateRSA3072PrivateKey() (PrivateKey, error) { - k, err := generateRSAPrivateKey(3072) - if err != nil { - return nil, fmt.Errorf("error generating RSA 3072-bit key: %s", err) - } - - return k, nil -} - -// GenerateRSA4096PrivateKey generates a key pair using 4096-bit RSA. 
-func GenerateRSA4096PrivateKey() (PrivateKey, error) { - k, err := generateRSAPrivateKey(4096) - if err != nil { - return nil, fmt.Errorf("error generating RSA 4096-bit key: %s", err) - } - - return k, nil -} diff --git a/vendor/github.com/docker/libtrust/rsa_key_test.go b/vendor/github.com/docker/libtrust/rsa_key_test.go deleted file mode 100644 index 5ec7707a..00000000 --- a/vendor/github.com/docker/libtrust/rsa_key_test.go +++ /dev/null @@ -1,157 +0,0 @@ -package libtrust - -import ( - "bytes" - "encoding/json" - "log" - "testing" -) - -var rsaKeys []PrivateKey - -func init() { - var err error - rsaKeys, err = generateRSATestKeys() - if err != nil { - log.Fatal(err) - } -} - -func generateRSATestKeys() (keys []PrivateKey, err error) { - log.Println("Generating RSA 2048-bit Test Key") - rsa2048Key, err := GenerateRSA2048PrivateKey() - if err != nil { - return - } - - log.Println("Generating RSA 3072-bit Test Key") - rsa3072Key, err := GenerateRSA3072PrivateKey() - if err != nil { - return - } - - log.Println("Generating RSA 4096-bit Test Key") - rsa4096Key, err := GenerateRSA4096PrivateKey() - if err != nil { - return - } - - log.Println("Done generating RSA Test Keys!") - keys = []PrivateKey{rsa2048Key, rsa3072Key, rsa4096Key} - - return -} - -func TestRSAKeys(t *testing.T) { - for _, rsaKey := range rsaKeys { - if rsaKey.KeyType() != "RSA" { - t.Fatalf("key type must be %q, instead got %q", "RSA", rsaKey.KeyType()) - } - } -} - -func TestRSASignVerify(t *testing.T) { - message := "Hello, World!" - data := bytes.NewReader([]byte(message)) - - sigAlgs := []*signatureAlgorithm{rs256, rs384, rs512} - - for i, rsaKey := range rsaKeys { - sigAlg := sigAlgs[i] - - t.Logf("%s signature of %q with kid: %s\n", sigAlg.HeaderParam(), message, rsaKey.KeyID()) - - data.Seek(0, 0) // Reset the byte reader - - // Sign - sig, alg, err := rsaKey.Sign(data, sigAlg.HashID()) - if err != nil { - t.Fatal(err) - } - - data.Seek(0, 0) // Reset the byte reader - - // Verify - err = rsaKey.Verify(data, alg, sig) - if err != nil { - t.Fatal(err) - } - } -} - -func TestMarshalUnmarshalRSAKeys(t *testing.T) { - data := bytes.NewReader([]byte("This is a test. I repeat: this is only a test.")) - sigAlgs := []*signatureAlgorithm{rs256, rs384, rs512} - - for i, rsaKey := range rsaKeys { - sigAlg := sigAlgs[i] - privateJWKJSON, err := json.MarshalIndent(rsaKey, "", " ") - if err != nil { - t.Fatal(err) - } - - publicJWKJSON, err := json.MarshalIndent(rsaKey.PublicKey(), "", " ") - if err != nil { - t.Fatal(err) - } - - t.Logf("JWK Private Key: %s", string(privateJWKJSON)) - t.Logf("JWK Public Key: %s", string(publicJWKJSON)) - - privKey2, err := UnmarshalPrivateKeyJWK(privateJWKJSON) - if err != nil { - t.Fatal(err) - } - - pubKey2, err := UnmarshalPublicKeyJWK(publicJWKJSON) - if err != nil { - t.Fatal(err) - } - - // Ensure we can sign/verify a message with the unmarshalled keys. - data.Seek(0, 0) // Reset the byte reader - signature, alg, err := privKey2.Sign(data, sigAlg.HashID()) - if err != nil { - t.Fatal(err) - } - - data.Seek(0, 0) // Reset the byte reader - err = pubKey2.Verify(data, alg, signature) - if err != nil { - t.Fatal(err) - } - - // It's a good idea to validate the Private Key to make sure our - // (un)marshal process didn't corrupt the extra parameters. 
- k := privKey2.(*rsaPrivateKey) - err = k.PrivateKey.Validate() - if err != nil { - t.Fatal(err) - } - } -} - -func TestFromCryptoRSAKeys(t *testing.T) { - for _, rsaKey := range rsaKeys { - cryptoPrivateKey := rsaKey.CryptoPrivateKey() - cryptoPublicKey := rsaKey.CryptoPublicKey() - - pubKey, err := FromCryptoPublicKey(cryptoPublicKey) - if err != nil { - t.Fatal(err) - } - - if pubKey.KeyID() != rsaKey.KeyID() { - t.Fatal("public key key ID mismatch") - } - - privKey, err := FromCryptoPrivateKey(cryptoPrivateKey) - if err != nil { - t.Fatal(err) - } - - if privKey.KeyID() != rsaKey.KeyID() { - t.Fatal("public key key ID mismatch") - } - } -} diff --git a/vendor/github.com/docker/libtrust/util.go b/vendor/github.com/docker/libtrust/util.go deleted file mode 100644 index d88176cc..00000000 --- a/vendor/github.com/docker/libtrust/util.go +++ /dev/null @@ -1,363 +0,0 @@ -package libtrust - -import ( - "bytes" - "crypto" - "crypto/elliptic" - "crypto/tls" - "crypto/x509" - "encoding/base32" - "encoding/base64" - "encoding/binary" - "encoding/pem" - "errors" - "fmt" - "math/big" - "net/url" - "os" - "path/filepath" - "strings" - "time" -) - -// LoadOrCreateTrustKey will load a PrivateKey from the specified path -func LoadOrCreateTrustKey(trustKeyPath string) (PrivateKey, error) { - if err := os.MkdirAll(filepath.Dir(trustKeyPath), 0700); err != nil { - return nil, err - } - - trustKey, err := LoadKeyFile(trustKeyPath) - if err == ErrKeyFileDoesNotExist { - trustKey, err = GenerateECP256PrivateKey() - if err != nil { - return nil, fmt.Errorf("error generating key: %s", err) - } - - if err := SaveKey(trustKeyPath, trustKey); err != nil { - return nil, fmt.Errorf("error saving key file: %s", err) - } - - dir, file := filepath.Split(trustKeyPath) - if err := SavePublicKey(filepath.Join(dir, "public-"+file), trustKey.PublicKey()); err != nil { - return nil, fmt.Errorf("error saving public key file: %s", err) - } - } else if err != nil { - return nil, fmt.Errorf("error loading key file: %s", err) - } - return trustKey, nil -} - -// NewIdentityAuthTLSClientConfig returns a tls.Config configured to use identity -// based authentication from the specified dockerUrl, the rootConfigPath and -// the server name to which it is connecting. -// If trustUnknownHosts is true it will automatically add the host to the -// known-hosts.json in rootConfigPath. 
-func NewIdentityAuthTLSClientConfig(dockerUrl string, trustUnknownHosts bool, rootConfigPath string, serverName string) (*tls.Config, error) { - tlsConfig := newTLSConfig() - - trustKeyPath := filepath.Join(rootConfigPath, "key.json") - knownHostsPath := filepath.Join(rootConfigPath, "known-hosts.json") - - u, err := url.Parse(dockerUrl) - if err != nil { - return nil, fmt.Errorf("unable to parse machine url") - } - - if u.Scheme == "unix" { - return nil, nil - } - - addr := u.Host - proto := "tcp" - - trustKey, err := LoadOrCreateTrustKey(trustKeyPath) - if err != nil { - return nil, fmt.Errorf("unable to load trust key: %s", err) - } - - knownHosts, err := LoadKeySetFile(knownHostsPath) - if err != nil { - return nil, fmt.Errorf("could not load trusted hosts file: %s", err) - } - - allowedHosts, err := FilterByHosts(knownHosts, addr, false) - if err != nil { - return nil, fmt.Errorf("error filtering hosts: %s", err) - } - - certPool, err := GenerateCACertPool(trustKey, allowedHosts) - if err != nil { - return nil, fmt.Errorf("Could not create CA pool: %s", err) - } - - tlsConfig.ServerName = serverName - tlsConfig.RootCAs = certPool - - x509Cert, err := GenerateSelfSignedClientCert(trustKey) - if err != nil { - return nil, fmt.Errorf("certificate generation error: %s", err) - } - - tlsConfig.Certificates = []tls.Certificate{{ - Certificate: [][]byte{x509Cert.Raw}, - PrivateKey: trustKey.CryptoPrivateKey(), - Leaf: x509Cert, - }} - - tlsConfig.InsecureSkipVerify = true - - testConn, err := tls.Dial(proto, addr, tlsConfig) - if err != nil { - return nil, fmt.Errorf("tls Handshake error: %s", err) - } - - opts := x509.VerifyOptions{ - Roots: tlsConfig.RootCAs, - CurrentTime: time.Now(), - DNSName: tlsConfig.ServerName, - Intermediates: x509.NewCertPool(), - } - - certs := testConn.ConnectionState().PeerCertificates - for i, cert := range certs { - if i == 0 { - continue - } - opts.Intermediates.AddCert(cert) - } - - if _, err := certs[0].Verify(opts); err != nil { - if _, ok := err.(x509.UnknownAuthorityError); ok { - if trustUnknownHosts { - pubKey, err := FromCryptoPublicKey(certs[0].PublicKey) - if err != nil { - return nil, fmt.Errorf("error extracting public key from cert: %s", err) - } - - pubKey.AddExtendedField("hosts", []string{addr}) - - if err := AddKeySetFile(knownHostsPath, pubKey); err != nil { - return nil, fmt.Errorf("error adding machine to known hosts: %s", err) - } - } else { - return nil, fmt.Errorf("unable to connect. unknown host: %s", addr) - } - } - } - - testConn.Close() - tlsConfig.InsecureSkipVerify = false - - return tlsConfig, nil -} - -// joseBase64UrlEncode encodes the given data using the standard base64 url -// encoding format but with all trailing '=' characters ommitted in accordance -// with the jose specification. -// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 -func joseBase64UrlEncode(b []byte) string { - return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=") -} - -// joseBase64UrlDecode decodes the given string using the standard base64 url -// decoder but first adds the appropriate number of trailing '=' characters in -// accordance with the jose specification. 
-// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 -func joseBase64UrlDecode(s string) ([]byte, error) { - s = strings.Replace(s, "\n", "", -1) - s = strings.Replace(s, " ", "", -1) - switch len(s) % 4 { - case 0: - case 2: - s += "==" - case 3: - s += "=" - default: - return nil, errors.New("illegal base64url string") - } - return base64.URLEncoding.DecodeString(s) -} - -func keyIDEncode(b []byte) string { - s := strings.TrimRight(base32.StdEncoding.EncodeToString(b), "=") - var buf bytes.Buffer - var i int - for i = 0; i < len(s)/4-1; i++ { - start := i * 4 - end := start + 4 - buf.WriteString(s[start:end] + ":") - } - buf.WriteString(s[i*4:]) - return buf.String() -} - -func keyIDFromCryptoKey(pubKey PublicKey) string { - // Generate and return a 'libtrust' fingerprint of the public key. - // For an RSA key this should be: - // SHA256(DER encoded ASN1) - // Then truncated to 240 bits and encoded into 12 base32 groups like so: - // ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP - derBytes, err := x509.MarshalPKIXPublicKey(pubKey.CryptoPublicKey()) - if err != nil { - return "" - } - hasher := crypto.SHA256.New() - hasher.Write(derBytes) - return keyIDEncode(hasher.Sum(nil)[:30]) -} - -func stringFromMap(m map[string]interface{}, key string) (string, error) { - val, ok := m[key] - if !ok { - return "", fmt.Errorf("%q value not specified", key) - } - - str, ok := val.(string) - if !ok { - return "", fmt.Errorf("%q value must be a string", key) - } - delete(m, key) - - return str, nil -} - -func parseECCoordinate(cB64Url string, curve elliptic.Curve) (*big.Int, error) { - curveByteLen := (curve.Params().BitSize + 7) >> 3 - - cBytes, err := joseBase64UrlDecode(cB64Url) - if err != nil { - return nil, fmt.Errorf("invalid base64 URL encoding: %s", err) - } - cByteLength := len(cBytes) - if cByteLength != curveByteLen { - return nil, fmt.Errorf("invalid number of octets: got %d, should be %d", cByteLength, curveByteLen) - } - return new(big.Int).SetBytes(cBytes), nil -} - -func parseECPrivateParam(dB64Url string, curve elliptic.Curve) (*big.Int, error) { - dBytes, err := joseBase64UrlDecode(dB64Url) - if err != nil { - return nil, fmt.Errorf("invalid base64 URL encoding: %s", err) - } - - // The length of this octet string MUST be ceiling(log-base-2(n)/8) - // octets (where n is the order of the curve). This is because the private - // key d must be in the interval [1, n-1] so the bitlength of d should be - // no larger than the bitlength of n-1. The easiest way to find the octet - // length is to take bitlength(n-1), add 7 to force a carry, and shift this - // bit sequence right by 3, which is essentially dividing by 8 and adding - // 1 if there is any remainder. Thus, the private key value d should be - // output to (bitlength(n-1)+7)>>3 octets. - n := curve.Params().N - octetLength := (new(big.Int).Sub(n, big.NewInt(1)).BitLen() + 7) >> 3 - dByteLength := len(dBytes) - - if dByteLength != octetLength { - return nil, fmt.Errorf("invalid number of octets: got %d, should be %d", dByteLength, octetLength) - } - - return new(big.Int).SetBytes(dBytes), nil -} - -func parseRSAModulusParam(nB64Url string) (*big.Int, error) { - nBytes, err := joseBase64UrlDecode(nB64Url) - if err != nil { - return nil, fmt.Errorf("invalid base64 URL encoding: %s", err) - } - - return new(big.Int).SetBytes(nBytes), nil -} - -func serializeRSAPublicExponentParam(e int) []byte { - // We MUST use the minimum number of octets to represent E. 
- // E is supposed to be 65537 for performance and security reasons - // and is what golang's rsa package generates, but it might be - // different if imported from some other generator. - buf := make([]byte, 4) - binary.BigEndian.PutUint32(buf, uint32(e)) - var i int - for i = 0; i < 8; i++ { - if buf[i] != 0 { - break - } - } - return buf[i:] -} - -func parseRSAPublicExponentParam(eB64Url string) (int, error) { - eBytes, err := joseBase64UrlDecode(eB64Url) - if err != nil { - return 0, fmt.Errorf("invalid base64 URL encoding: %s", err) - } - // Only the minimum number of bytes were used to represent E, but - // binary.BigEndian.Uint32 expects at least 4 bytes, so we need - // to add zero padding if necassary. - byteLen := len(eBytes) - buf := make([]byte, 4-byteLen, 4) - eBytes = append(buf, eBytes...) - - return int(binary.BigEndian.Uint32(eBytes)), nil -} - -func parseRSAPrivateKeyParamFromMap(m map[string]interface{}, key string) (*big.Int, error) { - b64Url, err := stringFromMap(m, key) - if err != nil { - return nil, err - } - - paramBytes, err := joseBase64UrlDecode(b64Url) - if err != nil { - return nil, fmt.Errorf("invaled base64 URL encoding: %s", err) - } - - return new(big.Int).SetBytes(paramBytes), nil -} - -func createPemBlock(name string, derBytes []byte, headers map[string]interface{}) (*pem.Block, error) { - pemBlock := &pem.Block{Type: name, Bytes: derBytes, Headers: map[string]string{}} - for k, v := range headers { - switch val := v.(type) { - case string: - pemBlock.Headers[k] = val - case []string: - if k == "hosts" { - pemBlock.Headers[k] = strings.Join(val, ",") - } else { - // Return error, non-encodable type - } - default: - // Return error, non-encodable type - } - } - - return pemBlock, nil -} - -func pubKeyFromPEMBlock(pemBlock *pem.Block) (PublicKey, error) { - cryptoPublicKey, err := x509.ParsePKIXPublicKey(pemBlock.Bytes) - if err != nil { - return nil, fmt.Errorf("unable to decode Public Key PEM data: %s", err) - } - - pubKey, err := FromCryptoPublicKey(cryptoPublicKey) - if err != nil { - return nil, err - } - - addPEMHeadersToKey(pemBlock, pubKey) - - return pubKey, nil -} - -func addPEMHeadersToKey(pemBlock *pem.Block, pubKey PublicKey) { - for key, value := range pemBlock.Headers { - var safeVal interface{} - if key == "hosts" { - safeVal = strings.Split(value, ",") - } else { - safeVal = value - } - pubKey.AddExtendedField(key, safeVal) - } -} diff --git a/vendor/github.com/docker/libtrust/util_test.go b/vendor/github.com/docker/libtrust/util_test.go deleted file mode 100644 index 83b7cfb1..00000000 --- a/vendor/github.com/docker/libtrust/util_test.go +++ /dev/null @@ -1,45 +0,0 @@ -package libtrust - -import ( - "encoding/pem" - "reflect" - "testing" -) - -func TestAddPEMHeadersToKey(t *testing.T) { - pk := &rsaPublicKey{nil, map[string]interface{}{}} - blk := &pem.Block{Headers: map[string]string{"hosts": "localhost,127.0.0.1"}} - addPEMHeadersToKey(blk, pk) - - val := pk.GetExtendedField("hosts") - hosts, ok := val.([]string) - if !ok { - t.Fatalf("hosts type(%v), expected []string", reflect.TypeOf(val)) - } - expected := []string{"localhost", "127.0.0.1"} - if !reflect.DeepEqual(hosts, expected) { - t.Errorf("hosts(%v), expected %v", hosts, expected) - } -} - -func TestBase64URL(t *testing.T) { - clean := "eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJwMnMiOiIyV0NUY0paMVJ2ZF9DSnVKcmlwUTF3IiwicDJjIjo0MDk2LCJlbmMiOiJBMTI4Q0JDLUhTMjU2IiwiY3R5IjoiandrK2pzb24ifQ" - - tests := []string{ - clean, // clean roundtrip - 
"eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJwMnMiOiIyV0NUY0paMVJ2\nZF9DSnVKcmlwUTF3IiwicDJjIjo0MDk2LCJlbmMiOiJBMTI4Q0JDLUhTMjU2\nIiwiY3R5IjoiandrK2pzb24ifQ", // with newlines - "eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJwMnMiOiIyV0NUY0paMVJ2 \n ZF9DSnVKcmlwUTF3IiwicDJjIjo0MDk2LCJlbmMiOiJBMTI4Q0JDLUhTMjU2 \n IiwiY3R5IjoiandrK2pzb24ifQ", // with newlines and spaces - } - - for i, test := range tests { - b, err := joseBase64UrlDecode(test) - if err != nil { - t.Fatalf("on test %d: %s", i, err) - } - got := joseBase64UrlEncode(b) - - if got != clean { - t.Errorf("expected %q, got %q", clean, got) - } - } -}